Dataset columns (one row per Python function record):

nwo: string, 5-106 chars (repository "name with owner")
sha: string, 40 chars (commit hash)
path: string, 4-174 chars
language: string, 1 class (python)
identifier: string, 1-140 chars
parameters: string, 0-87.7k chars
argument_list: string, 1 class
return_statement: string, 0-426k chars
docstring: string, 0-64.3k chars
docstring_summary: string, 0-26.3k chars
docstring_tokens: list
function: string, 18-4.83M chars
function_tokens: list
url: string, 83-304 chars
django/django
0a17666045de6739ae1c2ac695041823d5f827f7
django/contrib/gis/geos/collections.py
python
GeometryCollection._set_list
(self, length, items)
Create a new collection, and destroy the contents of the previous pointer.
def _set_list(self, length, items):
    "Create a new collection, and destroy the contents of the previous pointer."
    prev_ptr = self.ptr
    srid = self.srid
    self.ptr = self._create_collection(length, items)
    if srid:
        self.srid = srid
    capi.destroy_geom(prev_ptr)
[ "def", "_set_list", "(", "self", ",", "length", ",", "items", ")", ":", "prev_ptr", "=", "self", ".", "ptr", "srid", "=", "self", ".", "srid", "self", ".", "ptr", "=", "self", ".", "_create_collection", "(", "length", ",", "items", ")", "if", "srid", ":", "self", ".", "srid", "=", "srid", "capi", ".", "destroy_geom", "(", "prev_ptr", ")" ]
https://github.com/django/django/blob/0a17666045de6739ae1c2ac695041823d5f827f7/django/contrib/gis/geos/collections.py#L64-L71
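A minimal usage sketch (not from the dataset): _set_list is reached indirectly through Django's list-style mutation API on geometry collections, assuming a Django install with the GEOS library available; the coordinates and SRID below are arbitrary.

# Sketch: mutating a GeometryCollection routes through _set_list,
# which rebuilds the underlying GEOS collection pointer.
from django.contrib.gis.geos import GeometryCollection, Point

gc = GeometryCollection(Point(0, 0), Point(1, 1), srid=4326)
gc[1] = Point(2, 2)       # item assignment triggers _set_list
print(gc.srid, len(gc))   # SRID survives the rebuild -> 4326 2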
Xilinx/finn
d1cc9cf94f1c33354cc169c5a6517314d0e94e3b
src/finn/transformation/fpgadataflow/annotate_cycles.py
python
AnnotateCycles.apply
(self, model)
return (model, False)
def apply(self, model):
    graph = model.graph
    # annotate node cycles
    for node in graph.node:
        if _is_fpgadataflow_node(node):
            op_inst = registry.getCustomOp(node)
            cycles = op_inst.get_exp_cycles()
            op_inst.set_nodeattr("cycles_estimate", cycles)
        elif node.op_type == "StreamingDataflowPartition":
            # recurse into model to manually annotate per-layer cycles
            sdp_model_filename = getCustomOp(node).get_nodeattr("model")
            sdp_model = ModelWrapper(sdp_model_filename)
            sdp_model = sdp_model.transform(AnnotateCycles())
            # save transformed model
            sdp_model.save(sdp_model_filename)
    return (model, False)
[ "def", "apply", "(", "self", ",", "model", ")", ":", "graph", "=", "model", ".", "graph", "# annotate node cycles", "for", "node", "in", "graph", ".", "node", ":", "if", "_is_fpgadataflow_node", "(", "node", ")", ":", "op_inst", "=", "registry", ".", "getCustomOp", "(", "node", ")", "cycles", "=", "op_inst", ".", "get_exp_cycles", "(", ")", "op_inst", ".", "set_nodeattr", "(", "\"cycles_estimate\"", ",", "cycles", ")", "elif", "node", ".", "op_type", "==", "\"StreamingDataflowPartition\"", ":", "# recurse into model to manually annotate per-layer cycles", "sdp_model_filename", "=", "getCustomOp", "(", "node", ")", ".", "get_nodeattr", "(", "\"model\"", ")", "sdp_model", "=", "ModelWrapper", "(", "sdp_model_filename", ")", "sdp_model", "=", "sdp_model", ".", "transform", "(", "AnnotateCycles", "(", ")", ")", "# save transformed model", "sdp_model", ".", "save", "(", "sdp_model_filename", ")", "return", "(", "model", ",", "False", ")" ]
https://github.com/Xilinx/finn/blob/d1cc9cf94f1c33354cc169c5a6517314d0e94e3b/src/finn/transformation/fpgadataflow/annotate_cycles.py#L44-L59
jmcnamara/XlsxWriter
fd30f221bf4326ca7814cec0d3a87a89b9e3edd5
xlsxwriter/worksheet.py
python
Worksheet.hide
(self)
Hide the current worksheet. Args: None. Returns: Nothing.
Hide the current worksheet.
[ "Hide", "the", "current", "worksheet", "." ]
def hide(self):
    """
    Hide the current worksheet.

    Args:
        None.

    Returns:
        Nothing.

    """
    self.hidden = 1

    # A hidden worksheet shouldn't be active or selected.
    self.selected = 0
[ "def", "hide", "(", "self", ")", ":", "self", ".", "hidden", "=", "1", "# A hidden worksheet shouldn't be active or selected.", "self", ".", "selected", "=", "0" ]
https://github.com/jmcnamara/XlsxWriter/blob/fd30f221bf4326ca7814cec0d3a87a89b9e3edd5/xlsxwriter/worksheet.py#L1681-L1695
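A short usage sketch (not from the dataset), assuming xlsxwriter is installed; 'demo.xlsx' is a scratch path. A hidden sheet cannot be the active one, so another sheet is activated first.

# Sketch: hide one worksheet and activate another before closing.
import xlsxwriter

workbook = xlsxwriter.Workbook('demo.xlsx')
data = workbook.add_worksheet('Data')
raw = workbook.add_worksheet('Raw')

raw.hide()        # sets hidden = 1 and deselects the sheet
data.activate()   # the active sheet must remain visible
workbook.close()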
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/pip/utils/appdirs.py
python
_get_win_folder_with_ctypes
(csidl_name)
return buf.value
def _get_win_folder_with_ctypes(csidl_name):
    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]

    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in buf:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf2 = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            buf = buf2

    return buf.value
[ "def", "_get_win_folder_with_ctypes", "(", "csidl_name", ")", ":", "csidl_const", "=", "{", "\"CSIDL_APPDATA\"", ":", "26", ",", "\"CSIDL_COMMON_APPDATA\"", ":", "35", ",", "\"CSIDL_LOCAL_APPDATA\"", ":", "28", ",", "}", "[", "csidl_name", "]", "buf", "=", "ctypes", ".", "create_unicode_buffer", "(", "1024", ")", "ctypes", ".", "windll", ".", "shell32", ".", "SHGetFolderPathW", "(", "None", ",", "csidl_const", ",", "None", ",", "0", ",", "buf", ")", "# Downgrade to short path name if have highbit chars. See", "# <http://bugs.activestate.com/show_bug.cgi?id=85099>.", "has_high_char", "=", "False", "for", "c", "in", "buf", ":", "if", "ord", "(", "c", ")", ">", "255", ":", "has_high_char", "=", "True", "break", "if", "has_high_char", ":", "buf2", "=", "ctypes", ".", "create_unicode_buffer", "(", "1024", ")", "if", "ctypes", ".", "windll", ".", "kernel32", ".", "GetShortPathNameW", "(", "buf", ".", "value", ",", "buf2", ",", "1024", ")", ":", "buf", "=", "buf2", "return", "buf", ".", "value" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/pip/utils/appdirs.py#L201-L223
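A hedged usage sketch (not from the dataset): this helper is Windows-only (it uses ctypes.windll) and private to pip's vendored appdirs, so it is called directly here only for illustration. The three CSIDL names in its lookup table are the only valid inputs.

# Windows-only sketch: resolve the roaming application-data directory.
import sys

if sys.platform == "win32":
    appdata = _get_win_folder_with_ctypes("CSIDL_APPDATA")
    print(appdata)  # e.g. C:\Users\<name>\AppData\Roaming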
python-control/python-control
df6b35212f8f657469627c227c893a175a6902cc
control/descfcn.py
python
describing_function_plot
( H, F, A, omega=None, refine=True, label="%5.2g @ %-5.2g", **kwargs)
return intersections
Plot a Nyquist plot with a describing function for a nonlinear system. This function generates a Nyquist plot for a closed loop system consisting of a linear system with a static nonlinear function in the feedback path. Parameters ---------- H : LTI system Linear time-invariant (LTI) system (state space, transfer function, or FRD) F : static nonlinear function A static nonlinearity, either a scalar function or a single-input, single-output, static input/output system. A : list List of amplitudes to be used for the describing function plot. omega : list, optional List of frequencies to be used for the linear system Nyquist curve. label : str, optional Formatting string used to label intersection points on the Nyquist plot. Defaults to "%5.2g @ %-5.2g". Set to `None` to omit labels. Returns ------- intersections : 1D array of 2-tuples or None A list of all amplitudes and frequencies in which :math:`H(j\\omega) N(a) = -1`, where :math:`N(a)` is the describing function associated with `F`, or `None` if there are no such points. Each pair represents a potential limit cycle for the closed loop system with amplitude given by the first value of the tuple and frequency given by the second value. Example ------- >>> H_simple = ct.tf([8], [1, 2, 2, 1]) >>> F_saturation = ct.descfcn.saturation_nonlinearity(1) >>> amp = np.linspace(1, 4, 10) >>> ct.describing_function_plot(H_simple, F_saturation, amp) [(3.344008947853124, 1.414213099755523)]
Plot a Nyquist plot with a describing function for a nonlinear system.
[ "Plot", "a", "Nyquist", "plot", "with", "a", "describing", "function", "for", "a", "nonlinear", "system", "." ]
def describing_function_plot(
        H, F, A, omega=None, refine=True, label="%5.2g @ %-5.2g", **kwargs):
    """Plot a Nyquist plot with a describing function for a nonlinear system.

    This function generates a Nyquist plot for a closed loop system
    consisting of a linear system with a static nonlinear function in the
    feedback path.

    Parameters
    ----------
    H : LTI system
        Linear time-invariant (LTI) system (state space, transfer function,
        or FRD)
    F : static nonlinear function
        A static nonlinearity, either a scalar function or a single-input,
        single-output, static input/output system.
    A : list
        List of amplitudes to be used for the describing function plot.
    omega : list, optional
        List of frequencies to be used for the linear system Nyquist curve.
    label : str, optional
        Formatting string used to label intersection points on the Nyquist
        plot.  Defaults to "%5.2g @ %-5.2g".  Set to `None` to omit labels.

    Returns
    -------
    intersections : 1D array of 2-tuples or None
        A list of all amplitudes and frequencies in which
        :math:`H(j\\omega) N(a) = -1`, where :math:`N(a)` is the describing
        function associated with `F`, or `None` if there are no such points.
        Each pair represents a potential limit cycle for the closed loop
        system with amplitude given by the first value of the tuple and
        frequency given by the second value.

    Example
    -------
    >>> H_simple = ct.tf([8], [1, 2, 2, 1])
    >>> F_saturation = ct.descfcn.saturation_nonlinearity(1)
    >>> amp = np.linspace(1, 4, 10)
    >>> ct.describing_function_plot(H_simple, F_saturation, amp)
    [(3.344008947853124, 1.414213099755523)]

    """
    # Start by drawing a Nyquist curve
    count, contour = nyquist_plot(
        H, omega, plot=True, return_contour=True, **kwargs)
    H_omega, H_vals = contour.imag, H(contour)

    # Compute the describing function
    df = describing_function(F, A)
    N_vals = -1/df

    # Now add the describing function curve to the plot
    plt.plot(N_vals.real, N_vals.imag)

    # Look for intersection points
    intersections = []
    for i in range(N_vals.size - 1):
        for j in range(H_vals.size - 1):
            intersect = _find_intersection(
                N_vals[i], N_vals[i+1], H_vals[j], H_vals[j+1])
            if intersect is None:
                continue

            # Found an intersection, compute a and omega
            s_amp, s_omega = intersect
            a_guess = (1 - s_amp) * A[i] + s_amp * A[i+1]
            omega_guess = (1 - s_omega) * H_omega[j] + s_omega * H_omega[j+1]

            # Refine the coarse estimate to get better intersection point
            a_final, omega_final = a_guess, omega_guess
            if refine:
                # Refine the answer to get more accuracy
                def _cost(x):
                    # If arguments are invalid, return a "large" value
                    # Note: imposing bounds messed up the optimization (?)
                    if x[0] < 0 or x[1] < 0:
                        return 1
                    return abs(1 + H(1j * x[1]) * describing_function(F, x[0]))**2

                res = scipy.optimize.minimize(_cost, [a_guess, omega_guess])
                # bounds=[(A[i], A[i+1]), (H_omega[j], H_omega[j+1])])

                if not res.success:
                    warn("not able to refine result; returning estimate")
                else:
                    a_final, omega_final = res.x[0], res.x[1]

            # Add labels to the intersection points
            if isinstance(label, str):
                pos = H(1j * omega_final)
                plt.text(pos.real, pos.imag, label % (a_final, omega_final))
            elif label is not None:
                raise ValueError("label must be formatting string or None")

            # Save the final estimate
            intersections.append((a_final, omega_final))

    return intersections
[ "def", "describing_function_plot", "(", "H", ",", "F", ",", "A", ",", "omega", "=", "None", ",", "refine", "=", "True", ",", "label", "=", "\"%5.2g @ %-5.2g\"", ",", "*", "*", "kwargs", ")", ":", "# Start by drawing a Nyquist curve", "count", ",", "contour", "=", "nyquist_plot", "(", "H", ",", "omega", ",", "plot", "=", "True", ",", "return_contour", "=", "True", ",", "*", "*", "kwargs", ")", "H_omega", ",", "H_vals", "=", "contour", ".", "imag", ",", "H", "(", "contour", ")", "# Compute the describing function", "df", "=", "describing_function", "(", "F", ",", "A", ")", "N_vals", "=", "-", "1", "/", "df", "# Now add the describing function curve to the plot", "plt", ".", "plot", "(", "N_vals", ".", "real", ",", "N_vals", ".", "imag", ")", "# Look for intersection points", "intersections", "=", "[", "]", "for", "i", "in", "range", "(", "N_vals", ".", "size", "-", "1", ")", ":", "for", "j", "in", "range", "(", "H_vals", ".", "size", "-", "1", ")", ":", "intersect", "=", "_find_intersection", "(", "N_vals", "[", "i", "]", ",", "N_vals", "[", "i", "+", "1", "]", ",", "H_vals", "[", "j", "]", ",", "H_vals", "[", "j", "+", "1", "]", ")", "if", "intersect", "==", "None", ":", "continue", "# Found an intersection, compute a and omega", "s_amp", ",", "s_omega", "=", "intersect", "a_guess", "=", "(", "1", "-", "s_amp", ")", "*", "A", "[", "i", "]", "+", "s_amp", "*", "A", "[", "i", "+", "1", "]", "omega_guess", "=", "(", "1", "-", "s_omega", ")", "*", "H_omega", "[", "j", "]", "+", "s_omega", "*", "H_omega", "[", "j", "+", "1", "]", "# Refine the coarse estimate to get better intersection point", "a_final", ",", "omega_final", "=", "a_guess", ",", "omega_guess", "if", "refine", ":", "# Refine the answer to get more accuracy", "def", "_cost", "(", "x", ")", ":", "# If arguments are invalid, return a \"large\" value", "# Note: imposing bounds messed up the optimization (?)", "if", "x", "[", "0", "]", "<", "0", "or", "x", "[", "1", "]", "<", "0", ":", "return", "1", "return", "abs", "(", "1", "+", "H", "(", "1j", "*", "x", "[", "1", "]", ")", "*", "describing_function", "(", "F", ",", "x", "[", "0", "]", ")", ")", "**", "2", "res", "=", "scipy", ".", "optimize", ".", "minimize", "(", "_cost", ",", "[", "a_guess", ",", "omega_guess", "]", ")", "# bounds=[(A[i], A[i+1]), (H_omega[j], H_omega[j+1])])", "if", "not", "res", ".", "success", ":", "warn", "(", "\"not able to refine result; returning estimate\"", ")", "else", ":", "a_final", ",", "omega_final", "=", "res", ".", "x", "[", "0", "]", ",", "res", ".", "x", "[", "1", "]", "# Add labels to the intersection points", "if", "isinstance", "(", "label", ",", "str", ")", ":", "pos", "=", "H", "(", "1j", "*", "omega_final", ")", "plt", ".", "text", "(", "pos", ".", "real", ",", "pos", ".", "imag", ",", "label", "%", "(", "a_final", ",", "omega_final", ")", ")", "elif", "label", "is", "not", "None", "or", "label", "is", "not", "False", ":", "raise", "ValueError", "(", "\"label must be formatting string or None\"", ")", "# Save the final estimate", "intersections", ".", "append", "(", "(", "a_final", ",", "omega_final", ")", ")", "return", "intersections" ]
https://github.com/python-control/python-control/blob/df6b35212f8f657469627c227c893a175a6902cc/control/descfcn.py#L201-L299
sopel-irc/sopel
787baa6e39f9dad57d94600c92e10761c41b21ef
sopel/modules/isup.py
python
handle_isup
(bot, trigger, secure=True)
Handle the ``bot`` command from ``trigger`` :param bot: Sopel instance :type bot: :class:`sopel.bot.SopelWrapper` :param trigger: Command's trigger instance :type trigger: :class:`sopel.trigger.Trigger` :param bool secure: Check SSL error if ``True`` (the default)
Handle the ``bot`` command from ``trigger``
[ "Handle", "the", "bot", "command", "from", "trigger" ]
def handle_isup(bot, trigger, secure=True):
    """Handle the ``bot`` command from ``trigger``

    :param bot: Sopel instance
    :type bot: :class:`sopel.bot.SopelWrapper`
    :param trigger: Command's trigger instance
    :type trigger: :class:`sopel.trigger.Trigger`
    :param bool secure: Check SSL error if ``True`` (the default)
    """
    try:
        site = get_site_url(trigger.group(2))
    except ValueError as error:
        bot.reply(str(error))
        return

    try:
        response = requests.head(site, verify=secure, timeout=(10.0, 5.0))
        response.raise_for_status()
    except requests.exceptions.SSLError:
        bot.say(
            '{} looks down to me (SSL error). Try using `{}isupinsecure`.'
            .format(site, bot.config.core.help_prefix))
    except requests.HTTPError:
        bot.say(
            '{} looks down to me (HTTP {} "{}").'
            .format(site, response.status_code, response.reason))
    except requests.ConnectTimeout:
        bot.say(
            '{} looks down to me (timed out while connecting).'
            .format(site))
    except requests.ReadTimeout:
        bot.say(
            '{} looks down to me (timed out waiting for reply).'
            .format(site))
    except requests.ConnectionError:
        bot.say(
            '{} looks down to me (connection error).'
            .format(site))
    except ValueError:
        bot.reply('"{}" is not a valid URL.'.format(site))
    else:
        # If no exception happened, the request must have succeeded.
        bot.say(site + ' looks fine to me.')
[ "def", "handle_isup", "(", "bot", ",", "trigger", ",", "secure", "=", "True", ")", ":", "try", ":", "site", "=", "get_site_url", "(", "trigger", ".", "group", "(", "2", ")", ")", "except", "ValueError", "as", "error", ":", "bot", ".", "reply", "(", "str", "(", "error", ")", ")", "return", "try", ":", "response", "=", "requests", ".", "head", "(", "site", ",", "verify", "=", "secure", ",", "timeout", "=", "(", "10.0", ",", "5.0", ")", ")", "response", ".", "raise_for_status", "(", ")", "except", "requests", ".", "exceptions", ".", "SSLError", ":", "bot", ".", "say", "(", "'{} looks down to me (SSL error). Try using `{}isupinsecure`.'", ".", "format", "(", "site", ",", "bot", ".", "config", ".", "core", ".", "help_prefix", ")", ")", "except", "requests", ".", "HTTPError", ":", "bot", ".", "say", "(", "'{} looks down to me (HTTP {} \"{}\").'", ".", "format", "(", "site", ",", "response", ".", "status_code", ",", "response", ".", "reason", ")", ")", "except", "requests", ".", "ConnectTimeout", ":", "bot", ".", "say", "(", "'{} looks down to me (timed out while connecting).'", ".", "format", "(", "site", ")", ")", "except", "requests", ".", "ReadTimeout", ":", "bot", ".", "say", "(", "'{} looks down to me (timed out waiting for reply).'", ".", "format", "(", "site", ")", ")", "except", "requests", ".", "ConnectionError", ":", "bot", ".", "say", "(", "'{} looks down to me (connection error).'", ".", "format", "(", "site", ")", ")", "except", "ValueError", ":", "bot", ".", "reply", "(", "'\"{}\" is not a valid URL.'", ".", "format", "(", "site", ")", ")", "else", ":", "# If no exception happened, the request must have succeeded.", "bot", ".", "say", "(", "site", "+", "' looks fine to me.'", ")" ]
https://github.com/sopel-irc/sopel/blob/787baa6e39f9dad57d94600c92e10761c41b21ef/sopel/modules/isup.py#L51-L93
Axelrod-Python/Axelrod
00e18323c1b1af74df873773e44f31e1b9a299c6
axelrod/result_set.py
python
ResultSet.__ne__
(self, other)
return not self.__eq__(other)
Check inequality of results set Parameters ---------- other : axelrod.ResultSet Another results set against which to check inequality
Check inequality of results set
[ "Check", "inequality", "of", "results", "set" ]
def __ne__(self, other):
    """
    Check inequality of results set

    Parameters
    ----------
    other : axelrod.ResultSet
        Another results set against which to check inequality
    """
    return not self.__eq__(other)
[ "def", "__ne__", "(", "self", ",", "other", ")", ":", "return", "not", "self", ".", "__eq__", "(", "other", ")" ]
https://github.com/Axelrod-Python/Axelrod/blob/00e18323c1b1af74df873773e44f31e1b9a299c6/axelrod/result_set.py#L683-L692
TencentCloud/tencentcloud-sdk-python
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
tencentcloud/apigateway/v20180808/models.py
python
BindEnvironmentRequest.__init__
(self)
r""" :param UsagePlanIds: 待绑定的使用计划唯一 ID 列表。 :type UsagePlanIds: list of str :param BindType: 绑定类型,取值为API、SERVICE,默认值为SERVICE。 :type BindType: str :param Environment: 待绑定的环境。 :type Environment: str :param ServiceId: 待绑定的服务唯一 ID。 :type ServiceId: str :param ApiIds: API唯一ID数组,当bindType=API时,需要传入此参数。 :type ApiIds: list of str
r""" :param UsagePlanIds: 待绑定的使用计划唯一 ID 列表。 :type UsagePlanIds: list of str :param BindType: 绑定类型,取值为API、SERVICE,默认值为SERVICE。 :type BindType: str :param Environment: 待绑定的环境。 :type Environment: str :param ServiceId: 待绑定的服务唯一 ID。 :type ServiceId: str :param ApiIds: API唯一ID数组,当bindType=API时,需要传入此参数。 :type ApiIds: list of str
[ "r", ":", "param", "UsagePlanIds", ":", "待绑定的使用计划唯一", "ID", "列表。", ":", "type", "UsagePlanIds", ":", "list", "of", "str", ":", "param", "BindType", ":", "绑定类型,取值为API、SERVICE,默认值为SERVICE。", ":", "type", "BindType", ":", "str", ":", "param", "Environment", ":", "待绑定的环境。", ":", "type", "Environment", ":", "str", ":", "param", "ServiceId", ":", "待绑定的服务唯一", "ID。", ":", "type", "ServiceId", ":", "str", ":", "param", "ApiIds", ":", "API唯一ID数组,当bindType", "=", "API时,需要传入此参数。", ":", "type", "ApiIds", ":", "list", "of", "str" ]
def __init__(self):
    r"""
    :param UsagePlanIds: List of unique IDs of the usage plans to bind.
    :type UsagePlanIds: list of str
    :param BindType: Binding type; valid values are API and SERVICE; defaults to SERVICE.
    :type BindType: str
    :param Environment: The environment to bind.
    :type Environment: str
    :param ServiceId: Unique ID of the service to bind.
    :type ServiceId: str
    :param ApiIds: Array of unique API IDs; required when BindType is API.
    :type ApiIds: list of str
    """
    self.UsagePlanIds = None
    self.BindType = None
    self.Environment = None
    self.ServiceId = None
    self.ApiIds = None
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "UsagePlanIds", "=", "None", "self", ".", "BindType", "=", "None", "self", ".", "Environment", "=", "None", "self", ".", "ServiceId", "=", "None", "self", ".", "ApiIds", "=", "None" ]
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/apigateway/v20180808/models.py#L1447-L1464
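A short construction sketch (not from the dataset), assuming the tencentcloud SDK is installed; the usage-plan and service IDs are hypothetical placeholders.

# Sketch: populate a BindEnvironmentRequest before sending it through the
# API Gateway client. The IDs below are placeholders, not real resources.
from tencentcloud.apigateway.v20180808 import models

req = models.BindEnvironmentRequest()
req.UsagePlanIds = ["usagePlan-xxxxxxxx"]   # placeholder usage-plan ID
req.BindType = "SERVICE"                    # default binding type
req.Environment = "release"
req.ServiceId = "service-xxxxxxxx"          # placeholder service ID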
ambakick/Person-Detection-and-Tracking
f925394ac29b5cf321f1ce89a71b193381519a0b
utils/ops.py
python
retain_groundtruth_with_positive_classes
(tensor_dict)
return retain_groundtruth(tensor_dict, keep_indices)
Retains only groundtruth with positive class ids. Args: tensor_dict: a dictionary of following groundtruth tensors - fields.InputDataFields.groundtruth_boxes fields.InputDataFields.groundtruth_classes fields.InputDataFields.groundtruth_keypoints fields.InputDataFields.groundtruth_instance_masks fields.InputDataFields.groundtruth_is_crowd fields.InputDataFields.groundtruth_area fields.InputDataFields.groundtruth_label_types fields.InputDataFields.groundtruth_difficult Returns: a dictionary of tensors containing only the groundtruth with positive classes. Raises: ValueError: If groundtruth_classes tensor is not in tensor_dict.
Retains only groundtruth with positive class ids.
[ "Retains", "only", "groundtruth", "with", "positive", "class", "ids", "." ]
def retain_groundtruth_with_positive_classes(tensor_dict):
    """Retains only groundtruth with positive class ids.

    Args:
      tensor_dict: a dictionary of following groundtruth tensors -
        fields.InputDataFields.groundtruth_boxes
        fields.InputDataFields.groundtruth_classes
        fields.InputDataFields.groundtruth_keypoints
        fields.InputDataFields.groundtruth_instance_masks
        fields.InputDataFields.groundtruth_is_crowd
        fields.InputDataFields.groundtruth_area
        fields.InputDataFields.groundtruth_label_types
        fields.InputDataFields.groundtruth_difficult

    Returns:
      a dictionary of tensors containing only the groundtruth with positive
      classes.

    Raises:
      ValueError: If groundtruth_classes tensor is not in tensor_dict.
    """
    if fields.InputDataFields.groundtruth_classes not in tensor_dict:
        raise ValueError('`groundtruth classes` not in tensor_dict.')
    keep_indices = tf.where(tf.greater(
        tensor_dict[fields.InputDataFields.groundtruth_classes], 0))
    return retain_groundtruth(tensor_dict, keep_indices)
[ "def", "retain_groundtruth_with_positive_classes", "(", "tensor_dict", ")", ":", "if", "fields", ".", "InputDataFields", ".", "groundtruth_classes", "not", "in", "tensor_dict", ":", "raise", "ValueError", "(", "'`groundtruth classes` not in tensor_dict.'", ")", "keep_indices", "=", "tf", ".", "where", "(", "tf", ".", "greater", "(", "tensor_dict", "[", "fields", ".", "InputDataFields", ".", "groundtruth_classes", "]", ",", "0", ")", ")", "return", "retain_groundtruth", "(", "tensor_dict", ",", "keep_indices", ")" ]
https://github.com/ambakick/Person-Detection-and-Tracking/blob/f925394ac29b5cf321f1ce89a71b193381519a0b/utils/ops.py#L372-L397
sahana/eden
1696fa50e90ce967df69f66b571af45356cc18da
modules/s3db/inv.py
python
InventoryRequisitionItemModel.req_item_ondelete_cascade
(row)
On-delete Cascade actions for requested items: - delete any reservations (putting items back into stock)
def req_item_ondelete_cascade(row):
    """
    On-delete Cascade actions for requested items:
    - delete any reservations (putting items back into stock)
    """

    db = current.db
    s3db = current.s3db
    #settings = current.deployment_settings

    record_id = row.id

    #if settings.get_inv_req_reserve_items():
    # Remove any old reservations
    rbtable = s3db.inv_req_item_inv
    reservations = db(rbtable.req_item_id == record_id).select(rbtable.id,
                                                               rbtable.inv_item_id,
                                                               rbtable.layout_id,
                                                               rbtable.quantity,
                                                               )
    if reservations:
        iitable = s3db.inv_inv_item
        ibtable = s3db.inv_inv_item_bin
        for row in reservations:
            # Restore Inventory
            quantity = row.quantity
            inv_item_id = row.inv_item_id
            db(iitable.id == inv_item_id).update(quantity = iitable.quantity + quantity)
            layout_id = row.layout_id
            if layout_id:
                query = (ibtable.inv_item_id == inv_item_id) & \
                        (ibtable.layout_id == layout_id)
                db(query).update(quantity = ibtable.quantity + quantity)
        db(rbtable.id.belongs([row.id for row in reservations])).delete()
[ "def", "req_item_ondelete_cascade", "(", "row", ")", ":", "db", "=", "current", ".", "db", "s3db", "=", "current", ".", "db", "#settings = current.deployment_settings", "record_id", "=", "row", ".", "id", "#if settings.get_inv_req_reserve_items():", "# Remove any old reservations", "rbtable", "=", "s3db", ".", "inv_req_item_inv", "reservations", "=", "db", "(", "rbtable", ".", "req_item_id", "==", "record_id", ")", ".", "select", "(", "rbtable", ".", "id", ",", "rbtable", ".", "inv_item_id", ",", "rbtable", ".", "layout_id", ",", "rbtable", ".", "quantity", ",", ")", "if", "reservations", ":", "iitable", "=", "s3db", ".", "inv_inv_item", "ibtable", "=", "s3db", ".", "inv_inv_item_bin", "for", "row", "in", "reservations", ":", "# Restore Inventory", "quantity", "=", "row", ".", "quantity", "inv_item_id", "=", "row", ".", "inv_item_id", "db", "(", "iitable", ".", "id", "==", "inv_item_id", ")", ".", "update", "(", "quantity", "=", "iitable", ".", "quantity", "+", "quantity", ")", "layout_id", "=", "row", ".", "layout_id", "if", "layout_id", ":", "query", "=", "(", "ibtable", ".", "inv_item_id", "==", "inv_item_id", ")", "&", "(", "ibtable", ".", "layout_id", "==", "layout_id", ")", "db", "(", "query", ")", ".", "update", "(", "quantity", "=", "ibtable", ".", "quantity", "+", "quantity", ")", "db", "(", "rbtable", ".", "id", ".", "belongs", "(", "[", "row", ".", "id", "for", "row", "in", "reservations", "]", ")", ")", ".", "delete", "(", ")" ]
https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/modules/s3db/inv.py#L4429-L4462
oracle/oci-python-sdk
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
src/oci/database/database_client.py
python
DatabaseClient.get_database_upgrade_history_entry
(self, database_id, upgrade_history_entry_id, **kwargs)
gets the upgrade history for a specified database. :param str database_id: (required) The database `OCID`__. __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :param str upgrade_history_entry_id: (required) The database/db system upgrade History `OCID`__. __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :param str opc_request_id: (optional) Unique identifier for the request. :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__. To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`. :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.database.models.DatabaseUpgradeHistoryEntry` :rtype: :class:`~oci.response.Response` :example: Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/database/get_database_upgrade_history_entry.py.html>`__ to see an example of how to use get_database_upgrade_history_entry API.
gets the upgrade history for a specified database.
[ "gets", "the", "upgrade", "history", "for", "a", "specified", "database", "." ]
def get_database_upgrade_history_entry(self, database_id, upgrade_history_entry_id, **kwargs):
    """
    gets the upgrade history for a specified database.

    :param str database_id: (required)
        The database `OCID`__.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str upgrade_history_entry_id: (required)
        The database/db system upgrade History `OCID`__.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique identifier for the request.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will
        override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry`
        module. This operation will not retry by default, users can also use the
        convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the
        SDK to enable retries for it. The specifics of the default retry strategy
        are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an
        instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.database.models.DatabaseUpgradeHistoryEntry`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/database/get_database_upgrade_history_entry.py.html>`__
        to see an example of how to use get_database_upgrade_history_entry API.
    """
    resource_path = "/databases/{databaseId}/upgradeHistoryEntries/{upgradeHistoryEntryId}"
    method = "GET"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "get_database_upgrade_history_entry got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "databaseId": database_id,
        "upgradeHistoryEntryId": upgrade_history_entry_id
    }

    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="DatabaseUpgradeHistoryEntry")
    else:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="DatabaseUpgradeHistoryEntry")
[ "def", "get_database_upgrade_history_entry", "(", "self", ",", "database_id", ",", "upgrade_history_entry_id", ",", "*", "*", "kwargs", ")", ":", "resource_path", "=", "\"/databases/{databaseId}/upgradeHistoryEntries/{upgradeHistoryEntryId}\"", "method", "=", "\"GET\"", "# Don't accept unknown kwargs", "expected_kwargs", "=", "[", "\"retry_strategy\"", ",", "\"opc_request_id\"", "]", "extra_kwargs", "=", "[", "_key", "for", "_key", "in", "six", ".", "iterkeys", "(", "kwargs", ")", "if", "_key", "not", "in", "expected_kwargs", "]", "if", "extra_kwargs", ":", "raise", "ValueError", "(", "\"get_database_upgrade_history_entry got unknown kwargs: {!r}\"", ".", "format", "(", "extra_kwargs", ")", ")", "path_params", "=", "{", "\"databaseId\"", ":", "database_id", ",", "\"upgradeHistoryEntryId\"", ":", "upgrade_history_entry_id", "}", "path_params", "=", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "path_params", ")", "if", "v", "is", "not", "missing", "}", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "path_params", ")", ":", "if", "v", "is", "None", "or", "(", "isinstance", "(", "v", ",", "six", ".", "string_types", ")", "and", "len", "(", "v", ".", "strip", "(", ")", ")", "==", "0", ")", ":", "raise", "ValueError", "(", "'Parameter {} cannot be None, whitespace or empty string'", ".", "format", "(", "k", ")", ")", "header_params", "=", "{", "\"accept\"", ":", "\"application/json\"", ",", "\"content-type\"", ":", "\"application/json\"", ",", "\"opc-request-id\"", ":", "kwargs", ".", "get", "(", "\"opc_request_id\"", ",", "missing", ")", "}", "header_params", "=", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "header_params", ")", "if", "v", "is", "not", "missing", "and", "v", "is", "not", "None", "}", "retry_strategy", "=", "self", ".", "base_client", ".", "get_preferred_retry_strategy", "(", "operation_retry_strategy", "=", "kwargs", ".", "get", "(", "'retry_strategy'", ")", ",", "client_retry_strategy", "=", "self", ".", "retry_strategy", ")", "if", "retry_strategy", ":", "if", "not", "isinstance", "(", "retry_strategy", ",", "retry", ".", "NoneRetryStrategy", ")", ":", "self", ".", "base_client", ".", "add_opc_client_retries_header", "(", "header_params", ")", "retry_strategy", ".", "add_circuit_breaker_callback", "(", "self", ".", "circuit_breaker_callback", ")", "return", "retry_strategy", ".", "make_retrying_call", "(", "self", ".", "base_client", ".", "call_api", ",", "resource_path", "=", "resource_path", ",", "method", "=", "method", ",", "path_params", "=", "path_params", ",", "header_params", "=", "header_params", ",", "response_type", "=", "\"DatabaseUpgradeHistoryEntry\"", ")", "else", ":", "return", "self", ".", "base_client", ".", "call_api", "(", "resource_path", "=", "resource_path", ",", "method", "=", "method", ",", "path_params", "=", "path_params", ",", "header_params", "=", "header_params", ",", "response_type", "=", "\"DatabaseUpgradeHistoryEntry\"", ")" ]
https://github.com/oracle/oci-python-sdk/blob/3c1604e4e212008fb6718e2f68cdb5ef71fd5793/src/oci/database/database_client.py#L10497-L10582
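A hedged call sketch (not from the dataset), assuming a valid ~/.oci/config profile; both OCIDs are hypothetical placeholders that must be replaced with identifiers from a real tenancy.

# Sketch: fetch a single database upgrade-history entry.
import oci

config = oci.config.from_file()  # reads ~/.oci/config by default
client = oci.database.DatabaseClient(config)

response = client.get_database_upgrade_history_entry(
    database_id="ocid1.database.oc1..example",                        # placeholder
    upgrade_history_entry_id="ocid1.databaseupgradehistory.oc1..example")  # placeholder
print(response.data)  # a DatabaseUpgradeHistoryEntry model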
glue-viz/glue
840b4c1364b0fa63bf67c914540c93dd71df41e1
glue/utils/array.py
python
find_chunk_shape
(shape, n_max=None)
return tuple(block_shape[::-1])
Given the shape of an n-dimensional array, and the maximum number of elements in a chunk, return the largest chunk shape to use for iteration. This currently assumes the optimal chunk shape to return is for C-contiguous arrays.
Given the shape of an n-dimensional array, and the maximum number of elements in a chunk, return the largest chunk shape to use for iteration.
[ "Given", "the", "shape", "of", "an", "n", "-", "dimensional", "array", "and", "the", "maximum", "number", "of", "elements", "in", "a", "chunk", "return", "the", "largest", "chunk", "shape", "to", "use", "for", "iteration", "." ]
def find_chunk_shape(shape, n_max=None):
    """
    Given the shape of an n-dimensional array, and the maximum number of
    elements in a chunk, return the largest chunk shape to use for iteration.

    This currently assumes the optimal chunk shape to return is for
    C-contiguous arrays.
    """

    if n_max is None:
        return tuple(shape)

    block_shape = []
    max_repeat_remaining = n_max

    for size in shape[::-1]:
        if max_repeat_remaining > size:
            block_shape.append(size)
            max_repeat_remaining = max_repeat_remaining // size
        else:
            block_shape.append(max_repeat_remaining)
            max_repeat_remaining = 1

    return tuple(block_shape[::-1])
[ "def", "find_chunk_shape", "(", "shape", ",", "n_max", "=", "None", ")", ":", "if", "n_max", "is", "None", ":", "return", "tuple", "(", "shape", ")", "block_shape", "=", "[", "]", "max_repeat_remaining", "=", "n_max", "for", "size", "in", "shape", "[", ":", ":", "-", "1", "]", ":", "if", "max_repeat_remaining", ">", "size", ":", "block_shape", ".", "append", "(", "size", ")", "max_repeat_remaining", "=", "max_repeat_remaining", "//", "size", "else", ":", "block_shape", ".", "append", "(", "max_repeat_remaining", ")", "max_repeat_remaining", "=", "1", "return", "tuple", "(", "block_shape", "[", ":", ":", "-", "1", "]", ")" ]
https://github.com/glue-viz/glue/blob/840b4c1364b0fa63bf67c914540c93dd71df41e1/glue/utils/array.py#L192-L217
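A worked usage sketch (not from the dataset), assuming the function is importable from glue.utils.array as the path above suggests.

# Sketch: chunk a (1000, 1000) array into blocks of at most 10000 elements.
# Because iteration is assumed C-contiguous, the last axis is kept whole.
from glue.utils.array import find_chunk_shape

print(find_chunk_shape((1000, 1000), n_max=10000))  # -> (10, 1000)
print(find_chunk_shape((1000, 1000)))               # no limit -> (1000, 1000)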
samuelclay/NewsBlur
2c45209df01a1566ea105e04d499367f32ac9ad2
apps/profile/middleware.py
python
DBProfilerMiddleware.process_response
(self, request, response)
return response
def process_response(self, request, response):
    if hasattr(request, 'sql_times_elapsed'):
        # middleware = SQLLogToConsoleMiddleware()
        # middleware.process_celery(self)
        # logging.debug(" ---> ~FGProfiling~FB app: %s" % request.sql_times_elapsed)
        self._save_times(request.sql_times_elapsed)
    return response
[ "def", "process_response", "(", "self", ",", "request", ",", "response", ")", ":", "if", "hasattr", "(", "request", ",", "'sql_times_elapsed'", ")", ":", "# middleware = SQLLogToConsoleMiddleware()", "# middleware.process_celery(self)", "# logging.debug(\" ---> ~FGProfiling~FB app: %s\" % request.sql_times_elapsed)", "self", ".", "_save_times", "(", "request", ".", "sql_times_elapsed", ")", "return", "response" ]
https://github.com/samuelclay/NewsBlur/blob/2c45209df01a1566ea105e04d499367f32ac9ad2/apps/profile/middleware.py#L80-L86
corelan/mona
fab15136880c62f4e8e8c39830e2b31ef8421c48
mona.py
python
toUnicode
(input)
return unicodebytes
Converts a series of bytes to unicode (UTF-16) bytes Arguments : input - the source bytes Return: the unicode expanded version of the input
def toUnicode(input):
    """
    Converts a series of bytes to unicode (UTF-16) bytes

    Arguments:
    input - the source bytes

    Return:
    the unicode expanded version of the input
    """
    unicodebytes = ""
    # try/except, just in case .encode bails out
    try:
        unicodebytes = input.encode('UTF-16LE')
    except:
        inputlst = list(input)
        for inputchar in inputlst:
            unicodebytes += inputchar + '\x00'
    return unicodebytes
[ "def", "toUnicode", "(", "input", ")", ":", "unicodebytes", "=", "\"\"", "# try/except, just in case .encode bails out", "try", ":", "unicodebytes", "=", "input", ".", "encode", "(", "'UTF-16LE'", ")", "except", ":", "inputlst", "=", "list", "(", "input", ")", "for", "inputchar", "in", "inputlst", ":", "unicodebytes", "+=", "inputchar", "+", "'\\x00'", "return", "unicodebytes" ]
https://github.com/corelan/mona/blob/fab15136880c62f4e8e8c39830e2b31ef8421c48/mona.py#L920-L938
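A quick behaviour sketch (not from the dataset); mona.py targets Python 2, where str.encode('UTF-16LE') widens each ASCII byte with a trailing NUL.

# Python 2 sketch: each input byte becomes a two-byte UTF-16LE code unit.
payload = toUnicode("AB")
print(repr(payload))  # -> 'A\x00B\x00'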
bendmorris/static-python
2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473
Lib/multiprocessing/managers.py
python
RebuildProxy
(func, token, serializer, kwds)
Function used for unpickling proxy objects. If possible the shared object is returned, or otherwise a proxy for it.
Function used for unpickling proxy objects.
[ "Function", "used", "for", "unpickling", "proxy", "objects", "." ]
def RebuildProxy(func, token, serializer, kwds):
    '''
    Function used for unpickling proxy objects.

    If possible the shared object is returned, or otherwise a proxy for it.
    '''
    server = getattr(process.current_process(), '_manager_server', None)

    if server and server.address == token.address:
        return server.id_to_obj[token.id][0]
    else:
        incref = (
            kwds.pop('incref', True) and
            not getattr(process.current_process(), '_inheriting', False)
            )
        return func(token, serializer, incref=incref, **kwds)
[ "def", "RebuildProxy", "(", "func", ",", "token", ",", "serializer", ",", "kwds", ")", ":", "server", "=", "getattr", "(", "process", ".", "current_process", "(", ")", ",", "'_manager_server'", ",", "None", ")", "if", "server", "and", "server", ".", "address", "==", "token", ".", "address", ":", "return", "server", ".", "id_to_obj", "[", "token", ".", "id", "]", "[", "0", "]", "else", ":", "incref", "=", "(", "kwds", ".", "pop", "(", "'incref'", ",", "True", ")", "and", "not", "getattr", "(", "process", ".", "current_process", "(", ")", ",", "'_inheriting'", ",", "False", ")", ")", "return", "func", "(", "token", ",", "serializer", ",", "incref", "=", "incref", ",", "*", "*", "kwds", ")" ]
https://github.com/bendmorris/static-python/blob/2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473/Lib/multiprocessing/managers.py#L834-L849
microsoft/nni
31f11f51249660930824e888af0d4e022823285c
examples/trials/weight_sharing/ga_squad/graph.py
python
Layer.set_size
(self, graph_id, size)
return True
Set size.
Set size.
[ "Set", "size", "." ]
def set_size(self, graph_id, size):
    '''
    Set size.
    '''
    if self.graph_type == LayerType.attention.value:
        if self.input[0] == graph_id:
            self.size = size
    if self.graph_type == LayerType.rnn.value:
        self.size = size
    if self.graph_type == LayerType.self_attention.value:
        self.size = size
    if self.graph_type == LayerType.output.value:
        if self.size != size:
            return False
    return True
[ "def", "set_size", "(", "self", ",", "graph_id", ",", "size", ")", ":", "if", "self", ".", "graph_type", "==", "LayerType", ".", "attention", ".", "value", ":", "if", "self", ".", "input", "[", "0", "]", "==", "graph_id", ":", "self", ".", "size", "=", "size", "if", "self", ".", "graph_type", "==", "LayerType", ".", "rnn", ".", "value", ":", "self", ".", "size", "=", "size", "if", "self", ".", "graph_type", "==", "LayerType", ".", "self_attention", ".", "value", ":", "self", ".", "size", "=", "size", "if", "self", ".", "graph_type", "==", "LayerType", ".", "output", ".", "value", ":", "if", "self", ".", "size", "!=", "size", ":", "return", "False", "return", "True" ]
https://github.com/microsoft/nni/blob/31f11f51249660930824e888af0d4e022823285c/examples/trials/weight_sharing/ga_squad/graph.py#L98-L112
openstack/manila
142990edc027e14839d5deaf4954dd6fc88de15e
manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py
python
NetAppCmodeFileStorageLibrary._check_string_extra_specs_validity
(self, share, extra_specs)
Check if the string_extra_specs have valid values.
def _check_string_extra_specs_validity(self, share, extra_specs):
    """Check if the string_extra_specs have valid values."""
    if 'netapp:max_files' in extra_specs:
        self._check_if_max_files_is_valid(share,
                                          extra_specs['netapp:max_files'])
    if 'netapp:fpolicy_file_operations' in extra_specs:
        self._check_fpolicy_file_operations(
            share, extra_specs['netapp:fpolicy_file_operations'])
[ "def", "_check_string_extra_specs_validity", "(", "self", ",", "share", ",", "extra_specs", ")", ":", "if", "'netapp:max_files'", "in", "extra_specs", ":", "self", ".", "_check_if_max_files_is_valid", "(", "share", ",", "extra_specs", "[", "'netapp:max_files'", "]", ")", "if", "'netapp:fpolicy_file_operations'", "in", "extra_specs", ":", "self", ".", "_check_fpolicy_file_operations", "(", "share", ",", "extra_specs", "[", "'netapp:fpolicy_file_operations'", "]", ")" ]
https://github.com/openstack/manila/blob/142990edc027e14839d5deaf4954dd6fc88de15e/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py#L1251-L1258
munificent/magpie
f5138e3d316ec1a664b5eadba1bcc8573d3faca3
dep/gyp/pylib/gyp/xcodeproj_file.py
python
XCBuildPhase._AddPathToDict
(self, pbxbuildfile, path)
Adds path to the dict tracking paths belonging to this build phase. If the path is already a member of this build phase, raises an exception.
Adds path to the dict tracking paths belonging to this build phase.
[ "Adds", "path", "to", "the", "dict", "tracking", "paths", "belonging", "to", "this", "build", "phase", "." ]
def _AddPathToDict(self, pbxbuildfile, path):
    """Adds path to the dict tracking paths belonging to this build phase.

    If the path is already a member of this build phase, raises an exception.
    """
    # Note: this is Python 2 code (gyp); the raise statement uses the old
    # comma syntax.
    if path in self._files_by_path:
        raise ValueError, 'Found multiple build files with path ' + path
    self._files_by_path[path] = pbxbuildfile
[ "def", "_AddPathToDict", "(", "self", ",", "pbxbuildfile", ",", "path", ")", ":", "if", "path", "in", "self", ".", "_files_by_path", ":", "raise", "ValueError", ",", "'Found multiple build files with path '", "+", "path", "self", ".", "_files_by_path", "[", "path", "]", "=", "pbxbuildfile" ]
https://github.com/munificent/magpie/blob/f5138e3d316ec1a664b5eadba1bcc8573d3faca3/dep/gyp/pylib/gyp/xcodeproj_file.py#L1763-L1771
wizyoung/googletranslate.popclipext
a3c465685a5a75213e2ec8517eb98d336984bc50
src/httpcore/_sync/http11.py
python
SyncHTTP11Connection._send_request_body
( self, stream: SyncByteStream, timeout: TimeoutDict )
Send the request body.
def _send_request_body(
    self, stream: SyncByteStream, timeout: TimeoutDict
) -> None:
    """
    Send the request body.
    """
    # Send the request body.
    for chunk in stream:
        logger.trace("send_data=Data(<%d bytes>)", len(chunk))
        event = h11.Data(data=chunk)
        self._send_event(event, timeout)

    # Finalize sending the request.
    event = h11.EndOfMessage()
    self._send_event(event, timeout)
[ "def", "_send_request_body", "(", "self", ",", "stream", ":", "SyncByteStream", ",", "timeout", ":", "TimeoutDict", ")", "->", "None", ":", "# Send the request body.", "for", "chunk", "in", "stream", ":", "logger", ".", "trace", "(", "\"send_data=Data(<%d bytes>)\"", ",", "len", "(", "chunk", ")", ")", "event", "=", "h11", ".", "Data", "(", "data", "=", "chunk", ")", "self", ".", "_send_event", "(", "event", ",", "timeout", ")", "# Finalize sending the request.", "event", "=", "h11", ".", "EndOfMessage", "(", ")", "self", ".", "_send_event", "(", "event", ",", "timeout", ")" ]
https://github.com/wizyoung/googletranslate.popclipext/blob/a3c465685a5a75213e2ec8517eb98d336984bc50/src/httpcore/_sync/http11.py#L84-L98
toddlerya/NebulaSolarDash
286ff86f0ad3550c1c92323d45e24f01c5c6fcd5
lib/bottle.py
python
_file_iter_range
(fp, offset, bytes, maxread=1024 * 1024)
Yield chunks from a range in a file. No chunk is bigger than maxread.
def _file_iter_range(fp, offset, bytes, maxread=1024 * 1024):
    """ Yield chunks from a range in a file. No chunk is bigger than maxread."""
    fp.seek(offset)
    while bytes > 0:
        part = fp.read(min(bytes, maxread))
        if not part:
            break
        bytes -= len(part)
        yield part
[ "def", "_file_iter_range", "(", "fp", ",", "offset", ",", "bytes", ",", "maxread", "=", "1024", "*", "1024", ")", ":", "fp", ".", "seek", "(", "offset", ")", "while", "bytes", ">", "0", ":", "part", "=", "fp", ".", "read", "(", "min", "(", "bytes", ",", "maxread", ")", ")", "if", "not", "part", ":", "break", "bytes", "-=", "len", "(", "part", ")", "yield", "part" ]
https://github.com/toddlerya/NebulaSolarDash/blob/286ff86f0ad3550c1c92323d45e24f01c5c6fcd5/lib/bottle.py#L2641-L2648
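A self-contained behaviour sketch (not from the dataset) using an in-memory file in place of a real file handle.

# Sketch: stream 5 bytes starting at offset 2, in chunks of at most 2 bytes.
import io

fp = io.BytesIO(b"0123456789")
chunks = list(_file_iter_range(fp, offset=2, bytes=5, maxread=2))
print(chunks)  # -> [b'23', b'45', b'6']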
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/plot/graphics.py
python
Graphics.tick_label_color
(self, c=None)
Set the color of the axes tick labels. INPUT: - ``c`` - an RGB 3-tuple of numbers between 0 and 1 If called with no input, return the current tick_label_color setting. EXAMPLES:: sage: p = plot(cos, (-3,3)) sage: p.tick_label_color() (0, 0, 0) sage: p.tick_label_color((1,0,0)) sage: p.tick_label_color() (1.0, 0.0, 0.0) sage: p Graphics object consisting of 1 graphics primitive
Set the color of the axes tick labels.
[ "Set", "the", "color", "of", "the", "axes", "tick", "labels", "." ]
def tick_label_color(self, c=None):
    """
    Set the color of the axes tick labels.

    INPUT:

    - ``c`` - an RGB 3-tuple of numbers between 0 and 1

    If called with no input, return the current tick_label_color setting.

    EXAMPLES::

        sage: p = plot(cos, (-3,3))
        sage: p.tick_label_color()
        (0, 0, 0)
        sage: p.tick_label_color((1,0,0))
        sage: p.tick_label_color()
        (1.0, 0.0, 0.0)
        sage: p
        Graphics object consisting of 1 graphics primitive
    """
    if c is None:
        try:
            return self._tick_label_color
        except AttributeError:
            self._tick_label_color = (0, 0, 0)
            return self._tick_label_color
    self._tick_label_color = rgbcolor(c)
[ "def", "tick_label_color", "(", "self", ",", "c", "=", "None", ")", ":", "if", "c", "is", "None", ":", "try", ":", "return", "self", ".", "_tick_label_color", "except", "AttributeError", ":", "self", ".", "_tick_label_color", "=", "(", "0", ",", "0", ",", "0", ")", "return", "self", ".", "_tick_label_color", "self", ".", "_tick_label_color", "=", "rgbcolor", "(", "c", ")" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/plot/graphics.py#L895-L925
networkx/networkx
1620568e36702b1cfeaf1c0277b167b6cb93e48d
examples/graph/plot_roget.py
python
roget_graph
()
return G
Return the thesaurus graph from the roget.dat example in the Stanford Graph Base.
def roget_graph():
    """Return the thesaurus graph from the roget.dat example in
    the Stanford Graph Base.
    """
    # open file roget_dat.txt.gz
    fh = gzip.open("roget_dat.txt.gz", "r")

    G = nx.DiGraph()

    for line in fh.readlines():
        line = line.decode()
        if line.startswith("*"):  # skip comments
            continue
        if line.startswith(" "):  # this is a continuation line, append
            line = oldline + line
        if line.endswith("\\\n"):  # continuation line, buffer, goto next
            oldline = line.strip("\\\n")
            continue

        (headname, tails) = line.split(":")

        # head
        numfind = re.compile(r"^\d+")  # re to find the number of this word
        head = numfind.findall(headname)[0]  # get the number

        G.add_node(head)

        for tail in tails.split():
            if head == tail:
                print("skipping self loop", head, tail, file=sys.stderr)
            G.add_edge(head, tail)

    return G
[ "def", "roget_graph", "(", ")", ":", "# open file roget_dat.txt.gz", "fh", "=", "gzip", ".", "open", "(", "\"roget_dat.txt.gz\"", ",", "\"r\"", ")", "G", "=", "nx", ".", "DiGraph", "(", ")", "for", "line", "in", "fh", ".", "readlines", "(", ")", ":", "line", "=", "line", ".", "decode", "(", ")", "if", "line", ".", "startswith", "(", "\"*\"", ")", ":", "# skip comments", "continue", "if", "line", ".", "startswith", "(", "\" \"", ")", ":", "# this is a continuation line, append", "line", "=", "oldline", "+", "line", "if", "line", ".", "endswith", "(", "\"\\\\\\n\"", ")", ":", "# continuation line, buffer, goto next", "oldline", "=", "line", ".", "strip", "(", "\"\\\\\\n\"", ")", "continue", "(", "headname", ",", "tails", ")", "=", "line", ".", "split", "(", "\":\"", ")", "# head", "numfind", "=", "re", ".", "compile", "(", "r\"^\\d+\"", ")", "# re to find the number of this word", "head", "=", "numfind", ".", "findall", "(", "headname", ")", "[", "0", "]", "# get the number", "G", ".", "add_node", "(", "head", ")", "for", "tail", "in", "tails", ".", "split", "(", ")", ":", "if", "head", "==", "tail", ":", "print", "(", "\"skipping self loop\"", ",", "head", ",", "tail", ",", "file", "=", "sys", ".", "stderr", ")", "G", ".", "add_edge", "(", "head", ",", "tail", ")", "return", "G" ]
https://github.com/networkx/networkx/blob/1620568e36702b1cfeaf1c0277b167b6cb93e48d/examples/graph/plot_roget.py#L31-L63
Sprytile/Sprytile
6b68d0069aef5bfed6ab40d1d5a94a3382b41619
rx/linq/observable/distinct.py
python
distinct
(self, key_selector=None, comparer=None)
return AnonymousObservable(subscribe)
Returns an observable sequence that contains only distinct elements according to the key_selector and the comparer. Usage of this operator should be considered carefully due to the maintenance of an internal lookup structure which can grow large. Example: res = obs = xs.distinct() obs = xs.distinct(lambda x: x.id) obs = xs.distinct(lambda x: x.id, lambda a,b: a == b) Keyword arguments: key_selector -- {Function} [Optional] A function to compute the comparison key for each element. comparer -- {Function} [Optional] Used to compare items in the collection. Returns an observable {Observable} sequence only containing the distinct elements, based on a computed key value, from the source sequence.
Returns an observable sequence that contains only distinct elements according to the key_selector and the comparer. Usage of this operator should be considered carefully due to the maintenance of an internal lookup structure which can grow large.
[ "Returns", "an", "observable", "sequence", "that", "contains", "only", "distinct", "elements", "according", "to", "the", "key_selector", "and", "the", "comparer", ".", "Usage", "of", "this", "operator", "should", "be", "considered", "carefully", "due", "to", "the", "maintenance", "of", "an", "internal", "lookup", "structure", "which", "can", "grow", "large", "." ]
def distinct(self, key_selector=None, comparer=None):
    """Returns an observable sequence that contains only distinct
    elements according to the key_selector and the comparer. Usage of
    this operator should be considered carefully due to the maintenance
    of an internal lookup structure which can grow large.

    Example:
    res = obs = xs.distinct()
    obs = xs.distinct(lambda x: x.id)
    obs = xs.distinct(lambda x: x.id, lambda a,b: a == b)

    Keyword arguments:
    key_selector -- {Function} [Optional] A function to compute the
        comparison key for each element.
    comparer -- {Function} [Optional] Used to compare items in the
        collection.

    Returns an observable {Observable} sequence only containing the
    distinct elements, based on a computed key value, from the source
    sequence.
    """
    source = self
    comparer = comparer or default_comparer

    def subscribe(observer):
        hashset = HashSet(comparer)

        def on_next(x):
            key = x

            if key_selector:
                try:
                    key = key_selector(x)
                except Exception as ex:
                    observer.on_error(ex)
                    return

            hashset.push(key) and observer.on_next(x)

        return source.subscribe(on_next, observer.on_error, observer.on_completed)

    return AnonymousObservable(subscribe)
[ "def", "distinct", "(", "self", ",", "key_selector", "=", "None", ",", "comparer", "=", "None", ")", ":", "source", "=", "self", "comparer", "=", "comparer", "or", "default_comparer", "def", "subscribe", "(", "observer", ")", ":", "hashset", "=", "HashSet", "(", "comparer", ")", "def", "on_next", "(", "x", ")", ":", "key", "=", "x", "if", "key_selector", ":", "try", ":", "key", "=", "key_selector", "(", "x", ")", "except", "Exception", "as", "ex", ":", "observer", ".", "on_error", "(", "ex", ")", "return", "hashset", ".", "push", "(", "key", ")", "and", "observer", ".", "on_next", "(", "x", ")", "return", "source", ".", "subscribe", "(", "on_next", ",", "observer", ".", "on_error", ",", "observer", ".", "on_completed", ")", "return", "AnonymousObservable", "(", "subscribe", ")" ]
https://github.com/Sprytile/Sprytile/blob/6b68d0069aef5bfed6ab40d1d5a94a3382b41619/rx/linq/observable/distinct.py#L24-L64
Komodo/KomodoEdit
61edab75dce2bdb03943b387b0608ea36f548e8e
src/codeintel/lib/codeintel2/database/catalog.py
python
CatalogsZone.toplevelname_index
(self)
return self._toplevelname_index
Load and return the top-level name index (toplevelname_index).
def toplevelname_index(self):
    """Load and return the top-level name index (toplevelname_index)."""
    if self._toplevelname_index is None:
        idxpath = join(self.base_dir, "toplevelname_index")
        self._toplevelname_index = self.db.load_pickle(idxpath, {})
    return self._toplevelname_index
[ "def", "toplevelname_index", "(", "self", ")", ":", "if", "self", ".", "_toplevelname_index", "is", "None", ":", "idxpath", "=", "join", "(", "self", ".", "base_dir", ",", "\"toplevelname_index\"", ")", "self", ".", "_toplevelname_index", "=", "self", ".", "db", ".", "load_pickle", "(", "idxpath", ",", "{", "}", ")", "return", "self", ".", "_toplevelname_index" ]
https://github.com/Komodo/KomodoEdit/blob/61edab75dce2bdb03943b387b0608ea36f548e8e/src/codeintel/lib/codeintel2/database/catalog.py#L228-L233
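The load-on-first-access-and-cache idiom above is generic; a self-contained sketch of the same pattern (pickle-backed, with the empty-dict default standing in for db.load_pickle(idxpath, {}); class and file names are made up):

import os
import pickle

class LazyIndex:
    def __init__(self, base_dir):
        self.base_dir = base_dir
        self._index = None

    @property
    def index(self):
        # Load the pickled index from disk on first access, then cache it.
        if self._index is None:
            path = os.path.join(self.base_dir, "toplevelname_index")
            try:
                with open(path, "rb") as f:
                    self._index = pickle.load(f)
            except (OSError, pickle.PickleError):
                self._index = {}  # default when the file is missing or corrupt
        return self._index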
golismero/golismero
7d605b937e241f51c1ca4f47b20f755eeefb9d76
thirdparty_libs/nltk/inference/api.py
python
ProverCommandDecorator.__init__
(self, proverCommand)
:param proverCommand: ``ProverCommand`` to decorate
:param proverCommand: ``ProverCommand`` to decorate
[ ":", "param", "proverCommand", ":", "ProverCommand", "to", "decorate" ]
def __init__(self, proverCommand): """ :param proverCommand: ``ProverCommand`` to decorate """ TheoremToolCommandDecorator.__init__(self, proverCommand) #The decorator has its own versions of 'result' and 'proof' #because they may be different from the underlying command self._proof = None
[ "def", "__init__", "(", "self", ",", "proverCommand", ")", ":", "TheoremToolCommandDecorator", ".", "__init__", "(", "self", ",", "proverCommand", ")", "#The decorator has its own versions of 'result' and 'proof'", "#because they may be different from the underlying command", "self", ".", "_proof", "=", "None" ]
https://github.com/golismero/golismero/blob/7d605b937e241f51c1ca4f47b20f755eeefb9d76/thirdparty_libs/nltk/inference/api.py#L398-L406
khanhnamle1994/natural-language-processing
01d450d5ac002b0156ef4cf93a07cb508c1bcdc5
assignment1/.env/lib/python2.7/site-packages/pip/_vendor/pyparsing.py
python
ParseResults.asDict
( self )
return dict((k,toItem(v)) for k,v in item_fn())
Returns the named parse results as a nested dictionary. Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString('12/31/1999') print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) result_dict = result.asDict() print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} # even though a ParseResults supports dict-like access, sometimes you just need to have a dict import json print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
Returns the named parse results as a nested dictionary.
[ "Returns", "the", "named", "parse", "results", "as", "a", "nested", "dictionary", "." ]
def asDict( self ): """ Returns the named parse results as a nested dictionary. Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString('12/31/1999') print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) result_dict = result.asDict() print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} # even though a ParseResults supports dict-like access, sometimes you just need to have a dict import json print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} """ if PY_3: item_fn = self.items else: item_fn = self.iteritems def toItem(obj): if isinstance(obj, ParseResults): if obj.haskeys(): return obj.asDict() else: return [toItem(v) for v in obj] else: return obj return dict((k,toItem(v)) for k,v in item_fn())
[ "def", "asDict", "(", "self", ")", ":", "if", "PY_3", ":", "item_fn", "=", "self", ".", "items", "else", ":", "item_fn", "=", "self", ".", "iteritems", "def", "toItem", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "ParseResults", ")", ":", "if", "obj", ".", "haskeys", "(", ")", ":", "return", "obj", ".", "asDict", "(", ")", "else", ":", "return", "[", "toItem", "(", "v", ")", "for", "v", "in", "obj", "]", "else", ":", "return", "obj", "return", "dict", "(", "(", "k", ",", "toItem", "(", "v", ")", ")", "for", "k", ",", "v", "in", "item_fn", "(", ")", ")" ]
https://github.com/khanhnamle1994/natural-language-processing/blob/01d450d5ac002b0156ef4cf93a07cb508c1bcdc5/assignment1/.env/lib/python2.7/site-packages/pip/_vendor/pyparsing.py#L697-L730
knitmesh/servos-framework
45fdc04580890c3aac039c023f104ce8dc00af08
servos/core/commands.py
python
CommandManager.execute
(self, callback=None)
Given the command-line arguments, this figures out which subcommand is being run, creates a parser appropriate to that command, and runs it.
Given the command-line arguments, this figures out which subcommand is being run, creates a parser appropriate to that command, and runs it.
[ "Given", "the", "command", "-", "line", "arguments", "this", "figures", "out", "which", "subcommand", "is", "being", "run", "creates", "a", "parser", "appropriate", "to", "that", "command", "and", "runs", "it", "." ]
def execute(self, callback=None): """ Given the command-line arguments, this figures out which subcommand is being run, creates a parser appropriate to that command, and runs it. """ # Preprocess options to extract --settings and --pythonpath. # These options could affect the commands that are available, so they # must be processed early. parser = NewOptionParser(prog=self.prog_name, usage=self.usage_info, formatter=NewFormatter(), add_help_option=False, option_list=self.option_list) if not self.global_options: global_options, args = parser.parse_args(self.argv) global_options.services_dir = os.path.normpath( global_options.services_dir) handle_default_options(global_options) args = args[1:] else: global_options = self.global_options args = self.argv global_options.settings = global_options.settings or os.environ.get( 'SETTINGS', 'settings.ini') global_options.local_settings = global_options.local_settings or os.environ.get( 'LOCAL_SETTINGS', 'local_settings.ini') if callback: callback(global_options) def print_help(global_options): parser.print_help() sys.stderr.write(self.print_help_info(global_options) + '\n') sys.exit(0) if len(args) == 0: if global_options.version: print self.get_version() sys.exit(0) else: print_help(global_options) sys.exit(1) try: subcommand = args[0] except IndexError: subcommand = 'help' # Display help if no arguments were given. if subcommand == 'help': if len(args) > 1: command = self.fetch_command(global_options, args[1]) if issubclass(command, CommandManager): cmd = command(['help'], None, '%s %s' % ( self.prog_name, args[1]), global_options=global_options) cmd.execute() else: command().print_help(self.prog_name, args[1]) sys.exit(0) else: print_help(global_options) if global_options.help: print_help(global_options) else: command = self.fetch_command(global_options, subcommand) if issubclass(command, CommandManager): cmd = command(args[1:], None, '%s %s' % ( self.prog_name, subcommand), global_options=global_options) cmd.execute() else: cmd = command() cmd.run_from_argv( self.prog_name, subcommand, global_options, args[1:])
[ "def", "execute", "(", "self", ",", "callback", "=", "None", ")", ":", "# Preprocess options to extract --settings and --pythonpath.", "# These options could affect the commands that are available, so they", "# must be processed early.", "parser", "=", "NewOptionParser", "(", "prog", "=", "self", ".", "prog_name", ",", "usage", "=", "self", ".", "usage_info", ",", "formatter", "=", "NewFormatter", "(", ")", ",", "add_help_option", "=", "False", ",", "option_list", "=", "self", ".", "option_list", ")", "if", "not", "self", ".", "global_options", ":", "global_options", ",", "args", "=", "parser", ".", "parse_args", "(", "self", ".", "argv", ")", "global_options", ".", "services_dir", "=", "os", ".", "path", ".", "normpath", "(", "global_options", ".", "services_dir", ")", "handle_default_options", "(", "global_options", ")", "args", "=", "args", "[", "1", ":", "]", "else", ":", "global_options", "=", "self", ".", "global_options", "args", "=", "self", ".", "argv", "global_options", ".", "settings", "=", "global_options", ".", "settings", "or", "os", ".", "environ", ".", "get", "(", "'SETTINGS'", ",", "'settings.ini'", ")", "global_options", ".", "local_settings", "=", "global_options", ".", "local_settings", "or", "os", ".", "environ", ".", "get", "(", "'LOCAL_SETTINGS'", ",", "'local_settings.ini'", ")", "if", "callback", ":", "callback", "(", "global_options", ")", "def", "print_help", "(", "global_options", ")", ":", "parser", ".", "print_help", "(", ")", "sys", ".", "stderr", ".", "write", "(", "self", ".", "print_help_info", "(", "global_options", ")", "+", "'\\n'", ")", "sys", ".", "exit", "(", "0", ")", "if", "len", "(", "args", ")", "==", "0", ":", "if", "global_options", ".", "version", ":", "print", "self", ".", "get_version", "(", ")", "sys", ".", "exit", "(", "0", ")", "else", ":", "print_help", "(", "global_options", ")", "sys", ".", "exit", "(", "1", ")", "try", ":", "subcommand", "=", "args", "[", "0", "]", "except", "IndexError", ":", "subcommand", "=", "'help'", "# Display help if no arguments were given.", "if", "subcommand", "==", "'help'", ":", "if", "len", "(", "args", ")", ">", "1", ":", "command", "=", "self", ".", "fetch_command", "(", "global_options", ",", "args", "[", "1", "]", ")", "if", "issubclass", "(", "command", ",", "CommandManager", ")", ":", "cmd", "=", "command", "(", "[", "'help'", "]", ",", "None", ",", "'%s %s'", "%", "(", "self", ".", "prog_name", ",", "args", "[", "1", "]", ")", ",", "global_options", "=", "global_options", ")", "cmd", ".", "execute", "(", ")", "else", ":", "command", "(", ")", ".", "print_help", "(", "self", ".", "prog_name", ",", "args", "[", "1", "]", ")", "sys", ".", "exit", "(", "0", ")", "else", ":", "print_help", "(", "global_options", ")", "if", "global_options", ".", "help", ":", "print_help", "(", "global_options", ")", "else", ":", "command", "=", "self", ".", "fetch_command", "(", "global_options", ",", "subcommand", ")", "if", "issubclass", "(", "command", ",", "CommandManager", ")", ":", "cmd", "=", "command", "(", "args", "[", "1", ":", "]", ",", "None", ",", "'%s %s'", "%", "(", "self", ".", "prog_name", ",", "subcommand", ")", ",", "global_options", "=", "global_options", ")", "cmd", ".", "execute", "(", ")", "else", ":", "cmd", "=", "command", "(", ")", "cmd", ".", "run_from_argv", "(", "self", ".", "prog_name", ",", "subcommand", ",", "global_options", ",", "args", "[", "1", ":", "]", ")" ]
https://github.com/knitmesh/servos-framework/blob/45fdc04580890c3aac039c023f104ce8dc00af08/servos/core/commands.py#L277-L350
anymail/django-anymail
dc0a46a815d062d52660b9237627b22f89093bce
anymail/utils.py
python
parse_single_address
(address, field=None)
Parses a single EmailAddress from str address, or raises AnymailInvalidAddress :param str address: the fully-formatted email str to parse :param str|None field: optional description of the source of this address, for error message :return :class:`EmailAddress`: if address contains a single email :raises :exc:`AnymailInvalidAddress`: if address contains no or multiple emails
Parses a single EmailAddress from str address, or raises AnymailInvalidAddress
[ "Parses", "a", "single", "EmailAddress", "from", "str", "address", "or", "raises", "AnymailInvalidAddress" ]
def parse_single_address(address, field=None): """Parses a single EmailAddress from str address, or raises AnymailInvalidAddress :param str address: the fully-formatted email str to parse :param str|None field: optional description of the source of this address, for error message :return :class:`EmailAddress`: if address contains a single email :raises :exc:`AnymailInvalidAddress`: if address contains no or multiple emails """ parsed = parse_address_list([address], field=field) count = len(parsed) if count > 1: raise AnymailInvalidAddress( "Only one email address is allowed; found {count} in '{address}'{where}.".format( count=count, address=address, where=" in `%s`" % field if field else "")) else: return parsed[0]
[ "def", "parse_single_address", "(", "address", ",", "field", "=", "None", ")", ":", "parsed", "=", "parse_address_list", "(", "[", "address", "]", ",", "field", "=", "field", ")", "count", "=", "len", "(", "parsed", ")", "if", "count", ">", "1", ":", "raise", "AnymailInvalidAddress", "(", "\"Only one email address is allowed; found {count} in '{address}'{where}.\"", ".", "format", "(", "count", "=", "count", ",", "address", "=", "address", ",", "where", "=", "\" in `%s`\"", "%", "field", "if", "field", "else", "\"\"", ")", ")", "else", ":", "return", "parsed", "[", "0", "]" ]
https://github.com/anymail/django-anymail/blob/dc0a46a815d062d52660b9237627b22f89093bce/anymail/utils.py#L164-L179
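A hedged usage sketch of this helper (assuming the EmailAddress it returns exposes an addr_spec attribute, as elsewhere in this module, and that AnymailInvalidAddress lives in anymail.exceptions):

from anymail.exceptions import AnymailInvalidAddress
from anymail.utils import parse_single_address

addr = parse_single_address('"Jane Doe" <jane@example.com>', field="from_email")
print(addr.addr_spec)  # jane@example.com (attribute name assumed)

try:
    parse_single_address("a@example.com, b@example.com")
except AnymailInvalidAddress as err:
    print(err)  # "Only one email address is allowed; found 2 in ..."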
onnx/onnx-coreml
141fc33d7217674ea8bda36494fa8089a543a3f3
onnx_coreml/_operators_nd.py
python
_convert_constant
(builder, node, graph, err)
convert to CoreML Load Constant ND Layer: https://github.com/apple/coremltools/blob/655b3be5cc0d42c3c4fa49f0f0e4a93a26b3e492/mlmodel/format/NeuralNetwork.proto#L3596
convert to CoreML Load Constant ND Layer: https://github.com/apple/coremltools/blob/655b3be5cc0d42c3c4fa49f0f0e4a93a26b3e492/mlmodel/format/NeuralNetwork.proto#L3596
[ "convert", "to", "CoreML", "Load", "Constant", "ND", "Layer", ":", "https", ":", "//", "github", ".", "com", "/", "apple", "/", "coremltools", "/", "blob", "/", "655b3be5cc0d42c3c4fa49f0f0e4a93a26b3e492", "/", "mlmodel", "/", "format", "/", "NeuralNetwork", ".", "proto#L3596" ]
def _convert_constant(builder, node, graph, err): ''' convert to CoreML Load Constant ND Layer: https://github.com/apple/coremltools/blob/655b3be5cc0d42c3c4fa49f0f0e4a93a26b3e492/mlmodel/format/NeuralNetwork.proto#L3596 ''' value = node.attrs['value'] # HACK: If Value is 0-Rank then make it 1-Rank builder.add_load_constant_nd( name=node.name, output_name=node.outputs[0], constant_value=value, shape=[1] if value.shape == () else value.shape ) graph.constants_loaded(node.outputs[0])
[ "def", "_convert_constant", "(", "builder", ",", "node", ",", "graph", ",", "err", ")", ":", "value", "=", "node", ".", "attrs", "[", "'value'", "]", "# HACK: If Value is 0-Rank then make it 1-Rank", "builder", ".", "add_load_constant_nd", "(", "name", "=", "node", ".", "name", ",", "output_name", "=", "node", ".", "outputs", "[", "0", "]", ",", "constant_value", "=", "value", ",", "shape", "=", "[", "1", "]", "if", "value", ".", "shape", "==", "(", ")", "else", "value", ".", "shape", ")", "graph", ".", "constants_loaded", "(", "node", ".", "outputs", "[", "0", "]", ")" ]
https://github.com/onnx/onnx-coreml/blob/141fc33d7217674ea8bda36494fa8089a543a3f3/onnx_coreml/_operators_nd.py#L426-L439
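The interesting detail is the 0-rank workaround in the shape argument; a standalone sketch of just that piece (function name hypothetical):

import numpy as np

def coreml_constant_shape(value):
    # load_constant_nd needs a rank >= 1 shape, so a 0-rank scalar
    # (shape ()) is promoted to shape [1].
    return [1] if value.shape == () else list(value.shape)

print(coreml_constant_shape(np.float32(3.0)))  # -> [1]
print(coreml_constant_shape(np.zeros((2, 3))))  # -> [2, 3]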
turicas/brasil.io
f1c371fe828a090510259a5027b49e2e651936b4
traffic_control/blocked_list.py
python
BlockedRequestList.__len__
(self)
[]
def __len__(self): if self.redis_conn: return self.redis_conn.llen(settings.RQ_BLOCKED_REQUESTS_LIST) else: return len(self._requests_data)
[ "def", "__len__", "(", "self", ")", ":", "if", "self", ".", "redis_conn", ":", "return", "self", ".", "redis_conn", ".", "llen", "(", "settings", ".", "RQ_BLOCKED_REQUESTS_LIST", ")", "else", ":", "return", "len", "(", "self", ".", "_requests_data", ")" ]
https://github.com/turicas/brasil.io/blob/f1c371fe828a090510259a5027b49e2e651936b4/traffic_control/blocked_list.py#L38-L42
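A self-contained sketch of the same dual-backend pattern (a Redis list when a connection is configured, a plain Python list otherwise; rpush and llen are standard redis-py calls, the class and key names are made up):

class FallbackList:
    def __init__(self, redis_conn=None, key="blocked-requests"):
        self.redis_conn = redis_conn
        self.key = key
        self._items = []

    def push(self, item):
        if self.redis_conn:
            self.redis_conn.rpush(self.key, item)
        else:
            self._items.append(item)

    def __len__(self):
        # Same shape as the method above: delegate to Redis when present.
        if self.redis_conn:
            return self.redis_conn.llen(self.key)
        return len(self._items)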
arizvisa/ida-minsc
8627a60f047b5e55d3efeecde332039cd1a16eea
base/_utils.py
python
character.unicodeQ
(cls, ch)
return cat[0] != 'C'
Returns whether a unicode character is printable or not.
Returns whether a unicode character is printable or not.
[ "Returns", "whether", "a", "unicode", "character", "is", "printable", "or", "not", "." ]
def unicodeQ(cls, ch): '''Returns whether a unicode character is printable or not.''' cat = cls.const._unicodedata.category(ch) return cat[0] != 'C'
[ "def", "unicodeQ", "(", "cls", ",", "ch", ")", ":", "cat", "=", "cls", ".", "const", ".", "_unicodedata", ".", "category", "(", "ch", ")", "return", "cat", "[", "0", "]", "!=", "'C'" ]
https://github.com/arizvisa/ida-minsc/blob/8627a60f047b5e55d3efeecde332039cd1a16eea/base/_utils.py#L799-L802
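The check leans on Unicode general categories: any character whose category starts with "C" (Cc control, Cf format, Cs surrogate, Co private use, Cn unassigned) is treated as non-printable. A standalone sketch using only the stdlib:

import unicodedata

def printable(ch):
    # category() returns a two-letter code such as 'Lu' or 'Cc';
    # a leading 'C' marks control/format/surrogate/unassigned characters.
    return unicodedata.category(ch)[0] != 'C'

print(printable('A'))       # True  ('Lu', uppercase letter)
print(printable('\u0007'))  # False ('Cc', the BEL control character)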
eyounx/ZOOpt
49c750daf842639ee6407848a867091689571810
zoopt/algos/opt_algorithms/racos/sracos.py
python
SRacosTune.strategy_rr
(self, iset, x)
return replace_ele
Replace a random solution in iset. :param iset: a solution set :param x: a Solution object :return: the replaced solution
Replace a random solution in iset.
[ "Replace", "a", "random", "solution", "in", "iset", "." ]
def strategy_rr(self, iset, x): """ Replace a random solution in iset. :param iset: a solution set :param x: a Solution object :return: the replaced solution """ len_iset = len(iset) replace_index = np.random.randint(0, len_iset) replace_ele = iset[replace_index] iset[replace_index] = x return replace_ele
[ "def", "strategy_rr", "(", "self", ",", "iset", ",", "x", ")", ":", "len_iset", "=", "len", "(", "iset", ")", "replace_index", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "len_iset", ")", "replace_ele", "=", "iset", "[", "replace_index", "]", "iset", "[", "replace_index", "]", "=", "x", "return", "replace_ele" ]
https://github.com/eyounx/ZOOpt/blob/49c750daf842639ee6407848a867091689571810/zoopt/algos/opt_algorithms/racos/sracos.py#L390-L402
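A self-contained sketch of the same replace-random strategy outside the class (names hypothetical):

import numpy as np

def replace_random(population, candidate, rng=np.random):
    # Choose a uniformly random slot, swap the candidate in, and hand
    # back whichever solution it displaced.
    idx = rng.randint(0, len(population))
    replaced = population[idx]
    population[idx] = candidate
    return replaced

pool = ['a', 'b', 'c']
print(replace_random(pool, 'x'), pool)  # e.g. b ['a', 'x', 'c']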
TencentCloud/tencentcloud-sdk-python
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
tencentcloud/btoe/v20210514/models.py
python
VerifyEvidenceHashResponse.__init__
(self)
r""" :param Result: 核验结果,true为核验成功,false为核验失败 :type Result: bool :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str
r""" :param Result: 核验结果,true为核验成功,false为核验失败 :type Result: bool :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str
[ "r", ":", "param", "Result", ":", "核验结果,true为核验成功,false为核验失败", ":", "type", "Result", ":", "bool", ":", "param", "RequestId", ":", "唯一请求", "ID,每次请求都会返回。定位问题时需要提供该次请求的", "RequestId。", ":", "type", "RequestId", ":", "str" ]
def __init__(self): r""" :param Result: 核验结果,true为核验成功,false为核验失败 :type Result: bool :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """ self.Result = None self.RequestId = None
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "Result", "=", "None", "self", ".", "RequestId", "=", "None" ]
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/btoe/v20210514/models.py#L796-L804
getalp/Flaubert
ded1cf89820a22dbf885c85ba3dccc8ab360681b
xlm/optim.py
python
AdamCosineWithWarmup.step
(self, closure=None)
[]
def step(self, closure=None): super().step(closure) for param_group in self.param_groups: param_group['num_updates'] += 1 param_group['lr'] = self.get_lr_for_step(param_group['num_updates'])
[ "def", "step", "(", "self", ",", "closure", "=", "None", ")", ":", "super", "(", ")", ".", "step", "(", "closure", ")", "for", "param_group", "in", "self", ".", "param_groups", ":", "param_group", "[", "'num_updates'", "]", "+=", "1", "param_group", "[", "'lr'", "]", "=", "self", ".", "get_lr_for_step", "(", "param_group", "[", "'num_updates'", "]", ")" ]
https://github.com/getalp/Flaubert/blob/ded1cf89820a22dbf885c85ba3dccc8ab360681b/xlm/optim.py#L204-L208
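A sketch of the same pattern in plain PyTorch (the subclass name and the lr_fn schedule are hypothetical; only the count-then-recompute idea is taken from the method above):

import torch

class StepCountingAdam(torch.optim.Adam):
    def __init__(self, params, lr_fn, **kwargs):
        super().__init__(params, **kwargs)
        self.lr_fn = lr_fn  # schedule: num_updates -> learning rate
        for group in self.param_groups:
            group['num_updates'] = 0

    def step(self, closure=None):
        # Run the normal Adam update, then bump each group's counter
        # and recompute its learning rate for the next step.
        super().step(closure)
        for group in self.param_groups:
            group['num_updates'] += 1
            group['lr'] = self.lr_fn(group['num_updates'])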
IronLanguages/main
a949455434b1fda8c783289e897e78a9a0caabb5
External.LCA_RESTRICTED/Languages/IronPython/27/Lib/imaplib.py
python
IMAP4.setquota
(self, root, limits)
return self._untagged_response(typ, dat, 'QUOTA')
Set the quota root's resource limits. (typ, [data]) = <instance>.setquota(root, limits)
Set the quota root's resource limits.
[ "Set", "the", "quota", "root", "s", "resource", "limits", "." ]
def setquota(self, root, limits): """Set the quota root's resource limits. (typ, [data]) = <instance>.setquota(root, limits) """ typ, dat = self._simple_command('SETQUOTA', root, limits) return self._untagged_response(typ, dat, 'QUOTA')
[ "def", "setquota", "(", "self", ",", "root", ",", "limits", ")", ":", "typ", ",", "dat", "=", "self", ".", "_simple_command", "(", "'SETQUOTA'", ",", "root", ",", "limits", ")", "return", "self", ".", "_untagged_response", "(", "typ", ",", "dat", ",", "'QUOTA'", ")" ]
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/IronPython/27/Lib/imaplib.py#L692-L698
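A hedged usage sketch (host, credentials, and the quota root are placeholders; the limits string follows the RFC 2087 quota syntax, where STORAGE is counted in units of 1024 octets):

import imaplib

M = imaplib.IMAP4_SSL('imap.example.com')
M.login('user', 'password')
typ, data = M.setquota('""', '(STORAGE 512000)')  # '""' = the default quota root
print(typ, data)
M.logout()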
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/matplotlib-3.0.3-py3.7-macosx-10.9-x86_64.egg/mpl_toolkits/mplot3d/axes3d.py
python
Axes3D.zaxis_inverted
(self)
return top < bottom
Returns True if the z-axis is inverted. .. versionadded :: 1.1.0 This function was added, but not tested. Please report any bugs.
Returns True if the z-axis is inverted.
[ "Returns", "True", "if", "the", "z", "-", "axis", "is", "inverted", "." ]
def zaxis_inverted(self): ''' Returns True if the z-axis is inverted. .. versionadded :: 1.1.0 This function was added, but not tested. Please report any bugs. ''' bottom, top = self.get_zlim() return top < bottom
[ "def", "zaxis_inverted", "(", "self", ")", ":", "bottom", ",", "top", "=", "self", ".", "get_zlim", "(", ")", "return", "top", "<", "bottom" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/matplotlib-3.0.3-py3.7-macosx-10.9-x86_64.egg/mpl_toolkits/mplot3d/axes3d.py#L1519-L1527
twisted/twisted
dee676b040dd38b847ea6fb112a712cb5e119490
src/twisted/internet/_sslverify.py
python
_handleattrhelper
(Class, transport, methodName)
return Class(cert)
(private) Helper for L{Certificate.peerFromTransport} and L{Certificate.hostFromTransport} which checks for incompatible handle types and null certificates and raises the appropriate exception or returns the appropriate certificate object.
(private) Helper for L{Certificate.peerFromTransport} and L{Certificate.hostFromTransport} which checks for incompatible handle types and null certificates and raises the appropriate exception or returns the appropriate certificate object.
[ "(", "private", ")", "Helper", "for", "L", "{", "Certificate", ".", "peerFromTransport", "}", "and", "L", "{", "Certificate", ".", "hostFromTransport", "}", "which", "checks", "for", "incompatible", "handle", "types", "and", "null", "certificates", "and", "raises", "the", "appropriate", "exception", "or", "returns", "the", "appropriate", "certificate", "object", "." ]
def _handleattrhelper(Class, transport, methodName): """ (private) Helper for L{Certificate.peerFromTransport} and L{Certificate.hostFromTransport} which checks for incompatible handle types and null certificates and raises the appropriate exception or returns the appropriate certificate object. """ method = getattr(transport.getHandle(), f"get_{methodName}_certificate", None) if method is None: raise CertificateError( "non-TLS transport {!r} did not have {} certificate".format( transport, methodName ) ) cert = method() if cert is None: raise CertificateError( "TLS transport {!r} did not have {} certificate".format( transport, methodName ) ) return Class(cert)
[ "def", "_handleattrhelper", "(", "Class", ",", "transport", ",", "methodName", ")", ":", "method", "=", "getattr", "(", "transport", ".", "getHandle", "(", ")", ",", "f\"get_{methodName}_certificate\"", ",", "None", ")", "if", "method", "is", "None", ":", "raise", "CertificateError", "(", "\"non-TLS transport {!r} did not have {} certificate\"", ".", "format", "(", "transport", ",", "methodName", ")", ")", "cert", "=", "method", "(", ")", "if", "cert", "is", "None", ":", "raise", "CertificateError", "(", "\"TLS transport {!r} did not have {} certificate\"", ".", "format", "(", "transport", ",", "methodName", ")", ")", "return", "Class", "(", "cert", ")" ]
https://github.com/twisted/twisted/blob/dee676b040dd38b847ea6fb112a712cb5e119490/src/twisted/internet/_sslverify.py#L404-L425
pythonarcade/arcade
1ee3eb1900683213e8e8df93943327c2ea784564
doc/tutorials/card_game/solitaire_11.py
python
MyGame.on_mouse_motion
(self, x: float, y: float, dx: float, dy: float)
User moves mouse
User moves mouse
[ "User", "moves", "mouse" ]
def on_mouse_motion(self, x: float, y: float, dx: float, dy: float): """ User moves mouse """ # If we are holding cards, move them with the mouse for card in self.held_cards: card.center_x += dx card.center_y += dy
[ "def", "on_mouse_motion", "(", "self", ",", "x", ":", "float", ",", "y", ":", "float", ",", "dx", ":", "float", ",", "dy", ":", "float", ")", ":", "# If we are holding cards, move them with the mouse", "for", "card", "in", "self", ".", "held_cards", ":", "card", ".", "center_x", "+=", "dx", "card", ".", "center_y", "+=", "dy" ]
https://github.com/pythonarcade/arcade/blob/1ee3eb1900683213e8e8df93943327c2ea784564/doc/tutorials/card_game/solitaire_11.py#L387-L393
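A minimal self-contained sketch of the same drag idiom (window size and class name are arbitrary): applying the per-event delta keeps the grab point fixed relative to each held sprite, however fast the mouse moves.

import arcade

class DragSketch(arcade.Window):
    def __init__(self):
        super().__init__(800, 600, "Drag sketch")
        self.held_cards = []  # sprites currently being dragged

    def on_mouse_motion(self, x, y, dx, dy):
        # Shift every held sprite by this event's mouse delta.
        for card in self.held_cards:
            card.center_x += dx
            card.center_y += dy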
leo-editor/leo-editor
383d6776d135ef17d73d935a2f0ecb3ac0e99494
leo/core/leoFrame.py
python
frame_cmd
(name)
return g.new_cmd_decorator(name, ['c', 'frame',])
Command decorator for the LeoFrame class.
Command decorator for the LeoFrame class.
[ "Command", "decorator", "for", "the", "LeoFrame", "class", "." ]
def frame_cmd(name): """Command decorator for the LeoFrame class.""" return g.new_cmd_decorator(name, ['c', 'frame',])
[ "def", "frame_cmd", "(", "name", ")", ":", "return", "g", ".", "new_cmd_decorator", "(", "name", ",", "[", "'c'", ",", "'frame'", ",", "]", ")" ]
https://github.com/leo-editor/leo-editor/blob/383d6776d135ef17d73d935a2f0ecb3ac0e99494/leo/core/leoFrame.py#L55-L57
google/rekall
55d1925f2df9759a989b35271b4fa48fc54a1c86
rekall-core/rekall/cache.py
python
Cache.Flush
(self)
Called to sync the cache to external storage if required.
Called to sync the cache to external storage if required.
[ "Called", "to", "sync", "the", "cache", "to", "external", "storage", "if", "required", "." ]
def Flush(self): """Called to sync the cache to external storage if required."""
[ "def", "Flush", "(", "self", ")", ":" ]
https://github.com/google/rekall/blob/55d1925f2df9759a989b35271b4fa48fc54a1c86/rekall-core/rekall/cache.py#L103-L104
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/pandas-0.24.2-py3.7-macosx-10.9-x86_64.egg/pandas/util/_decorators.py
python
deprecate
(name, alternative, version, alt_name=None, klass=None, stacklevel=2, msg=None)
return wrapper
Return a new function that emits a deprecation warning on use. To use this method for a deprecated function, another function `alternative` with the same signature must exist. The deprecated function will emit a deprecation warning, and in the docstring it will contain the deprecation directive with the provided version so it can be detected for future removal. Parameters ---------- name : str Name of function to deprecate. alternative : func Function to use instead. version : str Version of pandas in which the method has been deprecated. alt_name : str, optional Name to use in preference of alternative.__name__. klass : Warning, default FutureWarning stacklevel : int, default 2 msg : str The message to display in the warning. Default is '{name} is deprecated. Use {alt_name} instead.'
Return a new function that emits a deprecation warning on use.
[ "Return", "a", "new", "function", "that", "emits", "a", "deprecation", "warning", "on", "use", "." ]
def deprecate(name, alternative, version, alt_name=None, klass=None, stacklevel=2, msg=None): """Return a new function that emits a deprecation warning on use. To use this method for a deprecated function, another function `alternative` with the same signature must exist. The deprecated function will emit a deprecation warning, and in the docstring it will contain the deprecation directive with the provided version so it can be detected for future removal. Parameters ---------- name : str Name of function to deprecate. alternative : func Function to use instead. version : str Version of pandas in which the method has been deprecated. alt_name : str, optional Name to use in preference of alternative.__name__. klass : Warning, default FutureWarning stacklevel : int, default 2 msg : str The message to display in the warning. Default is '{name} is deprecated. Use {alt_name} instead.' """ alt_name = alt_name or alternative.__name__ klass = klass or FutureWarning warning_msg = msg or '{} is deprecated, use {} instead'.format(name, alt_name) @wraps(alternative) def wrapper(*args, **kwargs): warnings.warn(warning_msg, klass, stacklevel=stacklevel) return alternative(*args, **kwargs) # adding deprecated directive to the docstring msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name) doc_error_msg = ('deprecate needs a correctly formatted docstring in ' 'the target function (should have a one liner short ' 'summary, and opening quotes should be in their own ' 'line). Found:\n{}'.format(alternative.__doc__)) # when python is running in optimized mode (i.e. `-OO`), docstrings are # removed, so we check that a docstring with correct formatting is used # but we allow empty docstrings if alternative.__doc__: if alternative.__doc__.count('\n') < 3: raise AssertionError(doc_error_msg) empty1, summary, empty2, doc = alternative.__doc__.split('\n', 3) if empty1 or empty2 and not summary: raise AssertionError(doc_error_msg) wrapper.__doc__ = dedent(""" {summary} .. deprecated:: {depr_version} {depr_msg} {rest_of_docstring}""").format(summary=summary.strip(), depr_version=version, depr_msg=msg, rest_of_docstring=dedent(doc)) return wrapper
[ "def", "deprecate", "(", "name", ",", "alternative", ",", "version", ",", "alt_name", "=", "None", ",", "klass", "=", "None", ",", "stacklevel", "=", "2", ",", "msg", "=", "None", ")", ":", "alt_name", "=", "alt_name", "or", "alternative", ".", "__name__", "klass", "=", "klass", "or", "FutureWarning", "warning_msg", "=", "msg", "or", "'{} is deprecated, use {} instead'", ".", "format", "(", "name", ",", "alt_name", ")", "@", "wraps", "(", "alternative", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "warning_msg", ",", "klass", ",", "stacklevel", "=", "stacklevel", ")", "return", "alternative", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# adding deprecated directive to the docstring", "msg", "=", "msg", "or", "'Use `{alt_name}` instead.'", ".", "format", "(", "alt_name", "=", "alt_name", ")", "doc_error_msg", "=", "(", "'deprecate needs a correctly formatted docstring in '", "'the target function (should have a one liner short '", "'summary, and opening quotes should be in their own '", "'line). Found:\\n{}'", ".", "format", "(", "alternative", ".", "__doc__", ")", ")", "# when python is running in optimized mode (i.e. `-OO`), docstrings are", "# removed, so we check that a docstring with correct formatting is used", "# but we allow empty docstrings", "if", "alternative", ".", "__doc__", ":", "if", "alternative", ".", "__doc__", ".", "count", "(", "'\\n'", ")", "<", "3", ":", "raise", "AssertionError", "(", "doc_error_msg", ")", "empty1", ",", "summary", ",", "empty2", ",", "doc", "=", "alternative", ".", "__doc__", ".", "split", "(", "'\\n'", ",", "3", ")", "if", "empty1", "or", "empty2", "and", "not", "summary", ":", "raise", "AssertionError", "(", "doc_error_msg", ")", "wrapper", ".", "__doc__", "=", "dedent", "(", "\"\"\"\n {summary}\n\n .. deprecated:: {depr_version}\n {depr_msg}\n\n {rest_of_docstring}\"\"\"", ")", ".", "format", "(", "summary", "=", "summary", ".", "strip", "(", ")", ",", "depr_version", "=", "version", ",", "depr_msg", "=", "msg", ",", "rest_of_docstring", "=", "dedent", "(", "doc", ")", ")", "return", "wrapper" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/pandas-0.24.2-py3.7-macosx-10.9-x86_64.egg/pandas/util/_decorators.py#L10-L74
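A hedged usage sketch; note the replacement function's docstring must match the layout the assertions above enforce (leading newline, one-line summary, blank line, body), or deprecate() raises AssertionError. The functions here are made up:

def new_sum(values):
    """
    Return the sum of values.

    Replacement for the deprecated old_sum.
    """
    return sum(values)

old_sum = deprecate("old_sum", new_sum, "0.24.0")
print(old_sum([1, 2, 3]))  # emits a FutureWarning, then returns 6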
Pylons/pyramid
0b24ac16cc04746b25cf460f1497c157f6d3d6f4
src/pyramid/config/__init__.py
python
Configurator.end
(self)
return self.manager.pop()
Indicate that application or test configuration has ended. This pops the last value pushed onto the :term:`thread local` stack (usually by the ``begin`` method) and returns that value.
Indicate that application or test configuration has ended. This pops the last value pushed onto the :term:`thread local` stack (usually by the ``begin`` method) and returns that value.
[ "Indicate", "that", "application", "or", "test", "configuration", "has", "ended", ".", "This", "pops", "the", "last", "value", "pushed", "onto", "the", ":", "term", ":", "thread", "local", "stack", "(", "usually", "by", "the", "begin", "method", ")", "and", "returns", "that", "value", "." ]
def end(self): """Indicate that application or test configuration has ended. This pops the last value pushed onto the :term:`thread local` stack (usually by the ``begin`` method) and returns that value. """ return self.manager.pop()
[ "def", "end", "(", "self", ")", ":", "return", "self", ".", "manager", ".", "pop", "(", ")" ]
https://github.com/Pylons/pyramid/blob/0b24ac16cc04746b25cf460f1497c157f6d3d6f4/src/pyramid/config/__init__.py#L783-L789
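A sketch of the begin/end pairing this supports in tests (get_current_registry is the standard pyramid.threadlocal API):

from pyramid.config import Configurator
from pyramid.threadlocal import get_current_registry

config = Configurator()
config.begin()  # pushes this configurator's registry onto the stack
try:
    assert get_current_registry() is config.registry
finally:
    config.end()  # pops it again, returning the pushed value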
spender-sandbox/cuckoo-modified
eb93ef3d41b8fee51b4330306dcd315d8101e021
lib/cuckoo/common/abstracts.py
python
Machinery.shutdown
(self)
Shutdown the machine manager. Kills all alive machines. @raise CuckooMachineError: if unable to stop machine.
Shutdown the machine manager. Kills all alive machines.
[ "Shutdown", "the", "machine", "manager", ".", "Kills", "all", "alive", "machines", "." ]
def shutdown(self): """Shutdown the machine manager. Kills all alive machines. @raise CuckooMachineError: if unable to stop machine. """ if len(self.running()) > 0: log.info("Still %s guests alive. Shutting down...", len(self.running())) for machine in self.running(): try: self.stop(machine.label) except CuckooMachineError as e: log.warning("Unable to shutdown machine %s, please check " "manually. Error: %s", machine.label, e)
[ "def", "shutdown", "(", "self", ")", ":", "if", "len", "(", "self", ".", "running", "(", ")", ")", ">", "0", ":", "log", ".", "info", "(", "\"Still %s guests alive. Shutting down...\"", ",", "len", "(", "self", ".", "running", "(", ")", ")", ")", "for", "machine", "in", "self", ".", "running", "(", ")", ":", "try", ":", "self", ".", "stop", "(", "machine", ".", "label", ")", "except", "CuckooMachineError", "as", "e", ":", "log", ".", "warning", "(", "\"Unable to shutdown machine %s, please check \"", "\"manually. Error: %s\"", ",", "machine", ".", "label", ",", "e", ")" ]
https://github.com/spender-sandbox/cuckoo-modified/blob/eb93ef3d41b8fee51b4330306dcd315d8101e021/lib/cuckoo/common/abstracts.py#L230-L242
imageworks/OpenColorIO-Configs
0bb079c08be410030669cbf5f19ff869b88af953
aces_1.0.2/python/aces_ocio/colorspaces/general.py
python
create_matrix_plus_transfer_colorspace
( name='matrix_plus_transfer', transfer_function_name='transfer_function', transfer_function=lambda x: x, lut_directory='/tmp', lut_resolution_1d=1024, from_reference_values=None, to_reference_values=None, aliases=None)
return cs
Creates a ColorSpace that uses transfer functions encoded as 1D LUTs and matrices Parameters ---------- name : str, optional The name of the colorspace transfer_function_name : str, optional The name of the transfer function transfer_function : function, optional The transfer function to be evaluated lut_directory : str or unicode The directory to use when generating LUTs lut_resolution_1d : int The resolution of generated 1D LUTs from_reference_values : list of matrices List of matrices to convert from the reference colorspace to this space to_reference_values : list of matrices List of matrices to convert to the reference colorspace from this space aliases : list of str Aliases for this colorspace Returns ------- ColorSpace A *Matrix and LUT1D Transform*-based ColorSpace representing a transfer function and matrix
Creates a ColorSpace that uses transfer functions encoded as 1D LUTs and matrices
[ "Creates", "a", "ColorSpace", "that", "uses", "transfer", "functions", "encoded", "as", "1D", "LUTs", "and", "matrices" ]
def create_matrix_plus_transfer_colorspace( name='matrix_plus_transfer', transfer_function_name='transfer_function', transfer_function=lambda x: x, lut_directory='/tmp', lut_resolution_1d=1024, from_reference_values=None, to_reference_values=None, aliases=None): """ Creates a ColorSpace that uses transfer functions encoded as 1D LUTs and matrices Parameters ---------- name : str, optional The name of the colorspace transfer_function_name : str, optional The name of the transfer function transfer_function : function, optional The transfer function to be evaluated lut_directory : str or unicode The directory to use when generating LUTs lut_resolution_1d : int The resolution of generated 1D LUTs from_reference_values : list of matrices List of matrices to convert from the reference colorspace to this space to_reference_values : list of matrices List of matrices to convert to the reference colorspace from this space aliases : list of str Aliases for this colorspace Returns ------- ColorSpace A *Matrix and LUT1D Transform*-based ColorSpace representing a transfer function and matrix """ if from_reference_values is None: from_reference_values = [] if to_reference_values is None: to_reference_values = [] if aliases is None: aliases = [] cs = ColorSpace(name) cs.description = 'The %s color space' % name cs.aliases = aliases cs.equality_group = name cs.family = 'Utility' cs.is_data = False # A linear space needs allocation variables. cs.allocation_type = ocio.Constants.ALLOCATION_UNIFORM cs.allocation_vars = [0, 1] # Sampling the transfer function. data = array.array('f', '\0' * lut_resolution_1d * 4) for c in range(lut_resolution_1d): data[c] = transfer_function(c / (lut_resolution_1d - 1)) # Writing the sampled data to a *LUT*. lut = '%s_to_linear.spi1d' % transfer_function_name genlut.write_SPI_1d( os.path.join(lut_directory, lut), 0, 1, data, lut_resolution_1d, 1) # Creating the *to_reference* transforms. cs.to_reference_transforms = [] if to_reference_values: cs.to_reference_transforms.append({ 'type': 'lutFile', 'path': lut, 'interpolation': 'linear', 'direction': 'forward'}) for matrix in to_reference_values: cs.to_reference_transforms.append({ 'type': 'matrix', 'matrix': mat44_from_mat33(matrix), 'direction': 'forward'}) # Creating the *from_reference* transforms. cs.from_reference_transforms = [] if from_reference_values: for matrix in from_reference_values: cs.from_reference_transforms.append({ 'type': 'matrix', 'matrix': mat44_from_mat33(matrix), 'direction': 'forward'}) cs.from_reference_transforms.append({ 'type': 'lutFile', 'path': lut, 'interpolation': 'linear', 'direction': 'inverse'}) return cs
[ "def", "create_matrix_plus_transfer_colorspace", "(", "name", "=", "'matrix_plus_transfer'", ",", "transfer_function_name", "=", "'transfer_function'", ",", "transfer_function", "=", "lambda", "x", ":", "x", ",", "lut_directory", "=", "'/tmp'", ",", "lut_resolution_1d", "=", "1024", ",", "from_reference_values", "=", "None", ",", "to_reference_values", "=", "None", ",", "aliases", "=", "None", ")", ":", "if", "from_reference_values", "is", "None", ":", "from_reference_values", "=", "[", "]", "if", "to_reference_values", "is", "None", ":", "to_reference_values", "=", "[", "]", "if", "aliases", "is", "None", ":", "aliases", "=", "[", "]", "cs", "=", "ColorSpace", "(", "name", ")", "cs", ".", "description", "=", "'The %s color space'", "%", "name", "cs", ".", "aliases", "=", "aliases", "cs", ".", "equality_group", "=", "name", "cs", ".", "family", "=", "'Utility'", "cs", ".", "is_data", "=", "False", "# A linear space needs allocation variables.", "cs", ".", "allocation_type", "=", "ocio", ".", "Constants", ".", "ALLOCATION_UNIFORM", "cs", ".", "allocation_vars", "=", "[", "0", ",", "1", "]", "# Sampling the transfer function.", "data", "=", "array", ".", "array", "(", "'f'", ",", "'\\0'", "*", "lut_resolution_1d", "*", "4", ")", "for", "c", "in", "range", "(", "lut_resolution_1d", ")", ":", "data", "[", "c", "]", "=", "transfer_function", "(", "c", "/", "(", "lut_resolution_1d", "-", "1", ")", ")", "# Writing the sampled data to a *LUT*.", "lut", "=", "'%s_to_linear.spi1d'", "%", "transfer_function_name", "genlut", ".", "write_SPI_1d", "(", "os", ".", "path", ".", "join", "(", "lut_directory", ",", "lut", ")", ",", "0", ",", "1", ",", "data", ",", "lut_resolution_1d", ",", "1", ")", "# Creating the *to_reference* transforms.", "cs", ".", "to_reference_transforms", "=", "[", "]", "if", "to_reference_values", ":", "cs", ".", "to_reference_transforms", ".", "append", "(", "{", "'type'", ":", "'lutFile'", ",", "'path'", ":", "lut", ",", "'interpolation'", ":", "'linear'", ",", "'direction'", ":", "'forward'", "}", ")", "for", "matrix", "in", "to_reference_values", ":", "cs", ".", "to_reference_transforms", ".", "append", "(", "{", "'type'", ":", "'matrix'", ",", "'matrix'", ":", "mat44_from_mat33", "(", "matrix", ")", ",", "'direction'", ":", "'forward'", "}", ")", "# Creating the *from_reference* transforms.", "cs", ".", "from_reference_transforms", "=", "[", "]", "if", "from_reference_values", ":", "for", "matrix", "in", "from_reference_values", ":", "cs", ".", "from_reference_transforms", ".", "append", "(", "{", "'type'", ":", "'matrix'", ",", "'matrix'", ":", "mat44_from_mat33", "(", "matrix", ")", ",", "'direction'", ":", "'forward'", "}", ")", "cs", ".", "from_reference_transforms", ".", "append", "(", "{", "'type'", ":", "'lutFile'", ",", "'path'", ":", "lut", ",", "'interpolation'", ":", "'linear'", ",", "'direction'", ":", "'inverse'", "}", ")", "return", "cs" ]
https://github.com/imageworks/OpenColorIO-Configs/blob/0bb079c08be410030669cbf5f19ff869b88af953/aces_1.0.2/python/aces_ocio/colorspaces/general.py#L183-L287
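A hedged usage sketch (the transfer function, names, directory, and the identity matrix are all illustrative, not real colorimetry):

def gamma24_to_linear(y):
    return pow(y, 2.4)

cs = create_matrix_plus_transfer_colorspace(
    name='Sketch - Curve plus Matrix',
    transfer_function_name='sketch_gamma24',
    transfer_function=gamma24_to_linear,
    lut_directory='/tmp/luts',
    lut_resolution_1d=4096,
    to_reference_values=[[1.0, 0.0, 0.0,
                          0.0, 1.0, 0.0,
                          0.0, 0.0, 1.0]],
    aliases=['sketch_cpm'])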
tensorflow/tensor2tensor
2a33b152d7835af66a6d20afe7961751047e28dd
tensor2tensor/utils/cloud_mlengine.py
python
configure_job
()
return job_spec
Construct jobSpec for ML Engine job.
Construct jobSpec for ML Engine job.
[ "Construct", "jobSpec", "for", "ML", "Engine", "job", "." ]
def configure_job(): """Construct jobSpec for ML Engine job.""" # See documentation: # https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#traininginput training_input = { "pythonModule": "tensor2tensor.bin.t2t_trainer", "args": flags_as_args(), "region": text_encoder.native_to_unicode(default_region()), "runtimeVersion": RUNTIME_VERSION, "pythonVersion": "3.5" if sys.version_info.major == 3 else "2.7", "jobDir": FLAGS.output_dir, "scaleTier": "CUSTOM", "masterType": FLAGS.cloud_mlengine_master_type or get_default_master_type( num_gpus=FLAGS.worker_gpu) } if FLAGS.use_tpu: training_input["masterType"] = (FLAGS.cloud_mlengine_master_type or "standard") training_input["workerType"] = "cloud_tpu" training_input["workerCount"] = 1 if FLAGS.hparams_range: tf.logging.info("Configuring hyperparameter tuning.") training_input["hyperparameters"] = configure_autotune( FLAGS.hparams_range, FLAGS.autotune_objective, FLAGS.autotune_maximize, FLAGS.autotune_max_trials, FLAGS.autotune_parallel_trials, ) timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") job_spec = { "jobId": "%s_%s_t2t_%s" % (FLAGS.model, FLAGS.problem, timestamp), "labels": { "model": FLAGS.model, "problem": FLAGS.problem, "hparams": FLAGS.hparams_set }, "trainingInput": training_input, } return job_spec
[ "def", "configure_job", "(", ")", ":", "# See documentation:", "# https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#traininginput", "training_input", "=", "{", "\"pythonModule\"", ":", "\"tensor2tensor.bin.t2t_trainer\"", ",", "\"args\"", ":", "flags_as_args", "(", ")", ",", "\"region\"", ":", "text_encoder", ".", "native_to_unicode", "(", "default_region", "(", ")", ")", ",", "\"runtimeVersion\"", ":", "RUNTIME_VERSION", ",", "\"pythonVersion\"", ":", "\"3.5\"", "if", "sys", ".", "version_info", ".", "major", "==", "3", "else", "\"2.7\"", ",", "\"jobDir\"", ":", "FLAGS", ".", "output_dir", ",", "\"scaleTier\"", ":", "\"CUSTOM\"", ",", "\"masterType\"", ":", "FLAGS", ".", "cloud_mlengine_master_type", "or", "get_default_master_type", "(", "num_gpus", "=", "FLAGS", ".", "worker_gpu", ")", "}", "if", "FLAGS", ".", "use_tpu", ":", "training_input", "[", "\"masterType\"", "]", "=", "(", "FLAGS", ".", "cloud_mlengine_master_type", "or", "\"standard\"", ")", "training_input", "[", "\"workerType\"", "]", "=", "\"cloud_tpu\"", "training_input", "[", "\"workerCount\"", "]", "=", "1", "if", "FLAGS", ".", "hparams_range", ":", "tf", ".", "logging", ".", "info", "(", "\"Configuring hyperparameter tuning.\"", ")", "training_input", "[", "\"hyperparameters\"", "]", "=", "configure_autotune", "(", "FLAGS", ".", "hparams_range", ",", "FLAGS", ".", "autotune_objective", ",", "FLAGS", ".", "autotune_maximize", ",", "FLAGS", ".", "autotune_max_trials", ",", "FLAGS", ".", "autotune_parallel_trials", ",", ")", "timestamp", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%Y%m%d_%H%M%S\"", ")", "job_spec", "=", "{", "\"jobId\"", ":", "\"%s_%s_t2t_%s\"", "%", "(", "FLAGS", ".", "model", ",", "FLAGS", ".", "problem", ",", "timestamp", ")", ",", "\"labels\"", ":", "{", "\"model\"", ":", "FLAGS", ".", "model", ",", "\"problem\"", ":", "FLAGS", ".", "problem", ",", "\"hparams\"", ":", "FLAGS", ".", "hparams_set", "}", ",", "\"trainingInput\"", ":", "training_input", ",", "}", "return", "job_spec" ]
https://github.com/tensorflow/tensor2tensor/blob/2a33b152d7835af66a6d20afe7961751047e28dd/tensor2tensor/utils/cloud_mlengine.py#L130-L170
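The returned job_spec is what gets posted to the ML Engine REST API; a hedged sketch of the submission call (the project id is a placeholder; discovery.build and projects().jobs().create are the standard googleapiclient surface):

from googleapiclient import discovery

job_spec = configure_job()
ml = discovery.build('ml', 'v1')
request = ml.projects().jobs().create(
    body=job_spec, parent='projects/my-gcp-project')
response = request.execute()
print(response)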
digidotcom/xbee-python
0757f4be0017530c205175fbee8f9f61be9614d1
digi/xbee/packets/devicecloud.py
python
SendDataRequestPacket.path
(self)
return self.__path
Returns the path of the file to upload to Device Cloud. Returns: String: the path of the file to upload to Device Cloud.
Returns the path of the file to upload to Device Cloud.
[ "Returns", "the", "path", "of", "the", "file", "to", "upload", "to", "Device", "Cloud", "." ]
def path(self): """ Returns the path of the file to upload to Device Cloud. Returns: String: the path of the file to upload to Device Cloud. """ return self.__path
[ "def", "path", "(", "self", ")", ":", "return", "self", ".", "__path" ]
https://github.com/digidotcom/xbee-python/blob/0757f4be0017530c205175fbee8f9f61be9614d1/digi/xbee/packets/devicecloud.py#L822-L829
collinsctk/PyQYT
7af3673955f94ff1b2df2f94220cd2dab2e252af
ExtentionPackages/pysnmp/hlapi/twisted/ntforg.py
python
sendNotification
(snmpEngine, authData, transportTarget, contextData, notifyType, varBinds, **options)
return deferred
Sends SNMP notification. Based on passed parameters, prepares SNMP TRAP or INFORM message (:RFC:`1905#section-4.2.6`) and schedules its transmission by :mod:`twisted` I/O framework at a later point of time. Parameters ---------- snmpEngine : :py:class:`~pysnmp.hlapi.SnmpEngine` Class instance representing SNMP engine. authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData` Class instance representing SNMP credentials. transportTarget : :py:class:`~pysnmp.hlapi.twisted.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.twisted.Udp6TransportTarget` Class instance representing transport type along with SNMP peer address. contextData : :py:class:`~pysnmp.hlapi.ContextData` Class instance representing SNMP ContextEngineId and ContextName values. notifyType : str Indicates type of notification to be sent. Recognized literal values are *trap* or *inform*. varBinds: tuple Single :py:class:`~pysnmp.smi.rfc1902.NotificationType` class instance representing a minimum sequence of MIB variables required for particular notification type. Alternatively, a sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` objects could be passed instead. In the latter case it is up to the user to ensure proper Notification PDU contents. Other Parameters ---------------- \*\*options : Request options: * `lookupMib` - load MIB and resolve response MIB variables at the cost of slightly reduced performance. Default is `True`. Returns ------- deferred : :class:`~twisted.internet.defer.Deferred` Twisted Deferred object representing work-in-progress. User is expected to attach his own `success` and `error` callback functions to the Deferred object though :meth:`~twisted.internet.defer.Deferred.addCallbacks` method. Raises ------ PySnmpError Or its derivative indicating that an error occurred while performing SNMP operation. Notes ----- User `success` callback is called with the following tuple as its first argument: * errorStatus (str) : True value indicates SNMP PDU error. * errorIndex (int) : Non-zero value refers to `varBinds[errorIndex-1]` * varBinds (tuple) : A sequence of :class:`~pysnmp.smi.rfc1902.ObjectType` class instances representing MIB variables returned in SNMP response. User `error` callback is called with `errorIndication` object wrapped in :class:`~twisted.python.failure.Failure` object. Examples -------- >>> from twisted.internet.task import react >>> from pysnmp.hlapi.twisted import * >>> >>> def success((errorStatus, errorIndex, varBinds)): ... print(errorStatus, errorIndex, varBind) ... >>> def failure(errorIndication): ... print(errorIndication) ... >>> def run(reactor): ... d = sendNotification(SnmpEngine(), ... CommunityData('public'), ... UdpTransportTarget(('demo.snmplabs.com', 162)), ... ContextData(), ... 'trap', ... NotificationType(ObjectIdentity('IF-MIB', 'linkDown'))) ... d.addCallback(success).addErrback(failure) ... return d ... >>> react(run) (0, 0, []) >>>
Sends SNMP notification.
[ "Sends", "SNMP", "notification", "." ]
def sendNotification(snmpEngine, authData, transportTarget, contextData, notifyType, varBinds, **options): """Sends SNMP notification. Based on passed parameters, prepares SNMP TRAP or INFORM message (:RFC:`1905#section-4.2.6`) and schedules its transmission by :mod:`twisted` I/O framework at a later point of time. Parameters ---------- snmpEngine : :py:class:`~pysnmp.hlapi.SnmpEngine` Class instance representing SNMP engine. authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData` Class instance representing SNMP credentials. transportTarget : :py:class:`~pysnmp.hlapi.twisted.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.twisted.Udp6TransportTarget` Class instance representing transport type along with SNMP peer address. contextData : :py:class:`~pysnmp.hlapi.ContextData` Class instance representing SNMP ContextEngineId and ContextName values. notifyType : str Indicates type of notification to be sent. Recognized literal values are *trap* or *inform*. varBinds: tuple Single :py:class:`~pysnmp.smi.rfc1902.NotificationType` class instance representing a minimum sequence of MIB variables required for particular notification type. Alternatively, a sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` objects could be passed instead. In the latter case it is up to the user to ensure proper Notification PDU contents. Other Parameters ---------------- \*\*options : Request options: * `lookupMib` - load MIB and resolve response MIB variables at the cost of slightly reduced performance. Default is `True`. Returns ------- deferred : :class:`~twisted.internet.defer.Deferred` Twisted Deferred object representing work-in-progress. User is expected to attach his own `success` and `error` callback functions to the Deferred object though :meth:`~twisted.internet.defer.Deferred.addCallbacks` method. Raises ------ PySnmpError Or its derivative indicating that an error occurred while performing SNMP operation. Notes ----- User `success` callback is called with the following tuple as its first argument: * errorStatus (str) : True value indicates SNMP PDU error. * errorIndex (int) : Non-zero value refers to `varBinds[errorIndex-1]` * varBinds (tuple) : A sequence of :class:`~pysnmp.smi.rfc1902.ObjectType` class instances representing MIB variables returned in SNMP response. User `error` callback is called with `errorIndication` object wrapped in :class:`~twisted.python.failure.Failure` object. Examples -------- >>> from twisted.internet.task import react >>> from pysnmp.hlapi.twisted import * >>> >>> def success((errorStatus, errorIndex, varBinds)): ... print(errorStatus, errorIndex, varBind) ... >>> def failure(errorIndication): ... print(errorIndication) ... >>> def run(reactor): ... d = sendNotification(SnmpEngine(), ... CommunityData('public'), ... UdpTransportTarget(('demo.snmplabs.com', 162)), ... ContextData(), ... 'trap', ... NotificationType(ObjectIdentity('IF-MIB', 'linkDown'))) ... d.addCallback(success).addErrback(failure) ... return d ... >>> react(run) (0, 0, []) >>> """ def __cbFun(snmpEngine, sendRequestHandle, errorIndication, errorStatus, errorIndex, varBinds, cbCtx): lookupMib, deferred = cbCtx if errorIndication: deferred.errback(Failure(errorIndication)) else: deferred.callback( (errorStatus, errorIndex, vbProcessor.unmakeVarBinds(snmpEngine, varBinds, lookupMib)) ) notifyName = lcd.configure( snmpEngine, authData, transportTarget, notifyType ) def __trapFun(deferred): deferred.callback((0, 0, [])) deferred = Deferred() ntforg.NotificationOriginator().sendVarBinds( snmpEngine, notifyName, contextData.contextEngineId, contextData.contextName, vbProcessor.makeVarBinds(snmpEngine, varBinds), __cbFun, (options.get('lookupMib', True), deferred) ) if notifyType == 'trap': reactor.callLater(0, __trapFun, deferred) return deferred
[ "def", "sendNotification", "(", "snmpEngine", ",", "authData", ",", "transportTarget", ",", "contextData", ",", "notifyType", ",", "varBinds", ",", "*", "*", "options", ")", ":", "def", "__cbFun", "(", "snmpEngine", ",", "sendRequestHandle", ",", "errorIndication", ",", "errorStatus", ",", "errorIndex", ",", "varBinds", ",", "cbCtx", ")", ":", "lookupMib", ",", "deferred", "=", "cbCtx", "if", "errorIndication", ":", "deferred", ".", "errback", "(", "Failure", "(", "errorIndication", ")", ")", "else", ":", "deferred", ".", "callback", "(", "(", "errorStatus", ",", "errorIndex", ",", "vbProcessor", ".", "unmakeVarBinds", "(", "snmpEngine", ",", "varBinds", ",", "lookupMib", ")", ")", ")", "notifyName", "=", "lcd", ".", "configure", "(", "snmpEngine", ",", "authData", ",", "transportTarget", ",", "notifyType", ")", "def", "__trapFun", "(", "deferred", ")", ":", "deferred", ".", "callback", "(", "(", "0", ",", "0", ",", "[", "]", ")", ")", "deferred", "=", "Deferred", "(", ")", "ntforg", ".", "NotificationOriginator", "(", ")", ".", "sendVarBinds", "(", "snmpEngine", ",", "notifyName", ",", "contextData", ".", "contextEngineId", ",", "contextData", ".", "contextName", ",", "vbProcessor", ".", "makeVarBinds", "(", "snmpEngine", ",", "varBinds", ")", ",", "__cbFun", ",", "(", "options", ".", "get", "(", "'lookupMib'", ",", "True", ")", ",", "deferred", ")", ")", "if", "notifyType", "==", "'trap'", ":", "reactor", ".", "callLater", "(", "0", ",", "__trapFun", ",", "deferred", ")", "return", "deferred" ]
https://github.com/collinsctk/PyQYT/blob/7af3673955f94ff1b2df2f94220cd2dab2e252af/ExtentionPackages/pysnmp/hlapi/twisted/ntforg.py#L23-L153
seveas/python-networkmanager
0c2e4334293fc4f5d79a449b3779004d26a44195
NetworkManager.py
python
SignalDispatcher.setup_signals
(self)
[]
def setup_signals(self): if not self.setup: bus = dbus.SystemBus() for interface in self.interfaces: bus.add_signal_receiver(self.handle_signal, dbus_interface=interface, interface_keyword='interface', member_keyword='signal', path_keyword='path') self.setup = True self.listen_for_restarts()
[ "def", "setup_signals", "(", "self", ")", ":", "if", "not", "self", ".", "setup", ":", "bus", "=", "dbus", ".", "SystemBus", "(", ")", "for", "interface", "in", "self", ".", "interfaces", ":", "bus", ".", "add_signal_receiver", "(", "self", ".", "handle_signal", ",", "dbus_interface", "=", "interface", ",", "interface_keyword", "=", "'interface'", ",", "member_keyword", "=", "'signal'", ",", "path_keyword", "=", "'path'", ")", "self", ".", "setup", "=", "True", "self", ".", "listen_for_restarts", "(", ")" ]
https://github.com/seveas/python-networkmanager/blob/0c2e4334293fc4f5d79a449b3779004d26a44195/NetworkManager.py#L32-L38
sabri-zaki/EasY_HaCk
2a39ac384dd0d6fc51c0dd22e8d38cece683fdb9
.modules/.sqlmap/thirdparty/odict/odict.py
python
_OrderedDict.setvalues
(self, values)
You can pass in a list of values, which will replace the current list. The value list must be the same len as the OrderedDict. (Or a ``ValueError`` is raised.) >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d.setvalues((1, 2, 3)) >>> d OrderedDict([(1, 1), (3, 2), (2, 3)]) >>> d.setvalues([6]) Traceback (most recent call last): ValueError: Value list is not the same length as the OrderedDict.
You can pass in a list of values, which will replace the current list. The value list must be the same len as the OrderedDict.
[ "You", "can", "pass", "in", "a", "list", "of", "values", "which", "will", "replace", "the", "current", "list", ".", "The", "value", "list", "must", "be", "the", "same", "len", "as", "the", "OrderedDict", "." ]
def setvalues(self, values): """ You can pass in a list of values, which will replace the current list. The value list must be the same len as the OrderedDict. (Or a ``ValueError`` is raised.) >>> d = OrderedDict(((1, 3), (3, 2), (2, 1))) >>> d.setvalues((1, 2, 3)) >>> d OrderedDict([(1, 1), (3, 2), (2, 3)]) >>> d.setvalues([6]) Traceback (most recent call last): ValueError: Value list is not the same length as the OrderedDict. """ if len(values) != len(self): # FIXME: correct error to raise? raise ValueError('Value list is not the same length as the ' 'OrderedDict.') self.update(zip(self, values))
[ "def", "setvalues", "(", "self", ",", "values", ")", ":", "if", "len", "(", "values", ")", "!=", "len", "(", "self", ")", ":", "# FIXME: correct error to raise?", "raise", "ValueError", "(", "'Value list is not the same length as the '", "'OrderedDict.'", ")", "self", ".", "update", "(", "zip", "(", "self", ",", "values", ")", ")" ]
https://github.com/sabri-zaki/EasY_HaCk/blob/2a39ac384dd0d6fc51c0dd22e8d38cece683fdb9/.modules/.sqlmap/thirdparty/odict/odict.py#L787-L806
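The setvalues record above is essentially dict.update over zip(keys, values) with a length guard. A minimal sketch of the same behavior on a plain Python 3.7+ dict (which preserves insertion order); the helper name is illustrative and not part of the vendored odict module:

def set_values(d, values):
    # Replace the values of d in key order, keeping the keys unchanged.
    if len(values) != len(d):
        raise ValueError('Value list is not the same length as the dict.')
    d.update(zip(d, values))

d = {1: 3, 3: 2, 2: 1}
set_values(d, (1, 2, 3))
assert d == {1: 1, 3: 2, 2: 3}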
cooelf/SemBERT
f849452f864b5dd47f94e2911cffc15e9f6a5a2a
run_classifier.py
python
SstProcessor.get_dev_examples
(self, data_dir)
return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv_tag")), "dev")
See base class.
See base class.
[ "See", "base", "class", "." ]
def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv_tag")), "dev")
[ "def", "get_dev_examples", "(", "self", ",", "data_dir", ")", ":", "return", "self", ".", "_create_examples", "(", "self", ".", "_read_tsv", "(", "os", ".", "path", ".", "join", "(", "data_dir", ",", "\"dev.tsv_tag\"", ")", ")", ",", "\"dev\"", ")" ]
https://github.com/cooelf/SemBERT/blob/f849452f864b5dd47f94e2911cffc15e9f6a5a2a/run_classifier.py#L188-L191
tobyyouup/conv_seq2seq
78a6e4e62a4c57a5caa9d584033a85e810fd726e
seq2seq/data/input_pipeline.py
python
make_input_pipeline_from_def
(def_dict, mode, **kwargs)
return pipeline_class(params=params, mode=mode)
Creates an InputPipeline object from a dictionary definition. Args: def_dict: A dictionary defining the input pipeline. It must have "class" and "params" that correspond to the class name and constructor parameters of an InputPipeline, respectively. mode: A value in tf.contrib.learn.ModeKeys Returns: A new InputPipeline object
Creates an InputPipeline object from a dictionary definition.
[ "Creates", "an", "InputPipeline", "object", "from", "a", "dictionary", "definition", "." ]
def make_input_pipeline_from_def(def_dict, mode, **kwargs): """Creates an InputPipeline object from a dictionary definition. Args: def_dict: A dictionary defining the input pipeline. It must have "class" and "params" that correspond to the class name and constructor parameters of an InputPipeline, respectively. mode: A value in tf.contrib.learn.ModeKeys Returns: A new InputPipeline object """ if not "class" in def_dict: raise ValueError("Input Pipeline definition must have a class property.") class_ = def_dict["class"] if not hasattr(sys.modules[__name__], class_): raise ValueError("Invalid Input Pipeline class: {}".format(class_)) pipeline_class = getattr(sys.modules[__name__], class_) # Constructor arguments params = {} if "params" in def_dict: params.update(def_dict["params"]) params.update(kwargs) return pipeline_class(params=params, mode=mode)
[ "def", "make_input_pipeline_from_def", "(", "def_dict", ",", "mode", ",", "*", "*", "kwargs", ")", ":", "if", "not", "\"class\"", "in", "def_dict", ":", "raise", "ValueError", "(", "\"Input Pipeline definition must have a class property.\"", ")", "class_", "=", "def_dict", "[", "\"class\"", "]", "if", "not", "hasattr", "(", "sys", ".", "modules", "[", "__name__", "]", ",", "class_", ")", ":", "raise", "ValueError", "(", "\"Invalid Input Pipeline class: {}\"", ".", "format", "(", "class_", ")", ")", "pipeline_class", "=", "getattr", "(", "sys", ".", "modules", "[", "__name__", "]", ",", "class_", ")", "# Constructor arguments", "params", "=", "{", "}", "if", "\"params\"", "in", "def_dict", ":", "params", ".", "update", "(", "def_dict", "[", "\"params\"", "]", ")", "params", ".", "update", "(", "kwargs", ")", "return", "pipeline_class", "(", "params", "=", "params", ",", "mode", "=", "mode", ")" ]
https://github.com/tobyyouup/conv_seq2seq/blob/78a6e4e62a4c57a5caa9d584033a85e810fd726e/seq2seq/data/input_pipeline.py#L39-L66
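The function above is a name-based factory: it resolves a class in its own module via getattr(sys.modules[__name__], ...) and merges the "params" dict with keyword overrides. A self-contained sketch of that dispatch pattern under illustrative names (not seq2seq's actual pipeline classes):

import sys

class ParallelTextInputPipeline:
    # Stand-in for a concrete pipeline class living in this module.
    def __init__(self, params, mode):
        self.params, self.mode = params, mode

def make_from_def(def_dict, mode, **kwargs):
    if "class" not in def_dict:
        raise ValueError("Input pipeline definition must have a class property.")
    cls = getattr(sys.modules[__name__], def_dict["class"], None)
    if cls is None:
        raise ValueError("Invalid input pipeline class: {}".format(def_dict["class"]))
    params = dict(def_dict.get("params", {}), **kwargs)  # kwargs win on conflicts
    return cls(params=params, mode=mode)

pipe = make_from_def({"class": "ParallelTextInputPipeline"}, mode="train")
assert pipe.mode == "train"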
Chaffelson/nipyapi
d3b186fd701ce308c2812746d98af9120955e810
nipyapi/nifi/models/process_group_dto.py
python
ProcessGroupDTO.locally_modified_count
(self, locally_modified_count)
Sets the locally_modified_count of this ProcessGroupDTO. The number of locally modified versioned process groups in the process group. :param locally_modified_count: The locally_modified_count of this ProcessGroupDTO. :type: int
Sets the locally_modified_count of this ProcessGroupDTO. The number of locally modified versioned process groups in the process group.
[ "Sets", "the", "locally_modified_count", "of", "this", "ProcessGroupDTO", ".", "The", "number", "of", "locally", "modified", "versioned", "process", "groups", "in", "the", "process", "group", "." ]
def locally_modified_count(self, locally_modified_count): """ Sets the locally_modified_count of this ProcessGroupDTO. The number of locally modified versioned process groups in the process group. :param locally_modified_count: The locally_modified_count of this ProcessGroupDTO. :type: int """ self._locally_modified_count = locally_modified_count
[ "def", "locally_modified_count", "(", "self", ",", "locally_modified_count", ")", ":", "self", ".", "_locally_modified_count", "=", "locally_modified_count" ]
https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/nifi/models/process_group_dto.py#L713-L722
larryhastings/gilectomy
4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a
Lib/datetime.py
python
datetime.timetz
(self)
return time(self.hour, self.minute, self.second, self.microsecond, self._tzinfo)
Return the time part, with same tzinfo.
Return the time part, with same tzinfo.
[ "Return", "the", "time", "part", "with", "same", "tzinfo", "." ]
def timetz(self): "Return the time part, with same tzinfo." return time(self.hour, self.minute, self.second, self.microsecond, self._tzinfo)
[ "def", "timetz", "(", "self", ")", ":", "return", "time", "(", "self", ".", "hour", ",", "self", ".", "minute", ",", "self", ".", "second", ",", "self", ".", "microsecond", ",", "self", ".", "_tzinfo", ")" ]
https://github.com/larryhastings/gilectomy/blob/4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a/Lib/datetime.py#L1487-L1490
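timetz is one of the few stdlib one-liners in this batch, so a usage example is runnable directly:

from datetime import datetime, timezone

dt = datetime(2021, 5, 1, 12, 30, 15, tzinfo=timezone.utc)
t = dt.timetz()
assert (t.hour, t.minute, t.second) == (12, 30, 15)
assert t.tzinfo is dt.tzinfo  # tzinfo is carried over, unlike .time()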
Juniper/py-junos-eznc
fd81d476e37ac1a234b503ab77f76ec658d04590
lib/jnpr/junos/utils/fs.py
python
FS.tgz
(self, from_path, tgz_path)
return rsp.text
Create a file called **tgz_path** that is the tar-gzip of the given directory specified **from_path**. :param str from_path: file-path to directory of files :param str tgz_path: file-path name of tgz file to create :returns: ``True`` if OK, error-msg (str) otherwise
Create a file called **tgz_path** that is the tar-gzip of the given directory specified **from_path**.
[ "Create", "a", "file", "called", "**", "tgz_path", "**", "that", "is", "the", "tar", "-", "gzip", "of", "the", "given", "directory", "specified", "**", "from_path", "**", "." ]
def tgz(self, from_path, tgz_path): """ Create a file called **tgz_path** that is the tar-gzip of the given directory specified **from_path**. :param str from_path: file-path to directory of files :param str tgz_path: file-path name of tgz file to create :returns: ``True`` if OK, error-msg (str) otherwise """ rsp = self._dev.rpc.file_archive( compress=True, source=from_path, destination=tgz_path ) # if the rsp is True, then the command executed OK. if rsp is True: return True # otherwise, return the error string to the caller return rsp.text
[ "def", "tgz", "(", "self", ",", "from_path", ",", "tgz_path", ")", ":", "rsp", "=", "self", ".", "_dev", ".", "rpc", ".", "file_archive", "(", "compress", "=", "True", ",", "source", "=", "from_path", ",", "destination", "=", "tgz_path", ")", "# if the rsp is True, then the command executed OK.", "if", "rsp", "is", "True", ":", "return", "True", "# otherwise, return the error string to the caller", "return", "rsp", ".", "text" ]
https://github.com/Juniper/py-junos-eznc/blob/fd81d476e37ac1a234b503ab77f76ec658d04590/lib/jnpr/junos/utils/fs.py#L433-L452
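A hedged usage sketch for the record above; the device credentials and paths are placeholders, and the call only does something useful against a real Junos device:

from jnpr.junos import Device
from jnpr.junos.utils.fs import FS

with Device(host='router1', user='admin', password='secret') as dev:
    fs = FS(dev)
    ok = fs.tgz('/var/log', '/var/tmp/logs.tgz')
    if ok is not True:
        print('archive failed:', ok)  # per the docstring, ok holds the error text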
scikit-learn/scikit-learn
1d1aadd0711b87d2a11c80aad15df6f8cf156712
sklearn/neural_network/_multilayer_perceptron.py
python
BaseMultilayerPerceptron._forward_pass
(self, activations)
return activations
Perform a forward pass on the network by computing the values of the neurons in the hidden layers and the output layer. Parameters ---------- activations : list, length = n_layers - 1 The ith element of the list holds the values of the ith layer.
Perform a forward pass on the network by computing the values of the neurons in the hidden layers and the output layer.
[ "Perform", "a", "forward", "pass", "on", "the", "network", "by", "computing", "the", "values", "of", "the", "neurons", "in", "the", "hidden", "layers", "and", "the", "output", "layer", "." ]
def _forward_pass(self, activations): """Perform a forward pass on the network by computing the values of the neurons in the hidden layers and the output layer. Parameters ---------- activations : list, length = n_layers - 1 The ith element of the list holds the values of the ith layer. """ hidden_activation = ACTIVATIONS[self.activation] # Iterate over the hidden layers for i in range(self.n_layers_ - 1): activations[i + 1] = safe_sparse_dot(activations[i], self.coefs_[i]) activations[i + 1] += self.intercepts_[i] # For the hidden layers if (i + 1) != (self.n_layers_ - 1): hidden_activation(activations[i + 1]) # For the last layer output_activation = ACTIVATIONS[self.out_activation_] output_activation(activations[i + 1]) return activations
[ "def", "_forward_pass", "(", "self", ",", "activations", ")", ":", "hidden_activation", "=", "ACTIVATIONS", "[", "self", ".", "activation", "]", "# Iterate over the hidden layers", "for", "i", "in", "range", "(", "self", ".", "n_layers_", "-", "1", ")", ":", "activations", "[", "i", "+", "1", "]", "=", "safe_sparse_dot", "(", "activations", "[", "i", "]", ",", "self", ".", "coefs_", "[", "i", "]", ")", "activations", "[", "i", "+", "1", "]", "+=", "self", ".", "intercepts_", "[", "i", "]", "# For the hidden layers", "if", "(", "i", "+", "1", ")", "!=", "(", "self", ".", "n_layers_", "-", "1", ")", ":", "hidden_activation", "(", "activations", "[", "i", "+", "1", "]", ")", "# For the last layer", "output_activation", "=", "ACTIVATIONS", "[", "self", ".", "out_activation_", "]", "output_activation", "(", "activations", "[", "i", "+", "1", "]", ")", "return", "activations" ]
https://github.com/scikit-learn/scikit-learn/blob/1d1aadd0711b87d2a11c80aad15df6f8cf156712/sklearn/neural_network/_multilayer_perceptron.py#L118-L141
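The loop above is a plain chain of affine maps, with the hidden activation applied on every layer except the last and the output activation on the last. A minimal NumPy sketch of the same computation, independent of scikit-learn's internals (the activation choices here are illustrative):

import numpy as np

def forward_pass(x, coefs, intercepts, hidden=np.tanh, out=lambda z: z):
    # activations[i] holds the values of layer i, as in the record above.
    activations = [x]
    n_layers = len(coefs) + 1
    for i in range(n_layers - 1):
        z = activations[i] @ coefs[i] + intercepts[i]
        activations.append(hidden(z) if i + 1 != n_layers - 1 else out(z))
    return activations

rng = np.random.default_rng(0)
coefs = [rng.normal(size=(4, 8)), rng.normal(size=(8, 2))]
intercepts = [np.zeros(8), np.zeros(2)]
acts = forward_pass(rng.normal(size=(5, 4)), coefs, intercepts)
assert [a.shape for a in acts] == [(5, 4), (5, 8), (5, 2)]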
tensorflow/federated
5a60a032360087b8f4c7fcfd97ed1c0131c3eac3
tensorflow_federated/python/core/backends/iree/executor.py
python
IreeExecutor.create_value
(self, value, type_spec=None)
return IreeValue(value, type_spec, self._backend_info)
Embeds `value` of type `type_spec` within this executor. Args: value: An object that represents the value to embed within the executor. type_spec: The `tff.Type` of the value represented by this object, or something convertible to it. Can optionally be `None` if `value` is an instance of `typed_object.TypedObject`. Returns: An instance of `IreeValue`. Raises: TypeError: If the arguments are of the wrong types. ValueError: If the type was not specified and cannot be determined from the value.
Embeds `value` of type `type_spec` within this executor.
[ "Embeds", "value", "of", "type", "type_spec", "within", "this", "executor", "." ]
async def create_value(self, value, type_spec=None): """Embeds `value` of type `type_spec` within this executor. Args: value: An object that represents the value to embed within the executor. type_spec: The `tff.Type` of the value represented by this object, or something convertible to it. Can optionally be `None` if `value` is an instance of `typed_object.TypedObject`. Returns: An instance of `IreeValue`. Raises: TypeError: If the arguments are of the wrong types. ValueError: If the type was not specified and cannot be determined from the value. """ return IreeValue(value, type_spec, self._backend_info)
[ "async", "def", "create_value", "(", "self", ",", "value", ",", "type_spec", "=", "None", ")", ":", "return", "IreeValue", "(", "value", ",", "type_spec", ",", "self", ".", "_backend_info", ")" ]
https://github.com/tensorflow/federated/blob/5a60a032360087b8f4c7fcfd97ed1c0131c3eac3/tensorflow_federated/python/core/backends/iree/executor.py#L161-L178
nats-io/nats.py
49635bf58b1c888c66fa37569a9248b1a83a6c0a
benchmark/parser_perf.py
python
parse_msgs
(max_msgs=1, nbytes=1)
[]
def parse_msgs(max_msgs=1, nbytes=1): buf = b''.join([generate_msg("foo", nbytes) for i in range(0, max_msgs)]) print("--- buffer size: {}".format(len(buf))) loop = asyncio.get_event_loop() ps = Parser(DummyNatsClient()) loop.run_until_complete(ps.parse(buf)) print("--- stats: ", ps.nc.stats)
[ "def", "parse_msgs", "(", "max_msgs", "=", "1", ",", "nbytes", "=", "1", ")", ":", "buf", "=", "b''", ".", "join", "(", "[", "generate_msg", "(", "\"foo\"", ",", "nbytes", ")", "for", "i", "in", "range", "(", "0", ",", "max_msgs", ")", "]", ")", "print", "(", "\"--- buffer size: {}\"", ".", "format", "(", "len", "(", "buf", ")", ")", ")", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "ps", "=", "Parser", "(", "DummyNatsClient", "(", ")", ")", "loop", ".", "run_until_complete", "(", "ps", ".", "parse", "(", "buf", ")", ")", "print", "(", "\"--- stats: \"", ",", "ps", ".", "nc", ".", "stats", ")" ]
https://github.com/nats-io/nats.py/blob/49635bf58b1c888c66fa37569a9248b1a83a6c0a/benchmark/parser_perf.py#L47-L53
crits/crits_services
c7abf91f1865d913cffad4b966599da204f8ae43
passivetotal_service/__init__.py
python
PassiveTotalService.get_config_details
(config)
return display_config
[]
def get_config_details(config): display_config = {} fields = forms.PassiveTotalConfigForm().fields for name, field in iteritems(fields): display_config[field.label] = config[name] return display_config
[ "def", "get_config_details", "(", "config", ")", ":", "display_config", "=", "{", "}", "fields", "=", "forms", ".", "PassiveTotalConfigForm", "(", ")", ".", "fields", "for", "name", ",", "field", "in", "iteritems", "(", "fields", ")", ":", "display_config", "[", "field", ".", "label", "]", "=", "config", "[", "name", "]", "return", "display_config" ]
https://github.com/crits/crits_services/blob/c7abf91f1865d913cffad4b966599da204f8ae43/passivetotal_service/__init__.py#L79-L84
trainindata/deploying-machine-learning-models
aaeb3e65d0a58ad583289aaa39b089f11d06a4eb
section-07-ci-and-publishing/model-package/regression_model/processing/features.py
python
Mapper.__init__
(self, variables: List[str], mappings: dict)
[]
def __init__(self, variables: List[str], mappings: dict): if not isinstance(variables, list): raise ValueError("variables should be a list") self.variables = variables self.mappings = mappings
[ "def", "__init__", "(", "self", ",", "variables", ":", "List", "[", "str", "]", ",", "mappings", ":", "dict", ")", ":", "if", "not", "isinstance", "(", "variables", ",", "list", ")", ":", "raise", "ValueError", "(", "\"variables should be a list\"", ")", "self", ".", "variables", "=", "variables", "self", ".", "mappings", "=", "mappings" ]
https://github.com/trainindata/deploying-machine-learning-models/blob/aaeb3e65d0a58ad583289aaa39b089f11d06a4eb/section-07-ci-and-publishing/model-package/regression_model/processing/features.py#L36-L42
zykls/whynot
86fd2349a83cd43c614b55f5bf2dfc9ece143081
whynot/simulators/zika/environments.py
python
observation_space
()
return spaces.Box(state_space_low, state_space_high, dtype=np.float64)
Return observation space, the positive orthant.
Return observation space, the positive orthant.
[ "Return", "observation", "space", "the", "positive", "orthant", "." ]
def observation_space(): """Return observation space, the positive orthant.""" state_dim = State.num_variables() state_space_low = np.zeros(state_dim) state_space_high = np.inf * np.ones(state_dim) return spaces.Box(state_space_low, state_space_high, dtype=np.float64)
[ "def", "observation_space", "(", ")", ":", "state_dim", "=", "State", ".", "num_variables", "(", ")", "state_space_low", "=", "np", ".", "zeros", "(", "state_dim", ")", "state_space_high", "=", "np", ".", "inf", "*", "np", ".", "ones", "(", "state_dim", ")", "return", "spaces", ".", "Box", "(", "state_space_low", ",", "state_space_high", ",", "dtype", "=", "np", ".", "float64", ")" ]
https://github.com/zykls/whynot/blob/86fd2349a83cd43c614b55f5bf2dfc9ece143081/whynot/simulators/zika/environments.py#L49-L54
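A hedged sketch of the same construction with a fixed dimension in place of State.num_variables(), assuming the gym package is installed:

import numpy as np
from gym import spaces

state_dim = 10  # placeholder for State.num_variables()
box = spaces.Box(np.zeros(state_dim), np.inf * np.ones(state_dim), dtype=np.float64)
assert box.contains(np.ones(state_dim))       # every non-negative state is valid
assert not box.contains(-np.ones(state_dim))  # negatives fall outside the orthant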
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/nest/media_source.py
python
NestEventMediaStore._get_devices
(self)
return devices
Return a mapping of nest device id to home assistant device id.
Return a mapping of nest device id to home assistant device id.
[ "Return", "a", "mapping", "of", "nest", "device", "id", "to", "home", "assistant", "device", "id", "." ]
async def _get_devices(self) -> Mapping[str, str]: """Return a mapping of nest device id to home assistant device id.""" device_registry = dr.async_get(self._hass) device_manager = await self._subscriber.async_get_device_manager() devices = {} for device in device_manager.devices.values(): if device_entry := device_registry.async_get_device( {(DOMAIN, device.name)} ): devices[device.name] = device_entry.id return devices
[ "async", "def", "_get_devices", "(", "self", ")", "->", "Mapping", "[", "str", ",", "str", "]", ":", "device_registry", "=", "dr", ".", "async_get", "(", "self", ".", "_hass", ")", "device_manager", "=", "await", "self", ".", "_subscriber", ".", "async_get_device_manager", "(", ")", "devices", "=", "{", "}", "for", "device", "in", "device_manager", ".", "devices", ".", "values", "(", ")", ":", "if", "device_entry", ":=", "device_registry", ".", "async_get_device", "(", "{", "(", "DOMAIN", ",", "device", ".", "name", ")", "}", ")", ":", "devices", "[", "device", ".", "name", "]", "=", "device_entry", ".", "id", "return", "devices" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/nest/media_source.py#L253-L263
Blizzard/s2protocol
4bfe857bb832eee12cc6307dd699e3b74bd7e1b2
s2protocol/versions/protocol77661.py
python
decode_replay_attributes_events
(contents)
return attributes
Decodes and yields each attribute from the contents byte string.
Decodes and yields each attribute from the contents byte string.
[ "Decodes", "and", "yields", "each", "attribute", "from", "the", "contents", "byte", "string", "." ]
def decode_replay_attributes_events(contents): """Decodes and yields each attribute from the contents byte string.""" buffer = BitPackedBuffer(contents, 'little') attributes = {} if not buffer.done(): attributes['source'] = buffer.read_bits(8) attributes['mapNamespace'] = buffer.read_bits(32) count = buffer.read_bits(32) attributes['scopes'] = {} while not buffer.done(): value = {} value['namespace'] = buffer.read_bits(32) value['attrid'] = attrid = buffer.read_bits(32) scope = buffer.read_bits(8) value['value'] = buffer.read_aligned_bytes(4)[::-1].strip(b'\x00') if not scope in attributes['scopes']: attributes['scopes'][scope] = {} if not attrid in attributes['scopes'][scope]: attributes['scopes'][scope][attrid] = [] attributes['scopes'][scope][attrid].append(value) return attributes
[ "def", "decode_replay_attributes_events", "(", "contents", ")", ":", "buffer", "=", "BitPackedBuffer", "(", "contents", ",", "'little'", ")", "attributes", "=", "{", "}", "if", "not", "buffer", ".", "done", "(", ")", ":", "attributes", "[", "'source'", "]", "=", "buffer", ".", "read_bits", "(", "8", ")", "attributes", "[", "'mapNamespace'", "]", "=", "buffer", ".", "read_bits", "(", "32", ")", "count", "=", "buffer", ".", "read_bits", "(", "32", ")", "attributes", "[", "'scopes'", "]", "=", "{", "}", "while", "not", "buffer", ".", "done", "(", ")", ":", "value", "=", "{", "}", "value", "[", "'namespace'", "]", "=", "buffer", ".", "read_bits", "(", "32", ")", "value", "[", "'attrid'", "]", "=", "attrid", "=", "buffer", ".", "read_bits", "(", "32", ")", "scope", "=", "buffer", ".", "read_bits", "(", "8", ")", "value", "[", "'value'", "]", "=", "buffer", ".", "read_aligned_bytes", "(", "4", ")", "[", ":", ":", "-", "1", "]", ".", "strip", "(", "b'\\x00'", ")", "if", "not", "scope", "in", "attributes", "[", "'scopes'", "]", ":", "attributes", "[", "'scopes'", "]", "[", "scope", "]", "=", "{", "}", "if", "not", "attrid", "in", "attributes", "[", "'scopes'", "]", "[", "scope", "]", ":", "attributes", "[", "'scopes'", "]", "[", "scope", "]", "[", "attrid", "]", "=", "[", "]", "attributes", "[", "'scopes'", "]", "[", "scope", "]", "[", "attrid", "]", ".", "append", "(", "value", ")", "return", "attributes" ]
https://github.com/Blizzard/s2protocol/blob/4bfe857bb832eee12cc6307dd699e3b74bd7e1b2/s2protocol/versions/protocol77661.py#L490-L510
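decode_replay_attributes_events leans entirely on a little-endian bit-packed reader. A minimal illustrative bit reader in that spirit (LSB-first within each byte; not Blizzard's BitPackedBuffer, whose alignment rules are more involved):

class BitReader:
    def __init__(self, data):
        self.data, self.pos = data, 0  # pos counts bits consumed so far

    def read_bits(self, n):
        value = 0
        for i in range(n):
            byte = self.data[self.pos // 8]
            value |= ((byte >> (self.pos % 8)) & 1) << i
            self.pos += 1
        return value

r = BitReader(bytes([0x34, 0x12]))
assert r.read_bits(16) == 0x1234  # low byte first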
geopandas/geopandas
8e7133aef9e6c0d2465e07e92d954e95dedd3881
geopandas/base.py
python
GeoPandasBase.intersection
(self, other, align=True)
return _binary_geo("intersection", self, other, align)
Returns a ``GeoSeries`` of the intersection of points in each aligned geometry with `other`. .. image:: ../../../_static/binary_geo-intersection.svg :align: center The operation works on a 1-to-1 row-wise manner: .. image:: ../../../_static/binary_op-01.svg :align: center Parameters ---------- other : Geoseries or geometric object The Geoseries (elementwise) or geometric object to find the intersection with. align : bool (default True) If True, automatically aligns GeoSeries based on their indices. If False, the order of elements is preserved. Returns ------- GeoSeries Examples -------- >>> from shapely.geometry import Polygon, LineString, Point >>> s = geopandas.GeoSeries( ... [ ... Polygon([(0, 0), (2, 2), (0, 2)]), ... Polygon([(0, 0), (2, 2), (0, 2)]), ... LineString([(0, 0), (2, 2)]), ... LineString([(2, 0), (0, 2)]), ... Point(0, 1), ... ], ... ) >>> s2 = geopandas.GeoSeries( ... [ ... Polygon([(0, 0), (1, 1), (0, 1)]), ... LineString([(1, 0), (1, 3)]), ... LineString([(2, 0), (0, 2)]), ... Point(1, 1), ... Point(0, 1), ... ], ... index=range(1, 6), ... ) >>> s 0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0.... 1 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0.... 2 LINESTRING (0.00000 0.00000, 2.00000 2.00000) 3 LINESTRING (2.00000 0.00000, 0.00000 2.00000) 4 POINT (0.00000 1.00000) dtype: geometry >>> s2 1 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0.... 2 LINESTRING (1.00000 0.00000, 1.00000 3.00000) 3 LINESTRING (2.00000 0.00000, 0.00000 2.00000) 4 POINT (1.00000 1.00000) 5 POINT (0.00000 1.00000) dtype: geometry We can also do intersection of each geometry and a single shapely geometry: .. image:: ../../../_static/binary_op-03.svg :align: center >>> s.intersection(Polygon([(0, 0), (1, 1), (0, 1)])) 0 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 1.... 1 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 1.... 2 LINESTRING (0.00000 0.00000, 1.00000 1.00000) 3 POINT (1.00000 1.00000) 4 POINT (0.00000 1.00000) dtype: geometry We can also check two GeoSeries against each other, row by row. The GeoSeries above have different indices. We can either align both GeoSeries based on index values and compare elements with the same index using ``align=True`` or ignore index and compare elements based on their matching order using ``align=False``: .. image:: ../../../_static/binary_op-02.svg >>> s.intersection(s2, align=True) 0 None 1 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 1.... 2 POINT (1.00000 1.00000) 3 LINESTRING (2.00000 0.00000, 0.00000 2.00000) 4 POINT EMPTY 5 None dtype: geometry >>> s.intersection(s2, align=False) 0 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 1.... 1 LINESTRING (1.00000 1.00000, 1.00000 2.00000) 2 POINT (1.00000 1.00000) 3 POINT (1.00000 1.00000) 4 POINT (0.00000 1.00000) dtype: geometry See Also -------- GeoSeries.difference GeoSeries.symmetric_difference GeoSeries.union
Returns a ``GeoSeries`` of the intersection of points in each aligned geometry with `other`.
[ "Returns", "a", "GeoSeries", "of", "the", "intersection", "of", "points", "in", "each", "aligned", "geometry", "with", "other", "." ]
def intersection(self, other, align=True): """Returns a ``GeoSeries`` of the intersection of points in each aligned geometry with `other`. .. image:: ../../../_static/binary_geo-intersection.svg :align: center The operation works on a 1-to-1 row-wise manner: .. image:: ../../../_static/binary_op-01.svg :align: center Parameters ---------- other : Geoseries or geometric object The Geoseries (elementwise) or geometric object to find the intersection with. align : bool (default True) If True, automatically aligns GeoSeries based on their indices. If False, the order of elements is preserved. Returns ------- GeoSeries Examples -------- >>> from shapely.geometry import Polygon, LineString, Point >>> s = geopandas.GeoSeries( ... [ ... Polygon([(0, 0), (2, 2), (0, 2)]), ... Polygon([(0, 0), (2, 2), (0, 2)]), ... LineString([(0, 0), (2, 2)]), ... LineString([(2, 0), (0, 2)]), ... Point(0, 1), ... ], ... ) >>> s2 = geopandas.GeoSeries( ... [ ... Polygon([(0, 0), (1, 1), (0, 1)]), ... LineString([(1, 0), (1, 3)]), ... LineString([(2, 0), (0, 2)]), ... Point(1, 1), ... Point(0, 1), ... ], ... index=range(1, 6), ... ) >>> s 0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0.... 1 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0.... 2 LINESTRING (0.00000 0.00000, 2.00000 2.00000) 3 LINESTRING (2.00000 0.00000, 0.00000 2.00000) 4 POINT (0.00000 1.00000) dtype: geometry >>> s2 1 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0.... 2 LINESTRING (1.00000 0.00000, 1.00000 3.00000) 3 LINESTRING (2.00000 0.00000, 0.00000 2.00000) 4 POINT (1.00000 1.00000) 5 POINT (0.00000 1.00000) dtype: geometry We can also do intersection of each geometry and a single shapely geometry: .. image:: ../../../_static/binary_op-03.svg :align: center >>> s.intersection(Polygon([(0, 0), (1, 1), (0, 1)])) 0 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 1.... 1 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 1.... 2 LINESTRING (0.00000 0.00000, 1.00000 1.00000) 3 POINT (1.00000 1.00000) 4 POINT (0.00000 1.00000) dtype: geometry We can also check two GeoSeries against each other, row by row. The GeoSeries above have different indices. We can either align both GeoSeries based on index values and compare elements with the same index using ``align=True`` or ignore index and compare elements based on their matching order using ``align=False``: .. image:: ../../../_static/binary_op-02.svg >>> s.intersection(s2, align=True) 0 None 1 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 1.... 2 POINT (1.00000 1.00000) 3 LINESTRING (2.00000 0.00000, 0.00000 2.00000) 4 POINT EMPTY 5 None dtype: geometry >>> s.intersection(s2, align=False) 0 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 1.... 1 LINESTRING (1.00000 1.00000, 1.00000 2.00000) 2 POINT (1.00000 1.00000) 3 POINT (1.00000 1.00000) 4 POINT (0.00000 1.00000) dtype: geometry See Also -------- GeoSeries.difference GeoSeries.symmetric_difference GeoSeries.union """ return _binary_geo("intersection", self, other, align)
[ "def", "intersection", "(", "self", ",", "other", ",", "align", "=", "True", ")", ":", "return", "_binary_geo", "(", "\"intersection\"", ",", "self", ",", "other", ",", "align", ")" ]
https://github.com/geopandas/geopandas/blob/8e7133aef9e6c0d2465e07e92d954e95dedd3881/geopandas/base.py#L2426-L2537
PaddlePaddle/PaddleX
2bab73f81ab54e328204e7871e6ae4a82e719f5d
static/paddlex/cv/nets/hrnet.py
python
HRNet.net
(self, input)
return st4[-1]
[]
def net(self, input): width = self.width channels_1, channels_2, channels_3, channels_4 = self.channels[str( width)] num_modules_1, num_modules_2, num_modules_3, num_modules_4 = self.num_modules[ str(width)] num_blocks_1, num_blocks_2, num_blocks_3, num_blocks_4 = self.num_blocks[ str(width)] x = self.conv_bn_layer( input=input, filter_size=3, num_filters=channels_1[0], stride=2, if_act=True, name='layer1_1') x = self.conv_bn_layer( input=x, filter_size=3, num_filters=channels_1[0], stride=2, if_act=True, name='layer1_2') la1 = self.layer1(x, num_blocks_1, channels_1, name='layer2') tr1 = self.transition_layer([la1], [256], channels_2, name='tr1') st2 = self.stage( tr1, num_modules_2, num_blocks_2, channels_2, name='st2') tr2 = self.transition_layer(st2, channels_2, channels_3, name='tr2') st3 = self.stage( tr2, num_modules_3, num_blocks_3, channels_3, name='st3') tr3 = self.transition_layer(st3, channels_3, channels_4, name='tr3') st4 = self.stage( tr3, num_modules_4, num_blocks_4, channels_4, name='st4') # classification if self.num_classes: last_cls = self.last_cls_out(x=st4, name='cls_head') y = last_cls[0] last_num_filters = [256, 512, 1024] for i in range(3): y = fluid.layers.elementwise_add( last_cls[i + 1], self.conv_bn_layer( input=y, filter_size=3, num_filters=last_num_filters[i], stride=2, name='cls_head_add' + str(i + 1))) y = self.conv_bn_layer( input=y, filter_size=1, num_filters=2048, stride=1, name='cls_head_last_conv') pool = fluid.layers.pool2d( input=y, pool_type='avg', global_pooling=True) stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) out = fluid.layers.fc( input=pool, size=self.num_classes, param_attr=ParamAttr( name='fc_weights', initializer=fluid.initializer.Uniform(-stdv, stdv)), bias_attr=ParamAttr(name='fc_offset')) return out # segmentation if self.feature_maps == "stage4": return st4 self.end_points = st4 return st4[-1]
[ "def", "net", "(", "self", ",", "input", ")", ":", "width", "=", "self", ".", "width", "channels_1", ",", "channels_2", ",", "channels_3", ",", "channels_4", "=", "self", ".", "channels", "[", "str", "(", "width", ")", "]", "num_modules_1", ",", "num_modules_2", ",", "num_modules_3", ",", "num_modules_4", "=", "self", ".", "num_modules", "[", "str", "(", "width", ")", "]", "num_blocks_1", ",", "num_blocks_2", ",", "num_blocks_3", ",", "num_blocks_4", "=", "self", ".", "num_blocks", "[", "str", "(", "width", ")", "]", "x", "=", "self", ".", "conv_bn_layer", "(", "input", "=", "input", ",", "filter_size", "=", "3", ",", "num_filters", "=", "channels_1", "[", "0", "]", ",", "stride", "=", "2", ",", "if_act", "=", "True", ",", "name", "=", "'layer1_1'", ")", "x", "=", "self", ".", "conv_bn_layer", "(", "input", "=", "x", ",", "filter_size", "=", "3", ",", "num_filters", "=", "channels_1", "[", "0", "]", ",", "stride", "=", "2", ",", "if_act", "=", "True", ",", "name", "=", "'layer1_2'", ")", "la1", "=", "self", ".", "layer1", "(", "x", ",", "num_blocks_1", ",", "channels_1", ",", "name", "=", "'layer2'", ")", "tr1", "=", "self", ".", "transition_layer", "(", "[", "la1", "]", ",", "[", "256", "]", ",", "channels_2", ",", "name", "=", "'tr1'", ")", "st2", "=", "self", ".", "stage", "(", "tr1", ",", "num_modules_2", ",", "num_blocks_2", ",", "channels_2", ",", "name", "=", "'st2'", ")", "tr2", "=", "self", ".", "transition_layer", "(", "st2", ",", "channels_2", ",", "channels_3", ",", "name", "=", "'tr2'", ")", "st3", "=", "self", ".", "stage", "(", "tr2", ",", "num_modules_3", ",", "num_blocks_3", ",", "channels_3", ",", "name", "=", "'st3'", ")", "tr3", "=", "self", ".", "transition_layer", "(", "st3", ",", "channels_3", ",", "channels_4", ",", "name", "=", "'tr3'", ")", "st4", "=", "self", ".", "stage", "(", "tr3", ",", "num_modules_4", ",", "num_blocks_4", ",", "channels_4", ",", "name", "=", "'st4'", ")", "# classification", "if", "self", ".", "num_classes", ":", "last_cls", "=", "self", ".", "last_cls_out", "(", "x", "=", "st4", ",", "name", "=", "'cls_head'", ")", "y", "=", "last_cls", "[", "0", "]", "last_num_filters", "=", "[", "256", ",", "512", ",", "1024", "]", "for", "i", "in", "range", "(", "3", ")", ":", "y", "=", "fluid", ".", "layers", ".", "elementwise_add", "(", "last_cls", "[", "i", "+", "1", "]", ",", "self", ".", "conv_bn_layer", "(", "input", "=", "y", ",", "filter_size", "=", "3", ",", "num_filters", "=", "last_num_filters", "[", "i", "]", ",", "stride", "=", "2", ",", "name", "=", "'cls_head_add'", "+", "str", "(", "i", "+", "1", ")", ")", ")", "y", "=", "self", ".", "conv_bn_layer", "(", "input", "=", "y", ",", "filter_size", "=", "1", ",", "num_filters", "=", "2048", ",", "stride", "=", "1", ",", "name", "=", "'cls_head_last_conv'", ")", "pool", "=", "fluid", ".", "layers", ".", "pool2d", "(", "input", "=", "y", ",", "pool_type", "=", "'avg'", ",", "global_pooling", "=", "True", ")", "stdv", "=", "1.0", "/", "math", ".", "sqrt", "(", "pool", ".", "shape", "[", "1", "]", "*", "1.0", ")", "out", "=", "fluid", ".", "layers", ".", "fc", "(", "input", "=", "pool", ",", "size", "=", "self", ".", "num_classes", ",", "param_attr", "=", "ParamAttr", "(", "name", "=", "'fc_weights'", ",", "initializer", "=", "fluid", ".", "initializer", ".", "Uniform", "(", "-", "stdv", ",", "stdv", ")", ")", ",", "bias_attr", "=", "ParamAttr", "(", "name", "=", "'fc_offset'", ")", ")", "return", "out", "# segmentation", "if", "self", ".", "feature_maps", "==", "\"stage4\"", ":", "return", 
"st4", "self", ".", "end_points", "=", "st4", "return", "st4", "[", "-", "1", "]" ]
https://github.com/PaddlePaddle/PaddleX/blob/2bab73f81ab54e328204e7871e6ae4a82e719f5d/static/paddlex/cv/nets/hrnet.py#L97-L170
gevent/gevent
ae2cb5aeb2aea8987efcb90a4c50ca4e1ee12c31
src/gevent/pywsgi.py
python
WSGIHandler.start_response
(self, status, headers, exc_info=None)
return self.write
.. versionchanged:: 1.2a1 Avoid HTTP header injection by raising a :exc:`ValueError` if *status* or any *header* name or value contains a carriage return or newline. .. versionchanged:: 1.1b5 Pro-actively handle checking the encoding of the status line and headers during this method. On Python 2, avoid some extra encodings.
.. versionchanged:: 1.2a1 Avoid HTTP header injection by raising a :exc:`ValueError` if *status* or any *header* name or value contains a carriage return or newline. .. versionchanged:: 1.1b5 Pro-actively handle checking the encoding of the status line and headers during this method. On Python 2, avoid some extra encodings.
[ "..", "versionchanged", "::", "1", ".", "2a1", "Avoid", "HTTP", "header", "injection", "by", "raising", "a", ":", "exc", ":", "ValueError", "if", "*", "status", "*", "or", "any", "*", "header", "*", "name", "or", "value", "contains", "a", "carriage", "return", "or", "newline", ".", "..", "versionchanged", "::", "1", ".", "1b5", "Pro", "-", "actively", "handle", "checking", "the", "encoding", "of", "the", "status", "line", "and", "headers", "during", "this", "method", ".", "On", "Python", "2", "avoid", "some", "extra", "encodings", "." ]
def start_response(self, status, headers, exc_info=None): """ .. versionchanged:: 1.2a1 Avoid HTTP header injection by raising a :exc:`ValueError` if *status* or any *header* name or value contains a carriage return or newline. .. versionchanged:: 1.1b5 Pro-actively handle checking the encoding of the status line and headers during this method. On Python 2, avoid some extra encodings. """ # pylint:disable=too-many-branches,too-many-statements if exc_info: try: if self.headers_sent: # Re-raise original exception if headers sent reraise(*exc_info) finally: # Avoid dangling circular ref exc_info = None # Pep 3333, "The start_response callable": # https://www.python.org/dev/peps/pep-3333/#the-start-response-callable # "Servers should check for errors in the headers at the time # start_response is called, so that an error can be raised # while the application is still running." Here, we check the encoding. # This aids debugging: headers especially are generated programmatically # and an encoding error in a loop or list comprehension yields an opaque # UnicodeError without any clue which header was wrong. # Note that this results in copying the header list at this point, not modifying it, # although we are allowed to do so if needed. This slightly increases memory usage. # We also check for HTTP Response Splitting vulnerabilities response_headers = [] header = None value = None try: for header, value in headers: if not isinstance(header, str): raise UnicodeError("The header must be a native string", header, value) if not isinstance(value, str): raise UnicodeError("The value must be a native string", header, value) if '\r' in header or '\n' in header: raise ValueError('carriage return or newline in header name', header) if '\r' in value or '\n' in value: raise ValueError('carriage return or newline in header value', value) # Either we're on Python 2, in which case bytes is correct, or # we're on Python 3 and the user screwed up (because it should be a native # string). In either case, make sure that this is latin-1 compatible. Under # Python 2, bytes.encode() will take a round-trip through the system encoding, # which may be ascii, which is not really what we want. However, the latin-1 encoding # can encode everything except control characters and the block from 0x7F to 0x9F, so # explicitly round-tripping bytes through the encoding is unlikely to be of much # benefit, so we go for speed (the WSGI spec specifically calls out allowing the range # from 0x00 to 0xFF, although the HTTP spec forbids the control characters). # Note: Some Python 2 implementations, like Jython, may allow non-octet (above 255) values # in their str implementation; this is mentioned in the WSGI spec, but we don't # run on any platform like that so we can assume that a str value is pure bytes. response_headers.append((header if not PY3 else header.encode("latin-1"), value if not PY3 else value.encode("latin-1"))) except UnicodeEncodeError: # If we get here, we're guaranteed to have a header and value raise UnicodeError("Non-latin1 header", repr(header), repr(value)) # Same as above if not isinstance(status, str): raise UnicodeError("The status string must be a native string") if '\r' in status or '\n' in status: raise ValueError("carriage return or newline in status", status) # don't assign to anything until the validation is complete, including parsing the # code code = int(status.split(' ', 1)[0]) self.status = status if not PY3 else status.encode("latin-1") self._orig_status = status # Preserve the native string for logging self.response_headers = response_headers self.code = code provided_connection = None # Did the wsgi app give us a Connection header? self.provided_date = None self.provided_content_length = None for header, value in headers: header = header.lower() if header == 'connection': provided_connection = value elif header == 'date': self.provided_date = value elif header == 'content-length': self.provided_content_length = value if self.request_version == 'HTTP/1.0' and provided_connection is None: conntype = b'close' if self.close_connection else b'keep-alive' response_headers.append((b'Connection', conntype)) elif provided_connection == 'close': self.close_connection = True if self.code in (304, 204): if self.provided_content_length is not None and self.provided_content_length != '0': msg = 'Invalid Content-Length for %s response: %r (must be absent or zero)' % (self.code, self.provided_content_length) if PY3: msg = msg.encode('latin-1') raise self.ApplicationError(msg) return self.write
[ "def", "start_response", "(", "self", ",", "status", ",", "headers", ",", "exc_info", "=", "None", ")", ":", "# pylint:disable=too-many-branches,too-many-statements", "if", "exc_info", ":", "try", ":", "if", "self", ".", "headers_sent", ":", "# Re-raise original exception if headers sent", "reraise", "(", "*", "exc_info", ")", "finally", ":", "# Avoid dangling circular ref", "exc_info", "=", "None", "# Pep 3333, \"The start_response callable\":", "# https://www.python.org/dev/peps/pep-3333/#the-start-response-callable", "# \"Servers should check for errors in the headers at the time", "# start_response is called, so that an error can be raised", "# while the application is still running.\" Here, we check the encoding.", "# This aids debugging: headers especially are generated programmatically", "# and an encoding error in a loop or list comprehension yields an opaque", "# UnicodeError without any clue which header was wrong.", "# Note that this results in copying the header list at this point, not modifying it,", "# although we are allowed to do so if needed. This slightly increases memory usage.", "# We also check for HTTP Response Splitting vulnerabilities", "response_headers", "=", "[", "]", "header", "=", "None", "value", "=", "None", "try", ":", "for", "header", ",", "value", "in", "headers", ":", "if", "not", "isinstance", "(", "header", ",", "str", ")", ":", "raise", "UnicodeError", "(", "\"The header must be a native string\"", ",", "header", ",", "value", ")", "if", "not", "isinstance", "(", "value", ",", "str", ")", ":", "raise", "UnicodeError", "(", "\"The value must be a native string\"", ",", "header", ",", "value", ")", "if", "'\\r'", "in", "header", "or", "'\\n'", "in", "header", ":", "raise", "ValueError", "(", "'carriage return or newline in header name'", ",", "header", ")", "if", "'\\r'", "in", "value", "or", "'\\n'", "in", "value", ":", "raise", "ValueError", "(", "'carriage return or newline in header value'", ",", "value", ")", "# Either we're on Python 2, in which case bytes is correct, or", "# we're on Python 3 and the user screwed up (because it should be a native", "# string). In either case, make sure that this is latin-1 compatible. Under", "# Python 2, bytes.encode() will take a round-trip through the system encoding,", "# which may be ascii, which is not really what we want. 
However, the latin-1 encoding", "# can encode everything except control characters and the block from 0x7F to 0x9F, so", "# explicitly round-tripping bytes through the encoding is unlikely to be of much", "# benefit, so we go for speed (the WSGI spec specifically calls out allowing the range", "# from 0x00 to 0xFF, although the HTTP spec forbids the control characters).", "# Note: Some Python 2 implementations, like Jython, may allow non-octet (above 255) values", "# in their str implementation; this is mentioned in the WSGI spec, but we don't", "# run on any platform like that so we can assume that a str value is pure bytes.", "response_headers", ".", "append", "(", "(", "header", "if", "not", "PY3", "else", "header", ".", "encode", "(", "\"latin-1\"", ")", ",", "value", "if", "not", "PY3", "else", "value", ".", "encode", "(", "\"latin-1\"", ")", ")", ")", "except", "UnicodeEncodeError", ":", "# If we get here, we're guaranteed to have a header and value", "raise", "UnicodeError", "(", "\"Non-latin1 header\"", ",", "repr", "(", "header", ")", ",", "repr", "(", "value", ")", ")", "# Same as above", "if", "not", "isinstance", "(", "status", ",", "str", ")", ":", "raise", "UnicodeError", "(", "\"The status string must be a native string\"", ")", "if", "'\\r'", "in", "status", "or", "'\\n'", "in", "status", ":", "raise", "ValueError", "(", "\"carriage return or newline in status\"", ",", "status", ")", "# don't assign to anything until the validation is complete, including parsing the", "# code", "code", "=", "int", "(", "status", ".", "split", "(", "' '", ",", "1", ")", "[", "0", "]", ")", "self", ".", "status", "=", "status", "if", "not", "PY3", "else", "status", ".", "encode", "(", "\"latin-1\"", ")", "self", ".", "_orig_status", "=", "status", "# Preserve the native string for logging", "self", ".", "response_headers", "=", "response_headers", "self", ".", "code", "=", "code", "provided_connection", "=", "None", "# Did the wsgi app give us a Connection header?", "self", ".", "provided_date", "=", "None", "self", ".", "provided_content_length", "=", "None", "for", "header", ",", "value", "in", "headers", ":", "header", "=", "header", ".", "lower", "(", ")", "if", "header", "==", "'connection'", ":", "provided_connection", "=", "value", "elif", "header", "==", "'date'", ":", "self", ".", "provided_date", "=", "value", "elif", "header", "==", "'content-length'", ":", "self", ".", "provided_content_length", "=", "value", "if", "self", ".", "request_version", "==", "'HTTP/1.0'", "and", "provided_connection", "is", "None", ":", "conntype", "=", "b'close'", "if", "self", ".", "close_connection", "else", "b'keep-alive'", "response_headers", ".", "append", "(", "(", "b'Connection'", ",", "conntype", ")", ")", "elif", "provided_connection", "==", "'close'", ":", "self", ".", "close_connection", "=", "True", "if", "self", ".", "code", "in", "(", "304", ",", "204", ")", ":", "if", "self", ".", "provided_content_length", "is", "not", "None", "and", "self", ".", "provided_content_length", "!=", "'0'", ":", "msg", "=", "'Invalid Content-Length for %s response: %r (must be absent or zero)'", "%", "(", "self", ".", "code", ",", "self", ".", "provided_content_length", ")", "if", "PY3", ":", "msg", "=", "msg", ".", "encode", "(", "'latin-1'", ")", "raise", "self", ".", "ApplicationError", "(", "msg", ")", "return", "self", ".", "write" ]
https://github.com/gevent/gevent/blob/ae2cb5aeb2aea8987efcb90a4c50ca4e1ee12c31/src/gevent/pywsgi.py#L802-L905
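All the validation above guards the standard PEP 3333 contract. For reference, a minimal WSGI app exercising start_response, runnable against the stdlib reference server (host and port are arbitrary):

from wsgiref.simple_server import make_server

def app(environ, start_response):
    body = b'hello'
    start_response('200 OK', [('Content-Type', 'text/plain'),
                              ('Content-Length', str(len(body)))])
    return [body]

# make_server('127.0.0.1', 8000, app).serve_forever()  # uncomment to serve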
Fizzadar/pyinfra
ff0913d6a172966760b63fe59e55dff9ea852e0d
pyinfra/api/facts.py
python
get_short_facts
(state, short_fact, **kwargs)
return { host: short_fact.process_data(data) for host, data in six.iteritems(facts) }
[]
def get_short_facts(state, short_fact, **kwargs): facts = get_facts(state, short_fact.fact.name, **kwargs) return { host: short_fact.process_data(data) for host, data in six.iteritems(facts) }
[ "def", "get_short_facts", "(", "state", ",", "short_fact", ",", "*", "*", "kwargs", ")", ":", "facts", "=", "get_facts", "(", "state", ",", "short_fact", ".", "fact", ".", "name", ",", "*", "*", "kwargs", ")", "return", "{", "host", ":", "short_fact", ".", "process_data", "(", "data", ")", "for", "host", ",", "data", "in", "six", ".", "iteritems", "(", "facts", ")", "}" ]
https://github.com/Fizzadar/pyinfra/blob/ff0913d6a172966760b63fe59e55dff9ea852e0d/pyinfra/api/facts.py#L114-L120
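Stripped of pyinfra specifics, get_short_facts is a dict comprehension mapping a post-processing step over per-host data. A generic sketch with illustrative stand-ins:

def get_short_facts(get_facts, process_data):
    # get_facts returns {host: raw_data}; process_data shortens each entry.
    facts = get_facts()
    return {host: process_data(data) for host, data in facts.items()}

out = get_short_facts(lambda: {'web1': [1, 2], 'web2': []}, len)
assert out == {'web1': 2, 'web2': 0}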
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/_pyio.py
python
TextIOWrapper.close
(self)
[]
def close(self): if self.buffer is not None and not self.closed: try: self.flush() finally: self.buffer.close()
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "buffer", "is", "not", "None", "and", "not", "self", ".", "closed", ":", "try", ":", "self", ".", "flush", "(", ")", "finally", ":", "self", ".", "buffer", ".", "close", "(", ")" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/_pyio.py#L2174-L2179
Project-MONAI/MONAI
83f8b06372a3803ebe9281300cb794a1f3395018
monai/data/csv_saver.py
python
CSVSaver.save
(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None)
Save data into the cache dictionary. The metadata should have the following key: - ``'filename_or_obj'`` -- save the data corresponding to file name or object. If meta_data is None, use the default index from 0 to save data instead. Args: data: target data content that save into cache. meta_data: the meta data information corresponding to the data.
Save data into the cache dictionary. The metadata should have the following key: - ``'filename_or_obj'`` -- save the data corresponding to file name or object. If meta_data is None, use the default index from 0 to save data instead.
[ "Save", "data", "into", "the", "cache", "dictionary", ".", "The", "metadata", "should", "have", "the", "following", "key", ":", "-", "filename_or_obj", "--", "save", "the", "data", "corresponding", "to", "file", "name", "or", "object", ".", "If", "meta_data", "is", "None", "use", "the", "default", "index", "from", "0", "to", "save", "data", "instead", "." ]
def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None: """Save data into the cache dictionary. The metadata should have the following key: - ``'filename_or_obj'`` -- save the data corresponding to file name or object. If meta_data is None, use the default index from 0 to save data instead. Args: data: target data content that save into cache. meta_data: the meta data information corresponding to the data. """ save_key = meta_data[Key.FILENAME_OR_OBJ] if meta_data else str(self._data_index) self._data_index += 1 if isinstance(data, torch.Tensor): data = data.detach().cpu().numpy() self._cache_dict[save_key] = np.asarray(data, dtype=float)
[ "def", "save", "(", "self", ",", "data", ":", "Union", "[", "torch", ".", "Tensor", ",", "np", ".", "ndarray", "]", ",", "meta_data", ":", "Optional", "[", "Dict", "]", "=", "None", ")", "->", "None", ":", "save_key", "=", "meta_data", "[", "Key", ".", "FILENAME_OR_OBJ", "]", "if", "meta_data", "else", "str", "(", "self", ".", "_data_index", ")", "self", ".", "_data_index", "+=", "1", "if", "isinstance", "(", "data", ",", "torch", ".", "Tensor", ")", ":", "data", "=", "data", ".", "detach", "(", ")", ".", "cpu", "(", ")", ".", "numpy", "(", ")", "self", ".", "_cache_dict", "[", "save_key", "]", "=", "np", ".", "asarray", "(", "data", ",", "dtype", "=", "float", ")" ]
https://github.com/Project-MONAI/MONAI/blob/83f8b06372a3803ebe9281300cb794a1f3395018/monai/data/csv_saver.py#L80-L94
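A minimal sketch of the same caching pattern outside the CSVSaver class (names illustrative); the key comes from the metadata when present, otherwise from a running index:

import numpy as np
import torch

cache, index = {}, 0

def save(data, meta=None):
    global index
    key = meta['filename_or_obj'] if meta else str(index)
    index += 1
    if isinstance(data, torch.Tensor):
        data = data.detach().cpu().numpy()
    cache[key] = np.asarray(data, dtype=float)

save(torch.tensor([0.1, 0.9]), {'filename_or_obj': 'img001.nii'})
assert cache['img001.nii'].dtype == np.float64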
apple/coremltools
141a83af482fcbdd5179807c9eaff9a7999c2c49
coremltools/models/neural_network/builder.py
python
NeuralNetworkBuilder.add_copy
(self, name, input_name, output_name)
return spec_layer
Add a copy layer to the model that copies its input tensor to the output tensor. Input tensor and output tensor must have distinct names. Refer to the ``CopyLayerParams`` message in the specification (NeuralNetwork.proto) for more details. Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer.
Add a copy layer to the model that copies its input tensor to the output tensor. Input tensor and output tensor must have distinct names. Refer to the ``CopyLayerParams`` message in the specification (NeuralNetwork.proto) for more details.
[ "Add", "a", "copy", "layer", "to", "the", "model", "that", "copies", "its", "input", "tensor", "to", "the", "output", "tensor", ".", "Input", "tensor", "and", "output", "tensor", "must", "have", "distinct", "names", ".", "Refer", "to", "the", "CopyLayerParams", "message", "in", "the", "specification", "(", "NeuralNetwork", ".", "proto", ")", "for", "more", "details", "." ]
def add_copy(self, name, input_name, output_name): """ Add a copy layer to the model that copies its input tensor to the output tensor. Input tensor and output tensor must have distinct names. Refer to the ``CopyLayerParams`` message in the specification (NeuralNetwork.proto) for more details. Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. """ spec_layer = self._add_generic_layer(name, [input_name], [output_name]) spec_layer.copy.MergeFromString(b"") # If output name rank is different than earlier, # mark it as unknown if output_name in self.rank_dict and self._get_rank( output_name ) != self._get_rank(input_name): self.rank_dict[output_name] = -1 else: self.rank_dict[output_name] = self._get_rank(input_name) return spec_layer
[ "def", "add_copy", "(", "self", ",", "name", ",", "input_name", ",", "output_name", ")", ":", "spec_layer", "=", "self", ".", "_add_generic_layer", "(", "name", ",", "[", "input_name", "]", ",", "[", "output_name", "]", ")", "spec_layer", ".", "copy", ".", "MergeFromString", "(", "b\"\"", ")", "# If output name rank is different than earlier,", "# mark it as unknown", "if", "output_name", "in", "self", ".", "rank_dict", "and", "self", ".", "_get_rank", "(", "output_name", ")", "!=", "self", ".", "_get_rank", "(", "input_name", ")", ":", "self", ".", "rank_dict", "[", "output_name", "]", "=", "-", "1", "else", ":", "self", ".", "rank_dict", "[", "output_name", "]", "=", "self", ".", "_get_rank", "(", "input_name", ")", "return", "spec_layer" ]
https://github.com/apple/coremltools/blob/141a83af482fcbdd5179807c9eaff9a7999c2c49/coremltools/models/neural_network/builder.py#L6153-L6179
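A hedged usage sketch for add_copy; the feature names are illustrative, and the builder arguments can differ across coremltools versions:

from coremltools.models import datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder

builder = NeuralNetworkBuilder([('x', datatypes.Array(3))],
                               [('y', datatypes.Array(3))],
                               disable_rank5_shape_mapping=True)
builder.add_copy(name='copy_x', input_name='x', output_name='y')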
natanielruiz/disrupting-deepfakes
c5b4373a54693139ae4c408b1fcca2de745355e0
stargan/logger.py
python
Logger.__init__
(self, log_dir)
Initialize summary writer.
Initialize summary writer.
[ "Initialize", "summary", "writer", "." ]
def __init__(self, log_dir): """Initialize summary writer.""" self.writer = tf.summary.FileWriter(log_dir)
[ "def", "__init__", "(", "self", ",", "log_dir", ")", ":", "self", ".", "writer", "=", "tf", ".", "summary", ".", "FileWriter", "(", "log_dir", ")" ]
https://github.com/natanielruiz/disrupting-deepfakes/blob/c5b4373a54693139ae4c408b1fcca2de745355e0/stargan/logger.py#L7-L9
tp4a/teleport
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
server/www/packages/packages-linux/x64/mako/runtime.py
python
LoopContext.reverse_index
(self)
return len(self) - self.index - 1
[]
def reverse_index(self): return len(self) - self.index - 1
[ "def", "reverse_index", "(", "self", ")", ":", "return", "len", "(", "self", ")", "-", "self", ".", "index", "-", "1" ]
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-linux/x64/mako/runtime.py#L322-L323
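The property is the usual end-relative index: for a loop of length n at position i, reverse_index is n - i - 1, so it counts down to zero as the loop advances:

n = 5
assert [n - i - 1 for i in range(n)] == [4, 3, 2, 1, 0]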
memray/seq2seq-keyphrase
9145c63ebdc4c3bc431f8091dc52547a46804012
emolga/models/covc_encdec.py
python
DecoderAtt.build_sampler
(self)
Build a sampler which only steps once. Typically it only works for one word a time?
Build a sampler which only steps once. Typically it only works for one word a time?
[ "Build", "a", "sampler", "which", "only", "steps", "once", ".", "Typically", "it", "only", "works", "for", "one", "word", "a", "time?" ]
def build_sampler(self): """ Build a sampler which only steps once. Typically it only works for one word a time? """ logger.info("build sampler ...") if self.config['sample_stoch'] and self.config['sample_argmax']: logger.info("use argmax search!") elif self.config['sample_stoch'] and (not self.config['sample_argmax']): logger.info("use stochastic sampling!") elif self.config['sample_beam'] > 1: logger.info("use beam search! (beam_size={})".format(self.config['sample_beam'])) # initial state of our Decoder. context = T.tensor3() # theano variable. shape=(n_sample, sent_len, 2*output_dim) c_mask = T.matrix() # mask of the input sentence. context_A = self.Is(context) # an bridge layer init_h = self.Initializer(context[:, 0, :]) init_a = T.zeros((context.shape[0], context.shape[1])) cov = T.zeros((context.shape[0], context.shape[1])) logger.info('compile the function: get_init_state') self.get_init_state \ = theano.function([context], [init_h, init_a, cov], name='get_init_state', allow_input_downcast=True) logger.info('done.') # word sampler: 1 x 1 prev_word = T.vector('prev_word', dtype='int32') prev_stat = T.matrix('prev_state', dtype='float32') prev_a = T.matrix('prev_a', dtype='float32') prev_cov = T.matrix('prev_cov', dtype='float32') next_prob, next_sample, next_stat, ncov, alpha \ = self._step_sample(prev_word, prev_stat, prev_a, prev_cov, context, c_mask, context_A) # next word probability logger.info('compile the function: sample_next') inputs = [prev_word, prev_stat, prev_a, prev_cov, context, c_mask] outputs = [next_prob, next_sample, next_stat, ncov, alpha] self.sample_next = theano.function(inputs, outputs, name='sample_next', allow_input_downcast=True) logger.info('done')
[ "def", "build_sampler", "(", "self", ")", ":", "logger", ".", "info", "(", "\"build sampler ...\"", ")", "if", "self", ".", "config", "[", "'sample_stoch'", "]", "and", "self", ".", "config", "[", "'sample_argmax'", "]", ":", "logger", ".", "info", "(", "\"use argmax search!\"", ")", "elif", "self", ".", "config", "[", "'sample_stoch'", "]", "and", "(", "not", "self", ".", "config", "[", "'sample_argmax'", "]", ")", ":", "logger", ".", "info", "(", "\"use stochastic sampling!\"", ")", "elif", "self", ".", "config", "[", "'sample_beam'", "]", ">", "1", ":", "logger", ".", "info", "(", "\"use beam search! (beam_size={})\"", ".", "format", "(", "self", ".", "config", "[", "'sample_beam'", "]", ")", ")", "# initial state of our Decoder.", "context", "=", "T", ".", "tensor3", "(", ")", "# theano variable. shape=(n_sample, sent_len, 2*output_dim)", "c_mask", "=", "T", ".", "matrix", "(", ")", "# mask of the input sentence.", "context_A", "=", "self", ".", "Is", "(", "context", ")", "# an bridge layer", "init_h", "=", "self", ".", "Initializer", "(", "context", "[", ":", ",", "0", ",", ":", "]", ")", "init_a", "=", "T", ".", "zeros", "(", "(", "context", ".", "shape", "[", "0", "]", ",", "context", ".", "shape", "[", "1", "]", ")", ")", "cov", "=", "T", ".", "zeros", "(", "(", "context", ".", "shape", "[", "0", "]", ",", "context", ".", "shape", "[", "1", "]", ")", ")", "logger", ".", "info", "(", "'compile the function: get_init_state'", ")", "self", ".", "get_init_state", "=", "theano", ".", "function", "(", "[", "context", "]", ",", "[", "init_h", ",", "init_a", ",", "cov", "]", ",", "name", "=", "'get_init_state'", ",", "allow_input_downcast", "=", "True", ")", "logger", ".", "info", "(", "'done.'", ")", "# word sampler: 1 x 1", "prev_word", "=", "T", ".", "vector", "(", "'prev_word'", ",", "dtype", "=", "'int32'", ")", "prev_stat", "=", "T", ".", "matrix", "(", "'prev_state'", ",", "dtype", "=", "'float32'", ")", "prev_a", "=", "T", ".", "matrix", "(", "'prev_a'", ",", "dtype", "=", "'float32'", ")", "prev_cov", "=", "T", ".", "matrix", "(", "'prev_cov'", ",", "dtype", "=", "'float32'", ")", "next_prob", ",", "next_sample", ",", "next_stat", ",", "ncov", ",", "alpha", "=", "self", ".", "_step_sample", "(", "prev_word", ",", "prev_stat", ",", "prev_a", ",", "prev_cov", ",", "context", ",", "c_mask", ",", "context_A", ")", "# next word probability", "logger", ".", "info", "(", "'compile the function: sample_next'", ")", "inputs", "=", "[", "prev_word", ",", "prev_stat", ",", "prev_a", ",", "prev_cov", ",", "context", ",", "c_mask", "]", "outputs", "=", "[", "next_prob", ",", "next_sample", ",", "next_stat", ",", "ncov", ",", "alpha", "]", "self", ".", "sample_next", "=", "theano", ".", "function", "(", "inputs", ",", "outputs", ",", "name", "=", "'sample_next'", ",", "allow_input_downcast", "=", "True", ")", "logger", ".", "info", "(", "'done'", ")" ]
https://github.com/memray/seq2seq-keyphrase/blob/9145c63ebdc4c3bc431f8091dc52547a46804012/emolga/models/covc_encdec.py#L1155-L1202
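A minimal sketch of how the two compiled Theano functions above might be driven at decode time. Everything here is an assumption drawn from the signatures in build_sampler: `model` stands in for a built covc_encdec instance, the start-token and end-of-sequence ids are guesses, and feeding `alpha` back in as `prev_a` is inferred from the variable names, not taken from the repository's actual beam-search loop.

import numpy as np

# Hypothetical greedy decoding loop over the compiled sampler; `model` is
# assumed to expose the get_init_state/sample_next functions built above.
def greedy_decode(model, context, c_mask, max_len=20, eos_id=0):
    # context: (n_sample, sent_len, 2*output_dim); c_mask: (n_sample, sent_len)
    state, prev_a, cov = model.get_init_state(context)
    word = np.zeros((context.shape[0],), dtype='int32')  # assumed start-token id
    decoded = []
    for _ in range(max_len):
        prob, sample, state, cov, alpha = model.sample_next(
            word, state, prev_a, cov, context, c_mask)
        word = sample.astype('int32')
        decoded.append(int(word[0]))
        if decoded[-1] == eos_id:  # assumed end-of-sequence id
            break
        prev_a = alpha  # guess: feed the attention weights back in
    return decoded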
aleju/imgaug
0101108d4fed06bc5056c4a03e2bcb0216dac326
imgaug/augmenters/imgcorruptlike.py
python
Spatter.__init__
(self, severity=(1, 5), seed=None, name=None, random_state="deprecated", deterministic="deprecated")
[]
def __init__(self, severity=(1, 5), seed=None, name=None, random_state="deprecated", deterministic="deprecated"): super(Spatter, self).__init__( apply_spatter, severity, seed=seed, name=name, random_state=random_state, deterministic=deterministic)
[ "def", "__init__", "(", "self", ",", "severity", "=", "(", "1", ",", "5", ")", ",", "seed", "=", "None", ",", "name", "=", "None", ",", "random_state", "=", "\"deprecated\"", ",", "deterministic", "=", "\"deprecated\"", ")", ":", "super", "(", "Spatter", ",", "self", ")", ".", "__init__", "(", "apply_spatter", ",", "severity", ",", "seed", "=", "seed", ",", "name", "=", "name", ",", "random_state", "=", "random_state", ",", "deterministic", "=", "deterministic", ")" ]
https://github.com/aleju/imgaug/blob/0101108d4fed06bc5056c4a03e2bcb0216dac326/imgaug/augmenters/imgcorruptlike.py#L1809-L1815
ntoll/drogulus
d74b78d0bf0220b91f075dbd3f9a06c2663b474e
drogulus/version.py
python
get_version
()
return '.'.join([str(i) for i in VERSION])
Returns a string representation of the version information of this project.
Returns a string representation of the version information of this project.
[ "Returns", "a", "string", "representation", "of", "the", "version", "information", "of", "this", "project", "." ]
def get_version(): """ Returns a string representation of the version information of this project. """ return '.'.join([str(i) for i in VERSION])
[ "def", "get_version", "(", ")", ":", "return", "'.'", ".", "join", "(", "[", "str", "(", "i", ")", "for", "i", "in", "VERSION", "]", ")" ]
https://github.com/ntoll/drogulus/blob/d74b78d0bf0220b91f075dbd3f9a06c2663b474e/drogulus/version.py#L12-L16
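A worked example of the join-over-tuple pattern, with a hypothetical VERSION value (drogulus defines its own tuple elsewhere in the module):

VERSION = (0, 4, 1)  # hypothetical value; drogulus defines its own

def get_version():
    """Returns a string representation of the version information."""
    return '.'.join([str(i) for i in VERSION])

assert get_version() == '0.4.1'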
nerdvegas/rez
d392c65bf63b4bca8106f938cec49144ba54e770
src/rez/vendor/distlib/locators.py
python
JSONLocator.get_distribution_names
(self)
Return all the distribution names known to this locator.
Return all the distribution names known to this locator.
[ "Return", "all", "the", "distribution", "names", "known", "to", "this", "locator", "." ]
def get_distribution_names(self): """ Return all the distribution names known to this locator. """ raise NotImplementedError('Not available from this locator')
[ "def", "get_distribution_names", "(", "self", ")", ":", "raise", "NotImplementedError", "(", "'Not available from this locator'", ")" ]
https://github.com/nerdvegas/rez/blob/d392c65bf63b4bca8106f938cec49144ba54e770/src/rez/vendor/distlib/locators.py#L900-L904
spyder-ide/spyder
55da47c032dfcf519600f67f8b30eab467f965e7
spyder/plugins/onlinehelp/widgets.py
python
PydocBrowser.url_to_text
(self, url)
return osp.splitext(str(url.path()))[0][1:]
Convert QUrl object to displayed text in combo box. Parameters ---------- url: QUrl Url address.
Convert QUrl object to displayed text in combo box.
[ "Convert", "QUrl", "object", "to", "displayed", "text", "in", "combo", "box", "." ]
def url_to_text(self, url): """ Convert QUrl object to displayed text in combo box. Parameters ---------- url: QUrl Url address. """ string_url = url.toString() if 'about:blank' in string_url: return 'about:blank' elif 'get?key=' in string_url or 'search?key=' in string_url: return url.toString().split('=')[-1] return osp.splitext(str(url.path()))[0][1:]
[ "def", "url_to_text", "(", "self", ",", "url", ")", ":", "string_url", "=", "url", ".", "toString", "(", ")", "if", "'about:blank'", "in", "string_url", ":", "return", "'about:blank'", "elif", "'get?key='", "in", "string_url", "or", "'search?key='", "in", "string_url", ":", "return", "url", ".", "toString", "(", ")", ".", "split", "(", "'='", ")", "[", "-", "1", "]", "return", "osp", ".", "splitext", "(", "str", "(", "url", ".", "path", "(", ")", ")", ")", "[", "0", "]", "[", "1", ":", "]" ]
https://github.com/spyder-ide/spyder/blob/55da47c032dfcf519600f67f8b30eab467f965e7/spyder/plugins/onlinehelp/widgets.py#L387-L402
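The three branches can be exercised without a running browser; this standalone re-creation of the method assumes QUrl from qtpy (the Qt abstraction layer Spyder uses), and the example URLs are made up:

import os.path as osp
from qtpy.QtCore import QUrl  # assumed available alongside Spyder

def url_to_text(url):
    string_url = url.toString()
    if 'about:blank' in string_url:
        return 'about:blank'
    elif 'get?key=' in string_url or 'search?key=' in string_url:
        return string_url.split('=')[-1]
    return osp.splitext(str(url.path()))[0][1:]

print(url_to_text(QUrl('about:blank')))                  # about:blank
print(url_to_text(QUrl('http://127.0.0.1/get?key=os')))  # os
print(url_to_text(QUrl('http://127.0.0.1/math.html')))   # math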
fonttools/fonttools
892322aaff6a89bea5927379ec06bc0da3dfb7df
Lib/fontTools/varLib/instancer/names.py
python
updateNameTable
(varfont, axisLimits)
Update instantiated variable font's name table using STAT AxisValues. Raises ValueError if the STAT table is missing or an Axis Value table is missing for requested axis locations. First, collect all STAT AxisValues that match the new default axis locations (excluding "elided" ones); concatenate the strings in design axis order, while giving priority to "synthetic" values (Format 4), to form the typographic subfamily name associated with the new default instance. Finally, update all related records in the name table, making sure that legacy family/sub-family names conform to the R/I/B/BI (Regular, Italic, Bold, Bold Italic) naming model. Example: Updating a partial variable font: | >>> ttFont = TTFont("OpenSans[wdth,wght].ttf") | >>> updateNameTable(ttFont, {"wght": AxisRange(400, 900), "wdth": 75}) The name table records will be updated in the following manner: NameID 1 familyName: "Open Sans" --> "Open Sans Condensed" NameID 2 subFamilyName: "Regular" --> "Regular" NameID 3 Unique font identifier: "3.000;GOOG;OpenSans-Regular" --> \ "3.000;GOOG;OpenSans-Condensed" NameID 4 Full font name: "Open Sans Regular" --> "Open Sans Condensed" NameID 6 PostScript name: "OpenSans-Regular" --> "OpenSans-Condensed" NameID 16 Typographic Family name: None --> "Open Sans" NameID 17 Typographic Subfamily name: None --> "Condensed" References: https://docs.microsoft.com/en-us/typography/opentype/spec/stat https://docs.microsoft.com/en-us/typography/opentype/spec/name#name-ids
Update instantiated variable font's name table using STAT AxisValues.
[ "Update", "instatiated", "variable", "font", "s", "name", "table", "using", "STAT", "AxisValues", "." ]
def updateNameTable(varfont, axisLimits): """Update instantiated variable font's name table using STAT AxisValues. Raises ValueError if the STAT table is missing or an Axis Value table is missing for requested axis locations. First, collect all STAT AxisValues that match the new default axis locations (excluding "elided" ones); concatenate the strings in design axis order, while giving priority to "synthetic" values (Format 4), to form the typographic subfamily name associated with the new default instance. Finally, update all related records in the name table, making sure that legacy family/sub-family names conform to the R/I/B/BI (Regular, Italic, Bold, Bold Italic) naming model. Example: Updating a partial variable font: | >>> ttFont = TTFont("OpenSans[wdth,wght].ttf") | >>> updateNameTable(ttFont, {"wght": AxisRange(400, 900), "wdth": 75}) The name table records will be updated in the following manner: NameID 1 familyName: "Open Sans" --> "Open Sans Condensed" NameID 2 subFamilyName: "Regular" --> "Regular" NameID 3 Unique font identifier: "3.000;GOOG;OpenSans-Regular" --> \ "3.000;GOOG;OpenSans-Condensed" NameID 4 Full font name: "Open Sans Regular" --> "Open Sans Condensed" NameID 6 PostScript name: "OpenSans-Regular" --> "OpenSans-Condensed" NameID 16 Typographic Family name: None --> "Open Sans" NameID 17 Typographic Subfamily name: None --> "Condensed" References: https://docs.microsoft.com/en-us/typography/opentype/spec/stat https://docs.microsoft.com/en-us/typography/opentype/spec/name#name-ids """ from . import AxisRange, axisValuesFromAxisLimits if "STAT" not in varfont: raise ValueError("Cannot update name table since there is no STAT table.") stat = varfont["STAT"].table if not stat.AxisValueArray: raise ValueError("Cannot update name table since there are no STAT Axis Values") fvar = varfont["fvar"] # The updated name table will reflect the new 'zero origin' of the font. # If we're instantiating a partial font, we will populate the unpinned # axes with their default axis values. fvarDefaults = {a.axisTag: a.defaultValue for a in fvar.axes} defaultAxisCoords = deepcopy(axisLimits) for axisTag, val in fvarDefaults.items(): if axisTag not in defaultAxisCoords or isinstance( defaultAxisCoords[axisTag], AxisRange ): defaultAxisCoords[axisTag] = val axisValueTables = axisValuesFromAxisLimits(stat, defaultAxisCoords) checkAxisValuesExist(stat, axisValueTables, defaultAxisCoords) # ignore "elidable" axis values, should be omitted in application font menus. axisValueTables = [ v for v in axisValueTables if not v.Flags & ELIDABLE_AXIS_VALUE_NAME ] axisValueTables = _sortAxisValues(axisValueTables) _updateNameRecords(varfont, axisValueTables)
[ "def", "updateNameTable", "(", "varfont", ",", "axisLimits", ")", ":", "from", ".", "import", "AxisRange", ",", "axisValuesFromAxisLimits", "if", "\"STAT\"", "not", "in", "varfont", ":", "raise", "ValueError", "(", "\"Cannot update name table since there is no STAT table.\"", ")", "stat", "=", "varfont", "[", "\"STAT\"", "]", ".", "table", "if", "not", "stat", ".", "AxisValueArray", ":", "raise", "ValueError", "(", "\"Cannot update name table since there are no STAT Axis Values\"", ")", "fvar", "=", "varfont", "[", "\"fvar\"", "]", "# The updated name table will reflect the new 'zero origin' of the font.", "# If we're instantiating a partial font, we will populate the unpinned", "# axes with their default axis values.", "fvarDefaults", "=", "{", "a", ".", "axisTag", ":", "a", ".", "defaultValue", "for", "a", "in", "fvar", ".", "axes", "}", "defaultAxisCoords", "=", "deepcopy", "(", "axisLimits", ")", "for", "axisTag", ",", "val", "in", "fvarDefaults", ".", "items", "(", ")", ":", "if", "axisTag", "not", "in", "defaultAxisCoords", "or", "isinstance", "(", "defaultAxisCoords", "[", "axisTag", "]", ",", "AxisRange", ")", ":", "defaultAxisCoords", "[", "axisTag", "]", "=", "val", "axisValueTables", "=", "axisValuesFromAxisLimits", "(", "stat", ",", "defaultAxisCoords", ")", "checkAxisValuesExist", "(", "stat", ",", "axisValueTables", ",", "defaultAxisCoords", ")", "# ignore \"elidable\" axis values, should be omitted in application font menus.", "axisValueTables", "=", "[", "v", "for", "v", "in", "axisValueTables", "if", "not", "v", ".", "Flags", "&", "ELIDABLE_AXIS_VALUE_NAME", "]", "axisValueTables", "=", "_sortAxisValues", "(", "axisValueTables", ")", "_updateNameRecords", "(", "varfont", ",", "axisValueTables", ")" ]
https://github.com/fonttools/fonttools/blob/892322aaff6a89bea5927379ec06bc0da3dfb7df/Lib/fontTools/varLib/instancer/names.py#L70-L130
trakt/Plex-Trakt-Scrobbler
aeb0bfbe62fad4b06c164f1b95581da7f35dce0b
Trakttv.bundle/Contents/Libraries/MacOSX/i386/ucs2/cryptography/hazmat/primitives/serialization.py
python
_ssh_read_next_mpint
(data)
return ( utils.int_from_bytes(mpint_data, byteorder='big', signed=False), rest )
Reads the next mpint from the data. Currently, all mpints are interpreted as unsigned.
Reads the next mpint from the data.
[ "Reads", "the", "next", "mpint", "from", "the", "data", "." ]
def _ssh_read_next_mpint(data): """ Reads the next mpint from the data. Currently, all mpints are interpreted as unsigned. """ mpint_data, rest = _ssh_read_next_string(data) return ( utils.int_from_bytes(mpint_data, byteorder='big', signed=False), rest )
[ "def", "_ssh_read_next_mpint", "(", "data", ")", ":", "mpint_data", ",", "rest", "=", "_ssh_read_next_string", "(", "data", ")", "return", "(", "utils", ".", "int_from_bytes", "(", "mpint_data", ",", "byteorder", "=", "'big'", ",", "signed", "=", "False", ")", ",", "rest", ")" ]
https://github.com/trakt/Plex-Trakt-Scrobbler/blob/aeb0bfbe62fad4b06c164f1b95581da7f35dce0b/Trakttv.bundle/Contents/Libraries/MacOSX/i386/ucs2/cryptography/hazmat/primitives/serialization.py#L140-L150
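A self-contained sketch of the SSH wire format this helper consumes. `_ssh_read_next_string` is re-created here on the assumption that it reads a 4-byte big-endian length prefix followed by that many payload bytes, and int.from_bytes stands in for the py2/py3 `utils.int_from_bytes` shim used above:

import struct

def _ssh_read_next_string(data):
    # Assumed format: 4-byte big-endian length prefix, then the payload.
    n, = struct.unpack('>I', data[:4])
    return data[4:4 + n], data[4 + n:]

def _ssh_read_next_mpint(data):
    mpint_data, rest = _ssh_read_next_string(data)
    return int.from_bytes(mpint_data, byteorder='big', signed=False), rest

# 0x010001 is 65537, the usual RSA public exponent; trailing bytes pass through.
blob = struct.pack('>I', 3) + b'\x01\x00\x01' + b'rest'
value, rest = _ssh_read_next_mpint(blob)
assert value == 65537 and rest == b'rest'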
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/verify/v2/service/entity/factor.py
python
FactorInstance.config
(self)
return self._properties['config']
:returns: Configurations for a `factor_type`. :rtype: dict
:returns: Configurations for a `factor_type`. :rtype: dict
[ ":", "returns", ":", "Configurations", "for", "a", "factor_type", ".", ":", "rtype", ":", "dict" ]
def config(self): """ :returns: Configurations for a `factor_type`. :rtype: dict """ return self._properties['config']
[ "def", "config", "(", "self", ")", ":", "return", "self", ".", "_properties", "[", "'config'", "]" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/verify/v2/service/entity/factor.py#L460-L465
getsentry/sentry
83b1f25aac3e08075e0e2495bc29efaf35aca18a
src/sentry/eventstore/base.py
python
EventStorage.get_unfetched_events
( self, snuba_filter, orderby=None, limit=100, offset=0, referrer="eventstore.get_unfetched_events", # NOQA )
Same as get_events but returns events without their node data loaded. Only the event ID, projectID, groupID and timestamp field will be present without an additional fetch to nodestore. Used for fetching large volumes of events that do not need data loaded from nodestore. Currently this is just used for event data deletions where we just need the event IDs in order to process the deletions. Arguments: snuba_filter (Filter): Filter orderby (Sequence[str]): List of fields to order by - default ['-time', '-event_id'] limit (int): Query limit - default 100 offset (int): Query offset - default 0 referrer (string): Referrer - default "eventstore.get_unfetched_events"
Same as get_events but returns events without their node data loaded. Only the event ID, projectID, groupID and timestamp field will be present without an additional fetch to nodestore.
[ "Same", "as", "get_events", "but", "returns", "events", "without", "their", "node", "datas", "loaded", ".", "Only", "the", "event", "ID", "projectID", "groupID", "and", "timestamp", "field", "will", "be", "present", "without", "an", "additional", "fetch", "to", "nodestore", "." ]
def get_unfetched_events( self, snuba_filter, orderby=None, limit=100, offset=0, referrer="eventstore.get_unfetched_events", # NOQA ): """ Same as get_events but returns events without their node data loaded. Only the event ID, projectID, groupID and timestamp field will be present without an additional fetch to nodestore. Used for fetching large volumes of events that do not need data loaded from nodestore. Currently this is just used for event data deletions where we just need the event IDs in order to process the deletions. Arguments: snuba_filter (Filter): Filter orderby (Sequence[str]): List of fields to order by - default ['-time', '-event_id'] limit (int): Query limit - default 100 offset (int): Query offset - default 0 referrer (string): Referrer - default "eventstore.get_unfetched_events" """ raise NotImplementedError
[ "def", "get_unfetched_events", "(", "self", ",", "snuba_filter", ",", "orderby", "=", "None", ",", "limit", "=", "100", ",", "offset", "=", "0", ",", "referrer", "=", "\"eventstore.get_unfetched_events\"", ",", "# NOQA", ")", ":", "raise", "NotImplementedError" ]
https://github.com/getsentry/sentry/blob/83b1f25aac3e08075e0e2495bc29efaf35aca18a/src/sentry/eventstore/base.py#L161-L185
chribsen/simple-machine-learning-examples
dc94e52a4cebdc8bb959ff88b81ff8cfeca25022
venv/lib/python2.7/site-packages/pandas/computation/pytables.py
python
BinOp.is_in_table
(self)
return self.queryables.get(self.lhs) is not None
return True if this is a valid column name for generation (e.g. an actual column in the table)
return True if this is a valid column name for generation (e.g. an actual column in the table)
[ "return", "True", "if", "this", "is", "a", "valid", "column", "name", "for", "generation", "(", "e", ".", "g", ".", "an", "actual", "column", "in", "the", "table", ")" ]
def is_in_table(self): """ return True if this is a valid column name for generation (e.g. an actual column in the table) """ return self.queryables.get(self.lhs) is not None
[ "def", "is_in_table", "(", "self", ")", ":", "return", "self", ".", "queryables", ".", "get", "(", "self", ".", "lhs", ")", "is", "not", "None" ]
https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/pandas/computation/pytables.py#L144-L147
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/lib-python/3/tkinter/font.py
python
Font.copy
(self)
return Font(self._root, **self.actual())
Return a distinct copy of the current font
Return a distinct copy of the current font
[ "Return", "a", "distinct", "copy", "of", "the", "current", "font" ]
def copy(self): "Return a distinct copy of the current font" return Font(self._root, **self.actual())
[ "def", "copy", "(", "self", ")", ":", "return", "Font", "(", "self", ".", "_root", ",", "*", "*", "self", ".", "actual", "(", ")", ")" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/tkinter/font.py#L117-L119
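A small usage sketch; tkinter font operations need a live Tk root (and therefore a display), and the family/size values are arbitrary:

import tkinter as tk
import tkinter.font as tkfont

root = tk.Tk()  # font operations need a Tk root (and a display)
base = tkfont.Font(root=root, family='Helvetica', size=11)
bold = base.copy()             # a distinct Font built from base.actual()
bold.configure(weight='bold')  # leaves `base` untouched
print(base.actual('weight'), bold.actual('weight'))  # normal bold
root.destroy()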
vlachoudis/bCNC
67126b4894dabf6579baf47af8d0f9b7de35e6e3
bCNC/lib/tkExtra.py
python
InPlaceText.resize
(self, event=None)
[]
def resize(self, event=None): if self.frame is None: return bbox = self.listbox.bbox(self.item) if bbox is None: return x, y, w, h = bbox x += self.listbox.winfo_rootx() y += self.listbox.winfo_rooty() w = self.listbox.winfo_width() try: self.toplevel.wm_geometry("+%d+%d" % (x,y)) except TclError: pass
[ "def", "resize", "(", "self", ",", "event", "=", "None", ")", ":", "if", "self", ".", "frame", "is", "None", ":", "return", "bbox", "=", "self", ".", "listbox", ".", "bbox", "(", "self", ".", "item", ")", "if", "bbox", "is", "None", ":", "return", "x", ",", "y", ",", "w", ",", "h", "=", "bbox", "x", "+=", "self", ".", "listbox", ".", "winfo_rootx", "(", ")", "y", "+=", "self", ".", "listbox", ".", "winfo_rooty", "(", ")", "w", "=", "self", ".", "listbox", ".", "winfo_width", "(", ")", "try", ":", "self", ".", "toplevel", ".", "wm_geometry", "(", "\"+%d+%d\"", "%", "(", "x", ",", "y", ")", ")", "except", "TclError", ":", "pass" ]
https://github.com/vlachoudis/bCNC/blob/67126b4894dabf6579baf47af8d0f9b7de35e6e3/bCNC/lib/tkExtra.py#L2656-L2667
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/codecs.py
python
StreamReader.readlines
(self, sizehint=None, keepends=True)
return data.splitlines(keepends)
Read all lines available on the input stream and return them as a list. Line breaks are implemented using the codec's decoder method and are included in the list entries. sizehint, if given, is ignored since there is no efficient way of finding the true end-of-line.
Read all lines available on the input stream and return them as a list.
[ "Read", "all", "lines", "available", "on", "the", "input", "stream", "and", "return", "them", "as", "a", "list", "." ]
def readlines(self, sizehint=None, keepends=True): """ Read all lines available on the input stream and return them as a list. Line breaks are implemented using the codec's decoder method and are included in the list entries. sizehint, if given, is ignored since there is no efficient way of finding the true end-of-line. """ data = self.read() return data.splitlines(keepends)
[ "def", "readlines", "(", "self", ",", "sizehint", "=", "None", ",", "keepends", "=", "True", ")", ":", "data", "=", "self", ".", "read", "(", ")", "return", "data", ".", "splitlines", "(", "keepends", ")" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/codecs.py#L603-L616
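Since StreamReader instances are usually obtained through codecs.getreader rather than constructed directly, a quick demonstration against an in-memory byte stream:

import codecs
import io

stream = io.BytesIO('α one\nβ two\n'.encode('utf-8'))
reader = codecs.getreader('utf-8')(stream)  # a codecs.StreamReader
print(reader.readlines())                    # ['α one\n', 'β two\n']

reader = codecs.getreader('utf-8')(io.BytesIO(b'a\nb'))
print(reader.readlines(keepends=False))      # ['a', 'b']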
CvvT/dumpDex
92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1
python/idaapi.py
python
is_commutative
(*args)
return _idaapi.is_commutative(*args)
is_commutative(op) -> bool
is_commutative(op) -> bool
[ "is_commutative", "(", "op", ")", "-", ">", "bool" ]
def is_commutative(*args): """ is_commutative(op) -> bool """ return _idaapi.is_commutative(*args)
[ "def", "is_commutative", "(", "*", "args", ")", ":", "return", "_idaapi", ".", "is_commutative", "(", "*", "args", ")" ]
https://github.com/CvvT/dumpDex/blob/92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1/python/idaapi.py#L36818-L36822
exodrifter/unity-python
bef6e4e9ddfbbf1eaf7acbbb973e9aa3dd64a20d
Lib/nntplib.py
python
NNTP.group
(self, name)
return resp, count, first, last, name
Process a GROUP command. Argument: - group: the group name Returns: - resp: server response if successful - count: number of articles (string) - first: first article number (string) - last: last article number (string) - name: the group name
Process a GROUP command. Argument: - group: the group name Returns: - resp: server response if successful - count: number of articles (string) - first: first article number (string) - last: last article number (string) - name: the group name
[ "Process", "a", "GROUP", "command", ".", "Argument", ":", "-", "group", ":", "the", "group", "name", "Returns", ":", "-", "resp", ":", "server", "response", "if", "successful", "-", "count", ":", "number", "of", "articles", "(", "string", ")", "-", "first", ":", "first", "article", "number", "(", "string", ")", "-", "last", ":", "last", "article", "number", "(", "string", ")", "-", "name", ":", "the", "group", "name" ]
def group(self, name): """Process a GROUP command. Argument: - group: the group name Returns: - resp: server response if successful - count: number of articles (string) - first: first article number (string) - last: last article number (string) - name: the group name""" resp = self.shortcmd('GROUP ' + name) if resp[:3] != '211': raise NNTPReplyError(resp) words = resp.split() count = first = last = 0 n = len(words) if n > 1: count = words[1] if n > 2: first = words[2] if n > 3: last = words[3] if n > 4: name = words[4].lower() return resp, count, first, last, name
[ "def", "group", "(", "self", ",", "name", ")", ":", "resp", "=", "self", ".", "shortcmd", "(", "'GROUP '", "+", "name", ")", "if", "resp", "[", ":", "3", "]", "!=", "'211'", ":", "raise", "NNTPReplyError", "(", "resp", ")", "words", "=", "resp", ".", "split", "(", ")", "count", "=", "first", "=", "last", "=", "0", "n", "=", "len", "(", "words", ")", "if", "n", ">", "1", ":", "count", "=", "words", "[", "1", "]", "if", "n", ">", "2", ":", "first", "=", "words", "[", "2", "]", "if", "n", ">", "3", ":", "last", "=", "words", "[", "3", "]", "if", "n", ">", "4", ":", "name", "=", "words", "[", "4", "]", ".", "lower", "(", ")", "return", "resp", ",", "count", ",", "first", ",", "last", ",", "name" ]
https://github.com/exodrifter/unity-python/blob/bef6e4e9ddfbbf1eaf7acbbb973e9aa3dd64a20d/Lib/nntplib.py#L344-L368
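The response parsing can be demonstrated without a news server by lifting it out of the socket layer; the 211 line below is a made-up example in the RFC 3977 shape:

def parse_group_response(resp, name):
    # Same parsing as NNTP.group above, minus the network round trip.
    if resp[:3] != '211':
        raise ValueError(resp)
    words = resp.split()
    count = first = last = 0
    if len(words) > 1: count = words[1]
    if len(words) > 2: first = words[2]
    if len(words) > 3: last = words[3]
    if len(words) > 4: name = words[4].lower()
    return resp, count, first, last, name

print(parse_group_response('211 1234 3000234 3002322 MISC.TEST', 'misc.test'))
# ('211 1234 3000234 3002322 MISC.TEST', '1234', '3000234', '3002322', 'misc.test')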
Terrance/SkPy
055a24f2087a79552f5ffebc8b2da28951313015
skpy/core.py
python
SkypeObj.rawToFields
(cls, raw={})
return {}
Convert the raw properties of an API response into class fields. Override to process additional values. Args: raw (dict): raw object, as provided by the API Returns: dict: a collection of fields, with keys matching :attr:`attrs`
Convert the raw properties of an API response into class fields. Override to process additional values.
[ "Convert", "the", "raw", "properties", "of", "an", "API", "response", "into", "class", "fields", ".", "Override", "to", "process", "additional", "values", "." ]
def rawToFields(cls, raw={}): """ Convert the raw properties of an API response into class fields. Override to process additional values. Args: raw (dict): raw object, as provided by the API Returns: dict: a collection of fields, with keys matching :attr:`attrs` """ return {}
[ "def", "rawToFields", "(", "cls", ",", "raw", "=", "{", "}", ")", ":", "return", "{", "}" ]
https://github.com/Terrance/SkPy/blob/055a24f2087a79552f5ffebc8b2da28951313015/skpy/core.py#L35-L45
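A sketch of how a subclass is presumably expected to use this hook, with a stubbed-down base class so the snippet runs on its own; the subclass, its fields, and the raw keys are invented:

class SkypeObj(object):
    """Stubbed-down base class, just enough to show the hook."""
    @classmethod
    def rawToFields(cls, raw={}):
        return {}

class SkypeThing(SkypeObj):
    # Hypothetical subclass: translate raw API keys into class fields.
    @classmethod
    def rawToFields(cls, raw={}):
        fields = super(SkypeThing, cls).rawToFields(raw)
        fields.update({"id": raw.get("id"), "name": raw.get("displayname")})
        return fields

print(SkypeThing.rawToFields({"id": "8:alice", "displayname": "Alice"}))
# {'id': '8:alice', 'name': 'Alice'}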
whoosh-community/whoosh
5421f1ab3bb802114105b3181b7ce4f44ad7d0bb
src/whoosh/writing.py
python
IndexWriter.delete_by_term
(self, fieldname, text, searcher=None)
return self.delete_by_query(q, searcher=searcher)
Deletes any documents containing "term" in the "fieldname" field. This is useful when you have an indexed field containing a unique ID (such as "pathname") for each document. :returns: the number of documents deleted.
Deletes any documents containing "term" in the "fieldname" field. This is useful when you have an indexed field containing a unique ID (such as "pathname") for each document.
[ "Deletes", "any", "documents", "containing", "term", "in", "the", "fieldname", "field", ".", "This", "is", "useful", "when", "you", "have", "an", "indexed", "field", "containing", "a", "unique", "ID", "(", "such", "as", "pathname", ")", "for", "each", "document", "." ]
def delete_by_term(self, fieldname, text, searcher=None): """Deletes any documents containing "term" in the "fieldname" field. This is useful when you have an indexed field containing a unique ID (such as "pathname") for each document. :returns: the number of documents deleted. """ from whoosh.query import Term q = Term(fieldname, text) return self.delete_by_query(q, searcher=searcher)
[ "def", "delete_by_term", "(", "self", ",", "fieldname", ",", "text", ",", "searcher", "=", "None", ")", ":", "from", "whoosh", ".", "query", "import", "Term", "q", "=", "Term", "(", "fieldname", ",", "text", ")", "return", "self", ".", "delete_by_query", "(", "q", ",", "searcher", "=", "searcher", ")" ]
https://github.com/whoosh-community/whoosh/blob/5421f1ab3bb802114105b3181b7ce4f44ad7d0bb/src/whoosh/writing.py#L298-L309
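A hedged usage sketch against a throwaway on-disk index, following the unique-ID pattern the docstring describes (the field names are arbitrary):

import tempfile
from whoosh import index
from whoosh.fields import Schema, ID, TEXT

schema = Schema(path=ID(unique=True, stored=True), body=TEXT)
ix = index.create_in(tempfile.mkdtemp(), schema)

w = ix.writer()
w.add_document(path=u"/a", body=u"alpha")
w.add_document(path=u"/b", body=u"beta")
w.commit()

w = ix.writer()
w.delete_by_term("path", u"/a")  # delete via the unique ID field
w.commit()

with ix.searcher() as s:
    print([d["path"] for d in s.documents()])  # ['/b']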
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/benchmarks/src/benchmarks/sympy/sympy/polys/factortools.py
python
dup_ext_factor
(f, K)
return lc, factors
Factor univariate polynomials over algebraic number fields.
Factor univariate polynomials over algebraic number fields.
[ "Factor", "univariate", "polynomials", "over", "algebraic", "number", "fields", "." ]
def dup_ext_factor(f, K): """Factor univariate polynomials over algebraic number fields. """ n, lc = dup_degree(f), dup_LC(f, K) f = dup_monic(f, K) if n <= 0: return lc, [] if n == 1: return lc, [(f, 1)] f, F = dup_sqf_part(f, K), f s, g, r = dup_sqf_norm(f, K) factors = dup_factor_list_include(r, K.dom) if len(factors) == 1: return lc, [(f, n//dup_degree(f))] H = s*K.unit for i, (factor, _) in enumerate(factors): h = dup_convert(factor, K.dom, K) h, _, g = dup_inner_gcd(h, g, K) h = dup_shift(h, H, K) factors[i] = h factors = dup_trial_division(F, factors, K) return lc, factors
[ "def", "dup_ext_factor", "(", "f", ",", "K", ")", ":", "n", ",", "lc", "=", "dup_degree", "(", "f", ")", ",", "dup_LC", "(", "f", ",", "K", ")", "f", "=", "dup_monic", "(", "f", ",", "K", ")", "if", "n", "<=", "0", ":", "return", "lc", ",", "[", "]", "if", "n", "==", "1", ":", "return", "lc", ",", "[", "(", "f", ",", "1", ")", "]", "f", ",", "F", "=", "dup_sqf_part", "(", "f", ",", "K", ")", ",", "f", "s", ",", "g", ",", "r", "=", "dup_sqf_norm", "(", "f", ",", "K", ")", "factors", "=", "dup_factor_list_include", "(", "r", ",", "K", ".", "dom", ")", "if", "len", "(", "factors", ")", "==", "1", ":", "return", "lc", ",", "[", "(", "f", ",", "n", "//", "dup_degree", "(", "f", ")", ")", "]", "H", "=", "s", "*", "K", ".", "unit", "for", "i", ",", "(", "factor", ",", "_", ")", "in", "enumerate", "(", "factors", ")", ":", "h", "=", "dup_convert", "(", "factor", ",", "K", ".", "dom", ",", "K", ")", "h", ",", "_", ",", "g", "=", "dup_inner_gcd", "(", "h", ",", "g", ",", "K", ")", "h", "=", "dup_shift", "(", "h", ",", "H", ",", "K", ")", "factors", "[", "i", "]", "=", "h", "factors", "=", "dup_trial_division", "(", "F", ",", "factors", ",", "K", ")", "return", "lc", ",", "factors" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/sympy/sympy/polys/factortools.py#L1100-L1128
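This low-level routine sits behind SymPy's public factoring API; the extension= keyword reaches it when factoring over an algebraic number field, e.g.:

from sympy import symbols, sqrt, factor

x = symbols('x')
# Over QQ, x**2 - 2 is irreducible; over QQ(sqrt(2)) it splits.
print(factor(x**2 - 2))                     # x**2 - 2
print(factor(x**2 - 2, extension=sqrt(2)))  # (x - sqrt(2))*(x + sqrt(2))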
nipy/nipy
d16d268938dcd5c15748ca051532c21f57cf8a22
nipy/externals/configobj.py
python
ConfigObj._a_to_u
(self, aString)
Decode ASCII strings to unicode if self.encoding is specified.
Decode ASCII strings to unicode if self.encoding is specified.
[ "Decode", "ASCII", "strings", "to", "unicode", "if", "a", "self", ".", "encoding", "is", "specified", "." ]
def _a_to_u(self, aString): """Decode ASCII strings to unicode if self.encoding is specified.""" if isinstance(aString, six.binary_type) and self.encoding: return aString.decode(self.encoding) else: return aString
[ "def", "_a_to_u", "(", "self", ",", "aString", ")", ":", "if", "isinstance", "(", "aString", ",", "six", ".", "binary_type", ")", "and", "self", ".", "encoding", ":", "return", "aString", ".", "decode", "(", "self", ".", "encoding", ")", "else", ":", "return", "aString" ]
https://github.com/nipy/nipy/blob/d16d268938dcd5c15748ca051532c21f57cf8a22/nipy/externals/configobj.py#L1490-L1495
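The pattern is easy to exercise in isolation; a minimal stand-in class with the same method body (six.binary_type is bytes on Python 3):

import six

class Demo(object):
    encoding = 'utf-8'
    def _a_to_u(self, aString):
        if isinstance(aString, six.binary_type) and self.encoding:
            return aString.decode(self.encoding)
        else:
            return aString

d = Demo()
assert d._a_to_u(b'caf\xc3\xa9') == u'café'
assert d._a_to_u(u'already text') == u'already text'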
djblets/djblets
0496e1ec49e43d43d776768c9fc5b6f8af56ec2c
djblets/mail/message.py
python
EmailMessage.message
(self)
return msg
Construct an outgoing message for the e-mail. This will construct a message based on the data provided to the constructor. This represents the e-mail that will later be sent using :py:meth:`send`. After calling this method, the message's ID will be stored in the :py:attr:`message_id` attribute for later reference. This does not need to be called manually. It's called by :py:meth:`send`. Returns: django.core.mail.message.SafeMIMEText: The resulting message.
Construct an outgoing message for the e-mail.
[ "Construct", "an", "outgoing", "message", "for", "the", "e", "-", "mail", "." ]
def message(self): """Construct an outgoing message for the e-mail. This will construct a message based on the data provided to the constructor. This represents the e-mail that will later be sent using :py:meth:`send`. After calling this method, the message's ID will be stored in the :py:attr:`message_id` attribute for later reference. This does not need to be called manually. It's called by :py:meth:`send`. Returns: django.core.mail.message.SafeMIMEText: The resulting message. """ msg = super(EmailMessage, self).message() self.message_id = msg['Message-ID'] for name, value_list in six.iterlists(self._headers): for value in value_list: # Use the native string on each version of Python. These # are headers, so they'll be convertible without encoding # issues. msg[name] = value return msg
[ "def", "message", "(", "self", ")", ":", "msg", "=", "super", "(", "EmailMessage", ",", "self", ")", ".", "message", "(", ")", "self", ".", "message_id", "=", "msg", "[", "'Message-ID'", "]", "for", "name", ",", "value_list", "in", "six", ".", "iterlists", "(", "self", ".", "_headers", ")", ":", "for", "value", "in", "value_list", ":", "# Use the native string on each version of Python. These", "# are headers, so they'll be convertible without encoding", "# issues.", "msg", "[", "name", "]", "=", "value", "return", "msg" ]
https://github.com/djblets/djblets/blob/0496e1ec49e43d43d776768c9fc5b6f8af56ec2c/djblets/mail/message.py#L285-L312
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
source/openerp/__init__.py
python
registry
(database_name=None)
return modules.registry.RegistryManager.get(database_name)
Return the model registry for the given database, or the database mentioned on the current thread. If the registry does not exist yet, it is created on the fly.
Return the model registry for the given database, or the database mentioned on the current thread. If the registry does not exist yet, it is created on the fly.
[ "Return", "the", "model", "registry", "for", "the", "given", "database", "or", "the", "database", "mentioned", "on", "the", "current", "thread", ".", "If", "the", "registry", "does", "not", "exist", "yet", "it", "is", "created", "on", "the", "fly", "." ]
def registry(database_name=None): """ Return the model registry for the given database, or the database mentioned on the current thread. If the registry does not exist yet, it is created on the fly. """ if database_name is None: import threading database_name = threading.currentThread().dbname return modules.registry.RegistryManager.get(database_name)
[ "def", "registry", "(", "database_name", "=", "None", ")", ":", "if", "database_name", "is", "None", ":", "import", "threading", "database_name", "=", "threading", ".", "currentThread", "(", ")", ".", "dbname", "return", "modules", ".", "registry", ".", "RegistryManager", ".", "get", "(", "database_name", ")" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/source/openerp/__init__.py#L59-L68
stackimpact/stackimpact-python
4d0a415b790c89e7bee1d70216f948b7fec11540
stackimpact/profilers/cpu_profiler.py
python
CPUProfiler.process_sample
(self, signal_frame)
[]
def process_sample(self, signal_frame): if self.profile: start = time.clock() if signal_frame: stack = self.recover_stack(signal_frame) if stack: self.update_profile(self.profile, stack) stack = None
[ "def", "process_sample", "(", "self", ",", "signal_frame", ")", ":", "if", "self", ".", "profile", ":", "start", "=", "time", ".", "clock", "(", ")", "if", "signal_frame", ":", "stack", "=", "self", ".", "recover_stack", "(", "signal_frame", ")", "if", "stack", ":", "self", ".", "update_profile", "(", "self", ".", "profile", ",", "stack", ")", "stack", "=", "None" ]
https://github.com/stackimpact/stackimpact-python/blob/4d0a415b790c89e7bee1d70216f948b7fec11540/stackimpact/profilers/cpu_profiler.py#L94-L102
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/min/timeit.py
python
timeit
(stmt="pass", setup="pass", timer=default_timer, number=default_number, globals=None)
return Timer(stmt, setup, timer, globals).timeit(number)
Convenience function to create Timer object and call timeit method.
Convenience function to create Timer object and call timeit method.
[ "Convenience", "function", "to", "create", "Timer", "object", "and", "call", "timeit", "method", "." ]
def timeit(stmt="pass", setup="pass", timer=default_timer, number=default_number, globals=None): """Convenience function to create Timer object and call timeit method.""" return Timer(stmt, setup, timer, globals).timeit(number)
[ "def", "timeit", "(", "stmt", "=", "\"pass\"", ",", "setup", "=", "\"pass\"", ",", "timer", "=", "default_timer", ",", "number", "=", "default_number", ",", "globals", "=", "None", ")", ":", "return", "Timer", "(", "stmt", ",", "setup", ",", "timer", ",", "globals", ")", ".", "timeit", "(", "number", ")" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/min/timeit.py#L231-L234
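Typical usage of the convenience function from the standard library:

import timeit

# Total seconds for 10000 runs of the statement.
total = timeit.timeit("sorted(xs)", setup="xs = list(range(100))[::-1]",
                      number=10000)
print(total / 10000)  # average seconds per call

# globals= lets the statement see the caller's namespace.
xs = list(range(100))
print(timeit.timeit("sorted(xs)", number=1000, globals=globals()))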
erikdubois/Aureola
005fb14b3cab0ba1929ebf9ac3ac68d2c6e1c0ef
lazuli8core/dropbox.py
python
running
(argv)
return int(is_dropbox_running())
u"""return whether dropbox is running dropbox running Returns 1 if running 0 if not running.
u"""return whether dropbox is running dropbox running
[ "u", "return", "whether", "dropbox", "is", "running", "dropbox", "running" ]
def running(argv): u"""return whether dropbox is running dropbox running Returns 1 if running 0 if not running. """ return int(is_dropbox_running())
[ "def", "running", "(", "argv", ")", ":", "return", "int", "(", "is_dropbox_running", "(", ")", ")" ]
https://github.com/erikdubois/Aureola/blob/005fb14b3cab0ba1929ebf9ac3ac68d2c6e1c0ef/lazuli8core/dropbox.py#L1200-L1206
overhangio/tutor
d45b36394af46de29d7817e2b45694d226d5677d
tutor/jobs.py
python
BaseJobRunner.run_job
(self, service: str, command: str)
Given a (potentially large) string command, run it with the corresponding service. Implementations will differ depending on the deployment strategy.
Given a (potentially large) string command, run it with the corresponding service. Implementations will differ depending on the deployment strategy.
[ "Given", "a", "(", "potentially", "large", ")", "string", "command", "run", "it", "with", "the", "corresponding", "service", ".", "Implementations", "will", "differ", "depending", "on", "the", "deployment", "strategy", "." ]
def run_job(self, service: str, command: str) -> int: """ Given a (potentially large) string command, run it with the corresponding service. Implementations will differ depending on the deployment strategy. """ raise NotImplementedError
[ "def", "run_job", "(", "self", ",", "service", ":", "str", ",", "command", ":", "str", ")", "->", "int", ":", "raise", "NotImplementedError" ]
https://github.com/overhangio/tutor/blob/d45b36394af46de29d7817e2b45694d226d5677d/tutor/jobs.py#L31-L37
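One plausible concrete runner, sketched with a stubbed base class so it runs standalone; the docker-compose invocation illustrates the contract (return the command's exit code), and is not tutor's actual implementation:

import subprocess

class BaseJobRunner:
    """Stub of the abstract runner, enough to show the contract."""
    def run_job(self, service: str, command: str) -> int:
        raise NotImplementedError

class DockerComposeJobRunner(BaseJobRunner):
    # Hand the (potentially multi-line) command to a shell in the service
    # container and surface the exit code.
    def run_job(self, service: str, command: str) -> int:
        return subprocess.call(
            ["docker-compose", "run", "--rm", service, "sh", "-e", "-c", command])

# DockerComposeJobRunner().run_job("lms", "./manage.py lms migrate")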
python-zk/kazoo
f585d605eea0a37a08aae95a8cc259b80da2ecf0
kazoo/handlers/gevent.py
python
SequentialGeventHandler.async_result
(self)
return AsyncResult()
Create a :class:`AsyncResult` instance The :class:`AsyncResult` instance will have its completion callbacks executed in the thread the :class:`SequentialGeventHandler` is created in (which should be the gevent/main thread).
Create a :class:`AsyncResult` instance
[ "Create", "a", ":", "class", ":", "AsyncResult", "instance" ]
def async_result(self): """Create a :class:`AsyncResult` instance The :class:`AsyncResult` instance will have its completion callbacks executed in the thread the :class:`SequentialGeventHandler` is created in (which should be the gevent/main thread). """ return AsyncResult()
[ "def", "async_result", "(", "self", ")", ":", "return", "AsyncResult", "(", ")" ]
https://github.com/python-zk/kazoo/blob/f585d605eea0a37a08aae95a8cc259b80da2ecf0/kazoo/handlers/gevent.py#L148-L157
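What the returned object behaves like, shown directly with gevent's own primitive (the value and delay are made up):

import gevent
from gevent.event import AsyncResult

ar = AsyncResult()
ar.rawlink(lambda source: print("callback got:", source.get()))
gevent.spawn_later(0.01, ar.set, "node-data")  # completed from another greenlet
print(ar.get(timeout=1))  # blocks this greenlet until the value is set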
WenmuZhou/PytorchOCR
0b2b3a67814ae40b20f3814d6793f5d75d644e38
torchocr/datasets/det_modules/augment.py
python
HorizontalFlip.__call__
(self, data: dict)
return data
Randomly flip the image and its text polygons horizontally. :param data: {'img':,'text_polys':,'texts':,'ignore_tags':} :return:
Randomly flip the image and its text polygons horizontally. :param data: {'img':,'text_polys':,'texts':,'ignore_tags':} :return:
[ "从scales中随机选择一个尺度,对图片和文本框进行缩放", ":", "param", "data", ":", "{", "img", ":", "text_polys", ":", "texts", ":", "ignore_tags", ":", "}", ":", "return", ":" ]
def __call__(self, data: dict) -> dict: """ Randomly flip the image and its text polygons horizontally. :param data: {'img':,'text_polys':,'texts':,'ignore_tags':} :return: """ if random.random() > self.random_rate: return data im = data['img'] text_polys = data['text_polys'] flip_text_polys = text_polys.copy() flip_im = cv2.flip(im, 1) h, w, _ = flip_im.shape flip_text_polys[:, :, 0] = w - flip_text_polys[:, :, 0] data['img'] = flip_im data['text_polys'] = flip_text_polys return data
[ "def", "__call__", "(", "self", ",", "data", ":", "dict", ")", "->", "dict", ":", "if", "random", ".", "random", "(", ")", ">", "self", ".", "random_rate", ":", "return", "data", "im", "=", "data", "[", "'img'", "]", "text_polys", "=", "data", "[", "'text_polys'", "]", "flip_text_polys", "=", "text_polys", ".", "copy", "(", ")", "flip_im", "=", "cv2", ".", "flip", "(", "im", ",", "1", ")", "h", ",", "w", ",", "_", "=", "flip_im", ".", "shape", "flip_text_polys", "[", ":", ",", ":", ",", "0", "]", "=", "w", "-", "flip_text_polys", "[", ":", ",", ":", ",", "0", "]", "data", "[", "'img'", "]", "=", "flip_im", "data", "[", "'text_polys'", "]", "=", "flip_text_polys", "return", "data" ]
https://github.com/WenmuZhou/PytorchOCR/blob/0b2b3a67814ae40b20f3814d6793f5d75d644e38/torchocr/datasets/det_modules/augment.py#L250-L268
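The coordinate mirroring is easy to check in isolation with NumPy and OpenCV; the array shapes are made up, and the module's w - x convention is kept as-is:

import numpy as np
import cv2

img = np.zeros((4, 6, 3), dtype=np.uint8)
polys = np.array([[[1, 0], [3, 0], [3, 2], [1, 2]]], dtype=np.float64)

flipped = cv2.flip(img, 1)  # mirror around the vertical axis
h, w, _ = flipped.shape
mirrored = polys.copy()
mirrored[:, :, 0] = w - mirrored[:, :, 0]  # the module's w - x convention
print(mirrored[0, :, 0])  # [5. 3. 3. 5.]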
PyMVPA/PyMVPA
76c476b3de8264b0bb849bf226da5674d659564e
mvpa2/measures/irelief.py
python
IterativeReliefOnline.__init__
(self, a=10.0, permute=True, max_iter=3, **kwargs)
Constructor of the IRELIEF class.
Constructor of the IRELIEF class.
[ "Constructor", "of", "the", "IRELIEF", "class", "." ]
def __init__(self, a=10.0, permute=True, max_iter=3, **kwargs): """Constructor of the IRELIEF class. """ # init base classes first IterativeRelief.__init__(self, **kwargs) self.a = a # parameter of the learning rate self.permute = permute # shuffle data when running I-RELIEF self.max_iter = max_iter
[ "def", "__init__", "(", "self", ",", "a", "=", "10.0", ",", "permute", "=", "True", ",", "max_iter", "=", "3", ",", "*", "*", "kwargs", ")", ":", "# init base classes first", "IterativeRelief", ".", "__init__", "(", "self", ",", "*", "*", "kwargs", ")", "self", ".", "a", "=", "a", "# parameter of the learning rate", "self", ".", "permute", "=", "permute", "# shuffle data when running I-RELIEF", "self", ".", "max_iter", "=", "max_iter" ]
https://github.com/PyMVPA/PyMVPA/blob/76c476b3de8264b0bb849bf226da5674d659564e/mvpa2/measures/irelief.py#L390-L399
tualatrix/ubuntu-tweak
24ee3575da39622d8ff1ae2db8630846436772d4
ubuntutweak/modules/__init__.py
python
TweakModule.reparent
(self, widget)
If module use glade, it must call this method to reparent the main frame
If a module uses Glade, it must call this method to reparent the main frame
[ "If", "module", "use", "glade", "it", "must", "call", "this", "method", "to", "reparent", "the", "main", "frame" ]
def reparent(self, widget): ''' If module use glade, it must call this method to reparent the main frame ''' widget.reparent(self.inner_vbox)
[ "def", "reparent", "(", "self", ",", "widget", ")", ":", "widget", ".", "reparent", "(", "self", ".", "inner_vbox", ")" ]
https://github.com/tualatrix/ubuntu-tweak/blob/24ee3575da39622d8ff1ae2db8630846436772d4/ubuntutweak/modules/__init__.py#L311-L315