Column schema (dataset viewer summary):

  column              type            lengths / classes
  nwo                 stringlengths   5–106
  sha                 stringlengths   40–40
  path                stringlengths   4–174
  language            stringclasses   1 value
  identifier          stringlengths   1–140
  parameters          stringlengths   0–87.7k
  argument_list       stringclasses   1 value
  return_statement    stringlengths   0–426k
  docstring           stringlengths   0–64.3k
  docstring_summary   stringlengths   0–26.3k
  docstring_tokens    list
  function            stringlengths   18–4.83M
  function_tokens     list
  url                 stringlengths   83–304
Abjad/abjad
d0646dfbe83db3dc5ab268f76a0950712b87b7fd
abjad/duration.py
python
Duration.__reduce__
(self)
return type(self), (self.numerator, self.denominator)
Documentation required.
Documentation required.
[ "Documentation", "required", "." ]
def __reduce__(self):
    """
    Documentation required.
    """
    return type(self), (self.numerator, self.denominator)
[ "def", "__reduce__", "(", "self", ")", ":", "return", "type", "(", "self", ")", ",", "(", "self", ".", "numerator", ",", "self", ".", "denominator", ")" ]
https://github.com/Abjad/abjad/blob/d0646dfbe83db3dc5ab268f76a0950712b87b7fd/abjad/duration.py#L385-L389
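As a quick illustration of what this __reduce__ buys (a sketch, assuming abjad is installed and exports Duration at the package level): pickle rebuilds the value from the class and the (numerator, denominator) pair.

import pickle
from abjad import Duration  # assumed top-level export

d = Duration(3, 16)
assert pickle.loads(pickle.dumps(d)) == d   # round-trips via __reduce__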
pwnieexpress/pwn_plug_sources
1a23324f5dc2c3de20f9c810269b6a29b2758cad
src/metagoofil/hachoir_core/event_handler.py
python
EventHandler.raiseEvent
(self, event_name, *args)
Raise an event: call each handler for this event_name.
Raise an event: call each handler for this event_name.
[ "Raiser", "an", "event", ":", "call", "each", "handler", "for", "this", "event_name", "." ]
def raiseEvent(self, event_name, *args):
    """
    Raise an event: call each handler for this event_name.
    """
    if event_name not in self.handlers:
        return
    for handler in self.handlers[event_name]:
        handler(*args)
[ "def", "raiseEvent", "(", "self", ",", "event_name", ",", "*", "args", ")", ":", "if", "event_name", "not", "in", "self", ".", "handlers", ":", "return", "for", "handler", "in", "self", ".", "handlers", "[", "event_name", "]", ":", "handler", "(", "*", "args", ")" ]
https://github.com/pwnieexpress/pwn_plug_sources/blob/1a23324f5dc2c3de20f9c810269b6a29b2758cad/src/metagoofil/hachoir_core/event_handler.py#L18-L25
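The same dispatch pattern in a self-contained sketch (names here are illustrative, not from hachoir): a dict maps each event name to a list of callables, and raising an unregistered event is a no-op.

handlers = {"progress": [lambda pct: print(f"{pct}% done")]}

def raise_event(event_name, *args):
    if event_name not in handlers:
        return
    for handler in handlers[event_name]:
        handler(*args)

raise_event("progress", 42)   # prints "42% done"
raise_event("missing")        # no handlers registered: silently returns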
tsroten/pynlpir
384fbd8c34312645ddeb11ca848686030d693f1f
pynlpir/__init__.py
python
close
()
Exits the NLPIR API and frees allocated memory. This calls the function :func:`~pynlpir.nlpir.Exit`.
Exits the NLPIR API and frees allocated memory.
[ "Exits", "the", "NLPIR", "API", "and", "frees", "allocated", "memory", "." ]
def close():
    """Exits the NLPIR API and frees allocated memory.

    This calls the function :func:`~pynlpir.nlpir.Exit`.

    """
    logger.debug("Exiting the NLPIR API.")
    if not nlpir.Exit():
        logger.warning("NLPIR function 'NLPIR_Exit' failed.")
    else:
        logger.debug("NLPIR API exited.")
[ "def", "close", "(", ")", ":", "logger", ".", "debug", "(", "\"Exiting the NLPIR API.\"", ")", "if", "not", "nlpir", ".", "Exit", "(", ")", ":", "logger", ".", "warning", "(", "\"NLPIR function 'NLPIR_Exit' failed.\"", ")", "else", ":", "logger", ".", "debug", "(", "\"NLPIR API exited.\"", ")" ]
https://github.com/tsroten/pynlpir/blob/384fbd8c34312645ddeb11ca848686030d693f1f/pynlpir/__init__.py#L113-L123
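A usage sketch of the open/use/close lifecycle (assuming pynlpir and its NLPIR license data are installed):

import pynlpir

pynlpir.open()                      # initialize the NLPIR API
print(pynlpir.segment('我爱北京'))  # do some work with it
pynlpir.close()                     # then free the allocated memory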
szymonmaszke/torchlayers
24669cda500852f0285feac06b76e43afc8d65fa
torchlayers/activations.py
python
swish
(tensor: torch.Tensor, beta: float = 1.0)
return torch.sigmoid(beta * tensor) * tensor
Applies Swish function element-wise. See :class:`torchlayers.activations.Swish` for more details. Parameters ---------- tensor : torch.Tensor Tensor activated element-wise beta : float, optional Multiplier used for sigmoid. Default: 1.0 (no multiplier) Returns ------- torch.Tensor
Applies Swish function element-wise.
[ "Applies", "Swish", "function", "element", "-", "wise", "." ]
def swish(tensor: torch.Tensor, beta: float = 1.0) -> torch.Tensor:
    """
    Applies Swish function element-wise.

    See :class:`torchlayers.activations.Swish` for more details.

    Parameters
    ----------
    tensor : torch.Tensor
        Tensor activated element-wise
    beta : float, optional
        Multiplier used for sigmoid. Default: 1.0 (no multiplier)

    Returns
    -------
    torch.Tensor

    """
    return torch.sigmoid(beta * tensor) * tensor
[ "def", "swish", "(", "tensor", ":", "torch", ".", "Tensor", ",", "beta", ":", "float", "=", "1.0", ")", "->", "torch", ".", "Tensor", ":", "return", "torch", ".", "sigmoid", "(", "beta", "*", "tensor", ")", "*", "tensor" ]
https://github.com/szymonmaszke/torchlayers/blob/24669cda500852f0285feac06b76e43afc8d65fa/torchlayers/activations.py#L41-L58
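A quick check of the definition (assuming torch and torchlayers are installed): swish(x, beta) equals x * sigmoid(beta * x), element-wise.

import torch
from torchlayers.activations import swish

x = torch.linspace(-3.0, 3.0, steps=7)
assert torch.allclose(swish(x, beta=1.0), x * torch.sigmoid(x))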
xmyqsh/FPN
5473ce069e8a469e837ad65b95411eac599c92ba
lib/roi_data_layer/roidb.py
python
_compute_targets
(rois, overlaps, labels)
return targets
Compute bounding-box regression targets for an image. For each RoI, find the corresponding gt_box, then compute the distance.
Compute bounding-box regression targets for an image. For each RoI, find the corresponding gt_box, then compute the distance.
[ "Compute", "bounding", "-", "box", "regression", "targets", "for", "an", "image", ".", "for", "each", "roi", "find", "the", "corresponding", "gt_box", "then", "compute", "the", "distance", "." ]
def _compute_targets(rois, overlaps, labels):
    """
    Compute bounding-box regression targets for an image.
    For each RoI, find the corresponding gt_box, then compute the distance.
    """
    # Indices of ground-truth ROIs
    gt_inds = np.where(overlaps == 1)[0]
    if len(gt_inds) == 0:
        # Bail if the image has no ground-truth ROIs
        return np.zeros((rois.shape[0], 5), dtype=np.float32)
    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = bbox_overlaps(
        np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),
        np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]

    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    targets[ex_inds, 0] = labels[ex_inds]
    targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
    return targets
[ "def", "_compute_targets", "(", "rois", ",", "overlaps", ",", "labels", ")", ":", "# Indices of ground-truth ROIs", "gt_inds", "=", "np", ".", "where", "(", "overlaps", "==", "1", ")", "[", "0", "]", "if", "len", "(", "gt_inds", ")", "==", "0", ":", "# Bail if the image has no ground-truth ROIs", "return", "np", ".", "zeros", "(", "(", "rois", ".", "shape", "[", "0", "]", ",", "5", ")", ",", "dtype", "=", "np", ".", "float32", ")", "# Indices of examples for which we try to make predictions", "ex_inds", "=", "np", ".", "where", "(", "overlaps", ">=", "cfg", ".", "TRAIN", ".", "BBOX_THRESH", ")", "[", "0", "]", "# Get IoU overlap between each ex ROI and gt ROI", "ex_gt_overlaps", "=", "bbox_overlaps", "(", "np", ".", "ascontiguousarray", "(", "rois", "[", "ex_inds", ",", ":", "]", ",", "dtype", "=", "np", ".", "float", ")", ",", "np", ".", "ascontiguousarray", "(", "rois", "[", "gt_inds", ",", ":", "]", ",", "dtype", "=", "np", ".", "float", ")", ")", "# Find which gt ROI each ex ROI has max overlap with:", "# this will be the ex ROI's gt target", "gt_assignment", "=", "ex_gt_overlaps", ".", "argmax", "(", "axis", "=", "1", ")", "gt_rois", "=", "rois", "[", "gt_inds", "[", "gt_assignment", "]", ",", ":", "]", "ex_rois", "=", "rois", "[", "ex_inds", ",", ":", "]", "targets", "=", "np", ".", "zeros", "(", "(", "rois", ".", "shape", "[", "0", "]", ",", "5", ")", ",", "dtype", "=", "np", ".", "float32", ")", "targets", "[", "ex_inds", ",", "0", "]", "=", "labels", "[", "ex_inds", "]", "targets", "[", "ex_inds", ",", "1", ":", "]", "=", "bbox_transform", "(", "ex_rois", ",", "gt_rois", ")", "return", "targets" ]
https://github.com/xmyqsh/FPN/blob/5473ce069e8a469e837ad65b95411eac599c92ba/lib/roi_data_layer/roidb.py#L121-L148
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/freedompro/climate.py
python
async_setup_entry
( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback )
Set up Freedompro climate.
Set up Freedompro climate.
[ "Set", "up", "Freedompro", "climate", "." ]
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up Freedompro climate."""
    api_key = entry.data[CONF_API_KEY]
    coordinator = hass.data[DOMAIN][entry.entry_id]
    async_add_entities(
        Device(
            aiohttp_client.async_get_clientsession(hass), api_key, device, coordinator
        )
        for device in coordinator.data
        if device["type"] == "thermostat"
    )
[ "async", "def", "async_setup_entry", "(", "hass", ":", "HomeAssistant", ",", "entry", ":", "ConfigEntry", ",", "async_add_entities", ":", "AddEntitiesCallback", ")", "->", "None", ":", "api_key", "=", "entry", ".", "data", "[", "CONF_API_KEY", "]", "coordinator", "=", "hass", ".", "data", "[", "DOMAIN", "]", "[", "entry", ".", "entry_id", "]", "async_add_entities", "(", "Device", "(", "aiohttp_client", ".", "async_get_clientsession", "(", "hass", ")", ",", "api_key", ",", "device", ",", "coordinator", ")", "for", "device", "in", "coordinator", ".", "data", "if", "device", "[", "\"type\"", "]", "==", "\"thermostat\"", ")" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/freedompro/climate.py#L38-L50
kwotsin/mimicry
70ce919b0684b14af264881cc6acf4eccaff42b2
torch_mimicry/nets/wgan_gp/wgan_gp_48.py
python
WGANGPGenerator48.forward
(self, x)
return h
r""" Feedforwards a batch of noise vectors into a batch of fake images. Args: x (Tensor): A batch of noise vectors of shape (N, nz). Returns: Tensor: A batch of fake images of shape (N, C, H, W).
r""" Feedforwards a batch of noise vectors into a batch of fake images.
[ "r", "Feedforwards", "a", "batch", "of", "noise", "vectors", "into", "a", "batch", "of", "fake", "images", "." ]
def forward(self, x):
    r"""
    Feedforwards a batch of noise vectors into a batch of fake images.

    Args:
        x (Tensor): A batch of noise vectors of shape (N, nz).

    Returns:
        Tensor: A batch of fake images of shape (N, C, H, W).
    """
    h = self.l1(x)
    h = h.view(x.shape[0], -1, self.bottom_width, self.bottom_width)
    h = self.block2(h)
    h = self.block3(h)
    h = self.block4(h)
    h = self.b5(h)
    h = self.activation(h)
    h = torch.tanh(self.c5(h))
    return h
[ "def", "forward", "(", "self", ",", "x", ")", ":", "h", "=", "self", ".", "l1", "(", "x", ")", "h", "=", "h", ".", "view", "(", "x", ".", "shape", "[", "0", "]", ",", "-", "1", ",", "self", ".", "bottom_width", ",", "self", ".", "bottom_width", ")", "h", "=", "self", ".", "block2", "(", "h", ")", "h", "=", "self", ".", "block3", "(", "h", ")", "h", "=", "self", ".", "block4", "(", "h", ")", "h", "=", "self", ".", "b5", "(", "h", ")", "h", "=", "self", ".", "activation", "(", "h", ")", "h", "=", "torch", ".", "tanh", "(", "self", ".", "c5", "(", "h", ")", ")", "return", "h" ]
https://github.com/kwotsin/mimicry/blob/70ce919b0684b14af264881cc6acf4eccaff42b2/torch_mimicry/nets/wgan_gp/wgan_gp_48.py#L36-L55
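A hedged usage sketch (assuming torch_mimicry is installed; the noise dimension nz defaulting to 128 is an assumption here, not stated in the row):

import torch
from torch_mimicry.nets.wgan_gp.wgan_gp_48 import WGANGPGenerator48

netG = WGANGPGenerator48()
z = torch.randn(4, 128)   # batch of 4 noise vectors (N, nz); nz=128 assumed
fake = netG(z)            # -> (4, 3, 48, 48); tanh bounds outputs to [-1, 1]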
s0md3v/Arjun
9e7fe278867869f7c8ff33889024175224db82e2
arjun/core/importer.py
python
importer
(path, method, headers, include)
main importer function that calls other import functions
main importer function that calls other import functions
[ "main", "importer", "function", "that", "calls", "other", "import", "functions" ]
def importer(path, method, headers, include):
    """
    main importer function that calls other import functions
    """
    with open(path, 'r', encoding='utf-8') as file:
        for line in file:
            if line.startswith('<?xml'):
                return burp_import(path)
            elif line.startswith(('http://', 'https://')):
                return urls_import(path, method, headers, include)
            elif line.startswith(('GET', 'POST')):
                return request_import(path)
    return 'unknown'
[ "def", "importer", "(", "path", ",", "method", ",", "headers", ",", "include", ")", ":", "with", "open", "(", "path", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "file", ":", "for", "line", "in", "file", ":", "if", "line", ".", "startswith", "(", "'<?xml'", ")", ":", "return", "burp_import", "(", "path", ")", "elif", "line", ".", "startswith", "(", "(", "'http://'", ",", "'https://'", ")", ")", ":", "return", "urls_import", "(", "path", ",", "method", ",", "headers", ",", "include", ")", "elif", "line", ".", "startswith", "(", "(", "'GET'", ",", "'POST'", ")", ")", ":", "return", "request_import", "(", "path", ")", "return", "'unknown'" ]
https://github.com/s0md3v/Arjun/blob/9e7fe278867869f7c8ff33889024175224db82e2/arjun/core/importer.py#L103-L115
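The format-sniffing idea in miniature (self-contained; function and label names are illustrative): classify a file by the prefix of a line.

def sniff_format(first_line: str) -> str:
    if first_line.startswith('<?xml'):
        return 'burp-xml'
    if first_line.startswith(('http://', 'https://')):
        return 'url-list'
    if first_line.startswith(('GET', 'POST')):
        return 'raw-request'
    return 'unknown'

assert sniff_format('https://example.com/?q=1') == 'url-list'
assert sniff_format('GET /path HTTP/1.1') == 'raw-request'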
krintoxi/NoobSec-Toolkit
38738541cbc03cedb9a3b3ed13b629f781ad64f6
NoobSecToolkit /tools/sqli/thirdparty/bottle/bottle.py
python
WSGIHeaderDict.__len__
(self)
return len(self.keys())
[]
def __len__(self):
    return len(self.keys())
[ "def", "__len__", "(", "self", ")", ":", "return", "len", "(", "self", ".", "keys", "(", ")", ")" ]
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit /tools/sqli/thirdparty/bottle/bottle.py#L1879-L1879
wrye-bash/wrye-bash
d495c47cfdb44475befa523438a40c4419cb386f
Mopy/bash/_games_lo.py
python
Game.print_lo_paths
(self)
Prints the paths that will be used and what they'll be used for. Useful for debugging.
Prints the paths that will be used and what they'll be used for. Useful for debugging.
[ "Prints", "the", "paths", "that", "will", "be", "used", "and", "what", "they", "ll", "be", "used", "for", ".", "Useful", "for", "debugging", "." ]
def print_lo_paths(self):
    """Prints the paths that will be used and what they'll be used for.
    Useful for debugging."""
    lo_file = self.get_lo_file()
    acti_file = self.get_acti_file()
    if lo_file or acti_file:
        bolt.deprint(u'Using the following load order files:')
        if acti_file == lo_file:
            bolt.deprint(f' - Load order and active plugins: {acti_file}')
        else:
            if lo_file:
                bolt.deprint(f' - Load order: {lo_file}')
            if acti_file:
                bolt.deprint(f' - Active plugins: {acti_file}')
[ "def", "print_lo_paths", "(", "self", ")", ":", "lo_file", "=", "self", ".", "get_lo_file", "(", ")", "acti_file", "=", "self", ".", "get_acti_file", "(", ")", "if", "lo_file", "or", "acti_file", ":", "bolt", ".", "deprint", "(", "u'Using the following load order files:'", ")", "if", "acti_file", "==", "lo_file", ":", "bolt", ".", "deprint", "(", "f' - Load order and active plugins: {acti_file}'", ")", "else", ":", "if", "lo_file", ":", "bolt", ".", "deprint", "(", "f' - Load order: {lo_file}'", ")", "if", "acti_file", ":", "bolt", ".", "deprint", "(", "f' - Active plugins: {acti_file}'", ")" ]
https://github.com/wrye-bash/wrye-bash/blob/d495c47cfdb44475befa523438a40c4419cb386f/Mopy/bash/_games_lo.py#L662-L675
mozillazg/pypy
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
pypy/module/_winreg/interp_winreg.py
python
QueryInfoKey
(space, w_hkey)
tuple = QueryInfoKey(key) - Returns information about a key. key is an already open key, or any one of the predefined HKEY_* constants. The result is a tuple of 3 items: An integer that identifies the number of sub keys this key has. An integer that identifies the number of values this key has. A long integer that identifies when the key was last modified (if available) as 100's of nanoseconds since Jan 1, 1600.
tuple = QueryInfoKey(key) - Returns information about a key.
[ "tuple", "=", "QueryInfoKey", "(", "key", ")", "-", "Returns", "information", "about", "a", "key", "." ]
def QueryInfoKey(space, w_hkey):
    """tuple = QueryInfoKey(key) - Returns information about a key.

    key is an already open key, or any one of the predefined HKEY_* constants.

    The result is a tuple of 3 items:
    An integer that identifies the number of sub keys this key has.
    An integer that identifies the number of values this key has.
    A long integer that identifies when the key was last modified (if available)
    as 100's of nanoseconds since Jan 1, 1600."""
    hkey = hkey_w(w_hkey, space)
    with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as nSubKeys:
        with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as nValues:
            with lltype.scoped_alloc(rwin32.PFILETIME.TO, 1) as ft:
                null_dword = lltype.nullptr(rwin32.LPDWORD.TO)
                ret = rwinreg.RegQueryInfoKeyA(
                    hkey, None, null_dword, null_dword,
                    nSubKeys, null_dword, null_dword,
                    nValues, null_dword, null_dword,
                    null_dword, ft)
                if ret != 0:
                    raiseWindowsError(space, ret, 'RegQueryInfoKey')
                l = ((lltype.r_longlong(ft[0].c_dwHighDateTime) << 32) +
                     lltype.r_longlong(ft[0].c_dwLowDateTime))
                return space.newtuple([space.newint(nSubKeys[0]),
                                       space.newint(nValues[0]),
                                       space.newint(l)])
[ "def", "QueryInfoKey", "(", "space", ",", "w_hkey", ")", ":", "hkey", "=", "hkey_w", "(", "w_hkey", ",", "space", ")", "with", "lltype", ".", "scoped_alloc", "(", "rwin32", ".", "LPDWORD", ".", "TO", ",", "1", ")", "as", "nSubKeys", ":", "with", "lltype", ".", "scoped_alloc", "(", "rwin32", ".", "LPDWORD", ".", "TO", ",", "1", ")", "as", "nValues", ":", "with", "lltype", ".", "scoped_alloc", "(", "rwin32", ".", "PFILETIME", ".", "TO", ",", "1", ")", "as", "ft", ":", "null_dword", "=", "lltype", ".", "nullptr", "(", "rwin32", ".", "LPDWORD", ".", "TO", ")", "ret", "=", "rwinreg", ".", "RegQueryInfoKeyA", "(", "hkey", ",", "None", ",", "null_dword", ",", "null_dword", ",", "nSubKeys", ",", "null_dword", ",", "null_dword", ",", "nValues", ",", "null_dword", ",", "null_dword", ",", "null_dword", ",", "ft", ")", "if", "ret", "!=", "0", ":", "raiseWindowsError", "(", "space", ",", "ret", ",", "'RegQueryInfoKey'", ")", "l", "=", "(", "(", "lltype", ".", "r_longlong", "(", "ft", "[", "0", "]", ".", "c_dwHighDateTime", ")", "<<", "32", ")", "+", "lltype", ".", "r_longlong", "(", "ft", "[", "0", "]", ".", "c_dwLowDateTime", ")", ")", "return", "space", ".", "newtuple", "(", "[", "space", ".", "newint", "(", "nSubKeys", "[", "0", "]", ")", ",", "space", ".", "newint", "(", "nValues", "[", "0", "]", ")", ",", "space", ".", "newint", "(", "l", ")", "]", ")" ]
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/pypy/module/_winreg/interp_winreg.py#L714-L740
Abjad/abjad
d0646dfbe83db3dc5ab268f76a0950712b87b7fd
abjad/rhythmtrees.py
python
RhythmTreeMixin.parentage_ratios
(self)
return tuple(reversed(result))
A sequence describing the relative durations of the nodes in a node's improper parentage. The first item in the sequence is the preprolated_duration of the root node, and subsequent items are pairs of the preprolated duration of the next node in the parentage and the total preprolated_duration of that node and its siblings: >>> a = abjad.rhythmtrees.RhythmTreeContainer(preprolated_duration=1) >>> b = abjad.rhythmtrees.RhythmTreeContainer(preprolated_duration=2) >>> c = abjad.rhythmtrees.RhythmTreeLeaf(preprolated_duration=3) >>> d = abjad.rhythmtrees.RhythmTreeLeaf(preprolated_duration=4) >>> e = abjad.rhythmtrees.RhythmTreeLeaf(preprolated_duration=5) >>> a.extend([b, c]) >>> b.extend([d, e]) >>> a.parentage_ratios (Duration(1, 1),) >>> b.parentage_ratios (Duration(1, 1), (Duration(2, 1), Duration(5, 1))) >>> c.parentage_ratios (Duration(1, 1), (Duration(3, 1), Duration(5, 1))) >>> d.parentage_ratios (Duration(1, 1), (Duration(2, 1), Duration(5, 1)), (Duration(4, 1), Duration(9, 1))) >>> e.parentage_ratios (Duration(1, 1), (Duration(2, 1), Duration(5, 1)), (Duration(5, 1), Duration(9, 1))) Returns tuple.
A sequence describing the relative durations of the nodes in a node's improper parentage.
[ "A", "sequence", "describing", "the", "relative", "durations", "of", "the", "nodes", "in", "a", "node", "s", "improper", "parentage", "." ]
def parentage_ratios(self):
    """
    A sequence describing the relative durations of the nodes in a
    node's improper parentage.

    The first item in the sequence is the preprolated_duration of the
    root node, and subsequent items are pairs of the preprolated
    duration of the next node in the parentage and the total
    preprolated_duration of that node and its siblings:

    >>> a = abjad.rhythmtrees.RhythmTreeContainer(preprolated_duration=1)
    >>> b = abjad.rhythmtrees.RhythmTreeContainer(preprolated_duration=2)
    >>> c = abjad.rhythmtrees.RhythmTreeLeaf(preprolated_duration=3)
    >>> d = abjad.rhythmtrees.RhythmTreeLeaf(preprolated_duration=4)
    >>> e = abjad.rhythmtrees.RhythmTreeLeaf(preprolated_duration=5)

    >>> a.extend([b, c])
    >>> b.extend([d, e])

    >>> a.parentage_ratios
    (Duration(1, 1),)

    >>> b.parentage_ratios
    (Duration(1, 1), (Duration(2, 1), Duration(5, 1)))

    >>> c.parentage_ratios
    (Duration(1, 1), (Duration(3, 1), Duration(5, 1)))

    >>> d.parentage_ratios
    (Duration(1, 1), (Duration(2, 1), Duration(5, 1)), (Duration(4, 1), Duration(9, 1)))

    >>> e.parentage_ratios
    (Duration(1, 1), (Duration(2, 1), Duration(5, 1)), (Duration(5, 1), Duration(9, 1)))

    Returns tuple.
    """
    result = []
    node = self
    while node.parent is not None:
        result.append(
            (
                node.preprolated_duration,
                node.parent._get_contents_duration(),
            )
        )
        node = node.parent
    result.append(node.preprolated_duration)
    return tuple(reversed(result))
[ "def", "parentage_ratios", "(", "self", ")", ":", "result", "=", "[", "]", "node", "=", "self", "while", "node", ".", "parent", "is", "not", "None", ":", "result", ".", "append", "(", "(", "node", ".", "preprolated_duration", ",", "node", ".", "parent", ".", "_get_contents_duration", "(", ")", ",", ")", ")", "node", "=", "node", ".", "parent", "result", ".", "append", "(", "node", ".", "preprolated_duration", ")", "return", "tuple", "(", "reversed", "(", "result", ")", ")" ]
https://github.com/Abjad/abjad/blob/d0646dfbe83db3dc5ab268f76a0950712b87b7fd/abjad/rhythmtrees.py#L127-L175
scipy/scipy
e0a749f01e79046642ccfdc419edbf9e7ca141ad
scipy/ndimage/_filters.py
python
maximum_filter
(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0)
return _min_or_max_filter(input, size, footprint, None, output, mode, cval, origin, 0)
Calculate a multidimensional maximum filter. Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode_multiple)s %(cval)s %(origin_multiple)s Returns ------- maximum_filter : ndarray Filtered array. Has the same shape as `input`. Notes ----- A sequence of modes (one per axis) is only supported when the footprint is separable. Otherwise, a single mode string must be provided. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.maximum_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show()
Calculate a multidimensional maximum filter.
[ "Calculate", "a", "multidimensional", "maximum", "filter", "." ]
def maximum_filter(input, size=None, footprint=None, output=None,
                   mode="reflect", cval=0.0, origin=0):
    """Calculate a multidimensional maximum filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s

    Returns
    -------
    maximum_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    A sequence of modes (one per axis) is only supported when the footprint
    is separable. Otherwise, a single mode string must be provided.

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = ndimage.maximum_filter(ascent, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    return _min_or_max_filter(input, size, footprint, None, output, mode,
                              cval, origin, 0)
[ "def", "maximum_filter", "(", "input", ",", "size", "=", "None", ",", "footprint", "=", "None", ",", "output", "=", "None", ",", "mode", "=", "\"reflect\"", ",", "cval", "=", "0.0", ",", "origin", "=", "0", ")", ":", "return", "_min_or_max_filter", "(", "input", ",", "size", ",", "footprint", ",", "None", ",", "output", ",", "mode", ",", "cval", ",", "origin", ",", "0", ")" ]
https://github.com/scipy/scipy/blob/e0a749f01e79046642ccfdc419edbf9e7ca141ad/scipy/ndimage/_filters.py#L1206-L1244
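A concrete, non-plotting example (scipy and numpy assumed): with size=3 and the default reflect boundary, each output cell is the maximum over the 3x3 window centered on it.

import numpy as np
from scipy import ndimage

a = np.array([[1, 2, 0],
              [4, 0, 0],
              [0, 0, 9]])
print(ndimage.maximum_filter(a, size=3))
# [[4 4 2]
#  [4 9 9]
#  [4 9 9]]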
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/apps/reports/generic.py
python
GenericTabularReport.json_dict
(self)
return ret
When you implement self.rows for a paginated report, it should take into consideration the following: self.pagination.start (skip) self.pagination.count (limit)
When you implement self.rows for a paginated report, it should take into consideration the following: self.pagination.start (skip) self.pagination.count (limit)
[ "When", "you", "implement", "self", ".", "rows", "for", "a", "paginated", "report", "it", "should", "take", "into", "consideration", "the", "following", ":", "self", ".", "pagination", ".", "start", "(", "skip", ")", "self", ".", "pagination", ".", "count", "(", "limit", ")" ]
def json_dict(self):
    """
    When you implement self.rows for a paginated report, it should take
    into consideration the following:
    self.pagination.start (skip)
    self.pagination.count (limit)
    """
    rows = _sanitize_rows(self.rows)
    total_records = self.total_records
    if not isinstance(total_records, int):
        raise ValueError("Property 'total_records' should return an int.")
    total_filtered_records = self.total_filtered_records
    if not isinstance(total_filtered_records, int):
        raise ValueError("Property 'total_filtered_records' should return an int.")
    ret = dict(
        sEcho=self.pagination.echo,
        iTotalRecords=total_records,
        iTotalDisplayRecords=total_filtered_records if total_filtered_records >= 0 else total_records,
        aaData=rows,
    )
    if self.total_row:
        ret["total_row"] = list(self.total_row)
    if self.statistics_rows:
        ret["statistics_rows"] = list(self.statistics_rows)
    return ret
[ "def", "json_dict", "(", "self", ")", ":", "rows", "=", "_sanitize_rows", "(", "self", ".", "rows", ")", "total_records", "=", "self", ".", "total_records", "if", "not", "isinstance", "(", "total_records", ",", "int", ")", ":", "raise", "ValueError", "(", "\"Property 'total_records' should return an int.\"", ")", "total_filtered_records", "=", "self", ".", "total_filtered_records", "if", "not", "isinstance", "(", "total_filtered_records", ",", "int", ")", ":", "raise", "ValueError", "(", "\"Property 'total_filtered_records' should return an int.\"", ")", "ret", "=", "dict", "(", "sEcho", "=", "self", ".", "pagination", ".", "echo", ",", "iTotalRecords", "=", "total_records", ",", "iTotalDisplayRecords", "=", "total_filtered_records", "if", "total_filtered_records", ">=", "0", "else", "total_records", ",", "aaData", "=", "rows", ",", ")", "if", "self", ".", "total_row", ":", "ret", "[", "\"total_row\"", "]", "=", "list", "(", "self", ".", "total_row", ")", "if", "self", ".", "statistics_rows", ":", "ret", "[", "\"statistics_rows\"", "]", "=", "list", "(", "self", ".", "statistics_rows", ")", "return", "ret" ]
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/reports/generic.py#L916-L942
Netflix/security_monkey
c28592ffd518fa399527d26262683fc860c30eef
security_monkey/auditors/github/repo.py
python
GitHubRepoAuditor.check_for_admin_teams
(self, repo_item)
Alert when repo has a team with admin permissions attached. Score = 3 :param repo_item: :return:
Alert when repo has a team with admin permissions attached. Score = 3 :param repo_item: :return:
[ "Alert", "when", "repo", "has", "a", "team", "with", "admin", "permissions", "attached", ".", "Score", "=", "3", ":", "param", "repo_item", ":", ":", "return", ":" ]
def check_for_admin_teams(self, repo_item):
    """
    Alert when repo has a team with admin permissions attached.

    Score = 3

    :param repo_item:
    :return:
    """
    tag = "Repo has teams with admin permissions."

    for permission in itervalues(repo_item.config["team_permissions"]):
        if permission == "admin":
            self.add_issue(3, tag, repo_item,
                           notes="Repo has a team with admin permissions to it.")
[ "def", "check_for_admin_teams", "(", "self", ",", "repo_item", ")", ":", "tag", "=", "\"Repo has teams with admin permissions.\"", "for", "permission", "in", "itervalues", "(", "repo_item", ".", "config", "[", "\"team_permissions\"", "]", ")", ":", "if", "permission", "==", "\"admin\"", ":", "self", ".", "add_issue", "(", "3", ",", "tag", ",", "repo_item", ",", "notes", "=", "\"Repo has a team with admin permissions to it.\"", ")" ]
https://github.com/Netflix/security_monkey/blob/c28592ffd518fa399527d26262683fc860c30eef/security_monkey/auditors/github/repo.py#L108-L118
guildai/guildai
1665985a3d4d788efc1a3180ca51cc417f71ca78
guild/external/pip/_internal/utils/misc.py
python
dist_in_usersite
(dist)
return norm_path.startswith(normalize_path(user_site))
Return True if given Distribution is installed in user site.
Return True if given Distribution is installed in user site.
[ "Return", "True", "if", "given", "Distribution", "is", "installed", "in", "user", "site", "." ]
def dist_in_usersite(dist):
    """
    Return True if given Distribution is installed in user site.
    """
    norm_path = normalize_path(dist_location(dist))
    return norm_path.startswith(normalize_path(user_site))
[ "def", "dist_in_usersite", "(", "dist", ")", ":", "norm_path", "=", "normalize_path", "(", "dist_location", "(", "dist", ")", ")", "return", "norm_path", ".", "startswith", "(", "normalize_path", "(", "user_site", ")", ")" ]
https://github.com/guildai/guildai/blob/1665985a3d4d788efc1a3180ca51cc417f71ca78/guild/external/pip/_internal/utils/misc.py#L309-L314
kuri65536/python-for-android
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
python-modules/twisted/twisted/spread/flavors.py
python
RemoteCopy.setCopyableState
(self, state)
I will be invoked with the state to copy locally. 'state' is the data returned from the remote object's 'getStateToCopyFor' method, which will often be the remote object's dictionary (or a filtered approximation of it depending on my peer's perspective).
I will be invoked with the state to copy locally.
[ "I", "will", "be", "invoked", "with", "the", "state", "to", "copy", "locally", "." ]
def setCopyableState(self, state):
    """I will be invoked with the state to copy locally.

    'state' is the data returned from the remote object's
    'getStateToCopyFor' method, which will often be the remote
    object's dictionary (or a filtered approximation of it depending
    on my peer's perspective).
    """
    self.__dict__ = state
[ "def", "setCopyableState", "(", "self", ",", "state", ")", ":", "self", ".", "__dict__", "=", "state" ]
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/twisted/twisted/spread/flavors.py#L377-L386
JiYou/openstack
8607dd488bde0905044b303eb6e52bdea6806923
packages/source/quantum/quantum/db/l3_db.py
python
L3_NAT_db_mixin.prevent_l3_port_deletion
(self, context, port_id)
Checks to make sure a port is allowed to be deleted, raising an exception if this is not the case. This should be called by any plugin when the API requests the deletion of a port, since some ports for L3 are not intended to be deleted directly via a DELETE to /ports, but rather via other API calls that perform the proper deletion checks.
Checks to make sure a port is allowed to be deleted, raising an exception if this is not the case. This should be called by any plugin when the API requests the deletion of a port, since some ports for L3 are not intended to be deleted directly via a DELETE to /ports, but rather via other API calls that perform the proper deletion checks.
[ "Checks", "to", "make", "sure", "a", "port", "is", "allowed", "to", "be", "deleted", "raising", "an", "exception", "if", "this", "is", "not", "the", "case", ".", "This", "should", "be", "called", "by", "any", "plugin", "when", "the", "API", "requests", "the", "deletion", "of", "a", "port", "since", "some", "ports", "for", "L3", "are", "not", "intended", "to", "be", "deleted", "directly", "via", "a", "DELETE", "to", "/", "ports", "but", "rather", "via", "other", "API", "calls", "that", "perform", "the", "proper", "deletion", "checks", "." ]
def prevent_l3_port_deletion(self, context, port_id):
    """ Checks to make sure a port is allowed to be deleted, raising
    an exception if this is not the case.  This should be called by
    any plugin when the API requests the deletion of a port, since
    some ports for L3 are not intended to be deleted directly via a
    DELETE to /ports, but rather via other API calls that perform the
    proper deletion checks.
    """
    port_db = self._get_port(context, port_id)
    if port_db['device_owner'] in [DEVICE_OWNER_ROUTER_INTF,
                                   DEVICE_OWNER_ROUTER_GW,
                                   DEVICE_OWNER_FLOATINGIP]:
        # Raise port in use only if the port has IP addresses
        # Otherwise it's a stale port that can be removed
        fixed_ips = port_db['fixed_ips'].all()
        if fixed_ips:
            raise l3.L3PortInUse(port_id=port_id,
                                 device_owner=port_db['device_owner'])
        else:
            LOG.debug(_("Port %(port_id)s has owner %(port_owner)s, but "
                        "no IP address, so it can be deleted"),
                      {'port_id': port_db['id'],
                       'port_owner': port_db['device_owner']})
[ "def", "prevent_l3_port_deletion", "(", "self", ",", "context", ",", "port_id", ")", ":", "port_db", "=", "self", ".", "_get_port", "(", "context", ",", "port_id", ")", "if", "port_db", "[", "'device_owner'", "]", "in", "[", "DEVICE_OWNER_ROUTER_INTF", ",", "DEVICE_OWNER_ROUTER_GW", ",", "DEVICE_OWNER_FLOATINGIP", "]", ":", "# Raise port in use only if the port has IP addresses", "# Otherwise it's a stale port that can be removed", "fixed_ips", "=", "port_db", "[", "'fixed_ips'", "]", ".", "all", "(", ")", "if", "fixed_ips", ":", "raise", "l3", ".", "L3PortInUse", "(", "port_id", "=", "port_id", ",", "device_owner", "=", "port_db", "[", "'device_owner'", "]", ")", "else", ":", "LOG", ".", "debug", "(", "_", "(", "\"Port %(port_id)s has owner %(port_owner)s, but \"", "\"no IP address, so it can be deleted\"", ")", ",", "{", "'port_id'", ":", "port_db", "[", "'id'", "]", ",", "'port_owner'", ":", "port_db", "[", "'device_owner'", "]", "}", ")" ]
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/quantum/quantum/db/l3_db.py#L746-L768
poliastro/poliastro
6223a3f127ed730967a7091c4535100176a6c9e4
src/poliastro/maneuver.py
python
Maneuver.impulse
(cls, dv)
return cls((0 * u.s, dv))
Single impulse at current time. Parameters ---------- dv: np.array Velocity components of the impulse.
Single impulse at current time.
[ "Single", "impulse", "at", "current", "time", "." ]
def impulse(cls, dv):
    """Single impulse at current time.

    Parameters
    ----------
    dv: np.array
        Velocity components of the impulse.
    """
    return cls((0 * u.s, dv))
[ "def", "impulse", "(", "cls", ",", "dv", ")", ":", "return", "cls", "(", "(", "0", "*", "u", ".", "s", ",", "dv", ")", ")" ]
https://github.com/poliastro/poliastro/blob/6223a3f127ed730967a7091c4535100176a6c9e4/src/poliastro/maneuver.py#L66-L76
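A usage sketch (assuming poliastro and astropy are installed): the maneuver stores a single (time, delta-v) pair at t = 0 s.

from astropy import units as u
from poliastro.maneuver import Maneuver

dv = [0.0, 1.0, 0.0] * u.km / u.s   # instantaneous velocity change
burn = Maneuver.impulse(dv)         # one (0 s, dv) pair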
ellisdg/3DUnetCNN
92def62763bb59918ab90700786d961c6fdc5cd1
unet3d/utils/nipy/empirical_pvalue.py
python
check_p_values
(p_values)
return p_values
Basic checks on the p_values array: values should be within [0,1] Assures also that p_values are at least in 1d array. None of the checks is performed if p_values is None. Parameters ---------- p_values : array of shape (n) The sample p-values Returns ------- p_values : array of shape (n) The sample p-values
Basic checks on the p_values array: values should be within [0,1]
[ "Basic", "checks", "on", "the", "p_values", "array", ":", "values", "should", "be", "within", "[", "0", "1", "]" ]
def check_p_values(p_values):
    """Basic checks on the p_values array: values should be within [0,1]

    Assures also that p_values are at least in 1d array.  None of the
    checks is performed if p_values is None.

    Parameters
    ----------
    p_values : array of shape (n)
        The sample p-values

    Returns
    -------
    p_values : array of shape (n)
        The sample p-values
    """
    if p_values is None:
        return None
    # Take all elements unfolded and assure having at least 1d
    p_values = np.atleast_1d(np.ravel(p_values))
    if np.any(np.isnan(p_values)):
        raise ValueError("%d values are NaN" % (sum(np.isnan(p_values))))
    if p_values.min() < 0:
        raise ValueError("Negative p-values. Min=%g" % (p_values.min(),))
    if p_values.max() > 1:
        raise ValueError("P-values greater than 1! Max=%g" % (p_values.max(),))
    return p_values
[ "def", "check_p_values", "(", "p_values", ")", ":", "if", "p_values", "is", "None", ":", "return", "None", "# Take all elements unfolded and assure having at least 1d", "p_values", "=", "np", ".", "atleast_1d", "(", "np", ".", "ravel", "(", "p_values", ")", ")", "if", "np", ".", "any", "(", "np", ".", "isnan", "(", "p_values", ")", ")", ":", "raise", "ValueError", "(", "\"%d values are NaN\"", "%", "(", "sum", "(", "np", ".", "isnan", "(", "p_values", ")", ")", ")", ")", "if", "p_values", ".", "min", "(", ")", "<", "0", ":", "raise", "ValueError", "(", "\"Negative p-values. Min=%g\"", "%", "(", "p_values", ".", "min", "(", ")", ",", ")", ")", "if", "p_values", ".", "max", "(", ")", ">", "1", ":", "raise", "ValueError", "(", "\"P-values greater than 1! Max=%g\"", "%", "(", "p_values", ".", "max", "(", ")", ",", ")", ")", "return", "p_values" ]
https://github.com/ellisdg/3DUnetCNN/blob/92def62763bb59918ab90700786d961c6fdc5cd1/unet3d/utils/nipy/empirical_pvalue.py#L32-L59
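Behavior sketch for check_p_values above (numpy assumed): inputs are raveled to 1-D, None passes through, and out-of-range values raise.

import numpy as np

print(check_p_values(np.array([[0.2, 0.7]])))   # [0.2 0.7] (raveled to 1-D)
print(check_p_values(None))                     # None
# check_p_values(np.array([1.5]))               # would raise ValueError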
zmqless/python-zeroless
bff8ce0d12aae36537f41b57b2bd4ee087ed70e2
zeroless/zeroless.py
python
Client.disconnect_local
(self, port)
return self.disconnect('127.0.0.1', port)
Disconnects from a server in localhost at the specified port. :param port: port number from 1024 up to 65535 :type port: int :rtype: self
Disconnects from a server in localhost at the specified port.
[ "Disconnects", "from", "a", "server", "in", "localhost", "at", "the", "specified", "port", "." ]
def disconnect_local(self, port):
    """
    Disconnects from a server in localhost at the specified port.

    :param port: port number from 1024 up to 65535
    :type port: int
    :rtype: self
    """
    return self.disconnect('127.0.0.1', port)
[ "def", "disconnect_local", "(", "self", ",", "port", ")", ":", "return", "self", ".", "disconnect", "(", "'127.0.0.1'", ",", "port", ")" ]
https://github.com/zmqless/python-zeroless/blob/bff8ce0d12aae36537f41b57b2bd4ee087ed70e2/zeroless/zeroless.py#L333-L341
Nuitka/Nuitka
39262276993757fa4e299f497654065600453fc9
nuitka/build/inline_copy/jinja2/jinja2/bccache.py
python
Bucket.bytecode_to_string
(self)
return out.getvalue()
Return the bytecode as string.
Return the bytecode as string.
[ "Return", "the", "bytecode", "as", "string", "." ]
def bytecode_to_string(self):
    """Return the bytecode as string."""
    out = BytesIO()
    self.write_bytecode(out)
    return out.getvalue()
[ "def", "bytecode_to_string", "(", "self", ")", ":", "out", "=", "BytesIO", "(", ")", "self", ".", "write_bytecode", "(", "out", ")", "return", "out", ".", "getvalue", "(", ")" ]
https://github.com/Nuitka/Nuitka/blob/39262276993757fa4e299f497654065600453fc9/nuitka/build/inline_copy/jinja2/jinja2/bccache.py#L110-L114
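The same serialize-through-a-BytesIO-sink pattern in miniature (self-contained; names are illustrative):

from io import BytesIO

def to_bytes(write):          # write: a callable that writes to a file-like
    out = BytesIO()
    write(out)
    return out.getvalue()

assert to_bytes(lambda f: f.write(b"abc")) == b"abc"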
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/lib-python/3/cgi.py
python
print_directory
()
Dump the current directory as HTML.
Dump the current directory as HTML.
[ "Dump", "the", "current", "directory", "as", "HTML", "." ]
def print_directory():
    """Dump the current directory as HTML."""
    print()
    print("<H3>Current Working Directory:</H3>")
    try:
        pwd = os.getcwd()
    except os.error as msg:
        print("os.error:", html.escape(str(msg)))
    else:
        print(html.escape(pwd))
    print()
[ "def", "print_directory", "(", ")", ":", "print", "(", ")", "print", "(", "\"<H3>Current Working Directory:</H3>\"", ")", "try", ":", "pwd", "=", "os", ".", "getcwd", "(", ")", "except", "os", ".", "error", "as", "msg", ":", "print", "(", "\"os.error:\"", ",", "html", ".", "escape", "(", "str", "(", "msg", ")", ")", ")", "else", ":", "print", "(", "html", ".", "escape", "(", "pwd", ")", ")", "print", "(", ")" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/cgi.py#L937-L947
wbond/package_control
cfaaeb57612023e3679ecb7f8cd7ceac9f57990d
package_control/deps/asn1crypto/x509.py
python
Certificate.not_valid_after
(self)
return self['tbs_certificate']['validity']['not_after'].native
:return: A datetime of latest time when the certificate is still valid
:return: A datetime of latest time when the certificate is still valid
[ ":", "return", ":", "A", "datetime", "of", "latest", "time", "when", "the", "certificate", "is", "still", "valid" ]
def not_valid_after(self):
    """
    :return:
        A datetime of latest time when the certificate is still valid
    """
    return self['tbs_certificate']['validity']['not_after'].native
[ "def", "not_valid_after", "(", "self", ")", ":", "return", "self", "[", "'tbs_certificate'", "]", "[", "'validity'", "]", "[", "'not_after'", "]", ".", "native" ]
https://github.com/wbond/package_control/blob/cfaaeb57612023e3679ecb7f8cd7ceac9f57990d/package_control/deps/asn1crypto/x509.py#L2589-L2594
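A usage sketch (assuming asn1crypto is installed; the certificate file name is hypothetical):

from asn1crypto import x509

with open("server-cert.der", "rb") as f:     # hypothetical DER-encoded cert
    cert = x509.Certificate.load(f.read())
print(cert.not_valid_after)                  # expiry as a datetime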
googleprojectzero/domato
7625d1d27c71b45c273f27ee06cd9499cb1c2307
canvas/generator.py
python
GenerateNewSample
(template, jsgrammar)
return result
Parses grammar rules from string. Args: template: A template string. htmlgrammar: Grammar for generating HTML code. cssgrammar: Grammar for generating CSS code. jsgrammar: Grammar for generating JS code. Returns: A string containing sample data.
Parses grammar rules from string.
[ "Parses", "grammar", "rules", "from", "string", "." ]
def GenerateNewSample(template, jsgrammar):
    """Parses grammar rules from string.

    Args:
        template: A template string.
        htmlgrammar: Grammar for generating HTML code.
        cssgrammar: Grammar for generating CSS code.
        jsgrammar: Grammar for generating JS code.

    Returns:
        A string containing sample data.
    """
    result = template
    handlers = False
    while '<canvasfuzz>' in result:
        numlines = _N_MAIN_LINES
        if handlers:
            numlines = _N_EVENTHANDLER_LINES
        else:
            handlers = True

        result = result.replace('<canvasfuzz>',
                                generate_function_body(jsgrammar, numlines), 1)

    return result
[ "def", "GenerateNewSample", "(", "template", ",", "jsgrammar", ")", ":", "result", "=", "template", "handlers", "=", "False", "while", "'<canvasfuzz>'", "in", "result", ":", "numlines", "=", "_N_MAIN_LINES", "if", "handlers", ":", "numlines", "=", "_N_EVENTHANDLER_LINES", "else", ":", "handlers", "=", "True", "result", "=", "result", ".", "replace", "(", "'<canvasfuzz>'", ",", "generate_function_body", "(", "jsgrammar", ",", "numlines", ")", ",", "1", ")", "return", "result" ]
https://github.com/googleprojectzero/domato/blob/7625d1d27c71b45c273f27ee06cd9499cb1c2307/canvas/generator.py#L39-L66
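The replace-one-occurrence-per-pass loop in miniature (self-contained): passing count=1 to str.replace lets every placeholder get a different fill.

template = "<x> + <x> + <x>"
result, n = template, 0
while "<x>" in result:
    n += 1
    result = result.replace("<x>", str(n), 1)   # fill one occurrence per pass
print(result)   # 1 + 2 + 3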
HymanLiuTS/flaskTs
286648286976e85d9b9a5873632331efcafe0b21
flasky/lib/python2.7/site-packages/sqlalchemy/sql/selectable.py
python
FromClause._reset_exported
(self)
delete memoized collections when a FromClause is cloned.
delete memoized collections when a FromClause is cloned.
[ "delete", "memoized", "collections", "when", "a", "FromClause", "is", "cloned", "." ]
def _reset_exported(self):
    """delete memoized collections when a FromClause is cloned."""
    self._memoized_property.expire_instance(self)
[ "def", "_reset_exported", "(", "self", ")", ":", "self", ".", "_memoized_property", ".", "expire_instance", "(", "self", ")" ]
https://github.com/HymanLiuTS/flaskTs/blob/286648286976e85d9b9a5873632331efcafe0b21/flasky/lib/python2.7/site-packages/sqlalchemy/sql/selectable.py#L663-L666
Antergos/Cnchi
13ac2209da9432d453e0097cf48a107640b563a9
utils/parted-partition.py
python
Partition.nextPartition
(self)
Return the Partition following this one on the Disk.
Return the Partition following this one on the Disk.
[ "Return", "the", "Partition", "following", "this", "one", "on", "the", "Disk", "." ]
def nextPartition(self):
    """Return the Partition following this one on the Disk."""
    partition = self.disk.getPedDisk().next_partition(self.__partition)
    if partition is None:
        return None
    else:
        return parted.Partition(disk=self.disk, PedPartition=partition)
[ "def", "nextPartition", "(", "self", ")", ":", "partition", "=", "self", ".", "disk", ".", "getPedDisk", "(", ")", ".", "next_partition", "(", "self", ".", "__partition", ")", "if", "partition", "is", "None", ":", "return", "None", "else", ":", "return", "parted", ".", "Partition", "(", "disk", "=", "self", ".", "disk", ",", "PedPartition", "=", "partition", ")" ]
https://github.com/Antergos/Cnchi/blob/13ac2209da9432d453e0097cf48a107640b563a9/utils/parted-partition.py#L179-L186
haiwen/seahub
e92fcd44e3e46260597d8faa9347cb8222b8b10d
seahub/notifications/models.py
python
request_reviewer_msg_to_json
(draft_id, from_user, to_user)
return json.dumps({'draft_id': draft_id, 'from_user': from_user, 'to_user': to_user})
[]
def request_reviewer_msg_to_json(draft_id, from_user, to_user):
    return json.dumps({'draft_id': draft_id,
                       'from_user': from_user,
                       'to_user': to_user})
[ "def", "request_reviewer_msg_to_json", "(", "draft_id", ",", "from_user", ",", "to_user", ")", ":", "return", "json", ".", "dumps", "(", "{", "'draft_id'", ":", "draft_id", ",", "'from_user'", ":", "from_user", ",", "'to_user'", ":", "to_user", "}", ")" ]
https://github.com/haiwen/seahub/blob/e92fcd44e3e46260597d8faa9347cb8222b8b10d/seahub/notifications/models.py#L120-L123
ni/nidaqmx-python
62fc6b48cbbb330fe1bcc9aedadc86610a1269b6
nidaqmx/_task_modules/export_signals.py
python
ExportSignals.adv_trig_pulse_width_units
(self, val)
[]
def adv_trig_pulse_width_units(self, val):
    val = val.value
    cfunc = lib_importer.windll.DAQmxSetExportedAdvTrigPulseWidthUnits
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int]

    error_code = cfunc(
        self._handle, val)
    check_for_error(error_code)
[ "def", "adv_trig_pulse_width_units", "(", "self", ",", "val", ")", ":", "val", "=", "val", ".", "value", "cfunc", "=", "lib_importer", ".", "windll", ".", "DAQmxSetExportedAdvTrigPulseWidthUnits", "if", "cfunc", ".", "argtypes", "is", "None", ":", "with", "cfunc", ".", "arglock", ":", "if", "cfunc", ".", "argtypes", "is", "None", ":", "cfunc", ".", "argtypes", "=", "[", "lib_importer", ".", "task_handle", ",", "ctypes", ".", "c_int", "]", "error_code", "=", "cfunc", "(", "self", ".", "_handle", ",", "val", ")", "check_for_error", "(", "error_code", ")" ]
https://github.com/ni/nidaqmx-python/blob/62fc6b48cbbb330fe1bcc9aedadc86610a1269b6/nidaqmx/_task_modules/export_signals.py#L383-L394
meolu/walle-web
f96dc41ed882782d52ec62fddbe0213b8b9158ec
walle/service/deployer.py
python
Deployer.post_release
(self, waller)
6. Tasks to run after the code has been deployed to the target machines - switch the symlink - restart nginx :return:
6. Tasks to run after the code has been deployed to the target machines - switch the symlink - restart nginx :return:
[ "6", ".", "部署代码到目标机器后要做的任务", "-", "切换软链", "-", "重启", "nginx", ":", "return", ":" ]
def post_release(self, waller):
    '''
    6. Tasks to run after the code has been deployed to the target machines
    - switch the symlink
    - restart nginx

    :return:
    '''
    self.stage = self.stage_post_release
    self.sequence = 6

    # User-defined commands
    commands = self.project_info['post_release']
    if commands:
        for command in commands.split('\n'):
            if command.strip().startswith('#') or not command.strip():
                continue
            # TODO
            with waller.cd(self.project_info['target_root']):
                pty = False if command.find('nohup') >= 0 else True
                result = waller.run(command, wenv=self.config(), pty=pty)

    # Customization: what the user restarts is not necessarily NGINX;
    # it may be tomcat, apache, php-fpm, etc.
    # self.post_release_service(waller)

    # Clean up the remote workspace
    self.cleanup_remote(waller)
[ "def", "post_release", "(", "self", ",", "waller", ")", ":", "self", ".", "stage", "=", "self", ".", "stage_post_release", "self", ".", "sequence", "=", "6", "# 用户自定义命令", "commands", "=", "self", ".", "project_info", "[", "'post_release'", "]", "if", "commands", ":", "for", "command", "in", "commands", ".", "split", "(", "'\\n'", ")", ":", "if", "command", ".", "strip", "(", ")", ".", "startswith", "(", "'#'", ")", "or", "not", "command", ".", "strip", "(", ")", ":", "continue", "# TODO", "with", "waller", ".", "cd", "(", "self", ".", "project_info", "[", "'target_root'", "]", ")", ":", "pty", "=", "False", "if", "command", ".", "find", "(", "'nohup'", ")", ">=", "0", "else", "True", "result", "=", "waller", ".", "run", "(", "command", ",", "wenv", "=", "self", ".", "config", "(", ")", ",", "pty", "=", "pty", ")", "# 个性化,用户重启的不一定是NGINX,可能是tomcat, apache, php-fpm等", "# self.post_release_service(waller)", "# 清理现场", "self", ".", "cleanup_remote", "(", "waller", ")" ]
https://github.com/meolu/walle-web/blob/f96dc41ed882782d52ec62fddbe0213b8b9158ec/walle/service/deployer.py#L335-L358
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/gdata/src/gdata/geo/__init__.py
python
Where.set_longitude
(self, lon)
return self.set_location(lat, lon)
(bool) Set the longitude value of the geo-tag. Args: lon (float): The new longitude value See also .set_location()
(bool) Set the longitude value of the geo-tag. Args: lon (float): The new longitude value
[ "(", "bool", ")", "Set", "the", "longtitude", "value", "of", "the", "geo", "-", "tag", ".", "Args", ":", "lat", "(", "float", ")", ":", "The", "new", "latitude", "value" ]
def set_longitude(self, lon):
    """(bool) Set the longitude value of the geo-tag.

    Args:
      lon (float): The new longitude value

    See also .set_location()
    """
    lat, _lon = self.location()
    return self.set_location(lat, lon)
[ "def", "set_longitude", "(", "self", ",", "lon", ")", ":", "lat", ",", "_lon", "=", "self", ".", "location", "(", ")", "return", "self", ".", "set_location", "(", "lat", ",", "lon", ")" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/gdata/src/gdata/geo/__init__.py#L170-L179
aws-samples/aws-kube-codesuite
ab4e5ce45416b83bffb947ab8d234df5437f4fca
src/kubernetes/client/models/v1_flex_volume_source.py
python
V1FlexVolumeSource.__init__
(self, driver=None, fs_type=None, options=None, read_only=None, secret_ref=None)
V1FlexVolumeSource - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition.
V1FlexVolumeSource - a model defined in Swagger
[ "V1FlexVolumeSource", "-", "a", "model", "defined", "in", "Swagger" ]
def __init__(self, driver=None, fs_type=None, options=None, read_only=None, secret_ref=None):
    """
    V1FlexVolumeSource - a model defined in Swagger

    :param dict swaggerTypes: The key is attribute name
                              and the value is attribute type.
    :param dict attributeMap: The key is attribute name
                              and the value is json key in definition.
    """
    self.swagger_types = {
        'driver': 'str',
        'fs_type': 'str',
        'options': 'dict(str, str)',
        'read_only': 'bool',
        'secret_ref': 'V1LocalObjectReference'
    }

    self.attribute_map = {
        'driver': 'driver',
        'fs_type': 'fsType',
        'options': 'options',
        'read_only': 'readOnly',
        'secret_ref': 'secretRef'
    }

    self._driver = driver
    self._fs_type = fs_type
    self._options = options
    self._read_only = read_only
    self._secret_ref = secret_ref
[ "def", "__init__", "(", "self", ",", "driver", "=", "None", ",", "fs_type", "=", "None", ",", "options", "=", "None", ",", "read_only", "=", "None", ",", "secret_ref", "=", "None", ")", ":", "self", ".", "swagger_types", "=", "{", "'driver'", ":", "'str'", ",", "'fs_type'", ":", "'str'", ",", "'options'", ":", "'dict(str, str)'", ",", "'read_only'", ":", "'bool'", ",", "'secret_ref'", ":", "'V1LocalObjectReference'", "}", "self", ".", "attribute_map", "=", "{", "'driver'", ":", "'driver'", ",", "'fs_type'", ":", "'fsType'", ",", "'options'", ":", "'options'", ",", "'read_only'", ":", "'readOnly'", ",", "'secret_ref'", ":", "'secretRef'", "}", "self", ".", "_driver", "=", "driver", "self", ".", "_fs_type", "=", "fs_type", "self", ".", "_options", "=", "options", "self", ".", "_read_only", "=", "read_only", "self", ".", "_secret_ref", "=", "secret_ref" ]
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/kubernetes/client/models/v1_flex_volume_source.py#L24-L53
vintasoftware/tapioca-wrapper
d4ee3d3ade42935fbea9b2cb91d10b67fd5f4767
tapioca/tapioca.py
python
TapiocaClientExecutor.__init__
(self, api, *args, **kwargs)
[]
def __init__(self, api, *args, **kwargs):
    super(TapiocaClientExecutor, self).__init__(api, *args, **kwargs)
[ "def", "__init__", "(", "self", ",", "api", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "TapiocaClientExecutor", ",", "self", ")", ".", "__init__", "(", "api", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/vintasoftware/tapioca-wrapper/blob/d4ee3d3ade42935fbea9b2cb91d10b67fd5f4767/tapioca/tapioca.py#L186-L187
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/fritz/common.py
python
FritzBoxBaseEntity.mac_address
(self)
return self._avm_device.mac
Return the mac address of the main device.
Return the mac address of the main device.
[ "Return", "the", "mac", "address", "of", "the", "main", "device", "." ]
def mac_address(self) -> str:
    """Return the mac address of the main device."""
    return self._avm_device.mac
[ "def", "mac_address", "(", "self", ")", "->", "str", ":", "return", "self", ".", "_avm_device", ".", "mac" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/fritz/common.py#L656-L658
huggingface/transformers
623b4f7c63f60cce917677ee704d6c93ee960b4b
src/transformers/deepspeed.py
python
HfTrainerDeepSpeedConfig.dtype
(self)
return self._dtype
[]
def dtype(self):
    return self._dtype
[ "def", "dtype", "(", "self", ")", ":", "return", "self", ".", "_dtype" ]
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/src/transformers/deepspeed.py#L175-L176
demisto/content
5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07
Packs/BmcHelixRemedyForce/Integrations/BmcHelixRemedyForce/BMCHelixRemedyforce.py
python
validate_max_incidents
(max_incidents: str)
Validates the value of max_incident parameter. :params max_incidents: In fetch-incident maximum number of incidents to return. :raises ValueError: if max incidents parameter is not a positive integer. :return: None
Validates the value of max_incident parameter.
[ "Validates", "the", "value", "of", "max_incident", "parameter", "." ]
def validate_max_incidents(max_incidents: str) -> None:
    """
    Validates the value of max_incident parameter.

    :params max_incidents: In fetch-incident maximum number of incidents to return.
    :raises ValueError: if max incidents parameter is not a positive integer.
    :return: None
    """
    try:
        max_incidents_int = int(max_incidents)
        if max_incidents_int <= 0:
            raise ValueError
    except ValueError:
        raise ValueError(MESSAGES['INVALID_MAX_INCIDENT_ERROR'])
[ "def", "validate_max_incidents", "(", "max_incidents", ":", "str", ")", "->", "None", ":", "try", ":", "max_incidents_int", "=", "int", "(", "max_incidents", ")", "if", "max_incidents_int", "<=", "0", ":", "raise", "ValueError", "except", "ValueError", ":", "raise", "ValueError", "(", "MESSAGES", "[", "'INVALID_MAX_INCIDENT_ERROR'", "]", ")" ]
https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/BmcHelixRemedyForce/Integrations/BmcHelixRemedyForce/BMCHelixRemedyforce.py#L620-L633
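Call-pattern sketch for the validator above (MESSAGES is a constant from the surrounding integration, assumed in scope):

validate_max_incidents("50")     # returns None: "50" is a positive integer
# validate_max_incidents("0")    # raises ValueError (not positive)
# validate_max_incidents("ten")  # raises ValueError (not an integer)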
VainF/pytorch-msssim
6ceec02d64447216881423dd7428b68a2ad0905f
pytorch_msssim/ssim.py
python
_fspecial_gauss_1d
(size, sigma)
return g.unsqueeze(0).unsqueeze(0)
r"""Create 1-D gauss kernel Args: size (int): the size of gauss kernel sigma (float): sigma of normal distribution Returns: torch.Tensor: 1D kernel (1 x 1 x size)
r"""Create 1-D gauss kernel Args: size (int): the size of gauss kernel sigma (float): sigma of normal distribution
[ "r", "Create", "1", "-", "D", "gauss", "kernel", "Args", ":", "size", "(", "int", ")", ":", "the", "size", "of", "gauss", "kernel", "sigma", "(", "float", ")", ":", "sigma", "of", "normal", "distribution" ]
def _fspecial_gauss_1d(size, sigma):
    r"""Create 1-D gauss kernel

    Args:
        size (int): the size of gauss kernel
        sigma (float): sigma of normal distribution

    Returns:
        torch.Tensor: 1D kernel (1 x 1 x size)
    """
    coords = torch.arange(size, dtype=torch.float)
    coords -= size // 2

    g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))
    g /= g.sum()

    return g.unsqueeze(0).unsqueeze(0)
[ "def", "_fspecial_gauss_1d", "(", "size", ",", "sigma", ")", ":", "coords", "=", "torch", ".", "arange", "(", "size", ",", "dtype", "=", "torch", ".", "float", ")", "coords", "-=", "size", "//", "2", "g", "=", "torch", ".", "exp", "(", "-", "(", "coords", "**", "2", ")", "/", "(", "2", "*", "sigma", "**", "2", ")", ")", "g", "/=", "g", ".", "sum", "(", ")", "return", "g", ".", "unsqueeze", "(", "0", ")", ".", "unsqueeze", "(", "0", ")" ]
https://github.com/VainF/pytorch-msssim/blob/6ceec02d64447216881423dd7428b68a2ad0905f/pytorch_msssim/ssim.py#L9-L24
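A quick shape and normalization check for the kernel above (torch assumed): it comes back as (1, 1, size) and sums to 1.

import torch

k = _fspecial_gauss_1d(size=11, sigma=1.5)
assert k.shape == (1, 1, 11)
assert torch.isclose(k.sum(), torch.tensor(1.0))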
thinkle/gourmet
8af29c8ded24528030e5ae2ea3461f61c1e5a575
gourmet/plugins/duplicate_finder/recipeMergerPlugin.py
python
RecipeMergerImportManagerPlugin.activate
(self, pluggable)
[]
def activate(self, pluggable):
    pluggable.add_hook(PRE, 'follow_up', self.follow_up_pre_hook)
[ "def", "activate", "(", "self", ",", "pluggable", ")", ":", "pluggable", ".", "add_hook", "(", "PRE", ",", "'follow_up'", ",", "self", ".", "follow_up_pre_hook", ")" ]
https://github.com/thinkle/gourmet/blob/8af29c8ded24528030e5ae2ea3461f61c1e5a575/gourmet/plugins/duplicate_finder/recipeMergerPlugin.py#L11-L12
aws/aws-elastic-beanstalk-cli-setup
102025cb0d1d38e949fde2ca6455764c6f8475fb
scripts/ebcli_installer.py
python
_powershell_script_body
(virtualenv_location)
return EXECUTABLE_WRAPPERS['ps1'].format( bin_location=_original_eb_location(virtualenv_location) )
Function returns a Powershell (PS1) script which essentially will wrap the `eb` executable such that the executable is invoked within the virtualenv, ".ebcli-virtual-env", created apriori. :param virtualenv_location: the relative or absolute path to the location where the virtualenv, ".ebcli-virtual-env", was created. :return: None
Function returns a Powershell (PS1) script which essentially will wrap the `eb` executable such that the executable is invoked within the virtualenv, ".ebcli-virtual-env", created apriori. :param virtualenv_location: the relative or absolute path to the location where the virtualenv, ".ebcli-virtual-env", was created. :return: None
[ "Function", "returns", "a", "Powershell", "(", "PS1", ")", "script", "which", "essentially", "will", "wrap", "the", "eb", "executable", "such", "that", "the", "executable", "is", "invoked", "within", "the", "virtualenv", ".", "ebcli", "-", "virtual", "-", "env", "created", "apriori", ".", ":", "param", "virtualenv_location", ":", "the", "relative", "or", "absolute", "path", "to", "the", "location", "where", "the", "virtualenv", ".", "ebcli", "-", "virtual", "-", "env", "was", "created", ".", ":", "return", ":", "None" ]
def _powershell_script_body(virtualenv_location): """ Function returns a Powershell (PS1) script which essentially will wrap the `eb` executable such that the executable is invoked within the virtualenv, ".ebcli-virtual-env", created apriori. :param virtualenv_location: the relative or absolute path to the location where the virtualenv, ".ebcli-virtual-env", was created. :return: None """ return EXECUTABLE_WRAPPERS['ps1'].format( bin_location=_original_eb_location(virtualenv_location) )
[ "def", "_powershell_script_body", "(", "virtualenv_location", ")", ":", "return", "EXECUTABLE_WRAPPERS", "[", "'ps1'", "]", ".", "format", "(", "bin_location", "=", "_original_eb_location", "(", "virtualenv_location", ")", ")" ]
https://github.com/aws/aws-elastic-beanstalk-cli-setup/blob/102025cb0d1d38e949fde2ca6455764c6f8475fb/scripts/ebcli_installer.py#L825-L837
jparkhill/TensorMol
d52104dc7ee46eec8301d332a95d672270ac0bd1
TensorMol/TFNetworks/TFBehlerParinelloSymEE.py
python
MolInstance_DirectBP_EE_SymFunction.test_EandG
(self, step)
return test_loss
Perform a single test step (complete processing of all input), using minibatches of size self.batch_size Args: step: the index of this step.
Perform a single test step (complete processing of all input), using minibatches of size self.batch_size
[ "Perform", "a", "single", "test", "step", "(", "complete", "processing", "of", "all", "input", ")", "using", "minibatches", "of", "size", "self", ".", "batch_size" ]
def test_EandG(self, step): """ Perform a single test step (complete processing of all input), using minibatches of size self.batch_size Args: step: the index of this step. """ Ncase_test = self.TData.NTest start_time = time.time() test_loss = 0.0 test_energy_loss = 0.0 test_dipole_loss = 0.0 test_grads_loss = 0.0 num_of_mols = 0 for ministep in range (0, int(Ncase_test/self.batch_size)): batch_data = self.TData.GetTestBatch(self.batch_size)+[PARAMS["AddEcc"]] + [np.ones(self.nlayer+1)] actual_mols = self.batch_size t = time.time() total_loss_value, loss_value, energy_loss, grads_loss, dipole_loss, Etotal, Ecc, mol_dipole, atom_charge = self.sess.run([self.total_loss_EandG, self.loss_EandG, self.energy_loss_EandG, self.grads_loss_EandG, self.dipole_loss_EandG, self.Etotal, self.Ecc, self.dipole, self.charge], feed_dict=self.fill_feed_dict(batch_data)) test_loss = test_loss + loss_value test_energy_loss += energy_loss test_grads_loss += grads_loss test_dipole_loss += dipole_loss duration = time.time() - start_time num_of_mols += actual_mols print ("testing...") self.print_training(step, test_loss, test_energy_loss, test_grads_loss, test_dipole_loss, num_of_mols, duration, False) return test_loss
[ "def", "test_EandG", "(", "self", ",", "step", ")", ":", "Ncase_test", "=", "self", ".", "TData", ".", "NTest", "start_time", "=", "time", ".", "time", "(", ")", "test_loss", "=", "0.0", "test_energy_loss", "=", "0.0", "test_dipole_loss", "=", "0.0", "test_grads_loss", "=", "0.0", "num_of_mols", "=", "0", "for", "ministep", "in", "range", "(", "0", ",", "int", "(", "Ncase_test", "/", "self", ".", "batch_size", ")", ")", ":", "batch_data", "=", "self", ".", "TData", ".", "GetTestBatch", "(", "self", ".", "batch_size", ")", "+", "[", "PARAMS", "[", "\"AddEcc\"", "]", "]", "+", "[", "np", ".", "ones", "(", "self", ".", "nlayer", "+", "1", ")", "]", "actual_mols", "=", "self", ".", "batch_size", "t", "=", "time", ".", "time", "(", ")", "total_loss_value", ",", "loss_value", ",", "energy_loss", ",", "grads_loss", ",", "dipole_loss", ",", "Etotal", ",", "Ecc", ",", "mol_dipole", ",", "atom_charge", "=", "self", ".", "sess", ".", "run", "(", "[", "self", ".", "total_loss_EandG", ",", "self", ".", "loss_EandG", ",", "self", ".", "energy_loss_EandG", ",", "self", ".", "grads_loss_EandG", ",", "self", ".", "dipole_loss_EandG", ",", "self", ".", "Etotal", ",", "self", ".", "Ecc", ",", "self", ".", "dipole", ",", "self", ".", "charge", "]", ",", "feed_dict", "=", "self", ".", "fill_feed_dict", "(", "batch_data", ")", ")", "test_loss", "=", "test_loss", "+", "loss_value", "test_energy_loss", "+=", "energy_loss", "test_grads_loss", "+=", "grads_loss", "test_dipole_loss", "+=", "dipole_loss", "duration", "=", "time", ".", "time", "(", ")", "-", "start_time", "num_of_mols", "+=", "actual_mols", "print", "(", "\"testing...\"", ")", "self", ".", "print_training", "(", "step", ",", "test_loss", ",", "test_energy_loss", ",", "test_grads_loss", ",", "test_dipole_loss", ",", "num_of_mols", ",", "duration", ",", "False", ")", "return", "test_loss" ]
https://github.com/jparkhill/TensorMol/blob/d52104dc7ee46eec8301d332a95d672270ac0bd1/TensorMol/TFNetworks/TFBehlerParinelloSymEE.py#L1277-L1304
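The method follows a common evaluation pattern: iterate over whole minibatches, accumulate per-batch losses, and report totals, with any partial trailing batch dropped by the integer division. A framework-free sketch of that pattern (all names illustrative, not TensorMol's API):

    def run_test_epoch(eval_batch, n_cases, batch_size):
        # eval_batch stands in for the sess.run(...) call; it returns one batch loss
        total_loss, num_seen = 0.0, 0
        for _ in range(n_cases // batch_size):  # leftover cases are dropped
            total_loss += eval_batch(batch_size)
            num_seen += batch_size
        return total_loss, num_seen

    loss, seen = run_test_epoch(lambda b: 0.5, n_cases=100, batch_size=32)
    print(loss, seen)  # 1.5 96 -- three full batches; the last 4 cases are skipped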
joosthoeks/jhTAlib
4931a34829d966ccc973fb29d767a359d6e94b44
jhtalib/price_transform/price_transform.py
python
AVGPRICE
(df, open='Open', high='High', low='Low', close='Close')
return avgprice_list
Average Price Returns: list of floats = jhta.AVGPRICE(df, open='Open', high='High', low='Low', close='Close') Source: https://www.fmlabs.com/reference/default.htm?url=AvgPrices.htm
Average Price Returns: list of floats = jhta.AVGPRICE(df, open='Open', high='High', low='Low', close='Close') Source: https://www.fmlabs.com/reference/default.htm?url=AvgPrices.htm
[ "Average", "Price", "Returns", ":", "list", "of", "floats", "=", "jhta", ".", "AVGPRICE", "(", "df", "open", "=", "Open", "high", "=", "High", "low", "=", "Low", "close", "=", "Close", ")", "Source", ":", "https", ":", "//", "www", ".", "fmlabs", ".", "com", "/", "reference", "/", "default", ".", "htm?url", "=", "AvgPrices", ".", "htm" ]
def AVGPRICE(df, open='Open', high='High', low='Low', close='Close'): """ Average Price Returns: list of floats = jhta.AVGPRICE(df, open='Open', high='High', low='Low', close='Close') Source: https://www.fmlabs.com/reference/default.htm?url=AvgPrices.htm """ avgprice_list = [] for i in range(len(df[close])): avgprice = (df[open][i] + df[high][i] + df[low][i] + df[close][i]) / 4 avgprice_list.append(avgprice) return avgprice_list
[ "def", "AVGPRICE", "(", "df", ",", "open", "=", "'Open'", ",", "high", "=", "'High'", ",", "low", "=", "'Low'", ",", "close", "=", "'Close'", ")", ":", "avgprice_list", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "df", "[", "close", "]", ")", ")", ":", "avgprice", "=", "(", "df", "[", "open", "]", "[", "i", "]", "+", "df", "[", "high", "]", "[", "i", "]", "+", "df", "[", "low", "]", "[", "i", "]", "+", "df", "[", "close", "]", "[", "i", "]", ")", "/", "4", "avgprice_list", ".", "append", "(", "avgprice", ")", "return", "avgprice_list" ]
https://github.com/joosthoeks/jhTAlib/blob/4931a34829d966ccc973fb29d767a359d6e94b44/jhtalib/price_transform/price_transform.py#L10-L20
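A quick worked example (assuming jhtalib is installed): each bar's average price is simply (O + H + L + C) / 4, so a dict of OHLC lists is enough input.

    import jhtalib as jhta

    df = {'Open': [10.0, 11.0], 'High': [12.0, 13.0],
          'Low': [9.0, 10.0], 'Close': [11.0, 12.0]}
    # bar 0: (10 + 12 + 9 + 11) / 4 = 10.5 ; bar 1: (11 + 13 + 10 + 12) / 4 = 11.5
    print(jhta.AVGPRICE(df))  # [10.5, 11.5]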
jliljebl/flowblade
995313a509b80e99eb1ad550d945bdda5995093b
flowblade-trunk/Flowblade/audiowaveform.py
python
_waveform_render_progress_dialog
(callback, title, text, progress_bar, parent_window)
return dialog
[]
def _waveform_render_progress_dialog(callback, title, text, progress_bar, parent_window): dialog = Gtk.Dialog(title, parent_window, Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT, (_("Cancel"), Gtk.ResponseType.REJECT)) dialog.text_label = Gtk.Label(label=text) dialog.text_label.set_use_markup(True) text_box = Gtk.HBox(False, 2) text_box.pack_start(dialog.text_label,False, False, 0) text_box.pack_start(Gtk.Label(), True, True, 0) status_box = Gtk.HBox(False, 2) status_box.pack_start(text_box, False, False, 0) status_box.pack_start(Gtk.Label(), True, True, 0) progress_vbox = Gtk.VBox(False, 2) progress_vbox.pack_start(status_box, False, False, 0) progress_vbox.pack_start(guiutils.get_pad_label(10, 10), False, False, 0) progress_vbox.pack_start(progress_bar, False, False, 0) alignment = guiutils.set_margins(progress_vbox, 12, 12, 12, 12) dialog.vbox.pack_start(alignment, True, True, 0) dialogutils.set_outer_margins(dialog.vbox) dialog.set_default_size(500, 125) alignment.show_all() dialog.connect('response', callback) dialog.show() return dialog
[ "def", "_waveform_render_progress_dialog", "(", "callback", ",", "title", ",", "text", ",", "progress_bar", ",", "parent_window", ")", ":", "dialog", "=", "Gtk", ".", "Dialog", "(", "title", ",", "parent_window", ",", "Gtk", ".", "DialogFlags", ".", "MODAL", "|", "Gtk", ".", "DialogFlags", ".", "DESTROY_WITH_PARENT", ",", "(", "_", "(", "\"Cancel\"", ")", ",", "Gtk", ".", "ResponseType", ".", "REJECT", ")", ")", "dialog", ".", "text_label", "=", "Gtk", ".", "Label", "(", "label", "=", "text", ")", "dialog", ".", "text_label", ".", "set_use_markup", "(", "True", ")", "text_box", "=", "Gtk", ".", "HBox", "(", "False", ",", "2", ")", "text_box", ".", "pack_start", "(", "dialog", ".", "text_label", ",", "False", ",", "False", ",", "0", ")", "text_box", ".", "pack_start", "(", "Gtk", ".", "Label", "(", ")", ",", "True", ",", "True", ",", "0", ")", "status_box", "=", "Gtk", ".", "HBox", "(", "False", ",", "2", ")", "status_box", ".", "pack_start", "(", "text_box", ",", "False", ",", "False", ",", "0", ")", "status_box", ".", "pack_start", "(", "Gtk", ".", "Label", "(", ")", ",", "True", ",", "True", ",", "0", ")", "progress_vbox", "=", "Gtk", ".", "VBox", "(", "False", ",", "2", ")", "progress_vbox", ".", "pack_start", "(", "status_box", ",", "False", ",", "False", ",", "0", ")", "progress_vbox", ".", "pack_start", "(", "guiutils", ".", "get_pad_label", "(", "10", ",", "10", ")", ",", "False", ",", "False", ",", "0", ")", "progress_vbox", ".", "pack_start", "(", "progress_bar", ",", "False", ",", "False", ",", "0", ")", "alignment", "=", "guiutils", ".", "set_margins", "(", "progress_vbox", ",", "12", ",", "12", ",", "12", ",", "12", ")", "dialog", ".", "vbox", ".", "pack_start", "(", "alignment", ",", "True", ",", "True", ",", "0", ")", "dialogutils", ".", "set_outer_margins", "(", "dialog", ".", "vbox", ")", "dialog", ".", "set_default_size", "(", "500", ",", "125", ")", "alignment", ".", "show_all", "(", ")", "dialog", ".", "connect", "(", "'response'", ",", "callback", ")", "dialog", ".", "show", "(", ")", "return", "dialog" ]
https://github.com/jliljebl/flowblade/blob/995313a509b80e99eb1ad550d945bdda5995093b/flowblade-trunk/Flowblade/audiowaveform.py#L191-L220
cuthbertLab/music21
bd30d4663e52955ed922c10fdf541419d8c67671
music21/converter/subConverters.py
python
ConverterLilypond.show
(self, obj, fmt, app=None, subformats=None, **keywords)
Call .write (write out the lilypond (.ly) file; convert to .png/.pdf, etc.) then launch the appropriate viewer for .png/.pdf (graphicsPath) or .svg (vectorPath)
Call .write (write out the lilypond (.ly) file; convert to .png/.pdf, etc.) then launch the appropriate viewer for .png/.pdf (graphicsPath) or .svg (vectorPath)
[ "Call", ".", "write", "(", "write", "out", "the", "lilypond", "(", ".", "ly", ")", "file", ";", "convert", "to", ".", "png", "/", ".", "pdf", "etc", ".", ")", "then", "launch", "the", "appropriate", "viewer", "for", ".", "png", "/", ".", "pdf", "(", "graphicsPath", ")", "or", ".", "svg", "(", "vectorPath", ")" ]
def show(self, obj, fmt, app=None, subformats=None, **keywords): # pragma: no cover ''' Call .write (write out the lilypond (.ly) file; convert to .png/.pdf, etc.) then launch the appropriate viewer for .png/.pdf (graphicsPath) or .svg (vectorPath) ''' if not subformats: subformats = ['png'] returnedFilePath = self.write(obj, fmt, subformats=subformats, **keywords) if subformats is not None and subformats: outFormat = subformats[0] else: outFormat = 'png' launchKey = environLocal.formatToKey(outFormat) self.launch(returnedFilePath, fmt=outFormat, app=app, launchKey=launchKey)
[ "def", "show", "(", "self", ",", "obj", ",", "fmt", ",", "app", "=", "None", ",", "subformats", "=", "None", ",", "*", "*", "keywords", ")", ":", "# pragma: no cover", "if", "not", "subformats", ":", "subformats", "=", "[", "'png'", "]", "returnedFilePath", "=", "self", ".", "write", "(", "obj", ",", "fmt", ",", "subformats", "=", "subformats", ",", "*", "*", "keywords", ")", "if", "subformats", "is", "not", "None", "and", "subformats", ":", "outFormat", "=", "subformats", "[", "0", "]", "else", ":", "outFormat", "=", "'png'", "launchKey", "=", "environLocal", ".", "formatToKey", "(", "outFormat", ")", "self", ".", "launch", "(", "returnedFilePath", ",", "fmt", "=", "outFormat", ",", "app", "=", "app", ",", "launchKey", "=", "launchKey", ")" ]
https://github.com/cuthbertLab/music21/blob/bd30d4663e52955ed922c10fdf541419d8c67671/music21/converter/subConverters.py#L477-L492
quic/aimet
dae9bae9a77ca719aa7553fefde4768270fc3518
Examples/torch/quantization/range_learning.py
python
apply_cross_layer_equalization
(model: torch.nn.Module, input_shape: tuple)
Applies CLE on the model and calculates model accuracy on quantized simulator Applying CLE on the model inplace consists of: Batch Norm Folding Cross Layer Scaling High Bias Fold Converts any ReLU6 into ReLU. :param model: the loaded model :param input_shape: the shape of the input to the model :return:
Applies CLE on the model and calculates model accuracy on quantized simulator Applying CLE on the model inplace consists of: Batch Norm Folding Cross Layer Scaling High Bias Fold Converts any ReLU6 into ReLU. :param model: the loaded model :param input_shape: the shape of the input to the model :return:
[ "Applies", "CLE", "on", "the", "model", "and", "calculates", "model", "accuracy", "on", "quantized", "simulator", "Applying", "CLE", "on", "the", "model", "inplace", "consists", "of", ":", "Batch", "Norm", "Folding", "Cross", "Layer", "Scaling", "High", "Bias", "Fold", "Converts", "any", "ReLU6", "into", "ReLU", ".", ":", "param", "model", ":", "the", "loaded", "model", ":", "param", "input_shape", ":", "the", "shape", "of", "the", "input", "to", "the", "model", ":", "return", ":" ]
def apply_cross_layer_equalization(model: torch.nn.Module, input_shape: tuple): """ Applies CLE on the model and calculates model accuracy on quantized simulator Applying CLE on the model inplace consists of: Batch Norm Folding Cross Layer Scaling High Bias Fold Converts any ReLU6 into ReLU. :param model: the loaded model :param input_shape: the shape of the input to the model :return: """ equalize_model(model, input_shape)
[ "def", "apply_cross_layer_equalization", "(", "model", ":", "torch", ".", "nn", ".", "Module", ",", "input_shape", ":", "tuple", ")", ":", "equalize_model", "(", "model", ",", "input_shape", ")" ]
https://github.com/quic/aimet/blob/dae9bae9a77ca719aa7553fefde4768270fc3518/Examples/torch/quantization/range_learning.py#L169-L182
JiYou/openstack
8607dd488bde0905044b303eb6e52bdea6806923
packages/source/quantum/quantum/openstack/common/rpc/impl_qpid.py
python
Connection.join_consumer_pool
(self, callback, pool_name, topic, exchange_name=None)
return consumer
Register as a member of a group of consumers for a given topic from the specified exchange. Exactly one member of a given pool will receive each message. A message will be delivered to multiple pools, if more than one is created.
Register as a member of a group of consumers for a given topic from the specified exchange.
[ "Register", "as", "a", "member", "of", "a", "group", "of", "consumers", "for", "a", "given", "topic", "from", "the", "specified", "exchange", "." ]
def join_consumer_pool(self, callback, pool_name, topic, exchange_name=None): """Register as a member of a group of consumers for a given topic from the specified exchange. Exactly one member of a given pool will receive each message. A message will be delivered to multiple pools, if more than one is created. """ callback_wrapper = rpc_amqp.CallbackWrapper( conf=self.conf, callback=callback, connection_pool=rpc_amqp.get_connection_pool(self.conf, Connection), ) self.proxy_callbacks.append(callback_wrapper) consumer = TopicConsumer(conf=self.conf, session=self.session, topic=topic, callback=callback_wrapper, name=pool_name, exchange_name=exchange_name) self._register_consumer(consumer) return consumer
[ "def", "join_consumer_pool", "(", "self", ",", "callback", ",", "pool_name", ",", "topic", ",", "exchange_name", "=", "None", ")", ":", "callback_wrapper", "=", "rpc_amqp", ".", "CallbackWrapper", "(", "conf", "=", "self", ".", "conf", ",", "callback", "=", "callback", ",", "connection_pool", "=", "rpc_amqp", ".", "get_connection_pool", "(", "self", ".", "conf", ",", "Connection", ")", ",", ")", "self", ".", "proxy_callbacks", ".", "append", "(", "callback_wrapper", ")", "consumer", "=", "TopicConsumer", "(", "conf", "=", "self", ".", "conf", ",", "session", "=", "self", ".", "session", ",", "topic", "=", "topic", ",", "callback", "=", "callback_wrapper", ",", "name", "=", "pool_name", ",", "exchange_name", "=", "exchange_name", ")", "self", ".", "_register_consumer", "(", "consumer", ")", "return", "consumer" ]
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/quantum/quantum/openstack/common/rpc/impl_qpid.py#L563-L589
Fenixin/Minecraft-Region-Fixer
bfafd378ceb65116e4ea48cab24f1e6394051978
nbt/world.py
python
_BaseWorldFolder.__repr__
(self)
return "%s(%r)" % (self.__class__.__name__,self.worldfolder)
[]
def __repr__(self): return "%s(%r)" % (self.__class__.__name__,self.worldfolder)
[ "def", "__repr__", "(", "self", ")", ":", "return", "\"%s(%r)\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "self", ".", "worldfolder", ")" ]
https://github.com/Fenixin/Minecraft-Region-Fixer/blob/bfafd378ceb65116e4ea48cab24f1e6394051978/nbt/world.py#L230-L231
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/werkzeug/http.py
python
is_byte_range_valid
(start, stop, length)
return 0 <= start < length
Checks if a given byte content range is valid for the given length. .. versionadded:: 0.7
Checks if a given byte content range is valid for the given length.
[ "Checks", "if", "a", "given", "byte", "content", "range", "is", "valid", "for", "the", "given", "length", "." ]
def is_byte_range_valid(start, stop, length): """Checks if a given byte content range is valid for the given length. .. versionadded:: 0.7 """ if (start is None) != (stop is None): return False elif start is None: return length is None or length >= 0 elif length is None: return 0 <= start < stop elif start >= stop: return False return 0 <= start < length
[ "def", "is_byte_range_valid", "(", "start", ",", "stop", ",", "length", ")", ":", "if", "(", "start", "is", "None", ")", "!=", "(", "stop", "is", "None", ")", ":", "return", "False", "elif", "start", "is", "None", ":", "return", "length", "is", "None", "or", "length", ">=", "0", "elif", "length", "is", "None", ":", "return", "0", "<=", "start", "<", "stop", "elif", "start", ">=", "stop", ":", "return", "False", "return", "0", "<=", "start", "<", "length" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/werkzeug/http.py#L954-L967
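The validity rules form a small truth table; copying the function verbatim makes it easy to spot-check every branch.

    def is_byte_range_valid(start, stop, length):
        if (start is None) != (stop is None):
            return False
        elif start is None:
            return length is None or length >= 0
        elif length is None:
            return 0 <= start < stop
        elif start >= stop:
            return False
        return 0 <= start < length

    assert is_byte_range_valid(0, 500, 1000)          # ordinary in-bounds range
    assert is_byte_range_valid(0, 500, None)          # unknown length: only start < stop matters
    assert is_byte_range_valid(None, None, 1000)      # no range given is fine
    assert not is_byte_range_valid(None, 500, 1000)   # half-specified range
    assert not is_byte_range_valid(500, 400, 1000)    # inverted range
    assert not is_byte_range_valid(1000, 1200, 1000)  # start beyond the entity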
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/Lib/rlcompleter.py
python
Completer.__init__
(self, namespace = None)
Create a new completer for the command line. Completer([namespace]) -> completer instance. If unspecified, the default namespace where completions are performed is __main__ (technically, __main__.__dict__). Namespaces should be given as dictionaries. Completer instances should be used as the completion mechanism of readline via the set_completer() call: readline.set_completer(Completer(my_namespace).complete)
Create a new completer for the command line.
[ "Create", "a", "new", "completer", "for", "the", "command", "line", "." ]
def __init__(self, namespace = None): """Create a new completer for the command line. Completer([namespace]) -> completer instance. If unspecified, the default namespace where completions are performed is __main__ (technically, __main__.__dict__). Namespaces should be given as dictionaries. Completer instances should be used as the completion mechanism of readline via the set_completer() call: readline.set_completer(Completer(my_namespace).complete) """ if namespace and not isinstance(namespace, dict): raise TypeError,'namespace must be a dictionary' # Don't bind to namespace quite yet, but flag whether the user wants a # specific namespace or to use __main__.__dict__. This will allow us # to bind to __main__.__dict__ at completion time, not now. if namespace is None: self.use_main_ns = 1 else: self.use_main_ns = 0 self.namespace = namespace
[ "def", "__init__", "(", "self", ",", "namespace", "=", "None", ")", ":", "if", "namespace", "and", "not", "isinstance", "(", "namespace", ",", "dict", ")", ":", "raise", "TypeError", ",", "'namespace must be a dictionary'", "# Don't bind to namespace quite yet, but flag whether the user wants a", "# specific namespace or to use __main__.__dict__. This will allow us", "# to bind to __main__.__dict__ at completion time, not now.", "if", "namespace", "is", "None", ":", "self", ".", "use_main_ns", "=", "1", "else", ":", "self", ".", "use_main_ns", "=", "0", "self", ".", "namespace", "=", "namespace" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/Lib/rlcompleter.py#L48-L73
UCL-INGI/INGInious
60f10cb4c375ce207471043e76bd813220b95399
inginious/agent/docker_agent/_docker_interface.py
python
DockerInterface.start_container
(self, container_id)
Starts a container (obviously)
Starts a container (obviously)
[ "Starts", "a", "container", "(", "obviously", ")" ]
def start_container(self, container_id): """ Starts a container (obviously) """ self._docker.containers.get(container_id).start()
[ "def", "start_container", "(", "self", ",", "container_id", ")", ":", "self", ".", "_docker", ".", "containers", ".", "get", "(", "container_id", ")", ".", "start", "(", ")" ]
https://github.com/UCL-INGI/INGInious/blob/60f10cb4c375ce207471043e76bd813220b95399/inginious/agent/docker_agent/_docker_interface.py#L202-L204
openedx/ecommerce
db6c774e239e5aa65e5a6151995073d364e8c896
ecommerce/extensions/basket/views.py
python
VoucherAddApiView.post
(self, request)
return self.get_payment_api_response(status=status)
Adds voucher to a basket using the voucher's code. Parameters: { "code": "SUMMER20" } If successful, adds voucher and returns 200 and the same response as the payment api. If unsuccessful, returns 400 with the errors and the same response as the payment api.
Adds voucher to a basket using the voucher's code.
[ "Adds", "voucher", "to", "a", "basket", "using", "the", "voucher", "s", "code", "." ]
def post(self, request): """ Adds voucher to a basket using the voucher's code. Parameters: { "code": "SUMMER20" } If successful, adds voucher and returns 200 and the same response as the payment api. If unsuccessful, returns 400 with the errors and the same response as the payment api. """ code = request.data.get('code') code = code.strip() try: self.verify_and_apply_voucher(code) status = 200 except RedirectException as e: return Response({'redirect': e.response.url}) except VoucherException: # errors are passed via messages object and handled during serialization status = 400 return self.get_payment_api_response(status=status)
[ "def", "post", "(", "self", ",", "request", ")", ":", "code", "=", "request", ".", "data", ".", "get", "(", "'code'", ")", "code", "=", "code", ".", "strip", "(", ")", "try", ":", "self", ".", "verify_and_apply_voucher", "(", "code", ")", "status", "=", "200", "except", "RedirectException", "as", "e", ":", "return", "Response", "(", "{", "'redirect'", ":", "e", ".", "response", ".", "url", "}", ")", "except", "VoucherException", ":", "# errors are passed via messages object and handled during serialization", "status", "=", "400", "return", "self", ".", "get_payment_api_response", "(", "status", "=", "status", ")" ]
https://github.com/openedx/ecommerce/blob/db6c774e239e5aa65e5a6151995073d364e8c896/ecommerce/extensions/basket/views.py#L1082-L1106
menpo/menpo
a61500656c4fc2eea82497684f13cc31a605550b
menpo/transform/homogeneous/rotation.py
python
AlignmentRotation.set_rotation_matrix
(self, value, skip_checks=False)
r""" Sets the rotation matrix. Parameters ---------- value : ``(n_dims, n_dims)`` `ndarray` The new rotation matrix. skip_checks : `bool`, optional If ``True`` avoid sanity checks on ``value`` for performance.
r""" Sets the rotation matrix.
[ "r", "Sets", "the", "rotation", "matrix", "." ]
def set_rotation_matrix(self, value, skip_checks=False): r""" Sets the rotation matrix. Parameters ---------- value : ``(n_dims, n_dims)`` `ndarray` The new rotation matrix. skip_checks : `bool`, optional If ``True`` avoid sanity checks on ``value`` for performance. """ Rotation.set_rotation_matrix(self, value, skip_checks=skip_checks) self._sync_target_from_state()
[ "def", "set_rotation_matrix", "(", "self", ",", "value", ",", "skip_checks", "=", "False", ")", ":", "Rotation", ".", "set_rotation_matrix", "(", "self", ",", "value", ",", "skip_checks", "=", "skip_checks", ")", "self", ".", "_sync_target_from_state", "(", ")" ]
https://github.com/menpo/menpo/blob/a61500656c4fc2eea82497684f13cc31a605550b/menpo/transform/homogeneous/rotation.py#L534-L546
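The skip_checks flag presumably bypasses validation of the kind sketched below (an assumption; the base class's exact checks are not shown in this record): a proper rotation matrix is orthogonal with determinant +1.

    import numpy as np

    def check_rotation_matrix(r, atol=1e-8):
        # orthogonality: R @ R.T == I ; proper rotation: det(R) == +1
        if not np.allclose(r @ r.T, np.eye(r.shape[0]), atol=atol):
            raise ValueError('matrix is not orthogonal')
        if not np.isclose(np.linalg.det(r), 1.0, atol=atol):
            raise ValueError('matrix is not a proper rotation (det != 1)')

    theta = np.pi / 6
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta),  np.cos(theta)]])
    check_rotation_matrix(rot)  # passes silently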
livid/v2ex-gae
32be3a77d535e7c9df85a333e01ab8834d0e8581
v2ex/babel/ext/bleach/__init__.py
python
_render
(tree)
Try rendering as HTML, then XML, then give up.
Try rendering as HTML, then XML, then give up.
[ "Try", "rendering", "as", "HTML", "then", "XML", "then", "give", "up", "." ]
def _render(tree): """Try rendering as HTML, then XML, then give up.""" try: return force_unicode(_serialize(tree)) except Exception, e: log.error('HTML: %r' % e, exc_info=sys.exc_info()) try: return force_unicode(tree.toxml()) except Exception, e: log.error('XML: %r' % e, exc_info=sys.exc_info()) return u''
[ "def", "_render", "(", "tree", ")", ":", "try", ":", "return", "force_unicode", "(", "_serialize", "(", "tree", ")", ")", "except", "Exception", ",", "e", ":", "log", ".", "error", "(", "'HTML: %r'", "%", "e", ",", "exc_info", "=", "sys", ".", "exc_info", "(", ")", ")", "try", ":", "return", "force_unicode", "(", "tree", ".", "toxml", "(", ")", ")", "except", "Exception", ",", "e", ":", "log", ".", "error", "(", "'XML: %r'", "%", "e", ",", "exc_info", "=", "sys", ".", "exc_info", "(", ")", ")", "return", "u''" ]
https://github.com/livid/v2ex-gae/blob/32be3a77d535e7c9df85a333e01ab8834d0e8581/v2ex/babel/ext/bleach/__init__.py#L323-L333
apple/ccs-calendarserver
13c706b985fb728b9aab42dc0fef85aae21921c3
txdav/caldav/datastore/util.py
python
StorageTransportBase.write
(self, data)
Children must override this to actually write the data, but should upcall this implementation to interact properly with producers.
Children must override this to actually write the data, but should upcall this implementation to interact properly with producers.
[ "Children", "must", "override", "this", "to", "actually", "write", "the", "data", "but", "should", "upcall", "this", "implementation", "to", "interact", "properly", "with", "producers", "." ]
def write(self, data): """ Children must override this to actually write the data, but should upcall this implementation to interact properly with producers. """ if self._producer and self._streamingProducer: # XXX this needs to be in a callLater because otherwise # resumeProducing will call write which will call resumeProducing # (etc) forever. self._clock.callLater(0, self._producer.resumeProducing)
[ "def", "write", "(", "self", ",", "data", ")", ":", "if", "self", ".", "_producer", "and", "self", ".", "_streamingProducer", ":", "# XXX this needs to be in a callLater because otherwise", "# resumeProducing will call write which will call resumeProducing", "# (etc) forever.", "self", ".", "_clock", ".", "callLater", "(", "0", ",", "self", ".", "_producer", ".", "resumeProducing", ")" ]
https://github.com/apple/ccs-calendarserver/blob/13c706b985fb728b9aab42dc0fef85aae21921c3/txdav/caldav/datastore/util.py#L521-L530
ctxis/canape
5f0e03424577296bcc60c2008a60a98ec5307e4b
CANAPE.Scripting/Lib/logging/__init__.py
python
FileHandler._open
(self)
return stream
Open the current base file with the (original) mode and encoding. Return the resulting stream.
Open the current base file with the (original) mode and encoding. Return the resulting stream.
[ "Open", "the", "current", "base", "file", "with", "the", "(", "original", ")", "mode", "and", "encoding", ".", "Return", "the", "resulting", "stream", "." ]
def _open(self): """ Open the current base file with the (original) mode and encoding. Return the resulting stream. """ if self.encoding is None: stream = open(self.baseFilename, self.mode) else: stream = codecs.open(self.baseFilename, self.mode, self.encoding) return stream
[ "def", "_open", "(", "self", ")", ":", "if", "self", ".", "encoding", "is", "None", ":", "stream", "=", "open", "(", "self", ".", "baseFilename", ",", "self", ".", "mode", ")", "else", ":", "stream", "=", "codecs", ".", "open", "(", "self", ".", "baseFilename", ",", "self", ".", "mode", ",", "self", ".", "encoding", ")", "return", "stream" ]
https://github.com/ctxis/canape/blob/5f0e03424577296bcc60c2008a60a98ec5307e4b/CANAPE.Scripting/Lib/logging/__init__.py#L906-L915
edisonlz/fastor
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
base/site-packages/django/db/backends/sqlite3/introspection.py
python
DatabaseIntrospection.get_table_description
(self, cursor, table_name)
return [FieldInfo(info['name'], info['type'], None, info['size'], None, None, info['null_ok']) for info in self._table_info(cursor, table_name)]
Returns a description of the table, with the DB-API cursor.description interface.
Returns a description of the table, with the DB-API cursor.description interface.
[ "Returns", "a", "description", "of", "the", "table", "with", "the", "DB", "-", "API", "cursor", ".", "description", "interface", "." ]
def get_table_description(self, cursor, table_name): "Returns a description of the table, with the DB-API cursor.description interface." return [FieldInfo(info['name'], info['type'], None, info['size'], None, None, info['null_ok']) for info in self._table_info(cursor, table_name)]
[ "def", "get_table_description", "(", "self", ",", "cursor", ",", "table_name", ")", ":", "return", "[", "FieldInfo", "(", "info", "[", "'name'", "]", ",", "info", "[", "'type'", "]", ",", "None", ",", "info", "[", "'size'", "]", ",", "None", ",", "None", ",", "info", "[", "'null_ok'", "]", ")", "for", "info", "in", "self", ".", "_table_info", "(", "cursor", ",", "table_name", ")", "]" ]
https://github.com/edisonlz/fastor/blob/342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3/base/site-packages/django/db/backends/sqlite3/introspection.py#L62-L65
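The _table_info helper is not shown in this record, but SQLite exposes the needed column metadata through PRAGMA table_info, which is very likely what it wraps (an assumption); a standalone sketch:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE book (id INTEGER PRIMARY KEY, title TEXT NOT NULL)')
    # PRAGMA table_info rows: (cid, name, type, notnull, dflt_value, pk)
    for cid, name, col_type, notnull, default, pk in conn.execute('PRAGMA table_info(book)'):
        print(name, col_type, 'null_ok' if not notnull else 'NOT NULL')
    # id INTEGER null_ok
    # title TEXT NOT NULL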
shinnytech/tqsdk-python
b766b45bb82c89a0401a6a84e0e42600fa10e6f4
tqsdk/tafunc.py
python
get_vega
(series, k, r, v, t, d1=None)
return pd.Series(np.where(np.isnan(d1), np.nan, series * np.sqrt(t) * _get_pdf(d1)))
Compute the option Greek vega Args: series (pandas.Series): price series of the underlying k (float): option strike price r (float): risk-free interest rate v (float / pandas.Series): volatility * float: the same v is used for every element of series when computing the theoretical price * pandas.Series: must have the same number of elements as series; for each element of series the corresponding value in v is used to compute the theoretical price t (float / pandas.Series): annualized time to expiry, e.g. 100 days to expiry corresponds to an annualized time of 100/360 * float: the same t is used for every element of series when computing the theoretical price * pandas.Series: must have the same number of elements as series; for each element of series the corresponding value in t is used to compute the theoretical price d1 (None | pandas.Series): [optional] the d1 values of the series in the BS formula Returns: pandas.Series: the vega values of the series Example:: import pandas as pd from tqsdk import TqApi, TqAuth, tafunc api = TqApi(auth=TqAuth("your_tq_account", "your_password")) quote = api.get_quote("SHFE.cu2006") ks = api.get_kline_serial("SHFE.cu2006", 24 * 60 * 60, 10) v = tafunc.get_his_volatility(ks, quote) # historical volatility option = api.get_quote("SHFE.cu2006C45000") klines = api.get_kline_serial(["SHFE.cu2006C45000", "SHFE.cu2006"], 24 * 60 * 60, 10) t = tafunc.get_t(klines, option.expire_datetime) impv = tafunc.get_impv(klines["close1"], klines["close"], 45000, 0.025, v, t, "CALL") vega = tafunc.get_vega(klines["close1"], 45000, 0.025, v, t) print("vega", list(vega)) api.close()
Compute the option Greek vega
[ "Compute", "the", "option", "Greek", "vega" ]
def get_vega(series, k, r, v, t, d1=None): """ Compute the option Greek vega Args: series (pandas.Series): price series of the underlying k (float): option strike price r (float): risk-free interest rate v (float / pandas.Series): volatility * float: the same v is used for every element of series when computing the theoretical price * pandas.Series: must have the same number of elements as series; for each element of series the corresponding value in v is used to compute the theoretical price t (float / pandas.Series): annualized time to expiry, e.g. 100 days to expiry corresponds to an annualized time of 100/360 * float: the same t is used for every element of series when computing the theoretical price * pandas.Series: must have the same number of elements as series; for each element of series the corresponding value in t is used to compute the theoretical price d1 (None | pandas.Series): [optional] the d1 values of the series in the BS formula Returns: pandas.Series: the vega values of the series Example:: import pandas as pd from tqsdk import TqApi, TqAuth, tafunc api = TqApi(auth=TqAuth("your_tq_account", "your_password")) quote = api.get_quote("SHFE.cu2006") ks = api.get_kline_serial("SHFE.cu2006", 24 * 60 * 60, 10) v = tafunc.get_his_volatility(ks, quote) # historical volatility option = api.get_quote("SHFE.cu2006C45000") klines = api.get_kline_serial(["SHFE.cu2006C45000", "SHFE.cu2006"], 24 * 60 * 60, 10) t = tafunc.get_t(klines, option.expire_datetime) impv = tafunc.get_impv(klines["close1"], klines["close"], 45000, 0.025, v, t, "CALL") vega = tafunc.get_vega(klines["close1"], 45000, 0.025, v, t) print("vega", list(vega)) api.close() """ if d1 is None: d1 = _get_d1(series, k, r, v, t) return pd.Series(np.where(np.isnan(d1), np.nan, series * np.sqrt(t) * _get_pdf(d1)))
[ "def", "get_vega", "(", "series", ",", "k", ",", "r", ",", "v", ",", "t", ",", "d1", "=", "None", ")", ":", "if", "d1", "is", "None", ":", "d1", "=", "_get_d1", "(", "series", ",", "k", ",", "r", ",", "v", ",", "t", ")", "return", "pd", ".", "Series", "(", "np", ".", "where", "(", "np", ".", "isnan", "(", "d1", ")", ",", "np", ".", "nan", ",", "series", "*", "np", ".", "sqrt", "(", "t", ")", "*", "_get_pdf", "(", "d1", ")", ")", ")" ]
https://github.com/shinnytech/tqsdk-python/blob/b766b45bb82c89a0401a6a84e0e42600fa10e6f4/tqsdk/tafunc.py#L1182-L1232
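The helpers _get_d1 and _get_pdf are internal to tafunc and not shown here, so the sketch below restates the textbook Black-Scholes quantities they correspond to: d1 = (ln(S/K) + (r + v^2/2) * t) / (v * sqrt(t)) and vega = S * sqrt(t) * phi(d1), with phi the standard normal density.

    import numpy as np
    import pandas as pd

    def bs_vega(series, k, r, v, t):
        s = np.asarray(series, dtype=float)
        d1 = (np.log(s / k) + (r + v ** 2 / 2) * t) / (v * np.sqrt(t))
        pdf = np.exp(-d1 ** 2 / 2) / np.sqrt(2 * np.pi)  # standard normal density
        return pd.Series(s * np.sqrt(t) * pdf)

    prices = pd.Series([44000.0, 45000.0, 46000.0])
    print(bs_vega(prices, k=45000, r=0.025, v=0.3, t=100 / 360))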
JaniceWuo/MovieRecommend
4c86db64ca45598917d304f535413df3bc9fea65
movierecommend/venv1/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/distlib/metadata.py
python
_version2fieldlist
(version)
[]
def _version2fieldlist(version): if version == '1.0': return _241_FIELDS elif version == '1.1': return _314_FIELDS elif version == '1.2': return _345_FIELDS elif version == '2.0': return _426_FIELDS raise MetadataUnrecognizedVersionError(version)
[ "def", "_version2fieldlist", "(", "version", ")", ":", "if", "version", "==", "'1.0'", ":", "return", "_241_FIELDS", "elif", "version", "==", "'1.1'", ":", "return", "_314_FIELDS", "elif", "version", "==", "'1.2'", ":", "return", "_345_FIELDS", "elif", "version", "==", "'2.0'", ":", "return", "_426_FIELDS", "raise", "MetadataUnrecognizedVersionError", "(", "version", ")" ]
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/distlib/metadata.py#L103-L112
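The if/elif chain above is a version-to-tuple lookup, which reads more naturally as a dict; a self-contained sketch with shortened placeholder field tuples (the real _241_FIELDS etc. are much longer):

    class MetadataUnrecognizedVersionError(ValueError):
        pass

    # placeholder tuples standing in for the real field lists
    _241_FIELDS = ('Metadata-Version', 'Name', 'Version')
    _314_FIELDS = _241_FIELDS + ('Classifier',)
    _345_FIELDS = _314_FIELDS + ('Project-URL',)
    _426_FIELDS = _345_FIELDS + ('Extension',)

    _FIELDS_BY_VERSION = {'1.0': _241_FIELDS, '1.1': _314_FIELDS,
                          '1.2': _345_FIELDS, '2.0': _426_FIELDS}

    def version2fieldlist(version):
        try:
            return _FIELDS_BY_VERSION[version]
        except KeyError:
            raise MetadataUnrecognizedVersionError(version)

    print(version2fieldlist('1.1'))  # ('Metadata-Version', 'Name', 'Version', 'Classifier')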
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/ip_messaging/v1/service/channel/message.py
python
MessageInstance.__repr__
(self)
return '<Twilio.IpMessaging.V1.MessageInstance {}>'.format(context)
Provide a friendly representation :returns: Machine friendly representation :rtype: str
Provide a friendly representation
[ "Provide", "a", "friendly", "representation" ]
def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.IpMessaging.V1.MessageInstance {}>'.format(context)
[ "def", "__repr__", "(", "self", ")", ":", "context", "=", "' '", ".", "join", "(", "'{}={}'", ".", "format", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "self", ".", "_solution", ".", "items", "(", ")", ")", "return", "'<Twilio.IpMessaging.V1.MessageInstance {}>'", ".", "format", "(", "context", ")" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/ip_messaging/v1/service/channel/message.py#L501-L509
tensorflow/lingvo
ce10019243d954c3c3ebe739f7589b5eebfdf907
lingvo/tasks/mt/encoder.py
python
TransformerBatchMajorEncoder.FProp
(self, theta, input_batch)
Embeds source ids and transforms with TransformerStack. Args: theta: A `.NestedMap` object containing weights' values of this layer and its children layers. input_batch: A `.NestedMap` object containing: ids - The inputs tensor of shape [batch, time]. paddings - The ids' paddings of shape [batch, time]. Returns: A '.NestedMap' object containing: encoded - The encoded features of shape [time, batch, dim] or [batch, time, dim], depending p.output_data_format. padding - The encoded features' padding of shape [time, batch] or [batch, time]. segment_id - The segmentation of packed inputs of shape [time, batch] or [batch, time] if it is supported by the model, or None otherwise. embedded_inputs - The embedded inputs tokens without positional encodings of shape [time, batch, dim] or [batch, time, dim].
Embeds source ids and transforms with TransformerStack.
[ "Embeds", "source", "ids", "and", "transforms", "with", "TransformerStack", "." ]
def FProp(self, theta, input_batch): """Embeds source ids and transforms with TransformerStack. Args: theta: A `.NestedMap` object containing weights' values of this layer and its children layers. input_batch: A `.NestedMap` object containing: ids - The inputs tensor of shape [batch, time]. paddings - The ids' paddings of shape [batch, time]. Returns: A '.NestedMap' object containing: encoded - The encoded features of shape [time, batch, dim] or [batch, time, dim], depending p.output_data_format. padding - The encoded features' padding of shape [time, batch] or [batch, time]. segment_id - The segmentation of packed inputs of shape [time, batch] or [batch, time] if it is supported by the model, or None otherwise. embedded_inputs - The embedded inputs tokens without positional encodings of shape [time, batch, dim] or [batch, time, dim]. """ p = self.params with tf.name_scope(p.name): # [batch, time] input_ids = input_batch.ids # [batch, time] paddings = input_batch.paddings # [batch, time] segment_ids = input_batch.segment_ids if p.packed_input else None batch = py_utils.GetShape(input_ids)[0] time = py_utils.GetShape(input_ids)[1] # Embedding layer. # [batch, time, dim] if not p.shared_emb: input_embs = self.token_emb.EmbLookup(theta.token_emb, input_ids) else: input_embs = self.softmax.EmbLookup(theta.softmax, input_ids) orig_input_embs = input_embs # [1, time, dim] if p.packed_input: positions = input_batch.segment_pos position_embs = tf.expand_dims( self.position_emb.FPropWithPosition(theta.position_emb, positions), 0) else: position_embs = tf.expand_dims( self.position_emb.FProp(theta.position_emb, time), 0) # [batch, time, dim] input_embs += position_embs if p.input_dropout_tpl.fprop_dtype: input_embs = tf.cast(input_embs, p.input_dropout_tpl.fprop_dtype) paddings = tf.cast(paddings, p.input_dropout_tpl.fprop_dtype) input_embs = self.input_dropout.FProp(theta.input_dropout, input_embs) # [batch, time, dim] transformer_input = input_embs # Explicitly set the input shape of Transformer layers, to avoid # unknown shape error occurred to tf.einsum on nonTPU devices. transformer_input = tf.reshape(transformer_input, [batch, time, p.model_dim]) # Compute self-attention segment mask once. if p.packed_input: segment_mask = batch_major_attention.SegmentMask( segment_ids, segment_ids, dtype=transformer_input.dtype) else: segment_mask = tf.zeros([batch, 1, time, time]) shape = py_utils.GetShape(transformer_input) batch_size = shape[0] seq_len = shape[1] paddings = tf.reshape(paddings, [batch_size, seq_len]) encoded, padding = self.transformer_stack.FProp(theta.transformer_stack, transformer_input, paddings, segment_mask) if p.final_layer_norm: encoded = self.final_ln.FProp(theta.final_ln, encoded) seq_lengths = tf.cast(tf.reduce_sum(1. - padding, axis=1), tf.int32) if p.output_data_format == 'TBC': encoded = tf.transpose(encoded, [1, 0, 2]) # [time, batch, dim] padding = tf.transpose(padding) # [time, batch] segment_ids = tf.transpose(segment_ids) if p.packed_input else None orig_input_embs = tf.transpose(orig_input_embs, [1, 0, 2]) return py_utils.NestedMap( encoded=encoded, padding=padding, seq_lengths=seq_lengths, # used by beam_search_helper. segment_id=segment_ids, embedded_inputs=orig_input_embs)
[ "def", "FProp", "(", "self", ",", "theta", ",", "input_batch", ")", ":", "p", "=", "self", ".", "params", "with", "tf", ".", "name_scope", "(", "p", ".", "name", ")", ":", "# [batch, time]", "input_ids", "=", "input_batch", ".", "ids", "# [batch, time]", "paddings", "=", "input_batch", ".", "paddings", "# [batch, time]", "segment_ids", "=", "input_batch", ".", "segment_ids", "if", "p", ".", "packed_input", "else", "None", "batch", "=", "py_utils", ".", "GetShape", "(", "input_ids", ")", "[", "0", "]", "time", "=", "py_utils", ".", "GetShape", "(", "input_ids", ")", "[", "1", "]", "# Embedding layer.", "# [batch, time, dim]", "if", "not", "p", ".", "shared_emb", ":", "input_embs", "=", "self", ".", "token_emb", ".", "EmbLookup", "(", "theta", ".", "token_emb", ",", "input_ids", ")", "else", ":", "input_embs", "=", "self", ".", "softmax", ".", "EmbLookup", "(", "theta", ".", "softmax", ",", "input_ids", ")", "orig_input_embs", "=", "input_embs", "# [1, time, dim]", "if", "p", ".", "packed_input", ":", "positions", "=", "input_batch", ".", "segment_pos", "position_embs", "=", "tf", ".", "expand_dims", "(", "self", ".", "position_emb", ".", "FPropWithPosition", "(", "theta", ".", "position_emb", ",", "positions", ")", ",", "0", ")", "else", ":", "position_embs", "=", "tf", ".", "expand_dims", "(", "self", ".", "position_emb", ".", "FProp", "(", "theta", ".", "position_emb", ",", "time", ")", ",", "0", ")", "# [batch, time, dim]", "input_embs", "+=", "position_embs", "if", "p", ".", "input_dropout_tpl", ".", "fprop_dtype", ":", "input_embs", "=", "tf", ".", "cast", "(", "input_embs", ",", "p", ".", "input_dropout_tpl", ".", "fprop_dtype", ")", "paddings", "=", "tf", ".", "cast", "(", "paddings", ",", "p", ".", "input_dropout_tpl", ".", "fprop_dtype", ")", "input_embs", "=", "self", ".", "input_dropout", ".", "FProp", "(", "theta", ".", "input_dropout", ",", "input_embs", ")", "# [batch, time, dim]", "transformer_input", "=", "input_embs", "# Explicitly set the input shape of Transformer layers, to avoid", "# unknown shape error occurred to tf.einsum on nonTPU devices.", "transformer_input", "=", "tf", ".", "reshape", "(", "transformer_input", ",", "[", "batch", ",", "time", ",", "p", ".", "model_dim", "]", ")", "# Compute self-attention segment mask once.", "if", "p", ".", "packed_input", ":", "segment_mask", "=", "batch_major_attention", ".", "SegmentMask", "(", "segment_ids", ",", "segment_ids", ",", "dtype", "=", "transformer_input", ".", "dtype", ")", "else", ":", "segment_mask", "=", "tf", ".", "zeros", "(", "[", "batch", ",", "1", ",", "time", ",", "time", "]", ")", "shape", "=", "py_utils", ".", "GetShape", "(", "transformer_input", ")", "batch_size", "=", "shape", "[", "0", "]", "seq_len", "=", "shape", "[", "1", "]", "paddings", "=", "tf", ".", "reshape", "(", "paddings", ",", "[", "batch_size", ",", "seq_len", "]", ")", "encoded", ",", "padding", "=", "self", ".", "transformer_stack", ".", "FProp", "(", "theta", ".", "transformer_stack", ",", "transformer_input", ",", "paddings", ",", "segment_mask", ")", "if", "p", ".", "final_layer_norm", ":", "encoded", "=", "self", ".", "final_ln", ".", "FProp", "(", "theta", ".", "final_ln", ",", "encoded", ")", "seq_lengths", "=", "tf", ".", "cast", "(", "tf", ".", "reduce_sum", "(", "1.", "-", "padding", ",", "axis", "=", "1", ")", ",", "tf", ".", "int32", ")", "if", "p", ".", "output_data_format", "==", "'TBC'", ":", "encoded", "=", "tf", ".", "transpose", "(", "encoded", ",", "[", "1", ",", "0", ",", "2", "]", ")", "# [time, batch, dim]", 
"padding", "=", "tf", ".", "transpose", "(", "padding", ")", "# [time, batch]", "segment_ids", "=", "tf", ".", "transpose", "(", "segment_ids", ")", "if", "p", ".", "packed_input", "else", "None", "orig_input_embs", "=", "tf", ".", "transpose", "(", "orig_input_embs", ",", "[", "1", ",", "0", ",", "2", "]", ")", "return", "py_utils", ".", "NestedMap", "(", "encoded", "=", "encoded", ",", "padding", "=", "padding", ",", "seq_lengths", "=", "seq_lengths", ",", "# used by beam_search_helper.", "segment_id", "=", "segment_ids", ",", "embedded_inputs", "=", "orig_input_embs", ")" ]
https://github.com/tensorflow/lingvo/blob/ce10019243d954c3c3ebe739f7589b5eebfdf907/lingvo/tasks/mt/encoder.py#L927-L1026
agateau/yokadi
c646657e2da5703268cbd24e3837f7a1ec62c263
yokadi/ycli/tui.py
python
addInputAnswers
(*answers)
Add answers to tui internal answer buffer. Next call to editLine() will pop the first answer from the buffer instead of prompting the user. This is useful for unit-testing.
Add answers to tui internal answer buffer. Next call to editLine() will pop the first answer from the buffer instead of prompting the user. This is useful for unit-testing.
[ "Add", "answers", "to", "tui", "internal", "answer", "buffer", ".", "Next", "call", "to", "editLine", "()", "will", "pop", "the", "first", "answer", "from", "the", "buffer", "instead", "of", "prompting", "the", "user", ".", "This", "is", "useful", "for", "unit", "-", "testing", "." ]
def addInputAnswers(*answers): """Add answers to tui internal answer buffer. Next call to editLine() will pop the first answer from the buffer instead of prompting the user. This is useful for unit-testing.""" _answers.extend(answers)
[ "def", "addInputAnswers", "(", "*", "answers", ")", ":", "_answers", ".", "extend", "(", "answers", ")" ]
https://github.com/agateau/yokadi/blob/c646657e2da5703268cbd24e3837f7a1ec62c263/yokadi/ycli/tui.py#L259-L263
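A minimal sketch of the buffer pattern the docstring describes: a prompt function that pops pre-seeded answers before falling back to real input (names simplified; the real editLine does much more).

    _answers = []

    def add_input_answers(*answers):
        _answers.extend(answers)

    def edit_line(prompt):
        if _answers:
            return _answers.pop(0)  # scripted answer for unit tests
        return input(prompt)        # interactive fallback

    add_input_answers('y', 'my task')
    assert edit_line('Continue? ') == 'y'
    assert edit_line('Title: ') == 'my task'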
Cadene/pretrained-models.pytorch
8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0
pretrainedmodels/models/xception.py
python
Xception.logits
(self, features)
return x
[]
def logits(self, features): x = nn.ReLU(inplace=True)(features) x = F.adaptive_avg_pool2d(x, (1, 1)) x = x.view(x.size(0), -1) x = self.last_linear(x) return x
[ "def", "logits", "(", "self", ",", "features", ")", ":", "x", "=", "nn", ".", "ReLU", "(", "inplace", "=", "True", ")", "(", "features", ")", "x", "=", "F", ".", "adaptive_avg_pool2d", "(", "x", ",", "(", "1", ",", "1", ")", ")", "x", "=", "x", ".", "view", "(", "x", ".", "size", "(", "0", ")", ",", "-", "1", ")", "x", "=", "self", ".", "last_linear", "(", "x", ")", "return", "x" ]
https://github.com/Cadene/pretrained-models.pytorch/blob/8aae3d8f1135b6b13fed79c1d431e3449fdbf6e0/pretrainedmodels/models/xception.py#L202-L208
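A standalone sketch of the same classification head (assuming torch is installed, and that 2048 matches Xception's final feature width): ReLU, global average pooling to 1x1, flatten, then a linear layer.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    last_linear = nn.Linear(2048, 1000)  # feature width / ImageNet class count

    def logits(features):
        x = F.relu(features)
        x = F.adaptive_avg_pool2d(x, (1, 1))  # (N, C, H, W) -> (N, C, 1, 1)
        x = x.view(x.size(0), -1)             # -> (N, C)
        return last_linear(x)                 # -> (N, 1000)

    out = logits(torch.randn(2, 2048, 10, 10))
    print(out.shape)  # torch.Size([2, 1000])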
debian-calibre/calibre
020fc81d3936a64b2ac51459ecb796666ab6a051
src/calibre/gui2/tweak_book/editor/syntax/html.py
python
attribute_name
(state, text, i, formats, user_data)
return [(0, None)]
After attribute name
After attribute name
[ "After", "attribute", "name" ]
def attribute_name(state, text, i, formats, user_data): ' After attribute name ' ch = text[i] if ch in space_chars: return [(1, None)] if ch == '=': state.parse = ATTRIBUTE_VALUE return [(1, formats['attr'])] # Standalone attribute with no value state.parse = IN_OPENING_TAG state.attribute_name = None return [(0, None)]
[ "def", "attribute_name", "(", "state", ",", "text", ",", "i", ",", "formats", ",", "user_data", ")", ":", "ch", "=", "text", "[", "i", "]", "if", "ch", "in", "space_chars", ":", "return", "[", "(", "1", ",", "None", ")", "]", "if", "ch", "==", "'='", ":", "state", ".", "parse", "=", "ATTRIBUTE_VALUE", "return", "[", "(", "1", ",", "formats", "[", "'attr'", "]", ")", "]", "# Standalone attribute with no value", "state", ".", "parse", "=", "IN_OPENING_TAG", "state", ".", "attribute_name", "=", "None", "return", "[", "(", "0", ",", "None", ")", "]" ]
https://github.com/debian-calibre/calibre/blob/020fc81d3936a64b2ac51459ecb796666ab6a051/src/calibre/gui2/tweak_book/editor/syntax/html.py#L342-L353
Nuitka/Nuitka
39262276993757fa4e299f497654065600453fc9
nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Node/Alias.py
python
AliasNodeInfo.__setstate__
(self, state)
Restore the attributes from a pickled state.
Restore the attributes from a pickled state.
[ "Restore", "the", "attributes", "from", "a", "pickled", "state", "." ]
def __setstate__(self, state): """ Restore the attributes from a pickled state. """ # TODO check or discard version del state['_version_id'] for key, value in state.items(): if key not in ('__weakref__',): setattr(self, key, value)
[ "def", "__setstate__", "(", "self", ",", "state", ")", ":", "# TODO check or discard version", "del", "state", "[", "'_version_id'", "]", "for", "key", ",", "value", "in", "state", ".", "items", "(", ")", ":", "if", "key", "not", "in", "(", "'__weakref__'", ",", ")", ":", "setattr", "(", "self", ",", "key", ",", "value", ")" ]
https://github.com/Nuitka/Nuitka/blob/39262276993757fa4e299f497654065600453fc9/nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Node/Alias.py#L86-L94
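A minimal round-trip sketch of the pickling idiom, using a hypothetical class (the real AliasNodeInfo stamps a current version id in its own __getstate__): the version marker is added when pickling and discarded when restoring.

    import pickle

    class NodeInfo:
        def __getstate__(self):
            state = self.__dict__.copy()
            state['_version_id'] = 1          # stamp the pickled format version
            return state

        def __setstate__(self, state):
            del state['_version_id']          # check or discard the version
            for key, value in state.items():
                if key not in ('__weakref__',):
                    setattr(self, key, value)

    info = NodeInfo()
    info.csig = 'abc123'
    restored = pickle.loads(pickle.dumps(info))
    print(restored.csig)  # abc123 -- attributes survive, the version stamp does not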
projecthamster/hamster
19d160090de30e756bdc3122ff935bdaa86e2843
src/hamster/lib/datetime.py
python
Range.format
(self, default_day=None, explicit_none=True)
Return a string representing the time range. Start date is shown only if start does not belong to default_day. End date is shown only if end does not belong to the same hamster day as start (or to default_day if start is None).
Return a string representing the time range.
[ "Return", "a", "string", "representing", "the", "time", "range", "." ]
def format(self, default_day=None, explicit_none=True): """Return a string representing the time range. Start date is shown only if start does not belong to default_day. End date is shown only if end does not belong to the same hamster day as start (or to default_day if start is None). """ none_str = "--" if explicit_none else "" if self.start: if self.start.hday() != default_day: start_str = self.start.strftime(datetime.FMT) else: start_str = self.start.strftime(time.FMT) default_end_day = self.start.hday() else: start_str = none_str default_end_day = default_day if self.end: if self.end.hday() != default_end_day: end_str = self.end.strftime(datetime.FMT) else: end_str = self.end.strftime(time.FMT) else: end_str = none_str if end_str: return "{} - {}".format(start_str, end_str) else: return start_str
[ "def", "format", "(", "self", ",", "default_day", "=", "None", ",", "explicit_none", "=", "True", ")", ":", "none_str", "=", "\"--\"", "if", "explicit_none", "else", "\"\"", "if", "self", ".", "start", ":", "if", "self", ".", "start", ".", "hday", "(", ")", "!=", "default_day", ":", "start_str", "=", "self", ".", "start", ".", "strftime", "(", "datetime", ".", "FMT", ")", "else", ":", "start_str", "=", "self", ".", "start", ".", "strftime", "(", "time", ".", "FMT", ")", "default_end_day", "=", "self", ".", "start", ".", "hday", "(", ")", "else", ":", "start_str", "=", "none_str", "default_end_day", "=", "default_day", "if", "self", ".", "end", ":", "if", "self", ".", "end", ".", "hday", "(", ")", "!=", "default_end_day", ":", "end_str", "=", "self", ".", "end", ".", "strftime", "(", "datetime", ".", "FMT", ")", "else", ":", "end_str", "=", "self", ".", "end", ".", "strftime", "(", "time", ".", "FMT", ")", "else", ":", "end_str", "=", "none_str", "if", "end_str", ":", "return", "\"{} - {}\"", ".", "format", "(", "start_str", ",", "end_str", ")", "else", ":", "return", "start_str" ]
https://github.com/projecthamster/hamster/blob/19d160090de30e756bdc3122ff935bdaa86e2843/src/hamster/lib/datetime.py#L436-L467
openedx/edx-platform
68dd185a0ab45862a2a61e0f803d7e03d2be71b5
openedx/core/djangoapps/verified_track_content/models.py
python
VerifiedTrackCohortedCourse.verified_cohort_name_for_course
(cls, course_key)
Returns the given cohort name for the specific course. Args: course_key (CourseKey): a course key representing the course we want the verified cohort name for Returns: The cohort name if the course key has one associated to it. None otherwise.
Returns the given cohort name for the specific course.
[ "Returns", "the", "given", "cohort", "name", "for", "the", "specific", "course", "." ]
def verified_cohort_name_for_course(cls, course_key): """ Returns the given cohort name for the specific course. Args: course_key (CourseKey): a course key representing the course we want the verified cohort name for Returns: The cohort name if the course key has one associated to it. None otherwise. """ try: config = cls.objects.get(course_key=course_key) return config.verified_cohort_name except cls.DoesNotExist: return None
[ "def", "verified_cohort_name_for_course", "(", "cls", ",", "course_key", ")", ":", "try", ":", "config", "=", "cls", ".", "objects", ".", "get", "(", "course_key", "=", "course_key", ")", "return", "config", ".", "verified_cohort_name", "except", "cls", ".", "DoesNotExist", ":", "return", "None" ]
https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/openedx/core/djangoapps/verified_track_content/models.py#L116-L131
jgyates/genmon
2cb2ed2945f55cd8c259b09ccfa9a51e23f1341e
genmonlib/mycommon.py
python
MyCommon.StripJson
(self, InputString)
return InputString
[]
def StripJson(self, InputString): for char in '{}[]"': InputString = InputString.replace(char,'') return InputString
[ "def", "StripJson", "(", "self", ",", "InputString", ")", ":", "for", "char", "in", "'{}[]\"'", ":", "InputString", "=", "InputString", ".", "replace", "(", "char", ",", "''", ")", "return", "InputString" ]
https://github.com/jgyates/genmon/blob/2cb2ed2945f55cd8c259b09ccfa9a51e23f1341e/genmonlib/mycommon.py#L61-L64
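The loop above rescans the whole string once per character; str.translate does the same stripping in a single pass. A quick equivalence check:

    _JSON_CHARS = str.maketrans('', '', '{}[]"')

    def strip_json(input_string):
        return input_string.translate(_JSON_CHARS)

    raw = '{"engine": ["running", "ok"]}'
    print(strip_json(raw))  # engine: running, ok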
gammapy/gammapy
735b25cd5bbed35e2004d633621896dcd5295e8b
gammapy/stats/counts_statistic.py
python
WStatCountsStatistic.n_bkg
(self)
return self.alpha * self.n_off
Known background computed alpha * n_off
Known background computed alpha * n_off
[ "Known", "background", "computed", "alpha", "*", "n_off" ]
def n_bkg(self): """Known background computed alpha * n_off""" return self.alpha * self.n_off
[ "def", "n_bkg", "(", "self", ")", ":", "return", "self", ".", "alpha", "*", "self", ".", "n_off" ]
https://github.com/gammapy/gammapy/blob/735b25cd5bbed35e2004d633621896dcd5295e8b/gammapy/stats/counts_statistic.py#L238-L240
Flexget/Flexget
ffad58f206278abefc88d63a1ffaa80476fc4d98
update-changelog.py
python
MDChangeSet.parse_message
(self, message: str)
return found
Parses a git commit message and formats and adds any tagged messages to this changeset. Returns True if one or more changelog messages was found.
Parses a git commit message and formats and adds any tagged messages to this changeset. Returns True if one or more changelog messages was found.
[ "Parses", "a", "git", "commit", "message", "and", "formats", "and", "adds", "any", "tagged", "messages", "to", "this", "changeset", ".", "Returns", "True", "if", "one", "or", "more", "changelog", "messages", "was", "found", "." ]
def parse_message(self, message: str) -> bool: """ Parses a git commit message and formats and adds any tagged messages to this changeset. Returns True if one or more changelog messages was found. """ found = False for cat, item in self.change_items(message): found = True item = re.sub( r'#(\d{3,4})', r'[#\1](https://github.com/Flexget/Flexget/issues/\1)', item ) item = f'- {item}\n' self.sections.setdefault(cat, ['\n']).insert(0, item) return found
[ "def", "parse_message", "(", "self", ",", "message", ":", "str", ")", "->", "bool", ":", "found", "=", "False", "for", "cat", ",", "item", "in", "self", ".", "change_items", "(", "message", ")", ":", "found", "=", "True", "item", "=", "re", ".", "sub", "(", "r'#(\\d{3,4})'", ",", "r'[#\\1](https://github.com/Flexget/Flexget/issues/\\1)'", ",", "item", ")", "item", "=", "f'- {item}\\n'", "self", ".", "sections", ".", "setdefault", "(", "cat", ",", "[", "'\\n'", "]", ")", ".", "insert", "(", "0", ",", "item", ")", "return", "found" ]
https://github.com/Flexget/Flexget/blob/ffad58f206278abefc88d63a1ffaa80476fc4d98/update-changelog.py#L39-L52
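A minimal sketch of the issue-linking substitution parse_message applies to each changelog item; the commit text and issue number below are made up.

import re

item = "Fix crash when parsing empty feeds #1234"
linked = re.sub(
    r'#(\d{3,4})',
    r'[#\1](https://github.com/Flexget/Flexget/issues/\1)',
    item,
)
print(f'- {linked}')
# - Fix crash when parsing empty feeds [#1234](https://github.com/Flexget/Flexget/issues/1234)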
Chaffelson/nipyapi
d3b186fd701ce308c2812746d98af9120955e810
nipyapi/nifi/models/versioned_resource_definition.py
python
VersionedResourceDefinition.__repr__
(self)
return self.to_str()
For `print` and `pprint`
For `print` and `pprint`
[ "For", "print", "and", "pprint" ]
def __repr__(self): """ For `print` and `pprint` """ return self.to_str()
[ "def", "__repr__", "(", "self", ")", ":", "return", "self", ".", "to_str", "(", ")" ]
https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/nifi/models/versioned_resource_definition.py#L147-L151
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/features/pkg_systems.py
python
PackageSystem._is_present
(self)
return self in package_systems()
r""" Test whether ``self`` appears in the list of available package systems. EXAMPLES:: sage: from sage.features.pkg_systems import PackageSystem sage: debian = PackageSystem('debian') sage: debian.is_present() # indirect doctest, random True
r""" Test whether ``self`` appears in the list of available package systems.
[ "r", "Test", "whether", "self", "appears", "in", "the", "list", "of", "available", "package", "systems", "." ]
def _is_present(self): r""" Test whether ``self`` appears in the list of available package systems. EXAMPLES:: sage: from sage.features.pkg_systems import PackageSystem sage: debian = PackageSystem('debian') sage: debian.is_present() # indirect doctest, random True """ from . import package_systems return self in package_systems()
[ "def", "_is_present", "(", "self", ")", ":", "from", ".", "import", "package_systems", "return", "self", "in", "package_systems", "(", ")" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/features/pkg_systems.py#L17-L29
Pyomo/pyomo
dbd4faee151084f343b893cc2b0c04cf2b76fd92
pyomo/core/base/constraint.py
python
simple_constraintlist_rule
(rule)
return rule_wrapper(rule, { None: ConstraintList.End, True: Constraint.Feasible, False: Constraint.Infeasible, })
This is a decorator that translates None/True/False return values into ConstraintList.End/Constraint.Feasible/Constraint.Infeasible. This supports a simpler syntax in constraint rules, though these can be more difficult to debug when errors occur. Example use: @simple_constraintlist_rule def C_rule(model, i, j): ... model.c = ConstraintList(expr=simple_constraintlist_rule(...))
This is a decorator that translates None/True/False return values into ConstraintList.End/Constraint.Feasible/Constraint.Infeasible. This supports a simpler syntax in constraint rules, though these can be more difficult to debug when errors occur.
[ "This", "is", "a", "decorator", "that", "translates", "None", "/", "True", "/", "False", "return", "values", "into", "ConstraintList", ".", "End", "/", "Constraint", ".", "Feasible", "/", "Constraint", ".", "Infeasible", ".", "This", "supports", "a", "simpler", "syntax", "in", "constraint", "rules", "though", "these", "can", "be", "more", "difficult", "to", "debug", "when", "errors", "occur", "." ]
def simple_constraintlist_rule(rule): """ This is a decorator that translates None/True/False return values into ConstraintList.End/Constraint.Feasible/Constraint.Infeasible. This supports a simpler syntax in constraint rules, though these can be more difficult to debug when errors occur. Example use: @simple_constraintlist_rule def C_rule(model, i, j): ... model.c = ConstraintList(expr=simple_constraintlist_rule(...)) """ return rule_wrapper(rule, { None: ConstraintList.End, True: Constraint.Feasible, False: Constraint.Infeasible, })
[ "def", "simple_constraintlist_rule", "(", "rule", ")", ":", "return", "rule_wrapper", "(", "rule", ",", "{", "None", ":", "ConstraintList", ".", "End", ",", "True", ":", "Constraint", ".", "Feasible", ",", "False", ":", "Constraint", ".", "Infeasible", ",", "}", ")" ]
https://github.com/Pyomo/pyomo/blob/dbd4faee151084f343b893cc2b0c04cf2b76fd92/pyomo/core/base/constraint.py#L75-L94
biolab/orange3
41685e1c7b1d1babe680113685a2d44bcc9fec0b
Orange/widgets/utils/buttons.py
python
tooltip_with_shortcut
(tool_tip, shortcut: QKeySequence)
return "&nbsp;&nbsp;".join(text)
[]
def tooltip_with_shortcut(tool_tip, shortcut: QKeySequence) -> str: text = [] if tool_tip: text.append("<span>{}</span>".format(tool_tip)) if not shortcut.isEmpty(): text.append("<kbd>{}</kbd>" .format(shortcut.toString(QKeySequence.NativeText))) return "&nbsp;&nbsp;".join(text)
[ "def", "tooltip_with_shortcut", "(", "tool_tip", ",", "shortcut", ":", "QKeySequence", ")", "->", "str", ":", "text", "=", "[", "]", "if", "tool_tip", ":", "text", ".", "append", "(", "\"<span>{}</span>\"", ".", "format", "(", "tool_tip", ")", ")", "if", "not", "shortcut", ".", "isEmpty", "(", ")", ":", "text", ".", "append", "(", "\"<kbd>{}</kbd>\"", ".", "format", "(", "shortcut", ".", "toString", "(", "QKeySequence", ".", "NativeText", ")", ")", ")", "return", "\"&nbsp;&nbsp;\"", ".", "join", "(", "text", ")" ]
https://github.com/biolab/orange3/blob/41685e1c7b1d1babe680113685a2d44bcc9fec0b/Orange/widgets/utils/buttons.py#L43-L50
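A hedged usage sketch, assuming the tooltip_with_shortcut function above is in scope and PyQt5 provides the Qt binding (Orange can use other bindings); the tooltip text is invented, and the NativeText rendering varies by platform.

from PyQt5.QtGui import QKeySequence

html = tooltip_with_shortcut("Save the file", QKeySequence("Ctrl+S"))
print(html)
# e.g. <span>Save the file</span>&nbsp;&nbsp;<kbd>Ctrl+S</kbd>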
barrycarey/Plex-Data-Collector-For-InfluxDB
d916591929789d400f10695afaf7e53faa16bd49
plexcollector/PlexInfluxdbCollector.py
python
PlexInfluxdbCollector._process_active_streams
(self, stream_data)
Take an object of stream data and create Influx JSON data :param stream_data: :return:
Take an object of stream data and create Influx JSON data :param stream_data: :return:
[ "Take", "an", "object", "of", "stream", "data", "and", "create", "Influx", "JSON", "data", ":", "param", "stream_data", ":", ":", "return", ":" ]
def _process_active_streams(self, stream_data): """ Take an object of stream data and create Influx JSON data :param stream_data: :return: """ log.info('Processing Active Streams') combined_streams = 0 session_ids = [] # Active Session IDs for this run for host, streams in stream_data.items(): combined_streams += len(streams) # Record total streams total_stream_points = [ { 'measurement': 'active_streams', 'fields': { 'active_streams': len(streams) }, 'tags': { 'host': host } } ] self.write_influx_data(total_stream_points) for stream in streams: player = stream.players[0] user = stream.usernames[0] session_id = stream.session[0].id transcode = stream.transcodeSessions if stream.transcodeSessions else None session_ids.append(session_id) if session_id in self.active_streams: start_time = self.active_streams[session_id]['start_time'] else: start_time = time.time() self.active_streams[session_id] = {} self.active_streams[session_id]['start_time'] = start_time if stream.type == 'movie': media_type = 'Movie' elif stream.type == 'episode': media_type = 'TV Show' elif stream.type == 'track': media_type = 'Music' else: media_type = 'Unknown' # Build the title. TV and Music Have a root title plus episode/track name. Movies don't if hasattr(stream, 'grandparentTitle'): full_title = stream.grandparentTitle + ' - ' + stream.title else: full_title = stream.title if media_type != 'Music': resolution = stream.media[0].videoResolution else: resolution = str(stream.media[0].bitrate) + 'Kbps' playing_points = [ { 'measurement': 'now_playing', 'fields': { 'stream_title': full_title, 'player': player.title, 'state': player.state, 'user': user, 'resolution': resolution, 'media_type': media_type, 'playback': 'transcode' if transcode else 'direct', 'duration': time.time() - start_time, }, 'tags': { 'host': host, 'player_address': player.address, 'session_id': session_id } } ] self.write_influx_data(playing_points) if config.report_combined: combined_stream_points = [ { 'measurement': 'active_streams', 'fields': { 'active_streams': combined_streams }, 'tags': { 'host': 'All' } } ] self.write_influx_data(combined_stream_points) self._remove_dead_streams(session_ids)
[ "def", "_process_active_streams", "(", "self", ",", "stream_data", ")", ":", "log", ".", "info", "(", "'Processing Active Streams'", ")", "combined_streams", "=", "0", "session_ids", "=", "[", "]", "# Active Session IDs for this run", "for", "host", ",", "streams", "in", "stream_data", ".", "items", "(", ")", ":", "combined_streams", "+=", "len", "(", "streams", ")", "# Record total streams", "total_stream_points", "=", "[", "{", "'measurement'", ":", "'active_streams'", ",", "'fields'", ":", "{", "'active_streams'", ":", "len", "(", "streams", ")", "}", ",", "'tags'", ":", "{", "'host'", ":", "host", "}", "}", "]", "self", ".", "write_influx_data", "(", "total_stream_points", ")", "for", "stream", "in", "streams", ":", "player", "=", "stream", ".", "players", "[", "0", "]", "user", "=", "stream", ".", "usernames", "[", "0", "]", "session_id", "=", "stream", ".", "session", "[", "0", "]", ".", "id", "transcode", "=", "stream", ".", "transcodeSessions", "if", "stream", ".", "transcodeSessions", "else", "None", "session_ids", ".", "append", "(", "session_id", ")", "if", "session_id", "in", "self", ".", "active_streams", ":", "start_time", "=", "self", ".", "active_streams", "[", "session_id", "]", "[", "'start_time'", "]", "else", ":", "start_time", "=", "time", ".", "time", "(", ")", "self", ".", "active_streams", "[", "session_id", "]", "=", "{", "}", "self", ".", "active_streams", "[", "session_id", "]", "[", "'start_time'", "]", "=", "start_time", "if", "stream", ".", "type", "==", "'movie'", ":", "media_type", "=", "'Movie'", "elif", "stream", ".", "type", "==", "'episode'", ":", "media_type", "=", "'TV Show'", "elif", "stream", ".", "type", "==", "'track'", ":", "media_type", "=", "'Music'", "else", ":", "media_type", "=", "'Unknown'", "# Build the title. TV and Music Have a root title plus episode/track name. Movies don't", "if", "hasattr", "(", "stream", ",", "'grandparentTitle'", ")", ":", "full_title", "=", "stream", ".", "grandparentTitle", "+", "' - '", "+", "stream", ".", "title", "else", ":", "full_title", "=", "stream", ".", "title", "if", "media_type", "!=", "'Music'", ":", "resolution", "=", "stream", ".", "media", "[", "0", "]", ".", "videoResolution", "else", ":", "resolution", "=", "str", "(", "stream", ".", "media", "[", "0", "]", ".", "bitrate", ")", "+", "'Kbps'", "playing_points", "=", "[", "{", "'measurement'", ":", "'now_playing'", ",", "'fields'", ":", "{", "'stream_title'", ":", "full_title", ",", "'player'", ":", "player", ".", "title", ",", "'state'", ":", "player", ".", "state", ",", "'user'", ":", "user", ",", "'resolution'", ":", "resolution", ",", "'media_type'", ":", "media_type", ",", "'playback'", ":", "'transcode'", "if", "transcode", "else", "'direct'", ",", "'duration'", ":", "time", ".", "time", "(", ")", "-", "start_time", ",", "}", ",", "'tags'", ":", "{", "'host'", ":", "host", ",", "'player_address'", ":", "player", ".", "address", ",", "'session_id'", ":", "session_id", "}", "}", "]", "self", ".", "write_influx_data", "(", "playing_points", ")", "if", "config", ".", "report_combined", ":", "combined_stream_points", "=", "[", "{", "'measurement'", ":", "'active_streams'", ",", "'fields'", ":", "{", "'active_streams'", ":", "combined_streams", "}", ",", "'tags'", ":", "{", "'host'", ":", "'All'", "}", "}", "]", "self", ".", "write_influx_data", "(", "combined_stream_points", ")", "self", ".", "_remove_dead_streams", "(", "session_ids", ")" ]
https://github.com/barrycarey/Plex-Data-Collector-For-InfluxDB/blob/d916591929789d400f10695afaf7e53faa16bd49/plexcollector/PlexInfluxdbCollector.py#L156-L258
awslabs/aws-servicebroker
c301912e7df3a2f09a9c34d3ae7ffe67c55aa3a0
sample-apps/rds/sample-app/src/pymysql/cursors.py
python
SSCursor.fetchmany
(self, size=None)
return rows
Fetch many
Fetch many
[ "Fetch", "many" ]
def fetchmany(self, size=None): """Fetch many""" self._check_executed() if size is None: size = self.arraysize rows = [] for i in range_type(size): row = self.read_next() if row is None: self._show_warnings() break rows.append(row) self.rownumber += 1 return rows
[ "def", "fetchmany", "(", "self", ",", "size", "=", "None", ")", ":", "self", ".", "_check_executed", "(", ")", "if", "size", "is", "None", ":", "size", "=", "self", ".", "arraysize", "rows", "=", "[", "]", "for", "i", "in", "range_type", "(", "size", ")", ":", "row", "=", "self", ".", "read_next", "(", ")", "if", "row", "is", "None", ":", "self", ".", "_show_warnings", "(", ")", "break", "rows", ".", "append", "(", "row", ")", "self", ".", "rownumber", "+=", "1", "return", "rows" ]
https://github.com/awslabs/aws-servicebroker/blob/c301912e7df3a2f09a9c34d3ae7ffe67c55aa3a0/sample-apps/rds/sample-app/src/pymysql/cursors.py#L478-L492
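A hedged usage sketch of fetchmany with a server-side cursor; the connection parameters and table are placeholders and a live MySQL server is assumed. SSCursor streams the result set rather than buffering it, so reading in fixed-size batches keeps memory flat.

import pymysql

conn = pymysql.connect(host="localhost", user="app", password="secret",
                       database="demo", cursorclass=pymysql.cursors.SSCursor)
with conn.cursor() as cur:
    cur.execute("SELECT id, name FROM big_table")
    while True:
        batch = cur.fetchmany(500)  # at most 500 rows per call
        if not batch:
            break
        for row in batch:
            pass  # process one row at a time
conn.close()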
gpodder/mygpo
7a028ad621d05d4ca0d58fd22fb92656c8835e43
mygpo/users/subscriptions.py
python
subscription_changes
(device_id, podcast_states, since, until)
return add, rem
returns subscription changes for the device and podcast states
returns subscription changes for the device and podcast states
[ "returns", "subscription", "changes", "for", "the", "device", "and", "podcast", "states" ]
def subscription_changes(device_id, podcast_states, since, until): """returns subscription changes for the device and podcast states""" add, rem = [], [] for p_state in podcast_states: change = p_state.get_change_between(device_id, since, until) if change == "subscribe": add.append(p_state.ref_url) elif change == "unsubscribe": rem.append(p_state.ref_url) return add, rem
[ "def", "subscription_changes", "(", "device_id", ",", "podcast_states", ",", "since", ",", "until", ")", ":", "add", ",", "rem", "=", "[", "]", ",", "[", "]", "for", "p_state", "in", "podcast_states", ":", "change", "=", "p_state", ".", "get_change_between", "(", "device_id", ",", "since", ",", "until", ")", "if", "change", "==", "\"subscribe\"", ":", "add", ".", "append", "(", "p_state", ".", "ref_url", ")", "elif", "change", "==", "\"unsubscribe\"", ":", "rem", ".", "append", "(", "p_state", ".", "ref_url", ")", "return", "add", ",", "rem" ]
https://github.com/gpodder/mygpo/blob/7a028ad621d05d4ca0d58fd22fb92656c8835e43/mygpo/users/subscriptions.py#L59-L70
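To see the bookkeeping in isolation, here is a minimal runnable sketch; FakePodcastState is an invented stand-in for mygpo's podcast state model, and subscription_changes is assumed to be the function above.

class FakePodcastState:
    # Invented stand-in: just enough surface for subscription_changes.
    def __init__(self, ref_url, change):
        self.ref_url = ref_url
        self._change = change

    def get_change_between(self, device_id, since, until):
        return self._change

states = [
    FakePodcastState("http://example.com/a.xml", "subscribe"),
    FakePodcastState("http://example.com/b.xml", "unsubscribe"),
    FakePodcastState("http://example.com/c.xml", None),
]
add, rem = subscription_changes("dev1", states, since=0, until=10)
print(add, rem)  # ['http://example.com/a.xml'] ['http://example.com/b.xml']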
securityclippy/elasticintel
aa08d3e9f5ab1c000128e95161139ce97ff0e334
whois_lambda/requests/utils.py
python
select_proxy
(url, proxies)
return proxy
Select a proxy for the url, if applicable. :param url: The url being requested :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
Select a proxy for the url, if applicable.
[ "Select", "a", "proxy", "for", "the", "url", "if", "applicable", "." ]
def select_proxy(url, proxies): """Select a proxy for the url, if applicable. :param url: The url being requested :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs """ proxies = proxies or {} urlparts = urlparse(url) proxy = proxies.get(urlparts.scheme+'://'+urlparts.hostname) if proxy is None: proxy = proxies.get(urlparts.scheme) return proxy
[ "def", "select_proxy", "(", "url", ",", "proxies", ")", ":", "proxies", "=", "proxies", "or", "{", "}", "urlparts", "=", "urlparse", "(", "url", ")", "proxy", "=", "proxies", ".", "get", "(", "urlparts", ".", "scheme", "+", "'://'", "+", "urlparts", ".", "hostname", ")", "if", "proxy", "is", "None", ":", "proxy", "=", "proxies", ".", "get", "(", "urlparts", ".", "scheme", ")", "return", "proxy" ]
https://github.com/securityclippy/elasticintel/blob/aa08d3e9f5ab1c000128e95161139ce97ff0e334/whois_lambda/requests/utils.py#L567-L578
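The scheme+host key takes precedence over the bare scheme key. A runnable sketch, assuming select_proxy above is in scope (the vendored module imports urlparse from its compat shim; the stdlib import below is the Python 3 equivalent), with invented proxy hosts.

from urllib.parse import urlparse  # Python 3 equivalent of the vendored compat import

proxies = {
    "http": "http://proxy.corp:3128",
    "https://internal.example.com": "http://dmz-proxy.corp:3128",
}
print(select_proxy("https://internal.example.com/api", proxies))
# http://dmz-proxy.corp:3128  (host-specific entry wins)
print(select_proxy("http://pypi.org/simple", proxies))
# http://proxy.corp:3128  (falls back to the bare scheme)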
googleads/google-ads-python
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
google/ads/googleads/v9/services/services/campaign_criterion_service/client.py
python
CampaignCriterionServiceClient.common_folder_path
(folder: str,)
return "folders/{folder}".format(folder=folder,)
Return a fully-qualified folder string.
Return a fully-qualified folder string.
[ "Return", "a", "fully", "-", "qualified", "folder", "string", "." ]
def common_folder_path(folder: str,) -> str: """Return a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,)
[ "def", "common_folder_path", "(", "folder", ":", "str", ",", ")", "->", "str", ":", "return", "\"folders/{folder}\"", ".", "format", "(", "folder", "=", "folder", ",", ")" ]
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v9/services/services/campaign_criterion_service/client.py#L230-L232
pretix/pretix
96f694cf61345f54132cd26cdeb07d5d11b34232
src/pretix/base/models/orders.py
python
OrderPayment.payment_provider
(self)
return self.order.event.get_payment_providers(cached=True).get(self.provider)
Cached access to an instance of the payment provider in use.
Cached access to an instance of the payment provider in use.
[ "Cached", "access", "to", "an", "instance", "of", "the", "payment", "provider", "in", "use", "." ]
def payment_provider(self): """ Cached access to an instance of the payment provider in use. """ return self.order.event.get_payment_providers(cached=True).get(self.provider)
[ "def", "payment_provider", "(", "self", ")", ":", "return", "self", ".", "order", ".", "event", ".", "get_payment_providers", "(", "cached", "=", "True", ")", ".", "get", "(", "self", ".", "provider", ")" ]
https://github.com/pretix/pretix/blob/96f694cf61345f54132cd26cdeb07d5d11b34232/src/pretix/base/models/orders.py#L1553-L1557
kubeflow/pipelines
bea751c9259ff0ae85290f873170aae89284ba8e
sdk/python/kfp/v2/components/tasks_group.py
python
TasksGroup.remove_task_recursive
(self, task: pipeline_task.PipelineTask)
Removes a task from the group recursively.
Removes a task from the group recursively.
[ "Removes", "a", "task", "from", "the", "group", "recursively", "." ]
def remove_task_recursive(self, task: pipeline_task.PipelineTask): """Removes a task from the group recursively.""" if self.tasks and task in self.tasks: self.tasks.remove(task) for group in self.groups or []: group.remove_task_recursive(task)
[ "def", "remove_task_recursive", "(", "self", ",", "task", ":", "pipeline_task", ".", "PipelineTask", ")", ":", "if", "self", ".", "tasks", "and", "task", "in", "self", ".", "tasks", ":", "self", ".", "tasks", ".", "remove", "(", "task", ")", "for", "group", "in", "self", ".", "groups", "or", "[", "]", ":", "group", ".", "remove_task_recursive", "(", "task", ")" ]
https://github.com/kubeflow/pipelines/blob/bea751c9259ff0ae85290f873170aae89284ba8e/sdk/python/kfp/v2/components/tasks_group.py#L88-L93
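A minimal runnable sketch of the recursion with an invented stand-in class (not the real kfp TasksGroup), showing that removal reaches nested groups.

class FakeGroup:
    # Invented stand-in mirroring the method above.
    def __init__(self, tasks=None, groups=None):
        self.tasks = tasks or []
        self.groups = groups or []

    def remove_task_recursive(self, task):
        if self.tasks and task in self.tasks:
            self.tasks.remove(task)
        for group in self.groups or []:
            group.remove_task_recursive(task)

inner = FakeGroup(tasks=["train"])
outer = FakeGroup(tasks=["train", "eval"], groups=[inner])
outer.remove_task_recursive("train")
print(outer.tasks, inner.tasks)  # ['eval'] []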
pymeasure/pymeasure
b4d888e9ead85ef7f7af0031f2dbb44c9ce1825e
pymeasure/instruments/attocube/adapters.py
python
AttocubeConsoleAdapter.check_acknowledgement
(self, reply, msg="")
Checks that the last reply of the instrument is 'OK'; otherwise a ValueError is raised. :param reply: last reply string of the instrument :param msg: optional message for the eventual error
Checks that the last reply of the instrument is 'OK'; otherwise a ValueError is raised.
[ "Checks", "that", "the", "last", "reply", "of", "the", "instrument", "is", "OK", "otherwise", "a", "ValueError", "is", "raised", "." ]
def check_acknowledgement(self, reply, msg=""): """ Checks that the last reply of the instrument is 'OK'; otherwise a ValueError is raised. :param reply: last reply string of the instrument :param msg: optional message for the eventual error """ if reply != 'OK': if msg == "": # clear buffer msg = reply super().read() raise ValueError("AttocubeConsoleAdapter: Error after command " f"{self.lastcommand} with message {msg}")
[ "def", "check_acknowledgement", "(", "self", ",", "reply", ",", "msg", "=", "\"\"", ")", ":", "if", "reply", "!=", "'OK'", ":", "if", "msg", "==", "\"\"", ":", "# clear buffer", "msg", "=", "reply", "super", "(", ")", ".", "read", "(", ")", "raise", "ValueError", "(", "\"AttocubeConsoleAdapter: Error after command \"", "f\"{self.lastcommand} with message {msg}\"", ")" ]
https://github.com/pymeasure/pymeasure/blob/b4d888e9ead85ef7f7af0031f2dbb44c9ce1825e/pymeasure/instruments/attocube/adapters.py#L74-L86
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pip/_vendor/html5lib/_tokenizer.py
python
HTMLTokenizer.afterDoctypeNameState
(self)
return True
[]
def afterDoctypeNameState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.currentToken["correct"] = False self.stream.unget(data) self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: if data in ("p", "P"): matched = True for expected in (("u", "U"), ("b", "B"), ("l", "L"), ("i", "I"), ("c", "C")): data = self.stream.char() if data not in expected: matched = False break if matched: self.state = self.afterDoctypePublicKeywordState return True elif data in ("s", "S"): matched = True for expected in (("y", "Y"), ("s", "S"), ("t", "T"), ("e", "E"), ("m", "M")): data = self.stream.char() if data not in expected: matched = False break if matched: self.state = self.afterDoctypeSystemKeywordState return True # All the characters read before the current 'data' will be # [a-zA-Z], so they're garbage in the bogus doctype and can be # discarded; only the latest character might be '>' or EOF # and needs to be ungetted self.stream.unget(data) self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-space-or-right-bracket-in-doctype", "datavars": {"data": data}}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True
[ "def", "afterDoctypeNameState", "(", "self", ")", ":", "data", "=", "self", ".", "stream", ".", "char", "(", ")", "if", "data", "in", "spaceCharacters", ":", "pass", "elif", "data", "==", "\">\"", ":", "self", ".", "tokenQueue", ".", "append", "(", "self", ".", "currentToken", ")", "self", ".", "state", "=", "self", ".", "dataState", "elif", "data", "is", "EOF", ":", "self", ".", "currentToken", "[", "\"correct\"", "]", "=", "False", "self", ".", "stream", ".", "unget", "(", "data", ")", "self", ".", "tokenQueue", ".", "append", "(", "{", "\"type\"", ":", "tokenTypes", "[", "\"ParseError\"", "]", ",", "\"data\"", ":", "\"eof-in-doctype\"", "}", ")", "self", ".", "tokenQueue", ".", "append", "(", "self", ".", "currentToken", ")", "self", ".", "state", "=", "self", ".", "dataState", "else", ":", "if", "data", "in", "(", "\"p\"", ",", "\"P\"", ")", ":", "matched", "=", "True", "for", "expected", "in", "(", "(", "\"u\"", ",", "\"U\"", ")", ",", "(", "\"b\"", ",", "\"B\"", ")", ",", "(", "\"l\"", ",", "\"L\"", ")", ",", "(", "\"i\"", ",", "\"I\"", ")", ",", "(", "\"c\"", ",", "\"C\"", ")", ")", ":", "data", "=", "self", ".", "stream", ".", "char", "(", ")", "if", "data", "not", "in", "expected", ":", "matched", "=", "False", "break", "if", "matched", ":", "self", ".", "state", "=", "self", ".", "afterDoctypePublicKeywordState", "return", "True", "elif", "data", "in", "(", "\"s\"", ",", "\"S\"", ")", ":", "matched", "=", "True", "for", "expected", "in", "(", "(", "\"y\"", ",", "\"Y\"", ")", ",", "(", "\"s\"", ",", "\"S\"", ")", ",", "(", "\"t\"", ",", "\"T\"", ")", ",", "(", "\"e\"", ",", "\"E\"", ")", ",", "(", "\"m\"", ",", "\"M\"", ")", ")", ":", "data", "=", "self", ".", "stream", ".", "char", "(", ")", "if", "data", "not", "in", "expected", ":", "matched", "=", "False", "break", "if", "matched", ":", "self", ".", "state", "=", "self", ".", "afterDoctypeSystemKeywordState", "return", "True", "# All the characters read before the current 'data' will be", "# [a-zA-Z], so they're garbage in the bogus doctype and can be", "# discarded; only the latest character might be '>' or EOF", "# and needs to be ungetted", "self", ".", "stream", ".", "unget", "(", "data", ")", "self", ".", "tokenQueue", ".", "append", "(", "{", "\"type\"", ":", "tokenTypes", "[", "\"ParseError\"", "]", ",", "\"data\"", ":", "\"expected-space-or-right-bracket-in-doctype\"", ",", "\"datavars\"", ":", "{", "\"data\"", ":", "data", "}", "}", ")", "self", ".", "currentToken", "[", "\"correct\"", "]", "=", "False", "self", ".", "state", "=", "self", ".", "bogusDoctypeState", "return", "True" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pip/_vendor/html5lib/_tokenizer.py#L1359-L1408
LinOTP/LinOTP
bb3940bbaccea99550e6c063ff824f258dd6d6d7
linotp/useridresolver/SQLIdResolver.py
python
make_connect
(driver, user, pass_, server, port, db, conParams="")
return connect
create a connect string from dedicated parts - to build a SQLAlchemy Uri :param driver: mysql, postgres, ... :type driver: string :param user: database connection user :type user: string :param pass_: password of the database user :type pass_: string :param server: servername :type server: string :param port: database connection port :type port: string or int :param db: database name :type db: string :param conParams: additional connection parameters :type conParams: string
create a connect string from dedicated parts - to build a SQLAlchemy Uri
[ "create", "a", "connect", "string", "from", "dedicated", "parts", "-", "to", "build", "a", "SQLAlchemy", "Uri" ]
def make_connect(driver, user, pass_, server, port, db, conParams=""): """ create a connect string from dedicated parts - to build a SQLAlchemy Uri :param driver: mysql, postgres, ... :type driver: string :param user: database connection user :type user: string :param pass_: password of the database user :type pass_: string :param server: servername :type server: string :param port: database connection port :type port: string or int :param db: database name :type db: string :param conParams: additional connection parameters :type conParams: string """ connect = "" if "?odbc_connect=" in driver: # we have the need to support the odbc_connect mode # where the parameters of the drivers will be concatenated # The template for the odbc_connect string is submitted # in the field "Additional connection parameters:" param_str = conParams settings = {} settings["{PORT}"] = port if user: user = user.strip() settings["{DBUSER}"] = user if server: server = server.strip() settings["{SERVER}"] = server settings["{PASSWORT}"] = pass_ settings["{DATABASE}"] = db for key, value in list(settings.items()): param_str = param_str.replace(key, value) url_quote = urllib.parse.quote_plus(param_str) connect = "%s%s" % (driver, url_quote) else: connect = build_simple_connect( driver, user, pass_, server, port, db, conParams ) return connect
[ "def", "make_connect", "(", "driver", ",", "user", ",", "pass_", ",", "server", ",", "port", ",", "db", ",", "conParams", "=", "\"\"", ")", ":", "connect", "=", "\"\"", "if", "\"?odbc_connect=\"", "in", "driver", ":", "# we have the need to support the odbc_connect mode", "# where the parameters of the drivers will be concatenated", "# The template for the odbc_connect string is submitted", "# in the field \"Additional connection parameters:\"", "param_str", "=", "conParams", "settings", "=", "{", "}", "settings", "[", "\"{PORT}\"", "]", "=", "port", "if", "user", ":", "user", "=", "user", ".", "strip", "(", ")", "settings", "[", "\"{DBUSER}\"", "]", "=", "user", "if", "server", ":", "server", "=", "server", ".", "strip", "(", ")", "settings", "[", "\"{SERVER}\"", "]", "=", "server", "settings", "[", "\"{PASSWORT}\"", "]", "=", "pass_", "settings", "[", "\"{DATABASE}\"", "]", "=", "db", "for", "key", ",", "value", "in", "list", "(", "settings", ".", "items", "(", ")", ")", ":", "param_str", "=", "param_str", ".", "replace", "(", "key", ",", "value", ")", "url_quote", "=", "urllib", ".", "parse", ".", "quote_plus", "(", "param_str", ")", "connect", "=", "\"%s%s\"", "%", "(", "driver", ",", "url_quote", ")", "else", ":", "connect", "=", "build_simple_connect", "(", "driver", ",", "user", ",", "pass_", ",", "server", ",", "port", ",", "db", ",", "conParams", ")", "return", "connect" ]
https://github.com/LinOTP/LinOTP/blob/bb3940bbaccea99550e6c063ff824f258dd6d6d7/linotp/useridresolver/SQLIdResolver.py#L187-L239
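A runnable sketch of just the odbc_connect branch, with an invented driver string, template, and credentials; it mirrors the placeholder substitution and URL-quoting above.

import urllib.parse

driver = "mssql+pyodbc:///?odbc_connect="
param_str = ("DRIVER={FreeTDS};SERVER={SERVER},{PORT};"
             "DATABASE={DATABASE};UID={DBUSER};PWD={PASSWORT}")
settings = {"{PORT}": "1433", "{DBUSER}": "linotp", "{SERVER}": "db.example.org",
            "{PASSWORT}": "s3cret", "{DATABASE}": "users"}
for key, value in settings.items():
    param_str = param_str.replace(key, value)
connect = driver + urllib.parse.quote_plus(param_str)
print(connect)  # mssql+pyodbc:///?odbc_connect=DRIVER%3D%7BFreeTDS%7D%3BSERVER%3D...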
jliljebl/flowblade
995313a509b80e99eb1ad550d945bdda5995093b
flowblade-trunk/Flowblade/projectaction.py
python
_columns_count_item_selected
(w, data)
[]
def _columns_count_item_selected(w, data): gui.editor_window.media_list_view.columns_changed(data)
[ "def", "_columns_count_item_selected", "(", "w", ",", "data", ")", ":", "gui", ".", "editor_window", ".", "media_list_view", ".", "columns_changed", "(", "data", ")" ]
https://github.com/jliljebl/flowblade/blob/995313a509b80e99eb1ad550d945bdda5995093b/flowblade-trunk/Flowblade/projectaction.py#L1377-L1378
rlgraph/rlgraph
428fc136a9a075f29a397495b4226a491a287be2
rlgraph/spaces/containers.py
python
ContainerSpace.sample
(self, size=None, fill_value=None, horizontal=False)
Child classes must overwrite this one again with support for the `horizontal` parameter. Args: horizontal (bool): False: Within this container, sample each child-space `size` times. True: Produce `size` single containers in an np.array of len `size`.
Child classes must overwrite this one again with support for the `horizontal` parameter.
[ "Child", "classes", "must", "overwrite", "this", "one", "again", "with", "support", "for", "the", "horizontal", "parameter", "." ]
def sample(self, size=None, fill_value=None, horizontal=False): """ Child classes must overwrite this one again with support for the `horizontal` parameter. Args: horizontal (bool): False: Within this container, sample each child-space `size` times. True: Produce `size` single containers in an np.array of len `size`. """ raise NotImplementedError
[ "def", "sample", "(", "self", ",", "size", "=", "None", ",", "fill_value", "=", "None", ",", "horizontal", "=", "False", ")", ":", "raise", "NotImplementedError" ]
https://github.com/rlgraph/rlgraph/blob/428fc136a9a075f29a397495b4226a491a287be2/rlgraph/spaces/containers.py#L29-L37
kbandla/ImmunityDebugger
2abc03fb15c8f3ed0914e1175c4d8933977c73e3
1.73/Libs/pefile.py
python
COFF.set_qword_at_rva
(self, rva, qword)
return self.set_bytes_at_rva(rva, self.get_data_from_qword(qword))
Set the quad-word value at the file offset corresponding to the given RVA.
Set the quad-word value at the file offset corresponding to the given RVA.
[ "Set", "the", "quad", "-", "word", "value", "at", "the", "file", "offset", "corresponding", "to", "the", "given", "RVA", "." ]
def set_qword_at_rva(self, rva, qword): """Set the quad-word value at the file offset corresponding to the given RVA.""" return self.set_bytes_at_rva(rva, self.get_data_from_qword(qword))
[ "def", "set_qword_at_rva", "(", "self", ",", "rva", ",", "qword", ")", ":", "return", "self", ".", "set_bytes_at_rva", "(", "rva", ",", "self", ".", "get_data_from_qword", "(", "qword", ")", ")" ]
https://github.com/kbandla/ImmunityDebugger/blob/2abc03fb15c8f3ed0914e1175c4d8933977c73e3/1.73/Libs/pefile.py#L3727-L3729
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/build/lib.linux-x86_64-2.7/flaskbb/utils/helpers.py
python
anonymous_required
(f)
return wrapper
[]
def anonymous_required(f): @wraps(f) def wrapper(*a, **k): if current_user is not None and current_user.is_authenticated: return redirect_or_next(url_for('forum.index')) return f(*a, **k) return wrapper
[ "def", "anonymous_required", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "a", ",", "*", "*", "k", ")", ":", "if", "current_user", "is", "not", "None", "and", "current_user", ".", "is_authenticated", ":", "return", "redirect_or_next", "(", "url_for", "(", "'forum.index'", ")", ")", "return", "f", "(", "*", "a", ",", "*", "*", "k", ")", "return", "wrapper" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/build/lib.linux-x86_64-2.7/flaskbb/utils/helpers.py#L715-L723
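A hedged usage sketch: applying the decorator to a login view so that already-authenticated users are redirected away. The blueprint and route names below are illustrative, and a Flask-Login current_user is assumed in the request context.

from flask import Blueprint

auth = Blueprint("auth", __name__)

@auth.route("/login", methods=["GET", "POST"])
@anonymous_required
def login():
    ...  # only reached when current_user is not authenticated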
PaddlePaddle/PaddleX
2bab73f81ab54e328204e7871e6ae4a82e719f5d
paddlex/ppcls/arch/backbone/model_zoo/dla.py
python
DLA.__init__
(self, levels, channels, in_chans=3, cardinality=1, base_width=64, block=DlaBottleneck, residual_root=False, drop_rate=0.0, class_num=1000, with_pool=True)
[]
def __init__(self, levels, channels, in_chans=3, cardinality=1, base_width=64, block=DlaBottleneck, residual_root=False, drop_rate=0.0, class_num=1000, with_pool=True): super(DLA, self).__init__() self.channels = channels self.class_num = class_num self.with_pool = with_pool self.cardinality = cardinality self.base_width = base_width self.drop_rate = drop_rate self.base_layer = nn.Sequential( nn.Conv2D( in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias_attr=False), nn.BatchNorm2D(channels[0]), nn.ReLU()) self.level0 = self._make_conv_level(channels[0], channels[0], levels[0]) self.level1 = self._make_conv_level( channels[0], channels[1], levels[1], stride=2) cargs = dict( cardinality=cardinality, base_width=base_width, root_residual=residual_root) self.level2 = DlaTree( levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs) self.level3 = DlaTree( levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs) self.level4 = DlaTree( levels[4], block, channels[3], channels[4], 2, level_root=True, **cargs) self.level5 = DlaTree( levels[5], block, channels[4], channels[5], 2, level_root=True, **cargs) self.feature_info = [ # rare to have a meaningful stride 1 level dict( num_chs=channels[0], reduction=1, module='level0'), dict( num_chs=channels[1], reduction=2, module='level1'), dict( num_chs=channels[2], reduction=4, module='level2'), dict( num_chs=channels[3], reduction=8, module='level3'), dict( num_chs=channels[4], reduction=16, module='level4'), dict( num_chs=channels[5], reduction=32, module='level5'), ] self.num_features = channels[-1] if with_pool: self.global_pool = nn.AdaptiveAvgPool2D(1) if class_num > 0: self.fc = nn.Conv2D(self.num_features, class_num, 1) for m in self.sublayers(): if isinstance(m, nn.Conv2D): n = m._kernel_size[0] * m._kernel_size[1] * m._out_channels normal_ = Normal(mean=0.0, std=math.sqrt(2. / n)) normal_(m.weight) elif isinstance(m, nn.BatchNorm2D): ones_(m.weight) zeros_(m.bias)
[ "def", "__init__", "(", "self", ",", "levels", ",", "channels", ",", "in_chans", "=", "3", ",", "cardinality", "=", "1", ",", "base_width", "=", "64", ",", "block", "=", "DlaBottleneck", ",", "residual_root", "=", "False", ",", "drop_rate", "=", "0.0", ",", "class_num", "=", "1000", ",", "with_pool", "=", "True", ")", ":", "super", "(", "DLA", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "channels", "=", "channels", "self", ".", "class_num", "=", "class_num", "self", ".", "with_pool", "=", "with_pool", "self", ".", "cardinality", "=", "cardinality", "self", ".", "base_width", "=", "base_width", "self", ".", "drop_rate", "=", "drop_rate", "self", ".", "base_layer", "=", "nn", ".", "Sequential", "(", "nn", ".", "Conv2D", "(", "in_chans", ",", "channels", "[", "0", "]", ",", "kernel_size", "=", "7", ",", "stride", "=", "1", ",", "padding", "=", "3", ",", "bias_attr", "=", "False", ")", ",", "nn", ".", "BatchNorm2D", "(", "channels", "[", "0", "]", ")", ",", "nn", ".", "ReLU", "(", ")", ")", "self", ".", "level0", "=", "self", ".", "_make_conv_level", "(", "channels", "[", "0", "]", ",", "channels", "[", "0", "]", ",", "levels", "[", "0", "]", ")", "self", ".", "level1", "=", "self", ".", "_make_conv_level", "(", "channels", "[", "0", "]", ",", "channels", "[", "1", "]", ",", "levels", "[", "1", "]", ",", "stride", "=", "2", ")", "cargs", "=", "dict", "(", "cardinality", "=", "cardinality", ",", "base_width", "=", "base_width", ",", "root_residual", "=", "residual_root", ")", "self", ".", "level2", "=", "DlaTree", "(", "levels", "[", "2", "]", ",", "block", ",", "channels", "[", "1", "]", ",", "channels", "[", "2", "]", ",", "2", ",", "level_root", "=", "False", ",", "*", "*", "cargs", ")", "self", ".", "level3", "=", "DlaTree", "(", "levels", "[", "3", "]", ",", "block", ",", "channels", "[", "2", "]", ",", "channels", "[", "3", "]", ",", "2", ",", "level_root", "=", "True", ",", "*", "*", "cargs", ")", "self", ".", "level4", "=", "DlaTree", "(", "levels", "[", "4", "]", ",", "block", ",", "channels", "[", "3", "]", ",", "channels", "[", "4", "]", ",", "2", ",", "level_root", "=", "True", ",", "*", "*", "cargs", ")", "self", ".", "level5", "=", "DlaTree", "(", "levels", "[", "5", "]", ",", "block", ",", "channels", "[", "4", "]", ",", "channels", "[", "5", "]", ",", "2", ",", "level_root", "=", "True", ",", "*", "*", "cargs", ")", "self", ".", "feature_info", "=", "[", "# rare to have a meaningful stride 1 level", "dict", "(", "num_chs", "=", "channels", "[", "0", "]", ",", "reduction", "=", "1", ",", "module", "=", "'level0'", ")", ",", "dict", "(", "num_chs", "=", "channels", "[", "1", "]", ",", "reduction", "=", "2", ",", "module", "=", "'level1'", ")", ",", "dict", "(", "num_chs", "=", "channels", "[", "2", "]", ",", "reduction", "=", "4", ",", "module", "=", "'level2'", ")", ",", "dict", "(", "num_chs", "=", "channels", "[", "3", "]", ",", "reduction", "=", "8", ",", "module", "=", "'level3'", ")", ",", "dict", "(", "num_chs", "=", "channels", "[", "4", "]", ",", "reduction", "=", "16", ",", "module", "=", "'level4'", ")", ",", "dict", "(", "num_chs", "=", "channels", "[", "5", "]", ",", "reduction", "=", "32", ",", "module", "=", "'level5'", ")", ",", "]", "self", ".", "num_features", "=", "channels", "[", "-", "1", "]", "if", "with_pool", ":", "self", ".", "global_pool", "=", "nn", ".", "AdaptiveAvgPool2D", "(", "1", ")", "if", "class_num", ">", "0", ":", "self", ".", "fc", "=", "nn", ".", "Conv2D", "(", "self", ".", "num_features", ",", "class_num", ",", "1", ")", 
"for", "m", "in", "self", ".", "sublayers", "(", ")", ":", "if", "isinstance", "(", "m", ",", "nn", ".", "Conv2D", ")", ":", "n", "=", "m", ".", "_kernel_size", "[", "0", "]", "*", "m", ".", "_kernel_size", "[", "1", "]", "*", "m", ".", "_out_channels", "normal_", "=", "Normal", "(", "mean", "=", "0.0", ",", "std", "=", "math", ".", "sqrt", "(", "2.", "/", "n", ")", ")", "normal_", "(", "m", ".", "weight", ")", "elif", "isinstance", "(", "m", ",", "nn", ".", "BatchNorm2D", ")", ":", "ones_", "(", "m", ".", "weight", ")", "zeros_", "(", "m", ".", "bias", ")" ]
https://github.com/PaddlePaddle/PaddleX/blob/2bab73f81ab54e328204e7871e6ae4a82e719f5d/paddlex/ppcls/arch/backbone/model_zoo/dla.py#L264-L368
kubeflow-kale/kale
bda9d296822e56ba8fe76b0072e656005da04905
backend/kale/common/astutils.py
python
walk
(node, stop_at=tuple(), ignore=tuple())
Walk through the children of an ast node. Args: node: an ast node stop_at: stop traversing through these nodes, including the matching node ignore: stop traversing through these nodes, excluding the matching node Returns: a generator of ast nodes
Walk through the children of an ast node.
[ "Walk", "through", "the", "children", "of", "an", "ast", "node", "." ]
def walk(node, stop_at=tuple(), ignore=tuple()): """Walk through the children of an ast node. Args: node: an ast node stop_at: stop traversing through these nodes, including the matching node ignore: stop traversing through these nodes, excluding the matching node Returns: a generator of ast nodes """ todo = deque([node]) while todo: node = todo.popleft() if isinstance(node, ignore): # dequeue next node continue if not isinstance(node, stop_at): next_nodes = ast.iter_child_nodes(node) for n in next_nodes: todo.extend([n]) yield node
[ "def", "walk", "(", "node", ",", "stop_at", "=", "tuple", "(", ")", ",", "ignore", "=", "tuple", "(", ")", ")", ":", "todo", "=", "deque", "(", "[", "node", "]", ")", "while", "todo", ":", "node", "=", "todo", ".", "popleft", "(", ")", "if", "isinstance", "(", "node", ",", "ignore", ")", ":", "# dequeue next node", "continue", "if", "not", "isinstance", "(", "node", ",", "stop_at", ")", ":", "next_nodes", "=", "ast", ".", "iter_child_nodes", "(", "node", ")", "for", "n", "in", "next_nodes", ":", "todo", ".", "extend", "(", "[", "n", "]", ")", "yield", "node" ]
https://github.com/kubeflow-kale/kale/blob/bda9d296822e56ba8fe76b0072e656005da04905/backend/kale/common/astutils.py#L28-L50
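A runnable sketch, assuming the walk generator above is in scope: collect function names while refusing to descend into class bodies (a stop_at match is still yielded, but its children are pruned).

import ast

source = '''
def outer():
    def inner():
        pass

class Config:
    def method(self):
        pass
'''
tree = ast.parse(source)
names = [n.name for n in walk(tree, stop_at=(ast.ClassDef,))
         if isinstance(n, ast.FunctionDef)]
print(names)  # ['outer', 'inner'] -- Config.method is pruned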
hzy46/Deep-Learning-21-Examples
15c2d9edccad090cd67b033f24a43c544e5cba3e
chapter_5/research/object_detection/core/preprocessor.py
python
random_rgb_to_gray
(image, probability=0.1, seed=None)
return image
Changes the image from RGB to Grayscale with the given probability. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 1]. probability: the probability of returning a grayscale image. The probability should be a number between [0, 1]. seed: random seed. Returns: image: image which is the same shape as input image.
Changes the image from RGB to Grayscale with the given probability.
[ "Changes", "the", "image", "from", "RGB", "to", "Grayscale", "with", "the", "given", "probability", "." ]
def random_rgb_to_gray(image, probability=0.1, seed=None): """Changes the image from RGB to Grayscale with the given probability. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 1]. probability: the probability of returning a grayscale image. The probability should be a number between [0, 1]. seed: random seed. Returns: image: image which is the same shape as input image. """ def _image_to_gray(image): image_gray1 = tf.image.rgb_to_grayscale(image) image_gray3 = tf.image.grayscale_to_rgb(image_gray1) return image_gray3 with tf.name_scope('RandomRGBtoGray', values=[image]): # random variable defining whether to convert to grayscale or not do_gray_random = tf.random_uniform([], seed=seed) image = tf.cond( tf.greater(do_gray_random, probability), lambda: image, lambda: _image_to_gray(image)) return image
[ "def", "random_rgb_to_gray", "(", "image", ",", "probability", "=", "0.1", ",", "seed", "=", "None", ")", ":", "def", "_image_to_gray", "(", "image", ")", ":", "image_gray1", "=", "tf", ".", "image", ".", "rgb_to_grayscale", "(", "image", ")", "image_gray3", "=", "tf", ".", "image", ".", "grayscale_to_rgb", "(", "image_gray1", ")", "return", "image_gray3", "with", "tf", ".", "name_scope", "(", "'RandomRGBtoGray'", ",", "values", "=", "[", "image", "]", ")", ":", "# random variable defining whether to convert to grayscale or not", "do_gray_random", "=", "tf", ".", "random_uniform", "(", "[", "]", ",", "seed", "=", "seed", ")", "image", "=", "tf", ".", "cond", "(", "tf", ".", "greater", "(", "do_gray_random", ",", "probability", ")", ",", "lambda", ":", "image", ",", "lambda", ":", "_image_to_gray", "(", "image", ")", ")", "return", "image" ]
https://github.com/hzy46/Deep-Learning-21-Examples/blob/15c2d9edccad090cd67b033f24a43c544e5cba3e/chapter_5/research/object_detection/core/preprocessor.py#L402-L428
brendano/tweetmotif
1b0b1e3a941745cd5a26eba01f554688b7c4b27e
everything_else/djfrontend/django-1.0.2/http/multipartparser.py
python
LimitBytes.read
(self, num_bytes=None)
return self._file.read(num_bytes)
Read data from the underlying file. If you ask for too much or there isn't anything left, this will raise an InputStreamExhausted error.
Read data from the underlying file. If you ask for too much or there isn't anything left, this will raise an InputStreamExhausted error.
[ "Read", "data", "from", "the", "underlying", "file", ".", "If", "you", "ask", "for", "too", "much", "or", "there", "isn", "t", "anything", "left", "this", "will", "raise", "an", "InputStreamExhausted", "error", "." ]
def read(self, num_bytes=None): """ Read data from the underlying file. If you ask for too much or there isn't anything left, this will raise an InputStreamExhausted error. """ if self.remaining <= 0: raise InputStreamExhausted() if num_bytes is None: num_bytes = self.remaining else: num_bytes = min(num_bytes, self.remaining) self.remaining -= num_bytes return self._file.read(num_bytes)
[ "def", "read", "(", "self", ",", "num_bytes", "=", "None", ")", ":", "if", "self", ".", "remaining", "<=", "0", ":", "raise", "InputStreamExhausted", "(", ")", "if", "num_bytes", "is", "None", ":", "num_bytes", "=", "self", ".", "remaining", "else", ":", "num_bytes", "=", "min", "(", "num_bytes", ",", "self", ".", "remaining", ")", "self", ".", "remaining", "-=", "num_bytes", "return", "self", ".", "_file", ".", "read", "(", "num_bytes", ")" ]
https://github.com/brendano/tweetmotif/blob/1b0b1e3a941745cd5a26eba01f554688b7c4b27e/everything_else/djfrontend/django-1.0.2/http/multipartparser.py#L393-L406
Rapptz/discord.py
45d498c1b76deaf3b394d17ccf56112fa691d160
discord/integrations.py
python
StreamIntegration.sync
(self)
|coro| Syncs the integration. You must have the :attr:`~Permissions.manage_guild` permission to do this. Raises ------- Forbidden You do not have permission to sync the integration. HTTPException Syncing the integration failed.
|coro|
[ "|coro|" ]
async def sync(self) -> None: """|coro| Syncs the integration. You must have the :attr:`~Permissions.manage_guild` permission to do this. Raises ------- Forbidden You do not have permission to sync the integration. HTTPException Syncing the integration failed. """ await self._state.http.sync_integration(self.guild.id, self.id) self.synced_at = datetime.datetime.now(datetime.timezone.utc)
[ "async", "def", "sync", "(", "self", ")", "->", "None", ":", "await", "self", ".", "_state", ".", "http", ".", "sync_integration", "(", "self", ".", "guild", ".", "id", ",", "self", ".", "id", ")", "self", ".", "synced_at", "=", "datetime", ".", "datetime", ".", "now", "(", "datetime", ".", "timezone", ".", "utc", ")" ]
https://github.com/Rapptz/discord.py/blob/45d498c1b76deaf3b394d17ccf56112fa691d160/discord/integrations.py#L269-L285
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/pygments/lexers/robotframework.py
python
VariableSplitter.__init__
(self, string, identifiers)
[]
def __init__(self, string, identifiers): self.identifier = None self.base = None self.index = None self.start = -1 self.end = -1 self._identifiers = identifiers self._may_have_internal_variables = False try: self._split(string) except ValueError: pass else: self._finalize()
[ "def", "__init__", "(", "self", ",", "string", ",", "identifiers", ")", ":", "self", ".", "identifier", "=", "None", "self", ".", "base", "=", "None", "self", ".", "index", "=", "None", "self", ".", "start", "=", "-", "1", "self", ".", "end", "=", "-", "1", "self", ".", "_identifiers", "=", "identifiers", "self", ".", "_may_have_internal_variables", "=", "False", "try", ":", "self", ".", "_split", "(", "string", ")", "except", "ValueError", ":", "pass", "else", ":", "self", ".", "_finalize", "(", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/pygments/lexers/robotframework.py#L444-L457
freelawproject/courtlistener
ab3ae7bb6e5e836b286749113e7dbb403d470912
cl/lib/argparse_types.py
python
_argparse_volumes
(volumes_arg: str)
return range(start, stop)
Custom argparse handling for volumes :param volumes_arg: The volume argparse for harvard imports :return: Range of values
Custom argparse handling for volumes
[ "Custom", "argparse", "handling", "for", "volumes" ]
def _argparse_volumes(volumes_arg: str) -> range: """Custom argparse handling for volumes :param volumes_arg: The volume argparse for harvard imports :return: Range of values """ volumes = [int(e) if e.strip() else 2000 for e in volumes_arg.split(":")] if len(volumes) == 1: start = stop = volumes[0] else: start, stop = volumes[0], volumes[1] + 1 return range(start, stop)
[ "def", "_argparse_volumes", "(", "volumes_arg", ":", "str", ")", "->", "range", ":", "volumes", "=", "[", "int", "(", "e", ")", "if", "e", ".", "strip", "(", ")", "else", "2000", "for", "e", "in", "volumes_arg", ".", "split", "(", "\":\"", ")", "]", "if", "len", "(", "volumes", ")", "==", "1", ":", "start", "=", "stop", "=", "volumes", "[", "0", "]", "else", ":", "start", ",", "stop", "=", "volumes", "[", "0", "]", ",", "volumes", "[", "1", "]", "+", "1", "return", "range", "(", "start", ",", "stop", ")" ]
https://github.com/freelawproject/courtlistener/blob/ab3ae7bb6e5e836b286749113e7dbb403d470912/cl/lib/argparse_types.py#L40-L51
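Assuming the function above is in scope, a few concrete parses; note that, as written, a bare volume with no colon yields an empty range because start == stop.

print(_argparse_volumes("600:602"))  # range(600, 603): the end bound is inclusive
print(_argparse_volumes("600:"))     # range(600, 2001): a blank bound falls back to 2000
print(_argparse_volumes("565"))      # range(565, 565): empty as written (start == stop)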
raiden-network/raiden
76c68b426a6f81f173b9a2c09bd88a610502c38b
raiden/utils/cli.py
python
Parser.parse
(self, ctx: Context, value: Any, source: ParameterSource)
Parses more values to provide to click's 'ctx.params' based on the value of a parameter value - the value of the parameter for `self.name` that was parsed by click source - the ParameterSource where the value of `self.name` was set by click. ctx - The current click `Context` This method should return a dictionary that maps the "internal" parameter name to the parameter values as read in by the parser. In order to get the internal name from the parameter name as defined by the parser, the `self.get_internal_name()` method should be called.
Parses more values to provide to click's 'ctx.params' based on the value of a parameter
[ "Parses", "more", "values", "to", "provide", "to", "click's", "ctx", ".", "params", "based", "on", "the", "value", "of", "a", "parameter" ]
def parse(self, ctx: Context, value: Any, source: ParameterSource) -> Dict[str, Any]: """Parses more values to provide to click's 'ctx.params' based on the value of a parameter value - the value of the parameter for `self.name` that was parsed by click source - the ParameterSource where the value of `self.name` was set by click. ctx - The current click `Context` This method should return a dictionary that maps the "internal" parameter name to the parameter values as read in by the parser. In order to get the internal name from the parameter name as defined by the parser, the `self.get_internal_name()` method should be called. """ pass
[ "def", "parse", "(", "self", ",", "ctx", ":", "Context", ",", "value", ":", "Any", ",", "source", ":", "ParameterSource", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "pass" ]
https://github.com/raiden-network/raiden/blob/76c68b426a6f81f173b9a2c09bd88a610502c38b/raiden/utils/cli.py#L489-L502
kubernetes-client/python
47b9da9de2d02b2b7a34fbe05afb44afd130d73a
kubernetes/client/api/apiextensions_v1_api.py
python
ApiextensionsV1Api.list_custom_resource_definition_with_http_info
(self, **kwargs)
return self.api_client.call_api( '/apis/apiextensions.k8s.io/v1/customresourcedefinitions', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1CustomResourceDefinitionList', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
list_custom_resource_definition # noqa: E501 list or watch objects of kind CustomResourceDefinition # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_custom_resource_definition_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str pretty: If 'true', then the output is pretty printed. :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(V1CustomResourceDefinitionList, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread.
list_custom_resource_definition # noqa: E501
[ "list_custom_resource_definition", "#", "noqa", ":", "E501" ]
def list_custom_resource_definition_with_http_info(self, **kwargs):  # noqa: E501
    """list_custom_resource_definition  # noqa: E501

    list or watch objects of kind CustomResourceDefinition  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_custom_resource_definition_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
    :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
    :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
    :return: tuple(V1CustomResourceDefinitionList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    local_var_params = locals()

    all_params = [
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'timeout_seconds',
        'watch'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_custom_resource_definition" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None:  # noqa: E501
        query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks']))  # noqa: E501
    if '_continue' in local_var_params and local_var_params['_continue'] is not None:  # noqa: E501
        query_params.append(('continue', local_var_params['_continue']))  # noqa: E501
    if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None:  # noqa: E501
        query_params.append(('fieldSelector', local_var_params['field_selector']))  # noqa: E501
    if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None:  # noqa: E501
        query_params.append(('labelSelector', local_var_params['label_selector']))  # noqa: E501
    if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
        query_params.append(('limit', local_var_params['limit']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501
    if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None:  # noqa: E501
        query_params.append(('resourceVersionMatch', local_var_params['resource_version_match']))  # noqa: E501
    if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None:  # noqa: E501
        query_params.append(('timeoutSeconds', local_var_params['timeout_seconds']))  # noqa: E501
    if 'watch' in local_var_params and local_var_params['watch'] is not None:  # noqa: E501
        query_params.append(('watch', local_var_params['watch']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/apiextensions.k8s.io/v1/customresourcedefinitions', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1CustomResourceDefinitionList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
[ "def", "list_custom_resource_definition_with_http_info", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "local_var_params", "=", "locals", "(", ")", "all_params", "=", "[", "'pretty'", ",", "'allow_watch_bookmarks'", ",", "'_continue'", ",", "'field_selector'", ",", "'label_selector'", ",", "'limit'", ",", "'resource_version'", ",", "'resource_version_match'", ",", "'timeout_seconds'", ",", "'watch'", "]", "all_params", ".", "extend", "(", "[", "'async_req'", ",", "'_return_http_data_only'", ",", "'_preload_content'", ",", "'_request_timeout'", "]", ")", "for", "key", ",", "val", "in", "six", ".", "iteritems", "(", "local_var_params", "[", "'kwargs'", "]", ")", ":", "if", "key", "not", "in", "all_params", ":", "raise", "ApiTypeError", "(", "\"Got an unexpected keyword argument '%s'\"", "\" to method list_custom_resource_definition\"", "%", "key", ")", "local_var_params", "[", "key", "]", "=", "val", "del", "local_var_params", "[", "'kwargs'", "]", "collection_formats", "=", "{", "}", "path_params", "=", "{", "}", "query_params", "=", "[", "]", "if", "'pretty'", "in", "local_var_params", "and", "local_var_params", "[", "'pretty'", "]", "is", "not", "None", ":", "# noqa: E501", "query_params", ".", "append", "(", "(", "'pretty'", ",", "local_var_params", "[", "'pretty'", "]", ")", ")", "# noqa: E501", "if", "'allow_watch_bookmarks'", "in", "local_var_params", "and", "local_var_params", "[", "'allow_watch_bookmarks'", "]", "is", "not", "None", ":", "# noqa: E501", "query_params", ".", "append", "(", "(", "'allowWatchBookmarks'", ",", "local_var_params", "[", "'allow_watch_bookmarks'", "]", ")", ")", "# noqa: E501", "if", "'_continue'", "in", "local_var_params", "and", "local_var_params", "[", "'_continue'", "]", "is", "not", "None", ":", "# noqa: E501", "query_params", ".", "append", "(", "(", "'continue'", ",", "local_var_params", "[", "'_continue'", "]", ")", ")", "# noqa: E501", "if", "'field_selector'", "in", "local_var_params", "and", "local_var_params", "[", "'field_selector'", "]", "is", "not", "None", ":", "# noqa: E501", "query_params", ".", "append", "(", "(", "'fieldSelector'", ",", "local_var_params", "[", "'field_selector'", "]", ")", ")", "# noqa: E501", "if", "'label_selector'", "in", "local_var_params", "and", "local_var_params", "[", "'label_selector'", "]", "is", "not", "None", ":", "# noqa: E501", "query_params", ".", "append", "(", "(", "'labelSelector'", ",", "local_var_params", "[", "'label_selector'", "]", ")", ")", "# noqa: E501", "if", "'limit'", "in", "local_var_params", "and", "local_var_params", "[", "'limit'", "]", "is", "not", "None", ":", "# noqa: E501", "query_params", ".", "append", "(", "(", "'limit'", ",", "local_var_params", "[", "'limit'", "]", ")", ")", "# noqa: E501", "if", "'resource_version'", "in", "local_var_params", "and", "local_var_params", "[", "'resource_version'", "]", "is", "not", "None", ":", "# noqa: E501", "query_params", ".", "append", "(", "(", "'resourceVersion'", ",", "local_var_params", "[", "'resource_version'", "]", ")", ")", "# noqa: E501", "if", "'resource_version_match'", "in", "local_var_params", "and", "local_var_params", "[", "'resource_version_match'", "]", "is", "not", "None", ":", "# noqa: E501", "query_params", ".", "append", "(", "(", "'resourceVersionMatch'", ",", "local_var_params", "[", "'resource_version_match'", "]", ")", ")", "# noqa: E501", "if", "'timeout_seconds'", "in", "local_var_params", "and", "local_var_params", "[", "'timeout_seconds'", "]", "is", "not", "None", ":", "# noqa: E501", 
"query_params", ".", "append", "(", "(", "'timeoutSeconds'", ",", "local_var_params", "[", "'timeout_seconds'", "]", ")", ")", "# noqa: E501", "if", "'watch'", "in", "local_var_params", "and", "local_var_params", "[", "'watch'", "]", "is", "not", "None", ":", "# noqa: E501", "query_params", ".", "append", "(", "(", "'watch'", ",", "local_var_params", "[", "'watch'", "]", ")", ")", "# noqa: E501", "header_params", "=", "{", "}", "form_params", "=", "[", "]", "local_var_files", "=", "{", "}", "body_params", "=", "None", "# HTTP header `Accept`", "header_params", "[", "'Accept'", "]", "=", "self", ".", "api_client", ".", "select_header_accept", "(", "[", "'application/json'", ",", "'application/yaml'", ",", "'application/vnd.kubernetes.protobuf'", ",", "'application/json;stream=watch'", ",", "'application/vnd.kubernetes.protobuf;stream=watch'", "]", ")", "# noqa: E501", "# Authentication setting", "auth_settings", "=", "[", "'BearerToken'", "]", "# noqa: E501", "return", "self", ".", "api_client", ".", "call_api", "(", "'/apis/apiextensions.k8s.io/v1/customresourcedefinitions'", ",", "'GET'", ",", "path_params", ",", "query_params", ",", "header_params", ",", "body", "=", "body_params", ",", "post_params", "=", "form_params", ",", "files", "=", "local_var_files", ",", "response_type", "=", "'V1CustomResourceDefinitionList'", ",", "# noqa: E501", "auth_settings", "=", "auth_settings", ",", "async_req", "=", "local_var_params", ".", "get", "(", "'async_req'", ")", ",", "_return_http_data_only", "=", "local_var_params", ".", "get", "(", "'_return_http_data_only'", ")", ",", "# noqa: E501", "_preload_content", "=", "local_var_params", ".", "get", "(", "'_preload_content'", ",", "True", ")", ",", "_request_timeout", "=", "local_var_params", ".", "get", "(", "'_request_timeout'", ")", ",", "collection_formats", "=", "collection_formats", ")" ]
https://github.com/kubernetes-client/python/blob/47b9da9de2d02b2b7a34fbe05afb44afd130d73a/kubernetes/client/api/apiextensions_v1_api.py#L621-L740
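The limit/continue contract spelled out in the docstring above lends itself to a short pagination sketch. This is an illustrative assumption-laden example, not part of the record: it presumes the kubernetes Python client is installed, a cluster is reachable, and a kubeconfig exists that config.load_kube_config() can find.

# Hedged pagination sketch: keep resubmitting the server-issued continue
# token until the server stops setting it, per the docstring above.
from kubernetes import client, config

config.load_kube_config()  # assumption: a local kubeconfig is available
api = client.ApiextensionsV1Api()

token = None
while True:
    # the *_with_http_info variant returns (data, status_code, headers)
    data, status, headers = api.list_custom_resource_definition_with_http_info(
        limit=50, _continue=token)
    for crd in data.items:
        print(crd.metadata.name)
    token = data.metadata._continue  # V1ListMeta exposes continue as _continue
    if not token:
        break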
4shadoww/hakkuframework
409a11fc3819d251f86faa3473439f8c19066a21
lib/scapy/layers/tls/cert.py
python
_PrivKeyFactory.__call__
(cls, key_path=None)
return obj
key_path may be the path to either:
    _an RSAPrivateKey_OpenSSL (as generated by openssl);
    _an ECDSAPrivateKey_OpenSSL (as generated by openssl);
    _an RSAPrivateKey;
    _an ECDSAPrivateKey.
key_path may be the path to either: _an RSAPrivateKey_OpenSSL (as generated by openssl); _an ECDSAPrivateKey_OpenSSL (as generated by openssl); _an RSAPrivateKey; _an ECDSAPrivateKey.
[ "key_path", "may", "be", "the", "path", "to", "either", ":", "_an", "RSAPrivateKey_OpenSSL", "(", "as", "generated", "by", "openssl", ")", ";", "_an", "ECDSAPrivateKey_OpenSSL", "(", "as", "generated", "by", "openssl", ")", ";", "_an", "RSAPrivateKey", ";", "_an", "ECDSAPrivateKey", "." ]
def __call__(cls, key_path=None):
    """
    key_path may be the path to either:
        _an RSAPrivateKey_OpenSSL (as generated by openssl);
        _an ECDSAPrivateKey_OpenSSL (as generated by openssl);
        _an RSAPrivateKey;
        _an ECDSAPrivateKey.
    """
    if key_path is None:
        obj = type.__call__(cls)
        if cls is PrivKey:
            cls = PrivKeyECDSA
        obj.__class__ = cls
        obj.frmt = "original"
        obj.fill_and_store()
        return obj

    obj = _PKIObjMaker.__call__(cls, key_path, _MAX_KEY_SIZE)
    multiPEM = False
    try:
        privkey = RSAPrivateKey_OpenSSL(obj.der)
        privkey = privkey.privateKey
        obj.__class__ = PrivKeyRSA
        marker = b"PRIVATE KEY"
    except Exception:
        try:
            privkey = ECDSAPrivateKey_OpenSSL(obj.der)
            privkey = privkey.privateKey
            obj.__class__ = PrivKeyECDSA
            marker = b"EC PRIVATE KEY"
            multiPEM = True
        except Exception:
            try:
                privkey = RSAPrivateKey(obj.der)
                obj.__class__ = PrivKeyRSA
                marker = b"RSA PRIVATE KEY"
            except Exception:
                try:
                    privkey = ECDSAPrivateKey(obj.der)
                    obj.__class__ = PrivKeyECDSA
                    marker = b"EC PRIVATE KEY"
                except Exception:
                    raise Exception("Unable to import private key")
    try:
        obj.import_from_asn1pkt(privkey)
    except ImportError:
        pass

    if obj.frmt == "DER":
        if multiPEM:
            # this does not restore the EC PARAMETERS header
            obj.pem = der2pem(raw(privkey), marker)
        else:
            obj.pem = der2pem(obj.der, marker)
    return obj
[ "def", "__call__", "(", "cls", ",", "key_path", "=", "None", ")", ":", "if", "key_path", "is", "None", ":", "obj", "=", "type", ".", "__call__", "(", "cls", ")", "if", "cls", "is", "PrivKey", ":", "cls", "=", "PrivKeyECDSA", "obj", ".", "__class__", "=", "cls", "obj", ".", "frmt", "=", "\"original\"", "obj", ".", "fill_and_store", "(", ")", "return", "obj", "obj", "=", "_PKIObjMaker", ".", "__call__", "(", "cls", ",", "key_path", ",", "_MAX_KEY_SIZE", ")", "multiPEM", "=", "False", "try", ":", "privkey", "=", "RSAPrivateKey_OpenSSL", "(", "obj", ".", "der", ")", "privkey", "=", "privkey", ".", "privateKey", "obj", ".", "__class__", "=", "PrivKeyRSA", "marker", "=", "b\"PRIVATE KEY\"", "except", "Exception", ":", "try", ":", "privkey", "=", "ECDSAPrivateKey_OpenSSL", "(", "obj", ".", "der", ")", "privkey", "=", "privkey", ".", "privateKey", "obj", ".", "__class__", "=", "PrivKeyECDSA", "marker", "=", "b\"EC PRIVATE KEY\"", "multiPEM", "=", "True", "except", "Exception", ":", "try", ":", "privkey", "=", "RSAPrivateKey", "(", "obj", ".", "der", ")", "obj", ".", "__class__", "=", "PrivKeyRSA", "marker", "=", "b\"RSA PRIVATE KEY\"", "except", "Exception", ":", "try", ":", "privkey", "=", "ECDSAPrivateKey", "(", "obj", ".", "der", ")", "obj", ".", "__class__", "=", "PrivKeyECDSA", "marker", "=", "b\"EC PRIVATE KEY\"", "except", "Exception", ":", "raise", "Exception", "(", "\"Unable to import private key\"", ")", "try", ":", "obj", ".", "import_from_asn1pkt", "(", "privkey", ")", "except", "ImportError", ":", "pass", "if", "obj", ".", "frmt", "==", "\"DER\"", ":", "if", "multiPEM", ":", "# this does not restore the EC PARAMETERS header", "obj", ".", "pem", "=", "der2pem", "(", "raw", "(", "privkey", ")", ",", "marker", ")", "else", ":", "obj", ".", "pem", "=", "der2pem", "(", "obj", ".", "der", ",", "marker", ")", "return", "obj" ]
https://github.com/4shadoww/hakkuframework/blob/409a11fc3819d251f86faa3473439f8c19066a21/lib/scapy/layers/tls/cert.py#L354-L408
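Since the factory above dispatches on the input format, a minimal usage sketch may help; "key.pem" is a placeholder path, and the no-argument behavior follows directly from the key_path is None branch.

# Minimal sketch of the dispatch documented above; "key.pem" is a placeholder.
from scapy.layers.tls.cert import PrivKey

fresh = PrivKey()            # no path: generates a fresh key, defaulting to PrivKeyECDSA
loaded = PrivKey("key.pem")  # path: class is rebound to PrivKeyRSA or PrivKeyECDSA on import
print(type(fresh).__name__, type(loaded).__name__)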
weechat/scripts
99ec0e7eceefabb9efb0f11ec26d45d6e8e84335
python/anotify.py
python
notify_highlighted_message
(prefix, message)
Notify on highlighted message.
Notify on highlighted message.
[ "Notify", "on", "highlighted", "message", "." ]
def notify_highlighted_message(prefix, message):
    '''Notify on highlighted message.'''
    if weechat.config_get_plugin("show_highlighted_message") == "on":
        a_notify(
            'Highlight',
            'Highlighted Message',
            "{0}: {1}".format(prefix, message),
            priority=notify2.URGENCY_CRITICAL)
[ "def", "notify_highlighted_message", "(", "prefix", ",", "message", ")", ":", "if", "weechat", ".", "config_get_plugin", "(", "\"show_highlighted_message\"", ")", "==", "\"on\"", ":", "a_notify", "(", "'Highlight'", ",", "'Highlighted Message'", ",", "\"{0}: {1}\"", ".", "format", "(", "prefix", ",", "message", ")", ",", "priority", "=", "notify2", ".", "URGENCY_CRITICAL", ")" ]
https://github.com/weechat/scripts/blob/99ec0e7eceefabb9efb0f11ec26d45d6e8e84335/python/anotify.py#L172-L179
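For context, a hedged sketch of how a handler like this is typically wired up in a WeeChat script: the callback signature follows weechat.hook_print's documented shape, and print_cb is a name invented for illustration, not taken from anotify.py.

import weechat  # only importable inside WeeChat's embedded Python

def print_cb(data, buf, date, tags, displayed, highlight, prefix, message):
    # forward highlighted lines to the notifier shown above
    if int(highlight):
        notify_highlighted_message(prefix, message)
    return weechat.WEECHAT_RC_OK

weechat.hook_print("", "", "", 1, "print_cb", "")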
gnuradio/pybombs
17044241bf835b93571026b112f179f2db7448a4
pybombs/dep_manager.py
python
DepManager.make_dep_tree
(self, pkg_list, filter_callback)
return install_tree
- pkg_list: List of package names.
- filter_callback: Function that takes a package name and returns True if the package should go into the tree.
- pkg_list: List of package names. - filter_callback: Function that takes a package name and returns True if the package should go into the tree.
[ "-", "pkg_list", ":", "List", "of", "package", "names", ".", "-", "filter_callback", ":", "Function", "that", "takes", "a", "package", "name", "and", "returns", "True", "if", "the", "package", "should", "go", "into", "the", "tree", "." ]
def make_dep_tree(self, pkg_list, filter_callback):
    """
    - pkg_list: List of package names.
    - filter_callback: Function that takes a package name and returns
      True if the package should go into the tree.
    """
    # - all of pkg_list goes into a set P
    # - init an empty dict D
    # - for every element p of P:
    #   - create a full dep tree T
    #   - D[p] -> T
    #   - for every element q of P\p:
    #     - if q is in T, then P <- P\q and del(D[q]) if exists
    # - merge all elements of D into T' and return that
    pkg_set = set([pkg for pkg in pkg_list if filter_callback(pkg)])
    new_pkg_set = copy.copy(pkg_set)
    dep_trees = {}
    for pkg in pkg_set:
        dep_trees[pkg] = self.make_tree_recursive(pkg, filter_callback)
        assert dep_trees[pkg] is not None
        for other_pkg in new_pkg_set.difference([pkg]):
            if other_pkg in dep_trees[pkg]:
                new_pkg_set.remove(other_pkg)
    install_tree = TreeNode()
    for pkg in new_pkg_set:
        install_tree.add_child(dep_trees[pkg])
    return install_tree
[ "def", "make_dep_tree", "(", "self", ",", "pkg_list", ",", "filter_callback", ")", ":", "# - all of pkg_list goes into a set P", "# - init an empty dict D", "# - for every element p of P:", "# - create a full dep tree T", "# - D[p] -> T", "# - for every element q of P\\p:", "# - if q is in T, then P <- P\\q and del(D[q]) if exists", "# - merge all elements of D into T' and return that", "pkg_set", "=", "set", "(", "[", "pkg", "for", "pkg", "in", "pkg_list", "if", "filter_callback", "(", "pkg", ")", "]", ")", "new_pkg_set", "=", "copy", ".", "copy", "(", "pkg_set", ")", "dep_trees", "=", "{", "}", "for", "pkg", "in", "pkg_set", ":", "dep_trees", "[", "pkg", "]", "=", "self", ".", "make_tree_recursive", "(", "pkg", ",", "filter_callback", ")", "assert", "dep_trees", "[", "pkg", "]", "is", "not", "None", "for", "other_pkg", "in", "new_pkg_set", ".", "difference", "(", "[", "pkg", "]", ")", ":", "if", "other_pkg", "in", "dep_trees", "[", "pkg", "]", ":", "new_pkg_set", ".", "remove", "(", "other_pkg", ")", "install_tree", "=", "TreeNode", "(", ")", "for", "pkg", "in", "new_pkg_set", ":", "install_tree", ".", "add_child", "(", "dep_trees", "[", "pkg", "]", ")", "return", "install_tree" ]
https://github.com/gnuradio/pybombs/blob/17044241bf835b93571026b112f179f2db7448a4/pybombs/dep_manager.py#L39-L65
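The comment block in make_dep_tree describes a set-pruning step that is easy to miss: packages whose tree already appears inside another package's tree are dropped before the trees are merged. The standalone toy below re-implements just that step; prune_redundant, deps_of, and the sample data are invented for illustration, and deps_of is assumed to return a package's full transitive dependency set.

# Standalone sketch of the pruning step described in the comments above;
# this is not pybombs code, just the same set logic in isolation.
def prune_redundant(pkg_list, deps_of):
    """deps_of(p) -> set of all transitive deps of p (p itself excluded)."""
    keep = set(pkg_list)
    for pkg in pkg_list:
        for other in set(keep) - {pkg}:
            if other in deps_of(pkg):
                keep.discard(other)  # already covered by pkg's tree
    return keep

deps = {"gnuradio": {"uhd", "boost"}, "uhd": {"boost"}, "boost": set()}
print(prune_redundant(["gnuradio", "uhd", "boost"], deps.__getitem__))
# -> {'gnuradio'}; uhd and boost are already inside gnuradio's tree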
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/Django/django/contrib/formtools/wizard/legacy.py
python
FormWizard.get_template
(self, step)
return 'forms/wizard.html'
Hook for specifying the name of the template to use for a given step. Note that this can return a tuple of template names if you'd like to use the template system's select_template() hook.
Hook for specifying the name of the template to use for a given step.
[ "Hook", "for", "specifying", "the", "name", "of", "the", "template", "to", "use", "for", "a", "given", "step", "." ]
def get_template(self, step):
    """
    Hook for specifying the name of the template to use for a given step.

    Note that this can return a tuple of template names if you'd like to
    use the template system's select_template() hook.
    """
    return 'forms/wizard.html'
[ "def", "get_template", "(", "self", ",", "step", ")", ":", "return", "'forms/wizard.html'" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/Django/django/contrib/formtools/wizard/legacy.py#L204-L211
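As the docstring notes, the hook may return a tuple so select_template() can fall back. A minimal subclass sketch, assuming the vendored module path recorded above; the per-step template names are placeholders, and a real FormWizard subclass would also implement done().

from django.contrib.formtools.wizard.legacy import FormWizard

class ContactWizard(FormWizard):
    def get_template(self, step):
        # try a per-step template first, fall back to the shared one
        return ('forms/wizard_%s.html' % step, 'forms/wizard.html')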
rancavil/tornado-rest
84d70ca003a00c84f0438c7a3ecde3f88b321134
pyrestful/rest.py
python
RestHandler.put
(self)
Executes put method
Executes put method
[ "Executes", "put", "method" ]
def put(self):
    """ Executes put method """
    self._exe('PUT')
[ "def", "put", "(", "self", ")", ":", "self", ".", "_exe", "(", "'PUT'", ")" ]
https://github.com/rancavil/tornado-rest/blob/84d70ca003a00c84f0438c7a3ecde3f88b321134/pyrestful/rest.py#L145-L147
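put here is one of several thin verb shims over a shared _exe router. A standalone toy showing the pattern (not pyrestful's actual dispatcher, which resolves the request path to a decorated service method):

class Handler:
    """Toy dispatcher illustrating the verb-shim pattern above."""

    def _exe(self, verb):
        # stand-in for pyrestful's routing logic
        print("routing %s to a registered service method" % verb)

    def get(self):
        self._exe('GET')

    def put(self):
        self._exe('PUT')

Handler().put()  # -> routing PUT to a registered service method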