Dataset schema:

repo: string (length 7 to 55)
path: string (length 4 to 223)
url: string (length 87 to 315)
code: string (length 75 to 104k)
code_tokens: list
docstring: string (length 1 to 46.9k)
docstring_tokens: list
language: string (1 class)
partition: string (3 values)
avg_line_len: float64 (7.91 to 980)
pandas-dev/pandas
pandas/core/frame.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L5978-L6039
def unstack(self, level=-1, fill_value=None):
    """
    Pivot a level of the (necessarily hierarchical) index labels, returning
    a DataFrame having a new level of column labels whose inner-most level
    consists of the pivoted index labels.

    If the index is not a MultiIndex, the output will be a Series
    (the analogue of stack when the columns are not a MultiIndex).

    The level involved will automatically get sorted.

    Parameters
    ----------
    level : int, string, or list of these, default -1 (last level)
        Level(s) of index to unstack, can pass level name
    fill_value : replace NaN with this value if the unstack produces
        missing values

        .. versionadded:: 0.18.0

    Returns
    -------
    Series or DataFrame

    See Also
    --------
    DataFrame.pivot : Pivot a table based on column values.
    DataFrame.stack : Pivot a level of the column labels (inverse operation
        from `unstack`).

    Examples
    --------
    >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
    ...                                    ('two', 'a'), ('two', 'b')])
    >>> s = pd.Series(np.arange(1.0, 5.0), index=index)
    >>> s
    one  a   1.0
         b   2.0
    two  a   3.0
         b   4.0
    dtype: float64

    >>> s.unstack(level=-1)
         a    b
    one  1.0  2.0
    two  3.0  4.0

    >>> s.unstack(level=0)
       one  two
    a  1.0   3.0
    b  2.0   4.0

    >>> df = s.unstack(level=0)
    >>> df.unstack()
    one  a  1.0
         b  2.0
    two  a  3.0
         b  4.0
    dtype: float64
    """
    from pandas.core.reshape.reshape import unstack
    return unstack(self, level, fill_value)
[ "def", "unstack", "(", "self", ",", "level", "=", "-", "1", ",", "fill_value", "=", "None", ")", ":", "from", "pandas", ".", "core", ".", "reshape", ".", "reshape", "import", "unstack", "return", "unstack", "(", "self", ",", "level", ",", "fill_value", ")" ]
Pivot a level of the (necessarily hierarchical) index labels, returning a DataFrame having a new level of column labels whose inner-most level consists of the pivoted index labels. If the index is not a MultiIndex, the output will be a Series (the analogue of stack when the columns are not a MultiIndex). The level involved will automatically get sorted. Parameters ---------- level : int, string, or list of these, default -1 (last level) Level(s) of index to unstack, can pass level name fill_value : replace NaN with this value if the unstack produces missing values .. versionadded:: 0.18.0 Returns ------- Series or DataFrame See Also -------- DataFrame.pivot : Pivot a table based on column values. DataFrame.stack : Pivot a level of the column labels (inverse operation from `unstack`). Examples -------- >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ... ('two', 'a'), ('two', 'b')]) >>> s = pd.Series(np.arange(1.0, 5.0), index=index) >>> s one a 1.0 b 2.0 two a 3.0 b 4.0 dtype: float64 >>> s.unstack(level=-1) a b one 1.0 2.0 two 3.0 4.0 >>> s.unstack(level=0) one two a 1.0 3.0 b 2.0 4.0 >>> df = s.unstack(level=0) >>> df.unstack() one a 1.0 b 2.0 two a 3.0 b 4.0 dtype: float64
[ "Pivot", "a", "level", "of", "the", "(", "necessarily", "hierarchical", ")", "index", "labels", "returning", "a", "DataFrame", "having", "a", "new", "level", "of", "column", "labels", "whose", "inner", "-", "most", "level", "consists", "of", "the", "pivoted", "index", "labels", "." ]
python
train
29.33871
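A small usage sketch for the fill_value parameter documented above (the data values are illustrative); with an incomplete MultiIndex, missing cells are filled instead of becoming NaN:

import pandas as pd

idx = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ('two', 'a')])
s = pd.Series([1.0, 2.0, 3.0], index=idx)
# the ('two', 'b') cell does not exist, so it would unstack to NaN;
# fill_value=0 replaces it with 0 instead
print(s.unstack(fill_value=0))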
yueyoum/social-oauth
example/_bottle.py
https://github.com/yueyoum/social-oauth/blob/80600ea737355b20931c8a0b5223f5b68175d930/example/_bottle.py#L1303-L1308
def headers(self):
    ''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
        view on the response headers. '''
    self.__dict__['headers'] = hdict = HeaderDict()
    hdict.dict = self._headers
    return hdict
[ "def", "headers", "(", "self", ")", ":", "self", ".", "__dict__", "[", "'headers'", "]", "=", "hdict", "=", "HeaderDict", "(", ")", "hdict", ".", "dict", "=", "self", ".", "_headers", "return", "hdict" ]
An instance of :class:`HeaderDict`, a case-insensitive dict-like view on the response headers.
[ "An", "instance", "of", ":", "class", ":", "HeaderDict", "a", "case", "-", "insensitive", "dict", "-", "like", "view", "on", "the", "response", "headers", "." ]
python
train
41.333333
rene-aguirre/pywinusb
pywinusb/hid/core.py
https://github.com/rene-aguirre/pywinusb/blob/954c4b2105d9f01cb0c50e24500bb747d4ecdc43/pywinusb/hid/core.py#L184-L242
def get_devices(self, hid_filter=None):
    """Filter a HID device list by current object parameters. Devices
    must match all of the filtering parameters.
    """
    if not hid_filter:  # empty list or called without any parameters
        if type(hid_filter) == type(None):
            # request to query connected devices
            hid_filter = find_all_hid_devices()
        else:
            return hid_filter

    # initially all accepted
    results = {}.fromkeys(hid_filter)

    # the filter parameters
    validating_attributes = list(self.filter_params.keys())

    # first filter out restricted access devices
    if not len(results):
        return {}

    for device in list(results.keys()):
        if not device.is_active():
            del results[device]

    if not len(results):
        return {}

    # filter out
    for item in validating_attributes:
        if item.endswith("_includes"):
            item = item[:-len("_includes")]
        elif item.endswith("_mask"):
            item = item[:-len("_mask")]
        elif item + "_mask" in self.filter_params or \
                item + "_includes" in self.filter_params:
            continue  # value mask or string search is being queried
        elif item not in HidDevice.filter_attributes:
            continue  # field does not exist

        # start filtering out
        for device in list(results.keys()):
            if not hasattr(device, item):
                del results[device]
            elif item + "_mask" in validating_attributes:
                # masked value
                if getattr(device, item) & self.filter_params[item + "_mask"] \
                        != self.filter_params[item] & self.filter_params[item + "_mask"]:
                    del results[device]
            elif item + "_includes" in validating_attributes:
                # subset item
                if self.filter_params[item + "_includes"] not in getattr(device, item):
                    del results[device]
            else:
                # plain comparison
                if getattr(device, item) != self.filter_params[item]:
                    del results[device]

    return list(results.keys())
[ "def", "get_devices", "(", "self", ",", "hid_filter", "=", "None", ")", ":", "if", "not", "hid_filter", ":", "#empty list or called without any parameters\r", "if", "type", "(", "hid_filter", ")", "==", "type", "(", "None", ")", ":", "#request to query connected devices\r", "hid_filter", "=", "find_all_hid_devices", "(", ")", "else", ":", "return", "hid_filter", "#initially all accepted\r", "results", "=", "{", "}", ".", "fromkeys", "(", "hid_filter", ")", "#the filter parameters\r", "validating_attributes", "=", "list", "(", "self", ".", "filter_params", ".", "keys", "(", ")", ")", "#first filter out restricted access devices\r", "if", "not", "len", "(", "results", ")", ":", "return", "{", "}", "for", "device", "in", "list", "(", "results", ".", "keys", "(", ")", ")", ":", "if", "not", "device", ".", "is_active", "(", ")", ":", "del", "results", "[", "device", "]", "if", "not", "len", "(", "results", ")", ":", "return", "{", "}", "#filter out\r", "for", "item", "in", "validating_attributes", ":", "if", "item", ".", "endswith", "(", "\"_includes\"", ")", ":", "item", "=", "item", "[", ":", "-", "len", "(", "\"_includes\"", ")", "]", "elif", "item", ".", "endswith", "(", "\"_mask\"", ")", ":", "item", "=", "item", "[", ":", "-", "len", "(", "\"_mask\"", ")", "]", "elif", "item", "+", "\"_mask\"", "in", "self", ".", "filter_params", "or", "item", "+", "\"_includes\"", "in", "self", ".", "filter_params", ":", "continue", "# value mask or string search is being queried\r", "elif", "item", "not", "in", "HidDevice", ".", "filter_attributes", ":", "continue", "# field does not exist sys.error.write(...)\r", "#start filtering out\r", "for", "device", "in", "list", "(", "results", ".", "keys", "(", ")", ")", ":", "if", "not", "hasattr", "(", "device", ",", "item", ")", ":", "del", "results", "[", "device", "]", "elif", "item", "+", "\"_mask\"", "in", "validating_attributes", ":", "#masked value\r", "if", "getattr", "(", "device", ",", "item", ")", "&", "self", ".", "filter_params", "[", "item", "+", "\"_mask\"", "]", "!=", "self", ".", "filter_params", "[", "item", "]", "&", "self", ".", "filter_params", "[", "item", "+", "\"_mask\"", "]", ":", "del", "results", "[", "device", "]", "elif", "item", "+", "\"_includes\"", "in", "validating_attributes", ":", "#subset item\r", "if", "self", ".", "filter_params", "[", "item", "+", "\"_includes\"", "]", "not", "in", "getattr", "(", "device", ",", "item", ")", ":", "del", "results", "[", "device", "]", "else", ":", "#plain comparison\r", "if", "getattr", "(", "device", ",", "item", ")", "!=", "self", ".", "filter_params", "[", "item", "]", ":", "del", "results", "[", "device", "]", "#\r", "return", "list", "(", "results", ".", "keys", "(", ")", ")" ]
Filter a HID device list by current object parameters. Devices must match the all of the filtering parameters
[ "Filter", "a", "HID", "device", "list", "by", "current", "object", "parameters", ".", "Devices", "must", "match", "the", "all", "of", "the", "filtering", "parameters" ]
python
train
41.864407
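A minimal sketch of how get_devices is typically reached through a filter object, assuming the standard pywinusb.hid.HidDeviceFilter entry point (the vendor id is a placeholder):

from pywinusb import hid

# match all connected devices from one (placeholder) vendor id
device_filter = hid.HidDeviceFilter(vendor_id=0x045e)
for device in device_filter.get_devices():
    print(device)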
secdev/scapy
scapy/contrib/isotp.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/isotp.py#L329-L340
def feed(self, can):
    """Attempt to feed an incoming CAN frame into the state machine"""
    if not isinstance(can, CAN):
        raise Scapy_Exception("argument is not a CAN frame")
    identifier = can.identifier
    data = bytes(can.data)

    if len(data) > 1 and self.use_ext_addr is not True:
        self._try_feed(identifier, None, data)

    if len(data) > 2 and self.use_ext_addr is not False:
        ea = six.indexbytes(data, 0)
        self._try_feed(identifier, ea, data[1:])
[ "def", "feed", "(", "self", ",", "can", ")", ":", "if", "not", "isinstance", "(", "can", ",", "CAN", ")", ":", "raise", "Scapy_Exception", "(", "\"argument is not a CAN frame\"", ")", "identifier", "=", "can", ".", "identifier", "data", "=", "bytes", "(", "can", ".", "data", ")", "if", "len", "(", "data", ")", ">", "1", "and", "self", ".", "use_ext_addr", "is", "not", "True", ":", "self", ".", "_try_feed", "(", "identifier", ",", "None", ",", "data", ")", "if", "len", "(", "data", ")", ">", "2", "and", "self", ".", "use_ext_addr", "is", "not", "False", ":", "ea", "=", "six", ".", "indexbytes", "(", "data", ",", "0", ")", "self", ".", "_try_feed", "(", "identifier", ",", "ea", ",", "data", "[", "1", ":", "]", ")" ]
Attempt to feed an incoming CAN frame into the state machine
[ "Attempt", "to", "feed", "an", "incoming", "CAN", "frame", "into", "the", "state", "machine" ]
python
train
43.333333
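A brief sketch of feeding frames into such a builder; the ISOTPMessageBuilder constructor and import path are assumptions based on this file's location in scapy/contrib/isotp.py, and the identifier and payload are placeholders:

from scapy.layers.can import CAN
from scapy.contrib.isotp import ISOTPMessageBuilder  # assumed import for the class above

builder = ISOTPMessageBuilder()  # assumed no-argument constructor
frame = CAN(identifier=0x641, length=6, data=b'\x05\x01\x02\x03\x04\x05')
builder.feed(frame)  # single ISOTP frame, normal (non-extended) addressing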
fastai/fastai
fastai/core.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/core.py#L221-L224
def loadtxt_str(path:PathOrStr)->np.ndarray:
    "Return `ndarray` of `str` of lines of text from `path`."
    with open(path, 'r') as f: lines = f.readlines()
    return np.array([l.strip() for l in lines])
[ "def", "loadtxt_str", "(", "path", ":", "PathOrStr", ")", "->", "np", ".", "ndarray", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "return", "np", ".", "array", "(", "[", "l", ".", "strip", "(", ")", "for", "l", "in", "lines", "]", ")" ]
Return `ndarray` of `str` of lines of text from `path`.
[ "Return", "ndarray", "of", "str", "of", "lines", "of", "text", "from", "path", "." ]
python
train
51
nion-software/nionswift
nion/swift/Facade.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/Facade.py#L1785-L1801
def create_record_task(self, frame_parameters: dict=None, channels_enabled: typing.List[bool]=None) -> RecordTask:
    """Create a record task for this hardware source.

    .. versionadded:: 1.0

    :param frame_parameters: The frame parameters for the record. Pass None for defaults.
    :type frame_parameters: :py:class:`FrameParameters`
    :param channels_enabled: The enabled channels for the record. Pass None for defaults.
    :type channels_enabled: List of booleans.
    :return: The :py:class:`RecordTask` object.
    :rtype: :py:class:`RecordTask`

    Callers should call close on the returned task when finished.

    See :py:class:`RecordTask` for examples of how to use.
    """
    return RecordTask(self.__hardware_source, frame_parameters, channels_enabled)
[ "def", "create_record_task", "(", "self", ",", "frame_parameters", ":", "dict", "=", "None", ",", "channels_enabled", ":", "typing", ".", "List", "[", "bool", "]", "=", "None", ")", "->", "RecordTask", ":", "return", "RecordTask", "(", "self", ".", "__hardware_source", ",", "frame_parameters", ",", "channels_enabled", ")" ]
Create a record task for this hardware source. .. versionadded:: 1.0 :param frame_parameters: The frame parameters for the record. Pass None for defaults. :type frame_parameters: :py:class:`FrameParameters` :param channels_enabled: The enabled channels for the record. Pass None for defaults. :type channels_enabled: List of booleans. :return: The :py:class:`RecordTask` object. :rtype: :py:class:`RecordTask` Callers should call close on the returned task when finished. See :py:class:`RecordTask` for examples of how to use.
[ "Create", "a", "record", "task", "for", "this", "hardware", "source", "." ]
python
train
47.647059
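Since the docstring above only guarantees that callers must close the returned task, a safe usage sketch can lean on contextlib.closing rather than guessing at RecordTask's other methods (hardware_source is assumed to be an instance of the class above):

import contextlib

with contextlib.closing(hardware_source.create_record_task()) as task:
    pass  # use the task here; see the RecordTask docs for its acquisition methods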
trailofbits/manticore
manticore/platforms/linux.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/platforms/linux.py#L1242-L1265
def sys_lseek(self, fd, offset, whence):
    """
    lseek - reposition read/write file offset

    The lseek() function repositions the file offset of the open file
    description associated with the file descriptor fd to the argument
    offset according to the directive whence

    :param fd: a valid file descriptor
    :param offset: the offset in bytes
    :param whence: SEEK_SET: The file offset is set to offset bytes.
                   SEEK_CUR: The file offset is set to its current location plus offset bytes.
                   SEEK_END: The file offset is set to the size of the file plus offset bytes.
    :return: offset from file beginning, or EBADF (fd is not a valid file descriptor or is not open)
    """
    signed_offset = self._to_signed_dword(offset)
    try:
        return self._get_fd(fd).seek(signed_offset, whence)
    except FdError as e:
        logger.info(("LSEEK: Not valid file descriptor on lseek. "
                     "Fd not seekable. Returning EBADF"))
        return -e.err
[ "def", "sys_lseek", "(", "self", ",", "fd", ",", "offset", ",", "whence", ")", ":", "signed_offset", "=", "self", ".", "_to_signed_dword", "(", "offset", ")", "try", ":", "return", "self", ".", "_get_fd", "(", "fd", ")", ".", "seek", "(", "signed_offset", ",", "whence", ")", "except", "FdError", "as", "e", ":", "logger", ".", "info", "(", "(", "\"LSEEK: Not valid file descriptor on lseek.\"", "\"Fd not seekable. Returning EBADF\"", ")", ")", "return", "-", "e", ".", "err" ]
lseek - reposition read/write file offset The lseek() function repositions the file offset of the open file description associated with the file descriptor fd to the argument offset according to the directive whence :param fd: a valid file descriptor :param offset: the offset in bytes :param whence: SEEK_SET: The file offset is set to offset bytes. SEEK_CUR: The file offset is set to its current location plus offset bytes. SEEK_END: The file offset is set to the size of the file plus offset bytes. :return: offset from file beginning, or EBADF (fd is not a valid file descriptor or is not open)
[ "lseek", "-", "reposition", "read", "/", "write", "file", "offset" ]
python
valid
44.416667
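The whence directives mirror the POSIX constants; a plain-Python illustration of the same semantics using os.SEEK_* on an ordinary file (the filename is a throwaway):

import os

with open('scratch.bin', 'wb+') as f:
    f.write(b'0123456789')
    f.seek(3, os.SEEK_SET)          # absolute: offset 3
    f.seek(2, os.SEEK_CUR)          # relative: now at offset 5
    print(f.seek(-1, os.SEEK_END))  # from end of file: prints 9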
tanghaibao/jcvi
jcvi/graphics/base.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/graphics/base.py#L348-L413
def discrete_rainbow(N=7, cmap=cm.Set1, usepreset=True, shuffle=False, \
                     plot=False):
    """
    Return a discrete colormap and the set of colors.

    modified from
    <http://www.scipy.org/Cookbook/Matplotlib/ColormapTransformations>

    cmap: colormap instance, eg. cm.jet.
    N: Number of colors.

    Example
    >>> x = resize(arange(100), (5,100))
    >>> djet = cmap_discretize(cm.jet, 5)
    >>> imshow(x, cmap=djet)

    See available matplotlib colormaps at:
    <http://dept.astro.lsa.umich.edu/~msshin/science/code/matplotlib_cm/>

    If N>20 the sampled colors might not be very distinctive.
    If you want to error and try anyway, set usepreset=False
    """
    import random
    from scipy import interpolate

    if usepreset:
        if 0 < N <= 5:
            cmap = cm.gist_rainbow
        elif N <= 20:
            cmap = cm.Set1
        else:
            sys.exit(discrete_rainbow.__doc__)

    cdict = cmap._segmentdata.copy()
    # N colors
    colors_i = np.linspace(0, 1., N)
    # N+1 indices
    indices = np.linspace(0, 1., N + 1)
    rgbs = []
    for key in ('red', 'green', 'blue'):
        # Find the N colors
        D = np.array(cdict[key])
        I = interpolate.interp1d(D[:, 0], D[:, 1])
        colors = I(colors_i)
        rgbs.append(colors)
        # Place these colors at the correct indices.
        A = np.zeros((N + 1, 3), float)
        A[:, 0] = indices
        A[1:, 1] = colors
        A[:-1, 2] = colors
        # Create a tuple for the dictionary.
        L = []
        for l in A:
            L.append(tuple(l))
        cdict[key] = tuple(L)

    palette = list(zip(*rgbs))  # materialized so random.shuffle also works on Python 3
    if shuffle:
        random.shuffle(palette)

    if plot:
        print_colors(palette)

    # Return (colormap object, RGB tuples)
    return mpl.colors.LinearSegmentedColormap('colormap', cdict, 1024), palette
[ "def", "discrete_rainbow", "(", "N", "=", "7", ",", "cmap", "=", "cm", ".", "Set1", ",", "usepreset", "=", "True", ",", "shuffle", "=", "False", ",", "plot", "=", "False", ")", ":", "import", "random", "from", "scipy", "import", "interpolate", "if", "usepreset", ":", "if", "0", "<", "N", "<=", "5", ":", "cmap", "=", "cm", ".", "gist_rainbow", "elif", "N", "<=", "20", ":", "cmap", "=", "cm", ".", "Set1", "else", ":", "sys", ".", "exit", "(", "discrete_rainbow", ".", "__doc__", ")", "cdict", "=", "cmap", ".", "_segmentdata", ".", "copy", "(", ")", "# N colors", "colors_i", "=", "np", ".", "linspace", "(", "0", ",", "1.", ",", "N", ")", "# N+1 indices", "indices", "=", "np", ".", "linspace", "(", "0", ",", "1.", ",", "N", "+", "1", ")", "rgbs", "=", "[", "]", "for", "key", "in", "(", "'red'", ",", "'green'", ",", "'blue'", ")", ":", "# Find the N colors", "D", "=", "np", ".", "array", "(", "cdict", "[", "key", "]", ")", "I", "=", "interpolate", ".", "interp1d", "(", "D", "[", ":", ",", "0", "]", ",", "D", "[", ":", ",", "1", "]", ")", "colors", "=", "I", "(", "colors_i", ")", "rgbs", ".", "append", "(", "colors", ")", "# Place these colors at the correct indices.", "A", "=", "np", ".", "zeros", "(", "(", "N", "+", "1", ",", "3", ")", ",", "float", ")", "A", "[", ":", ",", "0", "]", "=", "indices", "A", "[", "1", ":", ",", "1", "]", "=", "colors", "A", "[", ":", "-", "1", ",", "2", "]", "=", "colors", "# Create a tuple for the dictionary.", "L", "=", "[", "]", "for", "l", "in", "A", ":", "L", ".", "append", "(", "tuple", "(", "l", ")", ")", "cdict", "[", "key", "]", "=", "tuple", "(", "L", ")", "palette", "=", "zip", "(", "*", "rgbs", ")", "if", "shuffle", ":", "random", ".", "shuffle", "(", "palette", ")", "if", "plot", ":", "print_colors", "(", "palette", ")", "# Return (colormap object, RGB tuples)", "return", "mpl", ".", "colors", ".", "LinearSegmentedColormap", "(", "'colormap'", ",", "cdict", ",", "1024", ")", ",", "palette" ]
Return a discrete colormap and the set of colors. modified from <http://www.scipy.org/Cookbook/Matplotlib/ColormapTransformations> cmap: colormap instance, eg. cm.jet. N: Number of colors. Example >>> x = resize(arange(100), (5,100)) >>> djet = cmap_discretize(cm.jet, 5) >>> imshow(x, cmap=djet) See available matplotlib colormaps at: <http://dept.astro.lsa.umich.edu/~msshin/science/code/matplotlib_cm/> If N>20 the sampled colors might not be very distinctive. If you want to error and try anyway, set usepreset=False
[ "Return", "a", "discrete", "colormap", "and", "the", "set", "of", "colors", "." ]
python
train
26.80303
f3at/feat
src/feat/common/error.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/common/error.py#L49-L62
def print_errors(function):
    """Prints the exceptions raised by the decorated function
    without interfering. For debugging purpose."""

    def wrapper(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except BaseException as e:
            print ("Exception raised calling %s: %s"
                   % (reflect.canonical_name(function),
                      get_exception_message(e)))
            raise

    return wrapper
[ "def", "print_errors", "(", "function", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "BaseException", "as", "e", ":", "print", "(", "\"Exception raise calling %s: %s\"", "%", "(", "reflect", ".", "canonical_name", "(", "function", ")", ",", "get_exception_message", "(", "e", ")", ")", ")", "raise", "return", "wrapper" ]
Prints the exceptions raised by the decorated function without interfering. For debugging purpose.
[ "Prints", "the", "exceptions", "raised", "by", "the", "decorated", "function", "without", "interfering", ".", "For", "debugging", "purpose", "." ]
python
train
32.142857
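Usage of the decorator above; the wrapped function's exception is reported and then propagated unchanged:

@print_errors
def divide(a, b):
    return a / b

divide(1, 0)  # prints the ZeroDivisionError's origin, then re-raises it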
radjkarl/fancyWidgets
DUMP/pyqtgraphBased/parametertree/parameterTypes.py
https://github.com/radjkarl/fancyWidgets/blob/ffe0d5747c5296c78575f0e0909af915a4a5698f/DUMP/pyqtgraphBased/parametertree/parameterTypes.py#L238-L247
def selected(self, sel):
    """Called when this item has been selected (sel=True) OR deselected (sel=False)"""
    ParameterItem.selected(self, sel)

    if self.widget is None:
        return
    if sel and self.param.writable():
        self.showEditor()
    elif self.hideWidget:
        self.hideEditor()
[ "def", "selected", "(", "self", ",", "sel", ")", ":", "ParameterItem", ".", "selected", "(", "self", ",", "sel", ")", "if", "self", ".", "widget", "is", "None", ":", "return", "if", "sel", "and", "self", ".", "param", ".", "writable", "(", ")", ":", "self", ".", "showEditor", "(", ")", "elif", "self", ".", "hideWidget", ":", "self", ".", "hideEditor", "(", ")" ]
Called when this item has been selected (sel=True) OR deselected (sel=False)
[ "Called", "when", "this", "item", "has", "been", "selected", "(", "sel", "=", "True", ")", "OR", "deselected", "(", "sel", "=", "False", ")" ]
python
train
33.2
tornadoweb/tornado
tornado/web.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/web.py#L439-L455
def get_argument(  # noqa: F811
    self,
    name: str,
    default: Union[None, str, _ArgDefaultMarker] = _ARG_DEFAULT,
    strip: bool = True,
) -> Optional[str]:
    """Returns the value of the argument with the given name.

    If default is not provided, the argument is considered to be
    required, and we raise a `MissingArgumentError` if it is missing.

    If the argument appears in the request more than once, we return the
    last value.

    This method searches both the query and body arguments.
    """
    return self._get_argument(name, default, self.request.arguments, strip)
[ "def", "get_argument", "(", "# noqa: F811", "self", ",", "name", ":", "str", ",", "default", ":", "Union", "[", "None", ",", "str", ",", "_ArgDefaultMarker", "]", "=", "_ARG_DEFAULT", ",", "strip", ":", "bool", "=", "True", ",", ")", "->", "Optional", "[", "str", "]", ":", "return", "self", ".", "_get_argument", "(", "name", ",", "default", ",", "self", ".", "request", ".", "arguments", ",", "strip", ")" ]
Returns the value of the argument with the given name. If default is not provided, the argument is considered to be required, and we raise a `MissingArgumentError` if it is missing. If the argument appears in the request more than once, we return the last value. This method searches both the query and body arguments.
[ "Returns", "the", "value", "of", "the", "argument", "with", "the", "given", "name", "." ]
python
train
37.294118
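A typical handler using this method; omitting default makes the argument required, as described above:

import tornado.web

class GreetHandler(tornado.web.RequestHandler):
    def get(self):
        name = self.get_argument("name")                   # raises MissingArgumentError if absent
        greeting = self.get_argument("greeting", "hello")  # optional, with a default
        self.write("%s, %s" % (greeting, name))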
NoviceLive/intellicoder
intellicoder/synthesizers.py
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L173-L191
def c_hashes(self):
    """Get the hashes of the module including functions and DLLs.
    """
    if callable(self.opts.hash_func):
        hashes = [
            '# define {}{} {}\n'.format(
                self.opts.prefix, name, self.opts.hash_func(name)
            ) for name, dummy_args in self.funcs
        ]
    else:
        hashes = [
            make_c_str(self.opts.prefix + name, name)
            for name, dummy_args in self.funcs
        ]
    if self.name != 'kernel32':
        hashes = [
            make_c_str(self.opts.prefix + self.name, self.name)
        ] + hashes
    return hashes
[ "def", "c_hashes", "(", "self", ")", ":", "if", "callable", "(", "self", ".", "opts", ".", "hash_func", ")", ":", "hashes", "=", "[", "'# define {}{} {}\\n'", ".", "format", "(", "self", ".", "opts", ".", "prefix", ",", "name", ",", "self", ".", "opts", ".", "hash_func", "(", "name", ")", ")", "for", "name", ",", "dummy_args", "in", "self", ".", "funcs", "]", "else", ":", "hashes", "=", "[", "make_c_str", "(", "self", ".", "opts", ".", "prefix", "+", "name", ",", "name", ")", "for", "name", ",", "dummy_args", "in", "self", ".", "funcs", "]", "if", "self", ".", "name", "!=", "'kernel32'", ":", "hashes", "=", "[", "make_c_str", "(", "self", ".", "opts", ".", "prefix", "+", "self", ".", "name", ",", "self", ".", "name", ")", "]", "+", "hashes", "return", "hashes" ]
Get the hashes of the module including functions and DLLs.
[ "Get", "the", "hashes", "of", "the", "module", "including", "functions", "and", "DLLs", "." ]
python
train
34.842105
rocky/python-spark
example/python2/py2_scan.py
https://github.com/rocky/python-spark/blob/8899954bcf0e166726841a43e87c23790eb3441f/example/python2/py2_scan.py#L119-L124
def t_name(self, s):
    r'[A-Za-z_][A-Za-z_0-9]*'
    if s in RESERVED_WORDS:
        self.add_token(s.upper(), s)
    else:
        self.add_token('NAME', s)
[ "def", "t_name", "(", "self", ",", "s", ")", ":", "if", "s", "in", "RESERVED_WORDS", ":", "self", ".", "add_token", "(", "s", ".", "upper", "(", ")", ",", "s", ")", "else", ":", "self", ".", "add_token", "(", "'NAME'", ",", "s", ")" ]
r'[A-Za-z_][A-Za-z_0-9]*
[ "r", "[", "A", "-", "Za", "-", "z_", "]", "[", "A", "-", "Za", "-", "z_0", "-", "9", "]", "*" ]
python
train
29
opendns/pyinvestigate
investigate/investigate.py
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L91-L95
def post_parse(self, uri, params={}, data={}):
    '''Convenience method to call post() on an arbitrary URI and parse the response
    into a JSON object. Raises an error on non-200 response status.
    '''
    return self._request_parse(self.post, uri, params, data)
[ "def", "post_parse", "(", "self", ",", "uri", ",", "params", "=", "{", "}", ",", "data", "=", "{", "}", ")", ":", "return", "self", ".", "_request_parse", "(", "self", ".", "post", ",", "uri", ",", "params", ",", "data", ")" ]
Convenience method to call post() on an arbitrary URI and parse the response into a JSON object. Raises an error on non-200 response status.
[ "Convenience", "method", "to", "call", "post", "()", "on", "an", "arbitrary", "URI", "and", "parse", "the", "response", "into", "a", "JSON", "object", ".", "Raises", "an", "error", "on", "non", "-", "200", "response", "status", "." ]
python
train
55.8
tkf/rash
rash/database.py
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/database.py#L379-L421
def search_command_record(
        self, after_context, before_context, context, context_type,
        **kwds):
    """
    Search command history.

    :rtype: [CommandRecord]

    """
    if after_context or before_context or context:
        kwds['condition_as_column'] = True
        limit = kwds['limit']
        kwds['limit'] = -1
        kwds['unique'] = False
        kwds['sort_by'] = {
            'session': ['session_start_time', 'start_time'],
            'time': ['start_time'],
        }[context_type]
        if not kwds['reverse']:
            # Default (reverse=False) means latest history comes first.
            after_context, before_context = before_context, after_context

    (sql, params, keys) = self._compile_sql_search_command_record(**kwds)
    records = self._select_rows(CommandRecord, keys, sql, params)

    # SOMEDAY: optimize context search; do not create CommandRecord
    # object for all (including non-matching) records.
    predicate = lambda r: r.condition
    if context:
        records = include_context(predicate, context, records)
    elif before_context:
        records = include_before(predicate, before_context, records)
    elif after_context:
        records = include_after(predicate, after_context, records)
    if (after_context or before_context or context) and limit >= 0:
        records = itertools.islice(records, limit)
    # NOTE: as SQLite does not support row_number function, let's
    #       do the filtering at Python side when context modifier
    #       is given.  This is *very* inefficient but at least it
    #       works..

    return records
[ "def", "search_command_record", "(", "self", ",", "after_context", ",", "before_context", ",", "context", ",", "context_type", ",", "*", "*", "kwds", ")", ":", "if", "after_context", "or", "before_context", "or", "context", ":", "kwds", "[", "'condition_as_column'", "]", "=", "True", "limit", "=", "kwds", "[", "'limit'", "]", "kwds", "[", "'limit'", "]", "=", "-", "1", "kwds", "[", "'unique'", "]", "=", "False", "kwds", "[", "'sort_by'", "]", "=", "{", "'session'", ":", "[", "'session_start_time'", ",", "'start_time'", "]", ",", "'time'", ":", "[", "'start_time'", "]", ",", "}", "[", "context_type", "]", "if", "not", "kwds", "[", "'reverse'", "]", ":", "# Default (reverse=False) means latest history comes first.", "after_context", ",", "before_context", "=", "before_context", ",", "after_context", "(", "sql", ",", "params", ",", "keys", ")", "=", "self", ".", "_compile_sql_search_command_record", "(", "*", "*", "kwds", ")", "records", "=", "self", ".", "_select_rows", "(", "CommandRecord", ",", "keys", ",", "sql", ",", "params", ")", "# SOMEDAY: optimize context search; do not create CommandRecord", "# object for all (including non-matching) records.", "predicate", "=", "lambda", "r", ":", "r", ".", "condition", "if", "context", ":", "records", "=", "include_context", "(", "predicate", ",", "context", ",", "records", ")", "elif", "before_context", ":", "records", "=", "include_before", "(", "predicate", ",", "before_context", ",", "records", ")", "elif", "after_context", ":", "records", "=", "include_after", "(", "predicate", ",", "after_context", ",", "records", ")", "if", "after_context", "or", "before_context", "or", "context", "and", "limit", ">=", "0", ":", "records", "=", "itertools", ".", "islice", "(", "records", ",", "limit", ")", "# NOTE: as SQLite does not support row_number function, let's", "# do the filtering at Python side when context modifier", "# is given. This is *very* inefficient but at least it", "# works..", "return", "records" ]
Search command history. :rtype: [CommandRecord]
[ "Search", "command", "history", "." ]
python
train
40.465116
CamDavidsonPilon/lifelines
lifelines/fitters/coxph_fitter.py
https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/fitters/coxph_fitter.py#L1580-L1601
def _compute_baseline_survival(self):
    """
    Importantly, this agrees with what the KaplanMeierFitter produces. Ex:

    Example
    -------
    >>> from lifelines.datasets import load_rossi
    >>> from lifelines import CoxPHFitter, KaplanMeierFitter
    >>> rossi = load_rossi()
    >>> kmf = KaplanMeierFitter()
    >>> kmf.fit(rossi['week'], rossi['arrest'])
    >>> rossi2 = rossi[['week', 'arrest']].copy()
    >>> rossi2['var1'] = np.random.randn(432)
    >>> cph = CoxPHFitter()
    >>> cph.fit(rossi2, 'week', 'arrest')
    >>> ax = cph.baseline_survival_.plot()
    >>> kmf.plot(ax=ax)
    """
    survival_df = np.exp(-self.baseline_cumulative_hazard_)
    if self.strata is None:
        survival_df.columns = ["baseline survival"]
    return survival_df
[ "def", "_compute_baseline_survival", "(", "self", ")", ":", "survival_df", "=", "np", ".", "exp", "(", "-", "self", ".", "baseline_cumulative_hazard_", ")", "if", "self", ".", "strata", "is", "None", ":", "survival_df", ".", "columns", "=", "[", "\"baseline survival\"", "]", "return", "survival_df" ]
Importantly, this agrees with what the KaplanMeierFitter produces. Ex: Example ------- >>> from lifelines.datasets import load_rossi >>> from lifelines import CoxPHFitter, KaplanMeierFitter >>> rossi = load_rossi() >>> kmf = KaplanMeierFitter() >>> kmf.fit(rossi['week'], rossi['arrest']) >>> rossi2 = rossi[['week', 'arrest']].copy() >>> rossi2['var1'] = np.random.randn(432) >>> cph = CoxPHFitter() >>> cph.fit(rossi2, 'week', 'arrest') >>> ax = cph.baseline_survival_.plot() >>> kmf.plot(ax=ax)
[ "Importantly", "this", "agrees", "with", "what", "the", "KaplanMeierFitter", "produces", ".", "Ex", ":" ]
python
train
37.727273
scrapinghub/dateparser
dateparser/languages/dictionary.py
https://github.com/scrapinghub/dateparser/blob/11a761c99d3ee522a3c63756b70c106a579e8b5c/dateparser/languages/dictionary.py#L96-L114
def are_tokens_valid(self, tokens):
    """
    Check if tokens are valid tokens for the locale.

    :param tokens:
        a list of string or unicode tokens.
    :type tokens: list

    :return: True if tokens are valid, False otherwise.
    """
    match_relative_regex = self._get_match_relative_regex_cache()
    for token in tokens:
        if any([match_relative_regex.match(token), token in self, token.isdigit()]):
            continue
        else:
            return False
    else:
        return True
[ "def", "are_tokens_valid", "(", "self", ",", "tokens", ")", ":", "match_relative_regex", "=", "self", ".", "_get_match_relative_regex_cache", "(", ")", "for", "token", "in", "tokens", ":", "if", "any", "(", "[", "match_relative_regex", ".", "match", "(", "token", ")", ",", "token", "in", "self", ",", "token", ".", "isdigit", "(", ")", "]", ")", ":", "continue", "else", ":", "return", "False", "else", ":", "return", "True" ]
Check if tokens are valid tokens for the locale. :param tokens: a list of string or unicode tokens. :type tokens: list :return: True if tokens are valid, False otherwise.
[ "Check", "if", "tokens", "are", "valid", "tokens", "for", "the", "locale", "." ]
python
test
30.315789
MSchnei/pyprf_feature
pyprf_feature/analysis/old/pRF_mdlCrt.py
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_mdlCrt.py#L138-L175
def crtGauss2D(varSizeX, varSizeY, varPosX, varPosY, varSd):
    """Create 2D Gaussian kernel.

    Parameters
    ----------
    varSizeX : int, positive
        Width of the visual field.
    varSizeY : int, positive
        Height of the visual field.
    varPosX : int, positive
        X position of centre of 2D Gauss.
    varPosY : int, positive
        Y position of centre of 2D Gauss.
    varSd : float, positive
        Standard deviation of 2D Gauss.

    Returns
    -------
    aryGauss : 2d numpy array, shape [varSizeX, varSizeY]
        2d Gaussian.

    Reference
    ---------
    [1]
    """
    varSizeX = int(varSizeX)
    varSizeY = int(varSizeY)

    # aryX and aryY are in reversed order, this seems to be necessary:
    aryY, aryX = sp.mgrid[0:varSizeX, 0:varSizeY]

    # The actual creation of the Gaussian array:
    aryGauss = (
        (np.square((aryX - varPosX)) + np.square((aryY - varPosY))) /
        (2.0 * np.square(varSd))
        )
    aryGauss = np.exp(-aryGauss) / (2 * np.pi * np.square(varSd))

    return aryGauss
[ "def", "crtGauss2D", "(", "varSizeX", ",", "varSizeY", ",", "varPosX", ",", "varPosY", ",", "varSd", ")", ":", "varSizeX", "=", "int", "(", "varSizeX", ")", "varSizeY", "=", "int", "(", "varSizeY", ")", "# aryX and aryY are in reversed order, this seems to be necessary:", "aryY", ",", "aryX", "=", "sp", ".", "mgrid", "[", "0", ":", "varSizeX", ",", "0", ":", "varSizeY", "]", "# The actual creation of the Gaussian array:", "aryGauss", "=", "(", "(", "np", ".", "square", "(", "(", "aryX", "-", "varPosX", ")", ")", "+", "np", ".", "square", "(", "(", "aryY", "-", "varPosY", ")", ")", ")", "/", "(", "2.0", "*", "np", ".", "square", "(", "varSd", ")", ")", ")", "aryGauss", "=", "np", ".", "exp", "(", "-", "aryGauss", ")", "/", "(", "2", "*", "np", ".", "pi", "*", "np", ".", "square", "(", "varSd", ")", ")", "return", "aryGauss" ]
Create 2D Gaussian kernel. Parameters ---------- varSizeX : int, positive Width of the visual field. varSizeY : int, positive Height of the visual field.. varPosX : int, positive X position of centre of 2D Gauss. varPosY : int, positive Y position of centre of 2D Gauss. varSd : float, positive Standard deviation of 2D Gauss. Returns ------- aryGauss : 2d numpy array, shape [varSizeX, varSizeY] 2d Gaussian. Reference --------- [1]
[ "Create", "2D", "Gaussian", "kernel", "." ]
python
train
27.526316
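A quick check of the kernel above (numpy as np and scipy as sp must be in scope, as the function body requires); the maximum should sit at the requested centre:

import numpy as np

gauss = crtGauss2D(64, 64, 32, 32, 8.0)
print(gauss.shape)                                      # (64, 64)
print(np.unravel_index(np.argmax(gauss), gauss.shape))  # peak at (32, 32)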
apache/spark
python/pyspark/sql/dataframe.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1941-L1962
def freqItems(self, cols, support=None):
    """
    Finding frequent items for columns, possibly with false positives. Using the
    frequent element count algorithm described in
    "https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
    :func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.

    .. note:: This function is meant for exploratory data analysis, as we make no
        guarantee about the backward compatibility of the schema of the resulting
        DataFrame.

    :param cols: Names of the columns to calculate frequent items for as a list or tuple of
        strings.
    :param support: The frequency with which to consider an item 'frequent'. Default is 1%.
        The support must be greater than 1e-4.
    """
    if isinstance(cols, tuple):
        cols = list(cols)
    if not isinstance(cols, list):
        raise ValueError("cols must be a list or tuple of column names as strings.")
    if not support:
        support = 0.01
    return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
[ "def", "freqItems", "(", "self", ",", "cols", ",", "support", "=", "None", ")", ":", "if", "isinstance", "(", "cols", ",", "tuple", ")", ":", "cols", "=", "list", "(", "cols", ")", "if", "not", "isinstance", "(", "cols", ",", "list", ")", ":", "raise", "ValueError", "(", "\"cols must be a list or tuple of column names as strings.\"", ")", "if", "not", "support", ":", "support", "=", "0.01", "return", "DataFrame", "(", "self", ".", "_jdf", ".", "stat", "(", ")", ".", "freqItems", "(", "_to_seq", "(", "self", ".", "_sc", ",", "cols", ")", ",", "support", ")", ",", "self", ".", "sql_ctx", ")" ]
Finding frequent items for columns, possibly with false positives. Using the frequent element count algorithm described in "https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou". :func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases. .. note:: This function is meant for exploratory data analysis, as we make no guarantee about the backward compatibility of the schema of the resulting DataFrame. :param cols: Names of the columns to calculate frequent items for as a list or tuple of strings. :param support: The frequency with which to consider an item 'frequent'. Default is 1%. The support must be greater than 1e-4.
[ "Finding", "frequent", "items", "for", "columns", "possibly", "with", "false", "positives", ".", "Using", "the", "frequent", "element", "count", "algorithm", "described", "in", "https", ":", "//", "doi", ".", "org", "/", "10", ".", "1145", "/", "762471", ".", "762473", "proposed", "by", "Karp", "Schenker", "and", "Papadimitriou", ".", ":", "func", ":", "DataFrame", ".", "freqItems", "and", ":", "func", ":", "DataFrameStatFunctions", ".", "freqItems", "are", "aliases", "." ]
python
train
53.045455
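A short example against a toy DataFrame, assuming an active SparkSession named spark; with support=0.5, only values occurring in at least half the rows are reported (modulo the algorithm's false positives):

df = spark.createDataFrame([(1, 2), (1, 4), (1, 2), (3, 2)], ["a", "b"])
df.freqItems(["a", "b"], support=0.5).show()
# one row of arrays, e.g. a_freqItems=[1], b_freqItems=[2]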
sibirrer/lenstronomy
lenstronomy/LensModel/Profiles/cnfw.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/cnfw.py#L247-L274
def cnfwAlpha(self, R, Rs, rho0, r_core, ax_x, ax_y):
    """
    deflection angle of NFW profile along the projection to coordinate axis

    :param R: radius of interest
    :type R: float/numpy array
    :param Rs: scale radius
    :type Rs: float
    :param rho0: density normalization (characteristic density)
    :type rho0: float
    :param r_core: core radius
    :type r_core: float>0
    :param ax_x: projection to the x-axis
    :type ax_x: same as R
    :param ax_y: projection to the y-axis
    :type ax_y: same as R
    :return: deflection components along the x- and y-axis
    """
    if isinstance(R, int) or isinstance(R, float):
        R = max(R, 0.00001)
    else:
        R[R <= 0.00001] = 0.00001
    x = R / Rs
    b = r_core * Rs ** -1
    b = max(b, 0.000001)
    gx = self._G(x, b)
    a = 4 * rho0 * Rs * gx / x ** 2
    return a * ax_x, a * ax_y
[ "def", "cnfwAlpha", "(", "self", ",", "R", ",", "Rs", ",", "rho0", ",", "r_core", ",", "ax_x", ",", "ax_y", ")", ":", "if", "isinstance", "(", "R", ",", "int", ")", "or", "isinstance", "(", "R", ",", "float", ")", ":", "R", "=", "max", "(", "R", ",", "0.00001", ")", "else", ":", "R", "[", "R", "<=", "0.00001", "]", "=", "0.00001", "x", "=", "R", "/", "Rs", "b", "=", "r_core", "*", "Rs", "**", "-", "1", "b", "=", "max", "(", "b", ",", "0.000001", ")", "gx", "=", "self", ".", "_G", "(", "x", ",", "b", ")", "a", "=", "4", "*", "rho0", "*", "Rs", "*", "gx", "/", "x", "**", "2", "return", "a", "*", "ax_x", ",", "a", "*", "ax_y" ]
deflection angel of NFW profile along the projection to coordinate axis :param R: radius of interest :type R: float/numpy array :param Rs: scale radius :type Rs: float :param rho0: density normalization (characteristic density) :type rho0: float :param r200: radius of (sub)halo :type r200: float>0 :param axis: projection to either x- or y-axis :type axis: same as R :return: Epsilon(R) projected density at radius R
[ "deflection", "angel", "of", "NFW", "profile", "along", "the", "projection", "to", "coordinate", "axis" ]
python
train
31.25
pszafer/epson_projector
epson_projector/main.py
https://github.com/pszafer/epson_projector/blob/b8a10ace56e0a5cf858546041819c0e7ebca208f/epson_projector/main.py#L89-L104
async def get_property(self, command):
    """Get property state from device."""
    _LOGGER.debug("Getting property %s", command)
    if self.__checkLock():
        return BUSY
    timeout = self.__get_timeout(command)
    response = await self.send_request(
        timeout=timeout,
        params=EPSON_KEY_COMMANDS[command],
        type='json_query')
    if not response:
        return False
    try:
        return response['projector']['feature']['reply']
    except KeyError:
        return BUSY
[ "async", "def", "get_property", "(", "self", ",", "command", ")", ":", "_LOGGER", ".", "debug", "(", "\"Getting property %s\"", ",", "command", ")", "if", "self", ".", "__checkLock", "(", ")", ":", "return", "BUSY", "timeout", "=", "self", ".", "__get_timeout", "(", "command", ")", "response", "=", "await", "self", ".", "send_request", "(", "timeout", "=", "timeout", ",", "params", "=", "EPSON_KEY_COMMANDS", "[", "command", "]", ",", "type", "=", "'json_query'", ")", "if", "not", "response", ":", "return", "False", "try", ":", "return", "response", "[", "'projector'", "]", "[", "'feature'", "]", "[", "'reply'", "]", "except", "KeyError", ":", "return", "BUSY" ]
Get property state from device.
[ "Get", "property", "state", "from", "device", "." ]
python
train
34.3125
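Being a coroutine, get_property has to be awaited; a minimal sketch, assuming a projector instance of the class above built elsewhere and that 'PWR' is one of the keys in EPSON_KEY_COMMANDS:

async def query_power(projector):
    state = await projector.get_property('PWR')  # the 'PWR' key is an assumption
    print(state)                                 # BUSY, False, or the device reply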
tariqdaouda/rabaDB
rabaDB/filters.py
https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/filters.py#L51-L69
def reset(self, rabaClass, namespace = None) :
    """rabaClass can either be a raba class or a string of a raba class name. In the latter
    case you must provide the namespace argument. If it's a Raba Class the argument is ignored.
    If you fear circular imports use strings"""
    if type(rabaClass) is types.StringType :
        self._raba_namespace = namespace
        self.con = stp.RabaConnection(self._raba_namespace)
        self.rabaClass = self.con.getClass(rabaClass)
    else :
        self.rabaClass = rabaClass
        self._raba_namespace = self.rabaClass._raba_namespace
        self.con = stp.RabaConnection(self._raba_namespace)

    self.filters = []
    self.tables = set()

    #self.fctPattern = re.compile("\s*([^\s]+)\s*\(\s*([^\s]+)\s*\)\s*([=><])\s*([^\s]+)\s*")
    self.fieldPattern = re.compile("\s*([^\s\(\)]+)\s*([=><]|([L|l][I|i][K|k][E|e]))\s*(.+)")
    self.operators = set(['LIKE', '=', '<', '>', '=', '>=', '<=', '<>', '!=', 'IS'])
[ "def", "reset", "(", "self", ",", "rabaClass", ",", "namespace", "=", "None", ")", ":", "if", "type", "(", "rabaClass", ")", "is", "types", ".", "StringType", ":", "self", ".", "_raba_namespace", "=", "namespace", "self", ".", "con", "=", "stp", ".", "RabaConnection", "(", "self", ".", "_raba_namespace", ")", "self", ".", "rabaClass", "=", "self", ".", "con", ".", "getClass", "(", "rabaClass", ")", "else", ":", "self", ".", "rabaClass", "=", "rabaClass", "self", ".", "_raba_namespace", "=", "self", ".", "rabaClass", ".", "_raba_namespace", "self", ".", "con", "=", "stp", ".", "RabaConnection", "(", "self", ".", "_raba_namespace", ")", "self", ".", "filters", "=", "[", "]", "self", ".", "tables", "=", "set", "(", ")", "#self.fctPattern = re.compile(\"\\s*([^\\s]+)\\s*\\(\\s*([^\\s]+)\\s*\\)\\s*([=><])\\s*([^\\s]+)\\s*\")", "self", ".", "fieldPattern", "=", "re", ".", "compile", "(", "\"\\s*([^\\s\\(\\)]+)\\s*([=><]|([L|l][I|i][K|k][E|e]))\\s*(.+)\"", ")", "self", ".", "operators", "=", "set", "(", "[", "'LIKE'", ",", "'='", ",", "'<'", ",", "'>'", ",", "'='", ",", "'>='", ",", "'<='", ",", "'<>'", ",", "'!='", ",", "'IS'", "]", ")" ]
rabaClass can either be a raba class of a string of a raba class name. In the latter case you must provide the namespace argument. If it's a Raba Class the argument is ignored. If you fear cicular imports use strings
[ "rabaClass", "can", "either", "be", "a", "raba", "class", "of", "a", "string", "of", "a", "raba", "class", "name", ".", "In", "the", "latter", "case", "you", "must", "provide", "the", "namespace", "argument", ".", "If", "it", "s", "a", "Raba", "Class", "the", "argument", "is", "ignored", ".", "If", "you", "fear", "cicular", "imports", "use", "strings" ]
python
train
47.368421
bokeh/bokeh
scripts/issues.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/scripts/issues.py#L123-L127
def relevent_issue(issue, after):
    """Returns True iff this issue is something we should show in the changelog."""
    return (closed_issue(issue, after) and
            issue_completed(issue) and
            issue_section(issue))
[ "def", "relevent_issue", "(", "issue", ",", "after", ")", ":", "return", "(", "closed_issue", "(", "issue", ",", "after", ")", "and", "issue_completed", "(", "issue", ")", "and", "issue_section", "(", "issue", ")", ")" ]
Returns True iff this issue is something we should show in the changelog.
[ "Returns", "True", "iff", "this", "issue", "is", "something", "we", "should", "show", "in", "the", "changelog", "." ]
python
train
45.8
SITools2/pySitools2_1.0
sitools2/core/pySitools2.py
https://github.com/SITools2/pySitools2_1.0/blob/acd13198162456ba401a0b923af989bb29feb3b6/sitools2/core/pySitools2.py#L96-L112
def __parseResponseServer(self):
    """Parses the response of the server.

    Exception
    ---------
    A Sitools2Exception is raised when the server does not send back a success."""
    self.__logger.debug(Sitools2Abstract.getBaseUrl(self) + SITools2Instance.PROJECTS_URI)
    result = Util.retrieveJsonResponseFromServer(Sitools2Abstract.getBaseUrl(self) + SITools2Instance.PROJECTS_URI)
    isSuccess = result['success']
    if isSuccess:
        data = result['data']
        self.__logger.debug(data)
        for i, dataItem in enumerate(data):
            project = Project(Sitools2Abstract.getBaseUrl(self), dataItem)
            self.__projects.append(project)
    else:
        raise Sitools2Exception("Error when loading the server response")
[ "def", "__parseResponseServer", "(", "self", ")", ":", "self", ".", "__logger", ".", "debug", "(", "Sitools2Abstract", ".", "getBaseUrl", "(", "self", ")", "+", "SITools2Instance", ".", "PROJECTS_URI", ")", "result", "=", "Util", ".", "retrieveJsonResponseFromServer", "(", "Sitools2Abstract", ".", "getBaseUrl", "(", "self", ")", "+", "SITools2Instance", ".", "PROJECTS_URI", ")", "isSuccess", "=", "result", "[", "'success'", "]", "if", "isSuccess", ":", "data", "=", "result", "[", "'data'", "]", "self", ".", "__logger", ".", "debug", "(", "data", ")", "for", "i", ",", "dataItem", "in", "enumerate", "(", "data", ")", ":", "project", "=", "Project", "(", "Sitools2Abstract", ".", "getBaseUrl", "(", "self", ")", ",", "dataItem", ")", "self", ".", "__projects", ".", "append", "(", "project", ")", "else", ":", "raise", "Sitools2Exception", "(", "\"Error when loading the server response\"", ")" ]
Parses the response of the server. Exception --------- A Sitools2Exception is raised when the server does not send back a success.
[ "Parses", "the", "response", "of", "the", "server", ".", "Exception", "---------", "A", "Sitools2Exception", "is", "raised", "when", "the", "server", "does", "not", "send", "back", "a", "success", "." ]
python
train
48.705882
jepegit/cellpy
cellpy/readers/instruments/custom.py
https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/readers/instruments/custom.py#L344-L356
def load(self, file_name):
    """Load a raw data-file

    Args:
        file_name (path)

    Returns:
        loaded test
    """
    new_rundata = self.loader(file_name)
    new_rundata = self.inspect(new_rundata)
    return new_rundata
[ "def", "load", "(", "self", ",", "file_name", ")", ":", "new_rundata", "=", "self", ".", "loader", "(", "file_name", ")", "new_rundata", "=", "self", ".", "inspect", "(", "new_rundata", ")", "return", "new_rundata" ]
Load a raw data-file Args: file_name (path) Returns: loaded test
[ "Load", "a", "raw", "data", "-", "file" ]
python
train
20.384615
all-umass/graphs
graphs/reorder.py
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/reorder.py#L24-L30
def permute_graph(G, order):
    '''Reorder the graph's vertices, returning a copy of the input graph.
    order : integer array-like, some permutation of range(G.num_vertices()).
    '''
    adj = G.matrix('dense')
    adj = adj[np.ix_(order, order)]
    return Graph.from_adj_matrix(adj)
[ "def", "permute_graph", "(", "G", ",", "order", ")", ":", "adj", "=", "G", ".", "matrix", "(", "'dense'", ")", "adj", "=", "adj", "[", "np", ".", "ix_", "(", "order", ",", "order", ")", "]", "return", "Graph", ".", "from_adj_matrix", "(", "adj", ")" ]
Reorder the graph's vertices, returning a copy of the input graph. order : integer array-like, some permutation of range(G.num_vertices()).
[ "Reorder", "the", "graph", "s", "vertices", "returning", "a", "copy", "of", "the", "input", "graph", ".", "order", ":", "integer", "array", "-", "like", "some", "permutation", "of", "range", "(", "G", ".", "num_vertices", "()", ")", "." ]
python
train
38.714286
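The core of the function is the np.ix_ trick, which permutes rows and columns together so edge weights follow their endpoints; a standalone illustration:

import numpy as np

adj = np.arange(9).reshape(3, 3)
order = [2, 0, 1]
print(adj[np.ix_(order, order)])
# row i and column i move together, so adj_new[a, b] == adj[order[a], order[b]]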
dw/mitogen
mitogen/ssh.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/ssh.py#L189-L200
def _init_create_child(self):
    """
    Initialize the base class :attr:`create_child` and
    :attr:`create_child_args` according to whether we need a PTY or not.
    """
    if self._requires_pty():
        self.create_child = mitogen.parent.hybrid_tty_create_child
    else:
        self.create_child = mitogen.parent.create_child
        self.create_child_args = {
            'stderr_pipe': True,
        }
[ "def", "_init_create_child", "(", "self", ")", ":", "if", "self", ".", "_requires_pty", "(", ")", ":", "self", ".", "create_child", "=", "mitogen", ".", "parent", ".", "hybrid_tty_create_child", "else", ":", "self", ".", "create_child", "=", "mitogen", ".", "parent", ".", "create_child", "self", ".", "create_child_args", "=", "{", "'stderr_pipe'", ":", "True", ",", "}" ]
Initialize the base class :attr:`create_child` and :attr:`create_child_args` according to whether we need a PTY or not.
[ "Initialize", "the", "base", "class", ":", "attr", ":", "create_child", "and", ":", "attr", ":", "create_child_args", "according", "to", "whether", "we", "need", "a", "PTY", "or", "not", "." ]
python
train
37.166667
sibirrer/lenstronomy
lenstronomy/LensModel/Profiles/spp.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/spp.py#L123-L132
def mass_3d(self, r, rho0, gamma):
    """
    mass enclosed in a 3d sphere of radius r

    :param r: radius
    :param rho0: density normalization
    :param gamma: power-law slope
    :return: enclosed mass
    """
    mass_3d = 4 * np.pi * rho0 / (-gamma + 3) * r ** (-gamma + 3)
    return mass_3d
[ "def", "mass_3d", "(", "self", ",", "r", ",", "rho0", ",", "gamma", ")", ":", "mass_3d", "=", "4", "*", "np", ".", "pi", "*", "rho0", "/", "(", "-", "gamma", "+", "3", ")", "*", "r", "**", "(", "-", "gamma", "+", "3", ")", "return", "mass_3d" ]
mass enclosed a 3d sphere or radius r :param r: :param a: :param s: :return:
[ "mass", "enclosed", "a", "3d", "sphere", "or", "radius", "r", ":", "param", "r", ":", ":", "param", "a", ":", ":", "param", "s", ":", ":", "return", ":" ]
python
train
25.8
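The formula follows from integrating the power-law density rho(r') = rho0 * r'**(-gamma) over the sphere: M(r) = integral from 0 to r of 4*pi*r'**2 * rho(r') dr' = 4*pi*rho0 * r**(3-gamma) / (3-gamma), valid for gamma < 3. A quick numerical cross-check:

import numpy as np
from scipy import integrate

rho0, gamma, r = 2.0, 1.5, 3.0
numeric, _ = integrate.quad(lambda x: 4 * np.pi * rho0 * x ** (2 - gamma), 0, r)
closed_form = 4 * np.pi * rho0 / (3 - gamma) * r ** (3 - gamma)
print(np.isclose(numeric, closed_form))  # True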
openstack/networking-cisco
networking_cisco/neutronclient/hostingdevicescheduler.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/neutronclient/hostingdevicescheduler.py#L109-L114
def disassociate_hosting_device_with_config_agent(
        self, client, config_agent_id, hosting_device_id):
    """Disassociates a hosting_device with a config agent."""
    return client.delete((ConfigAgentHandlingHostingDevice.resource_path +
                          CFG_AGENT_HOSTING_DEVICES + "/%s") % (
        config_agent_id, hosting_device_id))
[ "def", "disassociate_hosting_device_with_config_agent", "(", "self", ",", "client", ",", "config_agent_id", ",", "hosting_device_id", ")", ":", "return", "client", ".", "delete", "(", "(", "ConfigAgentHandlingHostingDevice", ".", "resource_path", "+", "CFG_AGENT_HOSTING_DEVICES", "+", "\"/%s\"", ")", "%", "(", "config_agent_id", ",", "hosting_device_id", ")", ")" ]
Disassociates a hosting_device with a config agent.
[ "Disassociates", "a", "hosting_device", "with", "a", "config", "agent", "." ]
python
train
61.833333
airspeed-velocity/asv
asv/feed.py
https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/feed.py#L172-L198
def _etree_py26_write(f, tree):
    """
    Compatibility workaround for ElementTree shipped with py2.6
    """
    f.write("<?xml version='1.0' encoding='utf-8'?>\n".encode('utf-8'))

    if etree.VERSION[:3] == '1.2':
        def fixtag(tag, namespaces):
            if tag == XML_NS + 'lang':
                return 'xml:lang', ""
            if '}' in tag:
                j = tag.index('}') + 1
                tag = tag[j:]
            xmlns = ''
            if tag == 'feed':
                xmlns = ('xmlns', str('http://www.w3.org/2005/Atom'))
                namespaces['http://www.w3.org/2005/Atom'] = 'xmlns'
            return tag, xmlns
    else:
        fixtag = etree.fixtag

    old_fixtag = etree.fixtag
    etree.fixtag = fixtag
    try:
        tree.write(f, encoding=str('utf-8'))
    finally:
        etree.fixtag = old_fixtag
[ "def", "_etree_py26_write", "(", "f", ",", "tree", ")", ":", "f", ".", "write", "(", "\"<?xml version='1.0' encoding='utf-8'?>\\n\"", ".", "encode", "(", "'utf-8'", ")", ")", "if", "etree", ".", "VERSION", "[", ":", "3", "]", "==", "'1.2'", ":", "def", "fixtag", "(", "tag", ",", "namespaces", ")", ":", "if", "tag", "==", "XML_NS", "+", "'lang'", ":", "return", "'xml:lang'", ",", "\"\"", "if", "'}'", "in", "tag", ":", "j", "=", "tag", ".", "index", "(", "'}'", ")", "+", "1", "tag", "=", "tag", "[", "j", ":", "]", "xmlns", "=", "''", "if", "tag", "==", "'feed'", ":", "xmlns", "=", "(", "'xmlns'", ",", "str", "(", "'http://www.w3.org/2005/Atom'", ")", ")", "namespaces", "[", "'http://www.w3.org/2005/Atom'", "]", "=", "'xmlns'", "return", "tag", ",", "xmlns", "else", ":", "fixtag", "=", "etree", ".", "fixtag", "old_fixtag", "=", "etree", ".", "fixtag", "etree", ".", "fixtag", "=", "fixtag", "try", ":", "tree", ".", "write", "(", "f", ",", "encoding", "=", "str", "(", "'utf-8'", ")", ")", "finally", ":", "etree", ".", "fixtag", "=", "old_fixtag" ]
Compatibility workaround for ElementTree shipped with py2.6
[ "Compatibility", "workaround", "for", "ElementTree", "shipped", "with", "py2", ".", "6" ]
python
train
30.592593
michaelhelmick/lassie
lassie/core.py
https://github.com/michaelhelmick/lassie/blob/b929f78d7e545cff5fb42eb5edfcaf396456f1ee/lassie/core.py#L97-L223
def fetch(self, url, open_graph=None, twitter_card=None, touch_icon=None,
          favicon=None, all_images=None, parser=None,
          handle_file_content=None, canonical=None):
    """Retrieves content from the specified url, parses it, and returns
    a beautifully crafted dictionary of important information about that
    web page.

    Priority tree is as follows:
        1. OEmbed
        2. Open Graph
        3. Twitter Card
        4. Other meta content (i.e. description, keywords)

    :param url: URL to send a GET request to
    :param open_graph: (optional) If ``True``, filters web page content for Open Graph meta tags. The content of these properties have top priority on return values.
    :type open_graph: bool
    :param twitter_card: (optional) If ``True``, filters web page content for Twitter Card meta tags
    :type twitter_card: bool
    :param touch_icon: (optional) If ``True``, retrieves Apple touch icons and includes them in the response ``images`` array
    :type touch_icon: bool
    :param favicon: (optional) If ``True``, retrieves any favicon images and includes them in the response ``images`` array
    :type favicon: bool
    :param canonical: (optional) If ``True``, retrieves canonical url from meta tags. Default: False
    :type canonical: bool
    :param all_images: (optional) If ``True``, retrieves images inside web pages body and includes them in the response ``images`` array. Default: False
    :type all_images: bool
    :param parser: (optional) String reference for the parser that BeautifulSoup will use
    :type parser: string
    :param handle_file_content: (optional) If ``True``, lassie will return a generic response when a file is fetched. Default: False
    :type handle_file_content: bool
    """
    # Set params, method params have priority over class params
    open_graph = merge_settings(open_graph, self.open_graph)
    twitter_card = merge_settings(twitter_card, self.twitter_card)
    touch_icon = merge_settings(touch_icon, self.touch_icon)
    favicon = merge_settings(favicon, self.favicon)
    canonical = merge_settings(canonical, self.canonical)
    all_images = merge_settings(all_images, self.all_images)
    parser = merge_settings(parser, self.parser)
    handle_file_content = merge_settings(handle_file_content, self.handle_file_content)

    data = {
        'images': [],
        'videos': [],
    }

    has_file_content = False
    content_type = None
    if handle_file_content:
        headers, status_code = self._retrieve_headers(url)
        content_type = headers.get('Content-Type')
        has_file_content = content_type and not 'text/html' in content_type

    if has_file_content and content_type:
        has_image_content = content_type in IMAGE_MIMETYPES
        if has_image_content:
            parsed_url = urlparse(url)
            # TODO: if the url doesn't have an extension, maybe we should match it up to the mimetype and append an ext?
            data['title'] = basename(parsed_url.path.lstrip('/'))
            data['url'] = url
            data['images'].append({
                'type': 'body_image',
                'src': url,
            })
    else:
        try:
            oembed_data, status_code = self._retrieve_oembed_data(url)
            parse_oembed_data(oembed_data, data)
        except LassieError:
            oembed_data = None

        html, status_code = self._retrieve_content(url)
        if not html and not oembed_data:
            raise LassieError('There was no content to parse.')

        if '<html' not in html:
            html = re.sub(r'(?:<!DOCTYPE(?:\s\w)?>(?:<head>)?)', '<!DOCTYPE html><html>', html)

        soup = BeautifulSoup(clean_text(html), parser)

        self._filter_amp_data(soup, data, url, all_images)

        if open_graph:
            self._filter_meta_data('open_graph', soup, data, url)

        if twitter_card:
            self._filter_meta_data('twitter_card', soup, data)

        self._filter_meta_data('generic', soup, data)

        if touch_icon:
            self._filter_link_tag_data('touch_icon', soup, data, url)

        if favicon:
            self._filter_link_tag_data('favicon', soup, data, url)

        if canonical:
            self._filter_link_tag_data('canonical', soup, data, url)

        if all_images:
            # Maybe filter out 1x1, no "good" way to do this if image doesn't supply
            # width/height.
            self._find_all_images(soup, data, url)

        # TODO: Find a good place for setting url, title and locale
        if soup.html.get('lang'):
            lang = soup.html.get('lang')
        else:
            lang = soup.html.get('xml:lang')

        if lang and ('locale' not in data):
            locale = normalize_locale(lang)
            if locale:
                data['locale'] = locale

        data_url = data.get('url')
        if not data_url or (data_url in url and len(data_url) < len(url)):
            data['url'] = url

        if ('title' not in data or not data.get('title')) and hasattr(soup.title, 'string'):
            data['title'] = soup.title.string

    data['status_code'] = status_code

    return data
[ "def", "fetch", "(", "self", ",", "url", ",", "open_graph", "=", "None", ",", "twitter_card", "=", "None", ",", "touch_icon", "=", "None", ",", "favicon", "=", "None", ",", "all_images", "=", "None", ",", "parser", "=", "None", ",", "handle_file_content", "=", "None", ",", "canonical", "=", "None", ")", ":", "# Set params, method params have priority over class params", "open_graph", "=", "merge_settings", "(", "open_graph", ",", "self", ".", "open_graph", ")", "twitter_card", "=", "merge_settings", "(", "twitter_card", ",", "self", ".", "twitter_card", ")", "touch_icon", "=", "merge_settings", "(", "touch_icon", ",", "self", ".", "touch_icon", ")", "favicon", "=", "merge_settings", "(", "favicon", ",", "self", ".", "favicon", ")", "canonical", "=", "merge_settings", "(", "canonical", ",", "self", ".", "canonical", ")", "all_images", "=", "merge_settings", "(", "all_images", ",", "self", ".", "all_images", ")", "parser", "=", "merge_settings", "(", "parser", ",", "self", ".", "parser", ")", "handle_file_content", "=", "merge_settings", "(", "handle_file_content", ",", "self", ".", "handle_file_content", ")", "data", "=", "{", "'images'", ":", "[", "]", ",", "'videos'", ":", "[", "]", ",", "}", "has_file_content", "=", "False", "content_type", "=", "None", "if", "handle_file_content", ":", "headers", ",", "status_code", "=", "self", ".", "_retrieve_headers", "(", "url", ")", "content_type", "=", "headers", ".", "get", "(", "'Content-Type'", ")", "has_file_content", "=", "content_type", "and", "not", "'text/html'", "in", "content_type", "if", "has_file_content", "and", "content_type", ":", "has_image_content", "=", "content_type", "in", "IMAGE_MIMETYPES", "if", "has_image_content", ":", "parsed_url", "=", "urlparse", "(", "url", ")", "data", "[", "'title'", "]", "=", "basename", "(", "parsed_url", ".", "path", ".", "lstrip", "(", "'/'", ")", ")", "# TODO: if the url doesn't have an extension, maybe we should match it up to the mimetype and append an ext?", "data", "[", "'url'", "]", "=", "url", "data", "[", "'images'", "]", ".", "append", "(", "{", "'type'", ":", "'body_image'", ",", "'src'", ":", "url", ",", "}", ")", "else", ":", "try", ":", "oembed_data", ",", "status_code", "=", "self", ".", "_retrieve_oembed_data", "(", "url", ")", "parse_oembed_data", "(", "oembed_data", ",", "data", ")", "except", "LassieError", ":", "oembed_data", "=", "None", "html", ",", "status_code", "=", "self", ".", "_retrieve_content", "(", "url", ")", "if", "not", "html", "and", "not", "oembed_data", ":", "raise", "LassieError", "(", "'There was no content to parse.'", ")", "if", "'<html'", "not", "in", "html", ":", "html", "=", "re", ".", "sub", "(", "r'(?:<!DOCTYPE(?:\\s\\w)?>(?:<head>)?)'", ",", "'<!DOCTYPE html><html>'", ",", "html", ")", "soup", "=", "BeautifulSoup", "(", "clean_text", "(", "html", ")", ",", "parser", ")", "self", ".", "_filter_amp_data", "(", "soup", ",", "data", ",", "url", ",", "all_images", ")", "if", "open_graph", ":", "self", ".", "_filter_meta_data", "(", "'open_graph'", ",", "soup", ",", "data", ",", "url", ")", "if", "twitter_card", ":", "self", ".", "_filter_meta_data", "(", "'twitter_card'", ",", "soup", ",", "data", ")", "self", ".", "_filter_meta_data", "(", "'generic'", ",", "soup", ",", "data", ")", "if", "touch_icon", ":", "self", ".", "_filter_link_tag_data", "(", "'touch_icon'", ",", "soup", ",", "data", ",", "url", ")", "if", "favicon", ":", "self", ".", "_filter_link_tag_data", "(", "'favicon'", ",", "soup", ",", "data", ",", "url", ")", "if", "canonical", ":", 
"self", ".", "_filter_link_tag_data", "(", "'canonical'", ",", "soup", ",", "data", ",", "url", ")", "if", "all_images", ":", "# Maybe filter out 1x1, no \"good\" way to do this if image doesn't supply", "# width/height.", "self", ".", "_find_all_images", "(", "soup", ",", "data", ",", "url", ")", "# TODO: Find a good place for setting url, title and locale", "if", "soup", ".", "html", ".", "get", "(", "'lang'", ")", ":", "lang", "=", "soup", ".", "html", ".", "get", "(", "'lang'", ")", "else", ":", "lang", "=", "soup", ".", "html", ".", "get", "(", "'xml:lang'", ")", "if", "lang", "and", "(", "'locale'", "not", "in", "data", ")", ":", "locale", "=", "normalize_locale", "(", "lang", ")", "if", "locale", ":", "data", "[", "'locale'", "]", "=", "locale", "data_url", "=", "data", ".", "get", "(", "'url'", ")", "if", "not", "data_url", "or", "(", "data_url", "in", "url", "and", "len", "(", "data_url", ")", "<", "len", "(", "url", ")", ")", ":", "data", "[", "'url'", "]", "=", "url", "if", "(", "'title'", "not", "in", "data", "or", "not", "data", ".", "get", "(", "'title'", ")", ")", "and", "hasattr", "(", "soup", ".", "title", ",", "'string'", ")", ":", "data", "[", "'title'", "]", "=", "soup", ".", "title", ".", "string", "data", "[", "'status_code'", "]", "=", "status_code", "return", "data" ]
Retrieves content from the specified url, parses it, and returns a beautifully crafted dictionary of important information about that web page. Priority tree is as follows: 1. OEmbed 2. Open Graph 3. Twitter Card 4. Other meta content (i.e. description, keywords) :param url: URL to send a GET request to :param open_graph: (optional) If ``True``, filters web page content for Open Graph meta tags. The content of these properties has top priority on return values. :type open_graph: bool :param twitter_card: (optional) If ``True``, filters web page content for Twitter Card meta tags :type twitter_card: bool :param touch_icon: (optional) If ``True``, retrieves Apple touch icons and includes them in the response ``images`` array :type touch_icon: bool :param favicon: (optional) If ``True``, retrieves any favicon images and includes them in the response ``images`` array :type favicon: bool :param canonical: (optional) If ``True``, retrieves canonical url from meta tags. Default: False :type canonical: bool :param all_images: (optional) If ``True``, retrieves images inside the web page body and includes them in the response ``images`` array. Default: False :type all_images: bool :param parser: (optional) String reference for the parser that BeautifulSoup will use :type parser: string :param handle_file_content: (optional) If ``True``, lassie will return a generic response when a file is fetched. Default: False :type handle_file_content: bool
[ "Retrieves", "content", "from", "the", "specified", "url", "parses", "it", "and", "returns", "a", "beautifully", "crafted", "dictionary", "of", "important", "information", "about", "that", "web", "page", "." ]
python
train
42.76378
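A minimal usage sketch for the fetch method above. It assumes the method lives on lassie's client class in lassie/core.py (the class name, `Lassie`, is not shown in this record and is an assumption):

from lassie import Lassie  # assumed import path for the client class

client = Lassie()
data = client.fetch('https://example.com', open_graph=True, all_images=True)
print(data.get('title'), data.get('url'), len(data.get('images', [])))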
bitesofcode/projexui
projexui/widgets/xpopupwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xpopupwidget.py#L914-L923
def reject(self): """ Emits the rejected signal and closes the popup. """ self._result = 0 if not self.signalsBlocked(): self.rejected.emit() if self.autoCloseOnReject(): self.close()
[ "def", "reject", "(", "self", ")", ":", "self", ".", "_result", "=", "0", "if", "not", "self", ".", "signalsBlocked", "(", ")", ":", "self", ".", "rejected", ".", "emit", "(", ")", "if", "self", ".", "autoCloseOnReject", "(", ")", ":", "self", ".", "close", "(", ")" ]
Emits the rejected signal and closes the popup.
[ "Emits", "the", "accepted", "signal", "and", "closes", "the", "popup", "." ]
python
train
26.4
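A hedged sketch of observing the signal that `reject` actually emits (a running Qt application is assumed, and `popup` stands for an XPopupWidget instance):

popup.rejected.connect(lambda: print('popup rejected'))
popup.reject()  # emits rejected, then closes if autoCloseOnReject() is True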
Iotic-Labs/py-IoticAgent
src/IoticAgent/IOT/RemotePoint.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/IOT/RemotePoint.py#L112-L123
def get_recent_async(self, count, callback): """Similar to `get_recent` except instead of returning an iterable, passes each dict to the given function which must accept a single argument. Returns the request. `callback` (mandatory) (function) instead of returning an iterable, pass each dict (as described above) to the given function which must accept a single argument. Nothing is returned. """ validate_nonnegative_int(count, 'count') Validation.callable_check(callback, allow_none=True) evt = self._client._request_sub_recent(self.subid, count=count) self._client._add_recent_cb_for(evt, callback) return evt
[ "def", "get_recent_async", "(", "self", ",", "count", ",", "callback", ")", ":", "validate_nonnegative_int", "(", "count", ",", "'count'", ")", "Validation", ".", "callable_check", "(", "callback", ",", "allow_none", "=", "True", ")", "evt", "=", "self", ".", "_client", ".", "_request_sub_recent", "(", "self", ".", "subid", ",", "count", "=", "count", ")", "self", ".", "_client", ".", "_add_recent_cb_for", "(", "evt", ",", "callback", ")", "return", "evt" ]
Similar to `get_recent` except instead of returning an iterable, passes each dict to the given function which must accept a single argument. Returns the request. `callback` (mandatory) (function) instead of returning an iterable, pass each dict (as described above) to the given function which must accept a single argument. Nothing is returned.
[ "Similar", "to", "get_recent", "except", "instead", "of", "returning", "an", "iterable", "passes", "each", "dict", "to", "the", "given", "function", "which", "must", "accept", "a", "single", "argument", ".", "Returns", "the", "request", "." ]
python
train
56.916667
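Illustrative use of `get_recent_async`, assuming `point` is a RemotePoint obtained from a connected agent:

def on_sample(sample):
    # each recent data point arrives here as a dict; nothing is returned to the caller
    print(sample)

request = point.get_recent_async(10, on_sample)  # returns the request object, per the docstring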
angr/angr
angr/procedures/stubs/format_parser.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/procedures/stubs/format_parser.py#L528-L536
def _sim_strlen(self, str_addr): """ Return the result of invoking the strlen simprocedure on `str_addr`. """ from .. import SIM_PROCEDURES strlen = SIM_PROCEDURES['libc']['strlen'] return self.inline_call(strlen, str_addr).ret_expr
[ "def", "_sim_strlen", "(", "self", ",", "str_addr", ")", ":", "from", ".", ".", "import", "SIM_PROCEDURES", "strlen", "=", "SIM_PROCEDURES", "[", "'libc'", "]", "[", "'strlen'", "]", "return", "self", ".", "inline_call", "(", "strlen", ",", "str_addr", ")", ".", "ret_expr" ]
Return the result of invoking the strlen simprocedure on `str_addr`.
[ "Return", "the", "result", "of", "invoking", "the", "strlen", "simprocedure", "on", "str_addr", "." ]
python
train
30.444444
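The three-line wrapper above is a standard angr idiom. A sketch of the same pattern in a self-contained SimProcedure (the class name is illustrative):

import angr

class StringLengthProc(angr.SimProcedure):
    def run(self, str_addr):
        # invoke the strlen simprocedure inline, exactly as _sim_strlen does
        strlen = angr.SIM_PROCEDURES['libc']['strlen']
        return self.inline_call(strlen, str_addr).ret_expr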
hollenstein/maspy
maspy/xml.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/xml.py#L104-L126
def recRemoveTreeFormating(element): """Removes whitespace characters, which are leftovers from previous xml formatting. :param element: an instance of lxml.etree._Element str.strip() is applied to the "text" and the "tail" attribute of the element and recursively to all child elements. """ children = element.getchildren() if len(children) > 0: for child in children: recRemoveTreeFormating(child) if element.text is not None: if len(element.text.strip()) == 0: element.text = None else: element.text = element.text.strip() if element.tail is not None: if len(element.tail.strip()) == 0: element.tail = None else: element.tail = element.tail.strip()
[ "def", "recRemoveTreeFormating", "(", "element", ")", ":", "children", "=", "element", ".", "getchildren", "(", ")", "if", "len", "(", "children", ")", ">", "0", ":", "for", "child", "in", "children", ":", "recRemoveTreeFormating", "(", "child", ")", "if", "element", ".", "text", "is", "not", "None", ":", "if", "len", "(", "element", ".", "text", ".", "strip", "(", ")", ")", "==", "0", ":", "element", ".", "text", "=", "None", "else", ":", "element", ".", "text", "=", "element", ".", "text", ".", "strip", "(", ")", "if", "element", ".", "tail", "is", "not", "None", ":", "if", "len", "(", "element", ".", "tail", ".", "strip", "(", ")", ")", "==", "0", ":", "element", ".", "tail", "=", "None", "else", ":", "element", ".", "tail", "=", "element", ".", "tail", ".", "strip", "(", ")" ]
Removes whitespace characters, which are leftovers from previous xml formatting. :param element: an instance of lxml.etree._Element str.strip() is applied to the "text" and the "tail" attribute of the element and recursively to all child elements.
[ "Removes", "whitespace", "characters", "which", "are", "leftovers", "from", "previous", "xml", "formatting", "." ]
python
train
33.521739
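A runnable demonstration with lxml, which supplies the `lxml.etree._Element` instances the function expects:

from lxml import etree
from maspy.xml import recRemoveTreeFormating  # the function defined above

root = etree.fromstring('<a>\n  <b> text </b>\n</a>')
recRemoveTreeFormating(root)
print(etree.tostring(root))  # b'<a><b>text</b></a>'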
vinci1it2000/schedula
schedula/dispatcher.py
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/dispatcher.py#L726-L903
def add_dispatcher(self, dsp, inputs, outputs, dsp_id=None, input_domain=None, weight=None, inp_weight=None, description=None, include_defaults=False, await_domain=None, **kwargs): """ Add a single sub-dispatcher node to dispatcher. :param dsp: Child dispatcher that is added as sub-dispatcher node to the parent dispatcher. :type dsp: Dispatcher | dict[str, list] :param inputs: Inputs mapping. Data node ids from parent dispatcher to child sub-dispatcher. :type inputs: dict[str, str | list[str]] | tuple[str] | (str, ..., dict[str, str | list[str]]) :param outputs: Outputs mapping. Data node ids from child sub-dispatcher to parent dispatcher. :type outputs: dict[str, str | list[str]] | tuple[str] | (str, ..., dict[str, str | list[str]]) :param dsp_id: Sub-dispatcher node id. If None will be assigned as <dsp.name>. :type dsp_id: str, optional :param input_domain: A function that checks if input values satisfy the function domain. This can be any function that takes the a dictionary with the inputs of the sub-dispatcher node and returns True if input values satisfy the domain, otherwise False. .. note:: This function is invoked every time that a data node reach the sub-dispatcher node. :type input_domain: (dict) -> bool, optional :param weight: Node weight. It is a weight coefficient that is used by the dispatch algorithm to estimate the minimum workflow. :type weight: float, int, optional :param inp_weight: Edge weights from data nodes to the sub-dispatcher node. It is a dictionary (key=data node id) with the weight coefficients used by the dispatch algorithm to estimate the minimum workflow. :type inp_weight: dict[str, int | float], optional :param description: Sub-dispatcher node's description. :type description: str, optional :param include_defaults: If True the default values of the sub-dispatcher are added to the current dispatcher. :type include_defaults: bool, optional :param await_domain: If True the Dispatcher waits all input results before executing the `input_domain` function. If a number is defined this is used as `timeout` for `Future.result` method [default: True]. Note this is used when asynchronous or parallel execution is enable. :type await_domain: bool|int|float, optional :param kwargs: Set additional node attributes using key=value. :type kwargs: keyword arguments, optional :return: Sub-dispatcher node id. :rtype: str .. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_function`, :func:`add_from_lists` **--------------------------------------------------------------------** **Example**: .. testsetup:: >>> dsp = Dispatcher(name='Dispatcher') Create a sub-dispatcher:: >>> sub_dsp = Dispatcher() >>> sub_dsp.add_function('max', max, ['a', 'b'], ['c']) 'max' Add the sub-dispatcher to the parent dispatcher:: >>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher', dsp=sub_dsp, ... inputs={'A': 'a', 'B': 'b'}, ... outputs={'c': 'C'}) 'Sub-Dispatcher' Add a sub-dispatcher node with domain:: >>> def my_domain(kwargs): ... return kwargs['C'] > 3 ... >>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher with domain', ... dsp=sub_dsp, inputs={'C': 'a', 'D': 'b'}, ... outputs={('c', 'b'): ('E', 'E1')}, ... input_domain=my_domain) 'Sub-Dispatcher with domain' """ from .utils.blue import _init dsp = _init(dsp) if not isinstance(dsp, self.__class__): kw = dsp dsp = self.__class__( name=dsp_id or 'unknown', executor=self.executor ) dsp.add_from_lists(**kw) if not dsp_id: # Get the dsp id. dsp_id = dsp.name or 'unknown' if description is None: # Get description. 
description = dsp.__doc__ or None if not isinstance(inputs, dict): # Create the inputs dict. inputs = kk_dict(*inputs) if not isinstance(outputs, dict): # Create the outputs dict. outputs = kk_dict(*outputs) # Set zero as default input distances. # noinspection PyTypeChecker _weight_from = dict.fromkeys(inputs.keys(), 0.0) _weight_from.update(inp_weight or {}) from .utils.alg import _nodes # Return dispatcher node id. dsp_id = self.add_function( dsp_id, dsp, sorted(_nodes(inputs)), sorted(_nodes(outputs.values())), input_domain, weight, _weight_from, type='dispatcher', description=description, wait_inputs=False, await_domain=await_domain, **kwargs ) # Set proper inputs. self.nodes[dsp_id]['inputs'] = inputs # Set proper outputs. self.nodes[dsp_id]['outputs'] = outputs if SINK not in dsp.nodes and \ SINK in _nodes(inputs.values()).union(_nodes(outputs)): dsp.add_data(SINK) # Add sink node. # Import default values from sub-dispatcher. if include_defaults: dsp_dfl = dsp.default_values # Namespace shortcut. remove = set() # Set of nodes to remove after the import. # Set default values. for k, v in inputs.items(): if isinstance(v, str): if v in dsp_dfl: self.set_default_value(k, **dsp_dfl.pop(v)) else: if v[0] in dsp_dfl: self.set_default_value(k, **dsp_dfl.pop(v[0])) remove.update(v[1:]) # Remove default values. for k in remove: dsp_dfl.pop(k, None) return dsp_id
[ "def", "add_dispatcher", "(", "self", ",", "dsp", ",", "inputs", ",", "outputs", ",", "dsp_id", "=", "None", ",", "input_domain", "=", "None", ",", "weight", "=", "None", ",", "inp_weight", "=", "None", ",", "description", "=", "None", ",", "include_defaults", "=", "False", ",", "await_domain", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", ".", "utils", ".", "blue", "import", "_init", "dsp", "=", "_init", "(", "dsp", ")", "if", "not", "isinstance", "(", "dsp", ",", "self", ".", "__class__", ")", ":", "kw", "=", "dsp", "dsp", "=", "self", ".", "__class__", "(", "name", "=", "dsp_id", "or", "'unknown'", ",", "executor", "=", "self", ".", "executor", ")", "dsp", ".", "add_from_lists", "(", "*", "*", "kw", ")", "if", "not", "dsp_id", ":", "# Get the dsp id.", "dsp_id", "=", "dsp", ".", "name", "or", "'unknown'", "if", "description", "is", "None", ":", "# Get description.", "description", "=", "dsp", ".", "__doc__", "or", "None", "if", "not", "isinstance", "(", "inputs", ",", "dict", ")", ":", "# Create the inputs dict.", "inputs", "=", "kk_dict", "(", "*", "inputs", ")", "if", "not", "isinstance", "(", "outputs", ",", "dict", ")", ":", "# Create the outputs dict.", "outputs", "=", "kk_dict", "(", "*", "outputs", ")", "# Set zero as default input distances.", "# noinspection PyTypeChecker", "_weight_from", "=", "dict", ".", "fromkeys", "(", "inputs", ".", "keys", "(", ")", ",", "0.0", ")", "_weight_from", ".", "update", "(", "inp_weight", "or", "{", "}", ")", "from", ".", "utils", ".", "alg", "import", "_nodes", "# Return dispatcher node id.", "dsp_id", "=", "self", ".", "add_function", "(", "dsp_id", ",", "dsp", ",", "sorted", "(", "_nodes", "(", "inputs", ")", ")", ",", "sorted", "(", "_nodes", "(", "outputs", ".", "values", "(", ")", ")", ")", ",", "input_domain", ",", "weight", ",", "_weight_from", ",", "type", "=", "'dispatcher'", ",", "description", "=", "description", ",", "wait_inputs", "=", "False", ",", "await_domain", "=", "await_domain", ",", "*", "*", "kwargs", ")", "# Set proper inputs.", "self", ".", "nodes", "[", "dsp_id", "]", "[", "'inputs'", "]", "=", "inputs", "# Set proper outputs.", "self", ".", "nodes", "[", "dsp_id", "]", "[", "'outputs'", "]", "=", "outputs", "if", "SINK", "not", "in", "dsp", ".", "nodes", "and", "SINK", "in", "_nodes", "(", "inputs", ".", "values", "(", ")", ")", ".", "union", "(", "_nodes", "(", "outputs", ")", ")", ":", "dsp", ".", "add_data", "(", "SINK", ")", "# Add sink node.", "# Import default values from sub-dispatcher.", "if", "include_defaults", ":", "dsp_dfl", "=", "dsp", ".", "default_values", "# Namespace shortcut.", "remove", "=", "set", "(", ")", "# Set of nodes to remove after the import.", "# Set default values.", "for", "k", ",", "v", "in", "inputs", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "str", ")", ":", "if", "v", "in", "dsp_dfl", ":", "self", ".", "set_default_value", "(", "k", ",", "*", "*", "dsp_dfl", ".", "pop", "(", "v", ")", ")", "else", ":", "if", "v", "[", "0", "]", "in", "dsp_dfl", ":", "self", ".", "set_default_value", "(", "k", ",", "*", "*", "dsp_dfl", ".", "pop", "(", "v", "[", "0", "]", ")", ")", "remove", ".", "update", "(", "v", "[", "1", ":", "]", ")", "# Remove default values.", "for", "k", "in", "remove", ":", "dsp_dfl", ".", "pop", "(", "k", ",", "None", ")", "return", "dsp_id" ]
Add a single sub-dispatcher node to dispatcher. :param dsp: Child dispatcher that is added as sub-dispatcher node to the parent dispatcher. :type dsp: Dispatcher | dict[str, list] :param inputs: Inputs mapping. Data node ids from parent dispatcher to child sub-dispatcher. :type inputs: dict[str, str | list[str]] | tuple[str] | (str, ..., dict[str, str | list[str]]) :param outputs: Outputs mapping. Data node ids from child sub-dispatcher to parent dispatcher. :type outputs: dict[str, str | list[str]] | tuple[str] | (str, ..., dict[str, str | list[str]]) :param dsp_id: Sub-dispatcher node id. If None will be assigned as <dsp.name>. :type dsp_id: str, optional :param input_domain: A function that checks if input values satisfy the function domain. This can be any function that takes a dictionary with the inputs of the sub-dispatcher node and returns True if input values satisfy the domain, otherwise False. .. note:: This function is invoked every time that a data node reaches the sub-dispatcher node. :type input_domain: (dict) -> bool, optional :param weight: Node weight. It is a weight coefficient that is used by the dispatch algorithm to estimate the minimum workflow. :type weight: float, int, optional :param inp_weight: Edge weights from data nodes to the sub-dispatcher node. It is a dictionary (key=data node id) with the weight coefficients used by the dispatch algorithm to estimate the minimum workflow. :type inp_weight: dict[str, int | float], optional :param description: Sub-dispatcher node's description. :type description: str, optional :param include_defaults: If True the default values of the sub-dispatcher are added to the current dispatcher. :type include_defaults: bool, optional :param await_domain: If True the Dispatcher waits for all input results before executing the `input_domain` function. If a number is defined this is used as `timeout` for `Future.result` method [default: True]. Note this is used when asynchronous or parallel execution is enabled. :type await_domain: bool|int|float, optional :param kwargs: Set additional node attributes using key=value. :type kwargs: keyword arguments, optional :return: Sub-dispatcher node id. :rtype: str .. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_function`, :func:`add_from_lists` **--------------------------------------------------------------------** **Example**: .. testsetup:: >>> dsp = Dispatcher(name='Dispatcher') Create a sub-dispatcher:: >>> sub_dsp = Dispatcher() >>> sub_dsp.add_function('max', max, ['a', 'b'], ['c']) 'max' Add the sub-dispatcher to the parent dispatcher:: >>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher', dsp=sub_dsp, ... inputs={'A': 'a', 'B': 'b'}, ... outputs={'c': 'C'}) 'Sub-Dispatcher' Add a sub-dispatcher node with domain:: >>> def my_domain(kwargs): ... return kwargs['C'] > 3 ... >>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher with domain', ... dsp=sub_dsp, inputs={'C': 'a', 'D': 'b'}, ... outputs={('c', 'b'): ('E', 'E1')}, ... input_domain=my_domain) 'Sub-Dispatcher with domain'
[ "Add", "a", "single", "sub", "-", "dispatcher", "node", "to", "dispatcher", "." ]
python
train
36.044944
tylertreat/BigQuery-Python
bigquery/client.py
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L574-L599
def get_table(self, dataset, table, project_id=None): """ Retrieve a table if it exists, otherwise return an empty dict. Parameters ---------- dataset : str The dataset that the table is in table : str The name of the table project_id: str, optional The project that the table is in Returns ------- dict Containing the table object if it exists, else empty """ project_id = self._get_project_id(project_id) try: table = self.bigquery.tables().get( projectId=project_id, datasetId=dataset, tableId=table).execute(num_retries=self.num_retries) except HttpError: table = {} return table
[ "def", "get_table", "(", "self", ",", "dataset", ",", "table", ",", "project_id", "=", "None", ")", ":", "project_id", "=", "self", ".", "_get_project_id", "(", "project_id", ")", "try", ":", "table", "=", "self", ".", "bigquery", ".", "tables", "(", ")", ".", "get", "(", "projectId", "=", "project_id", ",", "datasetId", "=", "dataset", ",", "tableId", "=", "table", ")", ".", "execute", "(", "num_retries", "=", "self", ".", "num_retries", ")", "except", "HttpError", ":", "table", "=", "{", "}", "return", "table" ]
Retrieve a table if it exists, otherwise return an empty dict. Parameters ---------- dataset : str The dataset that the table is in table : str The name of the table project_id: str, optional The project that the table is in Returns ------- dict Containing the table object if it exists, else empty
[ "Retrieve", "a", "table", "if", "it", "exists", "otherwise", "return", "an", "empty", "dict", "." ]
python
train
30.346154
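An illustrative call, assuming `client` is an authenticated client instance from this library; the dataset and table names are placeholders:

table = client.get_table('my_dataset', 'my_table')  # project_id defaults to the client's
if table:
    print('exists:', table.get('id'))
else:
    print('table not found')  # get_table returned the empty dict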
Cymmetria/honeycomb
honeycomb/utils/config_utils.py
https://github.com/Cymmetria/honeycomb/blob/33ea91b5cf675000e4e85dd02efe580ea6e95c86/honeycomb/utils/config_utils.py#L49-L57
def validate_config_parameters(config_json, allowed_keys, allowed_types): """Validate parameters in config file.""" custom_fields = config_json.get(defs.PARAMETERS, []) for field in custom_fields: validate_field(field, allowed_keys, allowed_types) default = field.get(defs.DEFAULT) field_type = field.get(defs.TYPE) if default: validate_field_matches_type(field[defs.VALUE], default, field_type)
[ "def", "validate_config_parameters", "(", "config_json", ",", "allowed_keys", ",", "allowed_types", ")", ":", "custom_fields", "=", "config_json", ".", "get", "(", "defs", ".", "PARAMETERS", ",", "[", "]", ")", "for", "field", "in", "custom_fields", ":", "validate_field", "(", "field", ",", "allowed_keys", ",", "allowed_types", ")", "default", "=", "field", ".", "get", "(", "defs", ".", "DEFAULT", ")", "field_type", "=", "field", ".", "get", "(", "defs", ".", "TYPE", ")", "if", "default", ":", "validate_field_matches_type", "(", "field", "[", "defs", ".", "VALUE", "]", ",", "default", ",", "field_type", ")" ]
Validate parameters in config file.
[ "Validate", "parameters", "in", "config", "file", "." ]
python
train
49.222222
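A sketch of the expected input shape. The concrete key strings live in honeycomb's `defs` module, so they are referenced symbolically here; the import path and field values are assumptions:

from honeycomb import defs  # assumed import path

config_json = {defs.PARAMETERS: [
    {defs.TYPE: 'integer', defs.DEFAULT: 80, defs.VALUE: 8080},
]}
validate_config_parameters(config_json, allowed_keys, allowed_types)  # caller supplies the whitelists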
napalm-automation/napalm-junos
napalm_junos/junos.py
https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L1237-L1247
def get_ntp_servers(self): """Return the NTP servers configured on the device.""" ntp_table = junos_views.junos_ntp_servers_config_table(self.device) ntp_table.get() ntp_servers = ntp_table.items() if not ntp_servers: return {} return {napalm_base.helpers.ip(server[0]): {} for server in ntp_servers}
[ "def", "get_ntp_servers", "(", "self", ")", ":", "ntp_table", "=", "junos_views", ".", "junos_ntp_servers_config_table", "(", "self", ".", "device", ")", "ntp_table", ".", "get", "(", ")", "ntp_servers", "=", "ntp_table", ".", "items", "(", ")", "if", "not", "ntp_servers", ":", "return", "{", "}", "return", "{", "napalm_base", ".", "helpers", ".", "ip", "(", "server", "[", "0", "]", ")", ":", "{", "}", "for", "server", "in", "ntp_servers", "}" ]
Return the NTP servers configured on the device.
[ "Return", "the", "NTP", "servers", "configured", "on", "the", "device", "." ]
python
train
32.090909
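Illustrative call and output shape (the device handle and addresses are hypothetical):

device.get_ntp_servers()
# => {'10.0.0.1': {}, '192.0.2.1': {}}   one empty dict per configured server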
RJT1990/pyflux
pyflux/garch/egarchmreg.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/garch/egarchmreg.py#L166-L223
def _mb_model(self, beta, mini_batch): """ Creates the structure of the model Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- lambda : np.array Contains the values for the conditional volatility series Y : np.array Contains the length-adjusted time series (accounting for lags) scores : np.array Contains the score terms for the time series """ # Transform latent variables parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])]) rand_int = np.random.randint(low=0, high=self.data_length-mini_batch+1) sample = np.arange(start=rand_int, stop=rand_int+mini_batch) data = self.y[sample] X = self.X[sample, :] Y = data[self.max_lag:] scores = np.zeros(Y.shape[0]) lmda = np.ones(Y.shape[0]) theta = np.ones(Y.shape[0]) # Loop over time series for t in range(0,Y.shape[0]): if t < self.max_lag: lmda[t] = parm[-len(self.X_names)*2]/(1-np.sum(parm[:self.p])) theta[t] = np.dot(self.X[t],parm[-len(self.X_names):]) else: # Loop over GARCH terms for p_term in range(0,self.p): lmda[t] += parm[p_term]*lmda[t-p_term-1] # Loop over Score terms for q_term in range(0,self.q): lmda[t] += parm[self.p+q_term]*scores[t-q_term-1] if self.leverage is True: lmda[t] += parm[-(len(self.X_names)*2)-3]*np.sign(-(Y[t-1]-theta[t-1]))*(scores[t-1]+1) lmda[t] += np.dot(self.X[t],parm[-len(self.X_names)*2:-len(self.X_names)]) theta[t] = np.dot(self.X[t],parm[-len(self.X_names):]) + parm[-(len(self.X_names)*2)-1]*np.exp(lmda[t]/2.0) scores[t] = (((parm[self.p+self.q]+1.0)*np.power(Y[t]-theta[t],2))/float(parm[self.p+self.q]*np.exp(lmda[t]) + np.power(Y[t]-theta[t],2))) - 1.0 return lmda, Y, scores, theta
[ "def", "_mb_model", "(", "self", ",", "beta", ",", "mini_batch", ")", ":", "# Transform latent variables", "parm", "=", "np", ".", "array", "(", "[", "self", ".", "latent_variables", ".", "z_list", "[", "k", "]", ".", "prior", ".", "transform", "(", "beta", "[", "k", "]", ")", "for", "k", "in", "range", "(", "beta", ".", "shape", "[", "0", "]", ")", "]", ")", "rand_int", "=", "np", ".", "random", ".", "randint", "(", "low", "=", "0", ",", "high", "=", "self", ".", "data_length", "-", "mini_batch", "+", "1", ")", "sample", "=", "np", ".", "arange", "(", "start", "=", "rand_int", ",", "stop", "=", "rand_int", "+", "mini_batch", ")", "data", "=", "self", ".", "y", "[", "sample", "]", "X", "=", "self", ".", "X", "[", "sample", ",", ":", "]", "Y", "=", "data", "[", "self", ".", "max_lag", ":", "]", "scores", "=", "np", ".", "zeros", "(", "Y", ".", "shape", "[", "0", "]", ")", "lmda", "=", "np", ".", "ones", "(", "Y", ".", "shape", "[", "0", "]", ")", "theta", "=", "np", ".", "ones", "(", "Y", ".", "shape", "[", "0", "]", ")", "# Loop over time series", "for", "t", "in", "range", "(", "0", ",", "Y", ".", "shape", "[", "0", "]", ")", ":", "if", "t", "<", "self", ".", "max_lag", ":", "lmda", "[", "t", "]", "=", "parm", "[", "-", "len", "(", "self", ".", "X_names", ")", "*", "2", "]", "/", "(", "1", "-", "np", ".", "sum", "(", "parm", "[", ":", "self", ".", "p", "]", ")", ")", "theta", "[", "t", "]", "=", "np", ".", "dot", "(", "self", ".", "X", "[", "t", "]", ",", "parm", "[", "-", "len", "(", "self", ".", "X_names", ")", ":", "]", ")", "else", ":", "# Loop over GARCH terms", "for", "p_term", "in", "range", "(", "0", ",", "self", ".", "p", ")", ":", "lmda", "[", "t", "]", "+=", "parm", "[", "p_term", "]", "*", "lmda", "[", "t", "-", "p_term", "-", "1", "]", "# Loop over Score terms", "for", "q_term", "in", "range", "(", "0", ",", "self", ".", "q", ")", ":", "lmda", "[", "t", "]", "+=", "parm", "[", "self", ".", "p", "+", "q_term", "]", "*", "scores", "[", "t", "-", "q_term", "-", "1", "]", "if", "self", ".", "leverage", "is", "True", ":", "lmda", "[", "t", "]", "+=", "parm", "[", "-", "(", "len", "(", "self", ".", "X_names", ")", "*", "2", ")", "-", "3", "]", "*", "np", ".", "sign", "(", "-", "(", "Y", "[", "t", "-", "1", "]", "-", "theta", "[", "t", "-", "1", "]", ")", ")", "*", "(", "scores", "[", "t", "-", "1", "]", "+", "1", ")", "lmda", "[", "t", "]", "+=", "np", ".", "dot", "(", "self", ".", "X", "[", "t", "]", ",", "parm", "[", "-", "len", "(", "self", ".", "X_names", ")", "*", "2", ":", "-", "len", "(", "self", ".", "X_names", ")", "]", ")", "theta", "[", "t", "]", "=", "np", ".", "dot", "(", "self", ".", "X", "[", "t", "]", ",", "parm", "[", "-", "len", "(", "self", ".", "X_names", ")", ":", "]", ")", "+", "parm", "[", "-", "(", "len", "(", "self", ".", "X_names", ")", "*", "2", ")", "-", "1", "]", "*", "np", ".", "exp", "(", "lmda", "[", "t", "]", "/", "2.0", ")", "scores", "[", "t", "]", "=", "(", "(", "(", "parm", "[", "self", ".", "p", "+", "self", ".", "q", "]", "+", "1.0", ")", "*", "np", ".", "power", "(", "Y", "[", "t", "]", "-", "theta", "[", "t", "]", ",", "2", ")", ")", "/", "float", "(", "parm", "[", "self", ".", "p", "+", "self", ".", "q", "]", "*", "np", ".", "exp", "(", "lmda", "[", "t", "]", ")", "+", "np", ".", "power", "(", "Y", "[", "t", "]", "-", "theta", "[", "t", "]", ",", "2", ")", ")", ")", "-", "1.0", "return", "lmda", ",", "Y", ",", "scores", ",", "theta" ]
Creates the structure of the model Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- lambda : np.array Contains the values for the conditional volatility series Y : np.array Contains the length-adjusted time series (accounting for lags) scores : np.array Contains the score terms for the time series theta : np.array Contains the values for the conditional mean series
[ "Creates", "the", "structure", "of", "the", "model" ]
python
train
37.189655
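The mini-batch in `_mb_model` is a contiguous window drawn uniformly at random; that step in isolation:

import numpy as np

data_length, mini_batch = 1000, 64  # illustrative sizes
rand_int = np.random.randint(low=0, high=data_length - mini_batch + 1)
sample = np.arange(start=rand_int, stop=rand_int + mini_batch)  # 64 consecutive indices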
PureStorage-OpenConnect/rest-client
purestorage/purestorage.py
https://github.com/PureStorage-OpenConnect/rest-client/blob/097d5f2bc6facf607d7e4a92567b09fb8cf5cb34/purestorage/purestorage.py#L3234-L3260
def list_certificates(self): """Get the attributes of the current array certificate. :param \*\*kwargs: See the REST API Guide on your array for the documentation on the request: **GET cert** :type \*\*kwargs: optional :returns: A list of dictionaries describing all configured certificates. :rtype: ResponseList .. note:: Requires use of REST API 1.12 or later. """ # This call takes no parameters. if self._rest_version >= LooseVersion("1.12"): return self._request("GET", "cert") else: # If someone tries to call this against a too-early api version, # do the best we can to provide expected behavior. cert = self._request("GET", "cert") out = ResponseList([cert]) out.headers = cert.headers return out
[ "def", "list_certificates", "(", "self", ")", ":", "# This call takes no parameters.", "if", "self", ".", "_rest_version", ">=", "LooseVersion", "(", "\"1.12\"", ")", ":", "return", "self", ".", "_request", "(", "\"GET\"", ",", "\"cert\"", ")", "else", ":", "# If someone tries to call this against a too-early api version,", "# do the best we can to provide expected behavior.", "cert", "=", "self", ".", "_request", "(", "\"GET\"", ",", "\"cert\"", ")", "out", "=", "ResponseList", "(", "[", "cert", "]", ")", "out", ".", "headers", "=", "cert", ".", "headers", "return", "out" ]
Get the attributes of the current array certificate. :param \*\*kwargs: See the REST API Guide on your array for the documentation on the request: **GET cert** :type \*\*kwargs: optional :returns: A list of dictionaries describing all configured certificates. :rtype: ResponseList .. note:: Requires use of REST API 1.12 or later.
[ "Get", "the", "attributes", "of", "the", "current", "array", "certificate", "." ]
python
train
33.888889
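Illustrative call, assuming `array` is a connected purestorage.FlashArray (the target and token are placeholders):

import purestorage

array = purestorage.FlashArray('flasharray.example.com', api_token='...')
for cert in array.list_certificates():
    print(cert)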
geoneric/starling
starling/pika/decorator.py
https://github.com/geoneric/starling/blob/a8e1324c4d6e8b063a0d353bcd03bb8e57edd888/starling/pika/decorator.py#L99-L134
def consume_message_with_notify( notifier_uri_getter): """ Decorator for methods handling requests from RabbitMQ This decorator builds on the :py:func:`consume_message` decorator. It extends it with logic for notifying a client of the result of handling the request. The *notifier_uri_getter* argument must be a callable which accepts *self* and returns the uri of the notifier service. """ def consume_message_with_notify_decorator( method): @consume_message def wrapper( self, data): notifier_uri = notifier_uri_getter(self) client_id = data["client_id"] # Forward the call to the method and notify the client of the # result try: method(self, data) notify_client(notifier_uri, client_id, 200) except Exception as exception: notify_client(notifier_uri, client_id, 400, str(exception)) raise return wrapper return consume_message_with_notify_decorator
[ "def", "consume_message_with_notify", "(", "notifier_uri_getter", ")", ":", "def", "consume_message_with_notify_decorator", "(", "method", ")", ":", "@", "consume_message", "def", "wrapper", "(", "self", ",", "data", ")", ":", "notifier_uri", "=", "notifier_uri_getter", "(", "self", ")", "client_id", "=", "data", "[", "\"client_id\"", "]", "# Forward the call to the method and notify the client of the", "# result", "try", ":", "method", "(", "self", ",", "data", ")", "notify_client", "(", "notifier_uri", ",", "client_id", ",", "200", ")", "except", "Exception", "as", "exception", ":", "notify_client", "(", "notifier_uri", ",", "client_id", ",", "400", ",", "str", "(", "exception", ")", ")", "raise", "return", "wrapper", "return", "consume_message_with_notify_decorator" ]
Decorator for methods handling requests from RabbitMQ This decorator builds on the :py:func:`consume_message` decorator. It extends it with logic for notifying a client of the result of handling the request. The *notifier_uri_getter* argument must be a callable which accepts *self* and returns the uri of the notifier service.
[ "Decorator", "for", "methods", "handling", "requests", "from", "RabbitMQ" ]
python
valid
29.861111
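A sketch of how the decorator is meant to be applied; the handler class and attribute name are illustrative:

class RequestHandler(object):
    def __init__(self, notifier_uri):
        self._notifier_uri = notifier_uri

    @consume_message_with_notify(lambda self: self._notifier_uri)
    def handle(self, data):
        pass  # success notifies the client with 200, an exception with 400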
KelSolaar/Foundations
foundations/parsers.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/parsers.py#L591-L601
def preserve_order(self, value): """ Setter method for **self.__preserve_order** attribute. :param value: Attribute value. :type value: bool """ if value is not None: assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("preserve_order", value) self.__preserve_order = value
[ "def", "preserve_order", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "bool", ",", "\"'{0}' attribute: '{1}' type is not 'bool'!\"", ".", "format", "(", "\"preserve_order\"", ",", "value", ")", "self", ".", "__preserve_order", "=", "value" ]
Setter method for **self.__preserve_order** attribute. :param value: Attribute value. :type value: bool
[ "Setter", "method", "for", "**", "self", ".", "__preserve_order", "**", "attribute", "." ]
python
train
32.818182
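The setter only guards the type; two illustrative assignments (`parser` stands for an instance of the owning class):

parser.preserve_order = True   # accepted
parser.preserve_order = 'yes'  # raises AssertionError: type is not 'bool'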
aljosa/django-tinymce
tinymce/views.py
https://github.com/aljosa/django-tinymce/blob/a509fdbc6c623ddac6552199da89712c0f026c91/tinymce/views.py#L33-L69
def spell_check(request): """ Returns a HttpResponse that implements the TinyMCE spellchecker protocol. """ try: if not enchant: raise RuntimeError("install pyenchant for spellchecker functionality") raw = force_text(request.body) input = json.loads(raw) id = input['id'] method = input['method'] params = input['params'] lang = params[0] arg = params[1] if not enchant.dict_exists(str(lang)): raise RuntimeError("dictionary not found for language {!r}".format(lang)) checker = enchant.Dict(str(lang)) if method == 'checkWords': result = [word for word in arg if word and not checker.check(word)] elif method == 'getSuggestions': result = checker.suggest(arg) else: raise RuntimeError("Unknown spellcheck method: {!r}".format(method)) output = { 'id': id, 'result': result, 'error': None, } except Exception: logging.exception("Error running spellchecker") return HttpResponse(_("Error running spellchecker")) return HttpResponse(json.dumps(output), content_type='application/json')
[ "def", "spell_check", "(", "request", ")", ":", "try", ":", "if", "not", "enchant", ":", "raise", "RuntimeError", "(", "\"install pyenchant for spellchecker functionality\"", ")", "raw", "=", "force_text", "(", "request", ".", "body", ")", "input", "=", "json", ".", "loads", "(", "raw", ")", "id", "=", "input", "[", "'id'", "]", "method", "=", "input", "[", "'method'", "]", "params", "=", "input", "[", "'params'", "]", "lang", "=", "params", "[", "0", "]", "arg", "=", "params", "[", "1", "]", "if", "not", "enchant", ".", "dict_exists", "(", "str", "(", "lang", ")", ")", ":", "raise", "RuntimeError", "(", "\"dictionary not found for language {!r}\"", ".", "format", "(", "lang", ")", ")", "checker", "=", "enchant", ".", "Dict", "(", "str", "(", "lang", ")", ")", "if", "method", "==", "'checkWords'", ":", "result", "=", "[", "word", "for", "word", "in", "arg", "if", "word", "and", "not", "checker", ".", "check", "(", "word", ")", "]", "elif", "method", "==", "'getSuggestions'", ":", "result", "=", "checker", ".", "suggest", "(", "arg", ")", "else", ":", "raise", "RuntimeError", "(", "\"Unknown spellcheck method: {!r}\"", ".", "format", "(", "method", ")", ")", "output", "=", "{", "'id'", ":", "id", ",", "'result'", ":", "result", ",", "'error'", ":", "None", ",", "}", "except", "Exception", ":", "logging", ".", "exception", "(", "\"Error running spellchecker\"", ")", "return", "HttpResponse", "(", "_", "(", "\"Error running spellchecker\"", ")", ")", "return", "HttpResponse", "(", "json", ".", "dumps", "(", "output", ")", ",", "content_type", "=", "'application/json'", ")" ]
Returns a HttpResponse that implements the TinyMCE spellchecker protocol.
[ "Returns", "a", "HttpResponse", "that", "implements", "the", "TinyMCE", "spellchecker", "protocol", "." ]
python
train
33.243243
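The view implements the TinyMCE spellchecker RPC, so the request body has this shape (values are illustrative, and an en_US enchant dictionary is assumed to be installed):

payload = {
    'id': 'c0',
    'method': 'checkWords',                       # or 'getSuggestions'
    'params': ['en_US', ['speling', 'mistake']],  # [language, argument]
}
# POSTing this JSON would yield: {'id': 'c0', 'result': ['speling'], 'error': None}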
galactics/beyond
beyond/frames/iau1980.py
https://github.com/galactics/beyond/blob/7a7590ff0fd4c0bac3e8e383ecca03caa98e5742/beyond/frames/iau1980.py#L158-L174
def equinox(date, eop_correction=True, terms=106, kinematic=True): """Equinox equation in degrees """ epsilon_bar, delta_psi, delta_eps = _nutation(date, eop_correction, terms) equin = delta_psi * 3600. * np.cos(np.deg2rad(epsilon_bar)) if date.d >= 50506 and kinematic: # Starting 1992-02-27, we apply the effect of the moon ttt = date.change_scale('TT').julian_century om_m = 125.04455501 - (5 * 360. + 134.1361851) * ttt\ + 0.0020756 * ttt ** 2 + 2.139e-6 * ttt ** 3 equin += 0.00264 * np.sin(np.deg2rad(om_m)) + 6.3e-5 * np.sin(np.deg2rad(2 * om_m)) # print("equinox = {}\n".format(equin / 3600)) return equin / 3600.
[ "def", "equinox", "(", "date", ",", "eop_correction", "=", "True", ",", "terms", "=", "106", ",", "kinematic", "=", "True", ")", ":", "epsilon_bar", ",", "delta_psi", ",", "delta_eps", "=", "_nutation", "(", "date", ",", "eop_correction", ",", "terms", ")", "equin", "=", "delta_psi", "*", "3600.", "*", "np", ".", "cos", "(", "np", ".", "deg2rad", "(", "epsilon_bar", ")", ")", "if", "date", ".", "d", ">=", "50506", "and", "kinematic", ":", "# Starting 1992-02-27, we apply the effect of the moon", "ttt", "=", "date", ".", "change_scale", "(", "'TT'", ")", ".", "julian_century", "om_m", "=", "125.04455501", "-", "(", "5", "*", "360.", "+", "134.1361851", ")", "*", "ttt", "+", "0.0020756", "*", "ttt", "**", "2", "+", "2.139e-6", "*", "ttt", "**", "3", "equin", "+=", "0.00264", "*", "np", ".", "sin", "(", "np", ".", "deg2rad", "(", "om_m", ")", ")", "+", "6.3e-5", "*", "np", ".", "sin", "(", "np", ".", "deg2rad", "(", "2", "*", "om_m", ")", ")", "# print(\"equinox = {}\\n\".format(equin / 3600))", "return", "equin", "/", "3600." ]
Equinox equation in degrees
[ "Equinox", "equation", "in", "degrees" ]
python
train
40.058824
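Illustrative call; the assumption here is that `Date` from beyond.dates is the date type the function accepts:

from beyond.dates import Date  # assumed import path

eq = equinox(Date(2020, 1, 1))  # equation of the equinoxes, in degrees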
brocade/pynos
pynos/versions/base/interface.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/interface.py#L471-L560
def get_ip_addresses(self, **kwargs): """ Get IP Addresses already set on an Interface. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet etc). name (str): Name of interface id. (For interface: 1/0/5, 1/0/10 etc). version (int): 4 or 6 to represent IPv4 or IPv6 address callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: List of 0 or more IPs configure on the specified interface. Raises: KeyError: if `int_type` or `name` is not passed. ValueError: if `int_type` or `name` are invalid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... int_type = 'tengigabitethernet' ... name = '225/0/4' ... ip_addr = '20.10.10.1/24' ... version = 4 ... output = dev.interface.disable_switchport(inter_type= ... int_type, inter=name) ... output = dev.interface.ip_address(int_type=int_type, ... name=name, ip_addr=ip_addr) ... result = dev.interface.get_ip_addresses( ... int_type=int_type, name=name, version=version) ... assert len(result) >= 1 ... output = dev.interface.ip_address(int_type=int_type, ... name=name, ip_addr=ip_addr, delete=True) ... ip_addr = 'fc00:1:3:1ad3:0:0:23:a/64' ... version = 6 ... output = dev.interface.ip_address(int_type=int_type, ... name=name, ip_addr=ip_addr) ... result = dev.interface.get_ip_addresses( ... int_type=int_type, name=name, version=version) ... assert len(result) >= 1 ... output = dev.interface.ip_address(int_type=int_type, ... name=name, ip_addr=ip_addr, delete=True) """ int_type = str(kwargs.pop('int_type').lower()) name = str(kwargs.pop('name')) version = int(kwargs.pop('version')) callback = kwargs.pop('callback', self._callback) valid_int_types = ['gigabitethernet', 'tengigabitethernet', 'fortygigabitethernet', 'hundredgigabitethernet'] if int_type not in valid_int_types: raise ValueError('int_type must be one of: %s' % repr(valid_int_types)) method_name = None method_class = self._interface if version == 4: method_name = 'interface_%s_ip_ip_config_address_' \ 'address' % int_type elif version == 6: method_name = 'interface_%s_ipv6_ipv6_config_address_ipv6_' \ 'address_address' % int_type if not pynos.utilities.valid_interface(int_type, name): raise ValueError('`name` must be in the format of x/y/z for ' 'physical interfaces.') ip_args = dict(name=name, address='') ip_address_attr = getattr(method_class, method_name) config = ip_address_attr(**ip_args) output = callback(config, handler='get_config') result = [] if version == 4: for item in output.data.findall( './/{*}address/{*}address'): result.append(item.text) elif version == 6: for item in output.data.findall( './/{*}address/{*}ipv6-address/{' '*}address'): result.append(item.text) return result
[ "def", "get_ip_addresses", "(", "self", ",", "*", "*", "kwargs", ")", ":", "int_type", "=", "str", "(", "kwargs", ".", "pop", "(", "'int_type'", ")", ".", "lower", "(", ")", ")", "name", "=", "str", "(", "kwargs", ".", "pop", "(", "'name'", ")", ")", "version", "=", "int", "(", "kwargs", ".", "pop", "(", "'version'", ")", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "valid_int_types", "=", "[", "'gigabitethernet'", ",", "'tengigabitethernet'", ",", "'fortygigabitethernet'", ",", "'hundredgigabitethernet'", "]", "if", "int_type", "not", "in", "valid_int_types", ":", "raise", "ValueError", "(", "'int_type must be one of: %s'", "%", "repr", "(", "valid_int_types", ")", ")", "method_name", "=", "None", "method_class", "=", "self", ".", "_interface", "if", "version", "==", "4", ":", "method_name", "=", "'interface_%s_ip_ip_config_address_'", "'address'", "%", "int_type", "elif", "version", "==", "6", ":", "method_name", "=", "'interface_%s_ipv6_ipv6_config_address_ipv6_'", "'address_address'", "%", "int_type", "if", "not", "pynos", ".", "utilities", ".", "valid_interface", "(", "int_type", ",", "name", ")", ":", "raise", "ValueError", "(", "'`name` must be in the format of x/y/z for '", "'physical interfaces.'", ")", "ip_args", "=", "dict", "(", "name", "=", "name", ",", "address", "=", "''", ")", "ip_address_attr", "=", "getattr", "(", "method_class", ",", "method_name", ")", "config", "=", "ip_address_attr", "(", "*", "*", "ip_args", ")", "output", "=", "callback", "(", "config", ",", "handler", "=", "'get_config'", ")", "result", "=", "[", "]", "if", "version", "==", "4", ":", "for", "item", "in", "output", ".", "data", ".", "findall", "(", "'.//{*}address/{*}address'", ")", ":", "result", ".", "append", "(", "item", ".", "text", ")", "elif", "version", "==", "6", ":", "for", "item", "in", "output", ".", "data", ".", "findall", "(", "'.//{*}address/{*}ipv6-address/{'", "'*}address'", ")", ":", "result", ".", "append", "(", "item", ".", "text", ")", "return", "result" ]
Get IP Addresses already set on an Interface. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet etc). name (str): Name of interface id. (For interface: 1/0/5, 1/0/10 etc). version (int): 4 or 6 to represent IPv4 or IPv6 address callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: List of 0 or more IPs configure on the specified interface. Raises: KeyError: if `int_type` or `name` is not passed. ValueError: if `int_type` or `name` are invalid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... int_type = 'tengigabitethernet' ... name = '225/0/4' ... ip_addr = '20.10.10.1/24' ... version = 4 ... output = dev.interface.disable_switchport(inter_type= ... int_type, inter=name) ... output = dev.interface.ip_address(int_type=int_type, ... name=name, ip_addr=ip_addr) ... result = dev.interface.get_ip_addresses( ... int_type=int_type, name=name, version=version) ... assert len(result) >= 1 ... output = dev.interface.ip_address(int_type=int_type, ... name=name, ip_addr=ip_addr, delete=True) ... ip_addr = 'fc00:1:3:1ad3:0:0:23:a/64' ... version = 6 ... output = dev.interface.ip_address(int_type=int_type, ... name=name, ip_addr=ip_addr) ... result = dev.interface.get_ip_addresses( ... int_type=int_type, name=name, version=version) ... assert len(result) >= 1 ... output = dev.interface.ip_address(int_type=int_type, ... name=name, ip_addr=ip_addr, delete=True)
[ "Get", "IP", "Addresses", "already", "set", "on", "an", "Interface", "." ]
python
train
44.577778
obulpathi/cdn-fastly-python
fastly/__init__.py
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L1005-L1008
def get_wordpress(self, service_id, version_number, name): """Get information on a specific wordpress.""" content = self._fetch("/service/%s/version/%d/wordpress/%s" % (service_id, version_number, name)) return FastlyWordpress(self, content)
[ "def", "get_wordpress", "(", "self", ",", "service_id", ",", "version_number", ",", "name", ")", ":", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d/wordpress/%s\"", "%", "(", "service_id", ",", "version_number", ",", "name", ")", ")", "return", "FastlyWordpress", "(", "self", ",", "content", ")" ]
Get information on a specific wordpress.
[ "Get", "information", "on", "a", "specific", "wordpress", "." ]
python
train
61
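Illustrative call, assuming `client` is an authenticated connection object from this library and the identifiers are placeholders:

wp = client.get_wordpress(service_id, version_number, 'my-wordpress')
# wp is a FastlyWordpress wrapping the fetched attributes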
thefactory/marathon-python
marathon/client.py
https://github.com/thefactory/marathon-python/blob/592b253aa8edf2475c97ca438ad7b6936652caf2/marathon/client.py#L666-L676
def delete_event_subscription(self, url): """Deregister a callback URL as an event subscriber. :param str url: callback URL :returns: the deleted event subscription :rtype: dict """ params = {'callbackUrl': url} response = self._do_request('DELETE', '/v2/eventSubscriptions', params) return response.json()
[ "def", "delete_event_subscription", "(", "self", ",", "url", ")", ":", "params", "=", "{", "'callbackUrl'", ":", "url", "}", "response", "=", "self", ".", "_do_request", "(", "'DELETE'", ",", "'/v2/eventSubscriptions'", ",", "params", ")", "return", "response", ".", "json", "(", ")" ]
Deregister a callback URL as an event subscriber. :param str url: callback URL :returns: the deleted event subscription :rtype: dict
[ "Deregister", "a", "callback", "URL", "as", "an", "event", "subscriber", "." ]
python
train
32.909091
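Illustrative call; MarathonClient is this library's client class, and the URLs are placeholders:

from marathon import MarathonClient

c = MarathonClient('http://marathon.example.com:8080')
deleted = c.delete_event_subscription('http://callbacks.example.com/events')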
mrstephenneal/mysql-toolkit
mysql/toolkit/components/structure/__init__.py
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/structure/__init__.py#L16-L20
def count_rows(self, table, cols='*'): """Get the number of rows in a particular table.""" query = 'SELECT COUNT({0}) FROM {1}'.format(join_cols(cols), wrap(table)) result = self.fetch(query) return result if result is not None else 0
[ "def", "count_rows", "(", "self", ",", "table", ",", "cols", "=", "'*'", ")", ":", "query", "=", "'SELECT COUNT({0}) FROM {1}'", ".", "format", "(", "join_cols", "(", "cols", ")", ",", "wrap", "(", "table", ")", ")", "result", "=", "self", ".", "fetch", "(", "query", ")", "return", "result", "if", "result", "is", "not", "None", "else", "0" ]
Get the number of rows in a particular table.
[ "Get", "the", "number", "of", "rows", "in", "a", "particular", "table", "." ]
python
train
52.4
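Illustrative call, assuming `sql` is a connected toolkit instance exposing `fetch`:

n = sql.count_rows('customers')  # issues SELECT COUNT(*) FROM `customers`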
NoviceLive/intellicoder
intellicoder/synthesizers.py
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L213-L229
def c_var_decls(self): """Get the needed variable definitions.""" if self.opts.no_structs: mod_decl = 'HMODULE {} = NULL;\n'.format(self.name) return [mod_decl] + [ '{} *{} = NULL;\n'.format( self._c_type_name(name), name ) for name, dummy_args in self.funcs ] if self.opts.windll: return '' return [ '{} _{} = {{ 0 }};\n'.format( self._c_struct_names()[1], self.name ) ]
[ "def", "c_var_decls", "(", "self", ")", ":", "if", "self", ".", "opts", ".", "no_structs", ":", "mod_decl", "=", "'HMODULE {} = NULL;\\n'", ".", "format", "(", "self", ".", "name", ")", "return", "[", "mod_decl", "]", "+", "[", "'{} *{} = NULL;\\n'", ".", "format", "(", "self", ".", "_c_type_name", "(", "name", ")", ",", "name", ")", "for", "name", ",", "dummy_args", "in", "self", ".", "funcs", "]", "if", "self", ".", "opts", ".", "windll", ":", "return", "''", "return", "[", "'{} _{} = {{ 0 }};\\n'", ".", "format", "(", "self", ".", "_c_struct_names", "(", ")", "[", "1", "]", ",", "self", ".", "name", ")", "]" ]
Get the needed variable definitions.
[ "Get", "the", "needed", "variable", "definitions", "." ]
python
train
32.411765
josiahcarlson/rom
rom/query.py
https://github.com/josiahcarlson/rom/blob/8b5607a856341df85df33422accc30ba9294dbdb/rom/query.py#L329-L362
def like(self, **kwargs): ''' When provided with keyword arguments of the form ``col=pattern``, this will limit the entities returned to those that include the provided pattern. Note that 'like' queries require that the ``prefix=True`` option must have been provided as part of the column definition. Patterns allow for 4 wildcard characters, whose semantics are as follows: * *?* - will match 0 or 1 of any character * *\** - will match 0 or more of any character * *+* - will match 1 or more of any character * *!* - will match exactly 1 of any character As an example, imagine that you have enabled the required prefix matching on your ``User.email`` column. And lets say that you want to find everyone with an email address that contains the name 'frank' before the ``@`` sign. You can use either of the following patterns to discover those users. * *\*frank\*@* * *\*frank\*@* .. note:: Like queries implicitly start at the beginning of strings checked, so if you want to match a pattern that doesn't start at the beginning of a string, you should prefix it with one of the wildcard characters (like ``*`` as we did with the 'frank' pattern). ''' new = [] for k, v in kwargs.items(): v = self._check(k, v, 'like') new.append(Pattern(k, v)) return self.replace(filters=self._filters+tuple(new))
[ "def", "like", "(", "self", ",", "*", "*", "kwargs", ")", ":", "new", "=", "[", "]", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "v", "=", "self", ".", "_check", "(", "k", ",", "v", ",", "'like'", ")", "new", ".", "append", "(", "Pattern", "(", "k", ",", "v", ")", ")", "return", "self", ".", "replace", "(", "filters", "=", "self", ".", "_filters", "+", "tuple", "(", "new", ")", ")" ]
When provided with keyword arguments of the form ``col=pattern``, this will limit the entities returned to those that include the provided pattern. Note that 'like' queries require that the ``prefix=True`` option must have been provided as part of the column definition. Patterns allow for 4 wildcard characters, whose semantics are as follows: * *?* - will match 0 or 1 of any character * *\** - will match 0 or more of any character * *+* - will match 1 or more of any character * *!* - will match exactly 1 of any character As an example, imagine that you have enabled the required prefix matching on your ``User.email`` column. And let's say that you want to find everyone with an email address that contains the name 'frank' before the ``@`` sign. You can use the following pattern to discover those users. * *\*frank\*@* .. note:: Like queries implicitly start at the beginning of strings checked, so if you want to match a pattern that doesn't start at the beginning of a string, you should prefix it with one of the wildcard characters (like ``*`` as we did with the 'frank' pattern).
[ "When", "provided", "with", "keyword", "arguments", "of", "the", "form", "col", "=", "pattern", "this", "will", "limit", "the", "entities", "returned", "to", "those", "that", "include", "the", "provided", "pattern", ".", "Note", "that", "like", "queries", "require", "that", "the", "prefix", "=", "True", "option", "must", "have", "been", "provided", "as", "part", "of", "the", "column", "definition", "." ]
python
test
45.058824
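An illustrative, self-contained translation of the documented wildcards into Python regex syntax; this only approximates rom's server-side matching and is not its implementation:

import re

def to_regex(pattern):
    # Documented semantics: '?' -> 0 or 1, '*' -> 0 or more,
    # '+' -> 1 or more, '!' -> exactly 1 of any character.
    table = {'?': '.?', '*': '.*', '+': '.+', '!': '.'}
    return ''.join(table.get(ch, re.escape(ch)) for ch in pattern)

# Like queries match from the start of the string, so re.match fits here.
print(bool(re.match(to_regex('*frank*@'), 'mr.frank.jones@example.com')))  # True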
hozn/stravalib
stravalib/attributes.py
https://github.com/hozn/stravalib/blob/5500ebc39e0bf4706bb1ca4c27b25e56becaaa5f/stravalib/attributes.py#L256-L274
def marshal(self, v): """ Turn this value into API format. Do a reverse dictionary lookup on choices to find the original value. If there are no matching keys, or too many, we raise a NotImplementedError for now, as marshal is not currently used anywhere. In the future we will want to fail gracefully. """ if v: orig = [i for i in self.choices if self.choices[i] == v] if len(orig) == 1: return orig[0] elif len(orig) == 0: # No such choice raise NotImplementedError("No such reverse choice {0} for field {1}.".format(v, self)) else: # Too many choices. We could return one possible choice (e.g. orig[0]). raise NotImplementedError("Too many reverse choices {0} for value {1} for field {2}".format(orig, v, self))
[ "def", "marshal", "(", "self", ",", "v", ")", ":", "if", "v", ":", "orig", "=", "[", "i", "for", "i", "in", "self", ".", "choices", "if", "self", ".", "choices", "[", "i", "]", "==", "v", "]", "if", "len", "(", "orig", ")", "==", "1", ":", "return", "orig", "[", "0", "]", "elif", "len", "(", "orig", ")", "==", "0", ":", "# No such choice", "raise", "NotImplementedError", "(", "\"No such reverse choice {0} for field {1}.\"", ".", "format", "(", "v", ",", "self", ")", ")", "else", ":", "# Too many choices. We could return one possible choice (e.g. orig[0]).", "raise", "NotImplementedError", "(", "\"Too many reverse choices {0} for value {1} for field {2}\"", ".", "format", "(", "orig", ",", "v", ",", "self", ")", ")" ]
Turn this value into API format. Do a reverse dictionary lookup on choices to find the original value. If there are no matching keys, or too many, we raise a NotImplementedError for now, as marshal is not currently used anywhere. In the future we will want to fail gracefully.
[ "Turn", "this", "value", "into", "API", "format", "." ]
python
train
46.421053
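A minimal standalone sketch of the reverse lookup marshal() performs, using a made-up choices mapping:

# Hypothetical mapping of API codes to display values.
choices = {1: 'ride', 2: 'run'}
v = 'run'
orig = [i for i in choices if choices[i] == v]
print(orig[0] if len(orig) == 1 else None)  # 2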
saltstack/salt
salt/beacons/logs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/beacons/logs.py#L70-L157
def beacon(config): ''' Read the log file and return match whole string .. code-block:: yaml beacons: log: - file: <path> - tags: <tag>: regex: <pattern> .. note:: regex matching is based on the `re`_ module .. _re: https://docs.python.org/3.6/library/re.html#regular-expression-syntax The defined tag is added to the beacon event tag. This is not the tag in the log. .. code-block:: yaml beacons: log: - file: /var/log/messages #path to log. - tags: goodbye/world: # tag added to beacon event tag. regex: .*good-bye.* # match good-bye string anywhere in the log entry. ''' _config = {} list(map(_config.update, config)) ret = [] if 'file' not in _config: event = SKEL.copy() event['tag'] = 'global' event['error'] = 'file not defined in config' ret.append(event) return ret with salt.utils.files.fopen(_config['file'], 'r') as fp_: loc = __context__.get(LOC_KEY, 0) if loc == 0: fp_.seek(0, 2) __context__[LOC_KEY] = fp_.tell() return ret fp_.seek(0, 2) __context__[LOC_KEY] = fp_.tell() fp_.seek(loc) txt = fp_.read() log.info('txt %s', txt) d = {} for tag in _config.get('tags', {}): if 'regex' not in _config['tags'][tag]: continue if not _config['tags'][tag]['regex']: continue try: d[tag] = re.compile(r'{0}'.format(_config['tags'][tag]['regex'])) except Exception as e: event = SKEL.copy() event['tag'] = tag event['error'] = 'bad regex' ret.append(event) for line in txt.splitlines(): for tag, reg in d.items(): try: m = reg.match(line) if m: event = SKEL.copy() event['tag'] = tag event['raw'] = line event['match'] = 'yes' ret.append(event) except Exception: event = SKEL.copy() event['tag'] = tag event['error'] = 'bad match' ret.append(event) return ret
[ "def", "beacon", "(", "config", ")", ":", "_config", "=", "{", "}", "list", "(", "map", "(", "_config", ".", "update", ",", "config", ")", ")", "ret", "=", "[", "]", "if", "'file'", "not", "in", "_config", ":", "event", "=", "SKEL", ".", "copy", "(", ")", "event", "[", "'tag'", "]", "=", "'global'", "event", "[", "'error'", "]", "=", "'file not defined in config'", "ret", ".", "append", "(", "event", ")", "return", "ret", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "_config", "[", "'file'", "]", ",", "'r'", ")", "as", "fp_", ":", "loc", "=", "__context__", ".", "get", "(", "LOC_KEY", ",", "0", ")", "if", "loc", "==", "0", ":", "fp_", ".", "seek", "(", "0", ",", "2", ")", "__context__", "[", "LOC_KEY", "]", "=", "fp_", ".", "tell", "(", ")", "return", "ret", "fp_", ".", "seek", "(", "0", ",", "2", ")", "__context__", "[", "LOC_KEY", "]", "=", "fp_", ".", "tell", "(", ")", "fp_", ".", "seek", "(", "loc", ")", "txt", "=", "fp_", ".", "read", "(", ")", "log", ".", "info", "(", "'txt %s'", ",", "txt", ")", "d", "=", "{", "}", "for", "tag", "in", "_config", ".", "get", "(", "'tags'", ",", "{", "}", ")", ":", "if", "'regex'", "not", "in", "_config", "[", "'tags'", "]", "[", "tag", "]", ":", "continue", "if", "not", "_config", "[", "'tags'", "]", "[", "tag", "]", "[", "'regex'", "]", ":", "continue", "try", ":", "d", "[", "tag", "]", "=", "re", ".", "compile", "(", "r'{0}'", ".", "format", "(", "_config", "[", "'tags'", "]", "[", "tag", "]", "[", "'regex'", "]", ")", ")", "except", "Exception", "as", "e", ":", "event", "=", "SKEL", ".", "copy", "(", ")", "event", "[", "'tag'", "]", "=", "tag", "event", "[", "'error'", "]", "=", "'bad regex'", "ret", ".", "append", "(", "event", ")", "for", "line", "in", "txt", ".", "splitlines", "(", ")", ":", "for", "tag", ",", "reg", "in", "d", ".", "items", "(", ")", ":", "try", ":", "m", "=", "reg", ".", "match", "(", "line", ")", "if", "m", ":", "event", "=", "SKEL", ".", "copy", "(", ")", "event", "[", "'tag'", "]", "=", "tag", "event", "[", "'raw'", "]", "=", "line", "event", "[", "'match'", "]", "=", "'yes'", "ret", ".", "append", "(", "event", ")", "except", "Exception", ":", "event", "=", "SKEL", ".", "copy", "(", ")", "event", "[", "'tag'", "]", "=", "tag", "event", "[", "'error'", "]", "=", "'bad match'", "ret", ".", "append", "(", "event", ")", "return", "ret" ]
Read the log file and return match whole string .. code-block:: yaml beacons: log: - file: <path> - tags: <tag>: regex: <pattern> .. note:: regex matching is based on the `re`_ module .. _re: https://docs.python.org/3.6/library/re.html#regular-expression-syntax The defined tag is added to the beacon event tag. This is not the tag in the log. .. code-block:: yaml beacons: log: - file: /var/log/messages #path to log. - tags: goodbye/world: # tag added to beacon event tag. regex: .*good-bye.* # match good-bye string anywhere in the log entry.
[ "Read", "the", "log", "file", "and", "return", "match", "whole", "string" ]
python
train
27.659091
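A small runnable sketch of the config flattening at the top of beacon(): the beacon receives a list of one-key dicts (as in the YAML above) and merges them into a single dict:

config = [{'file': '/var/log/messages'},
          {'tags': {'goodbye/world': {'regex': '.*good-bye.*'}}}]
_config = {}
list(map(_config.update, config))  # same merge idiom as the beacon
print(_config['file'])  # /var/log/messages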
Azure/azure-sdk-for-python
azure-mgmt-resource/azure/mgmt/resource/policy/policy_client.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-mgmt-resource/azure/mgmt/resource/policy/policy_client.py#L127-L152
def policy_assignments(self): """Instance depends on the API version: * 2015-10-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2015_10_01_preview.operations.PolicyAssignmentsOperations>` * 2016-04-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_04_01.operations.PolicyAssignmentsOperations>` * 2016-12-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_12_01.operations.PolicyAssignmentsOperations>` * 2017-06-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2017_06_01_preview.operations.PolicyAssignmentsOperations>` * 2018-03-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_03_01.operations.PolicyAssignmentsOperations>` * 2018-05-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_05_01.operations.PolicyAssignmentsOperations>` """ api_version = self._get_api_version('policy_assignments') if api_version == '2015-10-01-preview': from .v2015_10_01_preview.operations import PolicyAssignmentsOperations as OperationClass elif api_version == '2016-04-01': from .v2016_04_01.operations import PolicyAssignmentsOperations as OperationClass elif api_version == '2016-12-01': from .v2016_12_01.operations import PolicyAssignmentsOperations as OperationClass elif api_version == '2017-06-01-preview': from .v2017_06_01_preview.operations import PolicyAssignmentsOperations as OperationClass elif api_version == '2018-03-01': from .v2018_03_01.operations import PolicyAssignmentsOperations as OperationClass elif api_version == '2018-05-01': from .v2018_05_01.operations import PolicyAssignmentsOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
[ "def", "policy_assignments", "(", "self", ")", ":", "api_version", "=", "self", ".", "_get_api_version", "(", "'policy_assignments'", ")", "if", "api_version", "==", "'2015-10-01-preview'", ":", "from", ".", "v2015_10_01_preview", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "elif", "api_version", "==", "'2016-04-01'", ":", "from", ".", "v2016_04_01", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "elif", "api_version", "==", "'2016-12-01'", ":", "from", ".", "v2016_12_01", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "elif", "api_version", "==", "'2017-06-01-preview'", ":", "from", ".", "v2017_06_01_preview", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "elif", "api_version", "==", "'2018-03-01'", ":", "from", ".", "v2018_03_01", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "elif", "api_version", "==", "'2018-05-01'", ":", "from", ".", "v2018_05_01", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "else", ":", "raise", "NotImplementedError", "(", "\"APIVersion {} is not available\"", ".", "format", "(", "api_version", ")", ")", "return", "OperationClass", "(", "self", ".", "_client", ",", "self", ".", "config", ",", "Serializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ",", "Deserializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ")" ]
Instance depends on the API version: * 2015-10-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2015_10_01_preview.operations.PolicyAssignmentsOperations>` * 2016-04-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_04_01.operations.PolicyAssignmentsOperations>` * 2016-12-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_12_01.operations.PolicyAssignmentsOperations>` * 2017-06-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2017_06_01_preview.operations.PolicyAssignmentsOperations>` * 2018-03-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_03_01.operations.PolicyAssignmentsOperations>` * 2018-05-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_05_01.operations.PolicyAssignmentsOperations>`
[ "Instance", "depends", "on", "the", "API", "version", ":" ]
python
test
81.076923
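A generic standalone sketch of the version-dispatch pattern this property uses; the module names below are placeholders for the real versioned imports:

def pick_operations(api_version):
    # Hypothetical lookup table standing in for the if/elif import chain.
    table = {'2016-04-01': 'v2016_04_01.operations',
             '2018-05-01': 'v2018_05_01.operations'}
    try:
        return table[api_version]
    except KeyError:
        raise NotImplementedError('APIVersion {} is not available'.format(api_version))

print(pick_operations('2018-05-01'))  # v2018_05_01.operations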
IdentityPython/oidcendpoint
src/oidcendpoint/token_handler.py
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/token_handler.py#L170-L184
def info(self, token): """ Return token information. :param token: A token :return: dictionary with info about the token """ _res = dict(zip(['_id', 'type', 'sid', 'exp'], self.split_token(token))) if _res['type'] != self.type: raise WrongTokenType(_res['type']) else: _res['handler'] = self _res['black_listed'] = self.is_black_listed(token) return _res
[ "def", "info", "(", "self", ",", "token", ")", ":", "_res", "=", "dict", "(", "zip", "(", "[", "'_id'", ",", "'type'", ",", "'sid'", ",", "'exp'", "]", ",", "self", ".", "split_token", "(", "token", ")", ")", ")", "if", "_res", "[", "'type'", "]", "!=", "self", ".", "type", ":", "raise", "WrongTokenType", "(", "_res", "[", "'type'", "]", ")", "else", ":", "_res", "[", "'handler'", "]", "=", "self", "_res", "[", "'black_listed'", "]", "=", "self", ".", "is_black_listed", "(", "token", ")", "return", "_res" ]
Return token information. :param token: A token :return: dictionary with info about the token
[ "Return", "token", "information", "." ]
python
train
31.8
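A hypothetical sketch of the zip/split pattern info() relies on; the '::' delimiter is an assumption standing in for whatever split_token() actually uses:

# Assumed token layout: id, type, session id, expiry, '::'-delimited.
token = 'abc123::T::sid42::1700000000'
_res = dict(zip(['_id', 'type', 'sid', 'exp'], token.split('::')))
print(_res['type'], _res['exp'])  # T 1700000000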
AmesCornish/buttersink
buttersink/S3Store.py
https://github.com/AmesCornish/buttersink/blob/5cc37e30d9f8071fcf3497dca8b8a91b910321ea/buttersink/S3Store.py#L195-L203
def receiveVolumeInfo(self, paths): """ Return Context Manager for a file-like (stream) object to store volume info. """ path = self.selectReceivePath(paths) path = path + Store.theInfoExtension if self._skipDryRun(logger)("receive info in '%s'", path): return None return _Uploader(self.bucket, path, bufferSize=theInfoBufferSize)
[ "def", "receiveVolumeInfo", "(", "self", ",", "paths", ")", ":", "path", "=", "self", ".", "selectReceivePath", "(", "paths", ")", "path", "=", "path", "+", "Store", ".", "theInfoExtension", "if", "self", ".", "_skipDryRun", "(", "logger", ")", "(", "\"receive info in '%s'\"", ",", "path", ")", ":", "return", "None", "return", "_Uploader", "(", "self", ".", "bucket", ",", "path", ",", "bufferSize", "=", "theInfoBufferSize", ")" ]
Return Context Manager for a file-like (stream) object to store volume info.
[ "Return", "Context", "Manager", "for", "a", "file", "-", "like", "(", "stream", ")", "object", "to", "store", "volume", "info", "." ]
python
train
41.888889
mbj4668/pyang
pyang/plugins/jsonxsl.py
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/plugins/jsonxsl.py#L250-L259
def xsl_withparam(self, name, value, parent): """Construct an XSLT 'with-param' element. `parent` is this element's parent. `name` is the parameter name. `value` is the parameter value. """ res = ET.SubElement(parent, "with-param", name=name) res.text = value return res
[ "def", "xsl_withparam", "(", "self", ",", "name", ",", "value", ",", "parent", ")", ":", "res", "=", "ET", ".", "SubElement", "(", "parent", ",", "\"with-param\"", ",", "name", "=", "name", ")", "res", ".", "text", "=", "value", "return", "res" ]
Construct an XSLT 'with-param' element. `parent` is this element's parent. `name` is the parameter name. `value` is the parameter value.
[ "Construct", "an", "XSLT", "with", "-", "param", "element", "." ]
python
train
32.6
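A self-contained ElementTree sketch of the element this helper builds:

import xml.etree.ElementTree as ET

# 'call-template' is just an example parent; the helper accepts any element.
parent = ET.Element('call-template')
res = ET.SubElement(parent, 'with-param', name='count')
res.text = '3'
print(ET.tostring(parent).decode())
# <call-template><with-param name="count">3</with-param></call-template>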
berkeley-cocosci/Wallace
wallace/custom.py
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/custom.py#L197-L206
def launch(): """Launch the experiment.""" exp = experiment(db.init_db(drop_all=False)) exp.log("Launching experiment...", "-----") init_db() exp.recruiter().open_recruitment(n=exp.initial_recruitment_size) session_psiturk.commit() session.commit() return success_response(request_type="launch")
[ "def", "launch", "(", ")", ":", "exp", "=", "experiment", "(", "db", ".", "init_db", "(", "drop_all", "=", "False", ")", ")", "exp", ".", "log", "(", "\"Launching experiment...\"", ",", "\"-----\"", ")", "init_db", "(", ")", "exp", ".", "recruiter", "(", ")", ".", "open_recruitment", "(", "n", "=", "exp", ".", "initial_recruitment_size", ")", "session_psiturk", ".", "commit", "(", ")", "session", ".", "commit", "(", ")", "return", "success_response", "(", "request_type", "=", "\"launch\"", ")" ]
Launch the experiment.
[ "Launch", "the", "experiment", "." ]
python
train
31.9
oxalorg/dystic
dystic/indexer.py
https://github.com/oxalorg/dystic/blob/6f5a449158ec12fc1c9cc25d85e2f8adc27885db/dystic/indexer.py#L17-L48
def index_dir(self, folder): """ Creates a nested dictionary that represents the folder structure of folder. Also extracts metadata from all markdown posts and adds to the dictionary. """ folder_path = folder print('Indexing folder: ' + folder_path) nested_dir = {} folder = folder_path.rstrip(os.sep) start = folder.rfind(os.sep) + 1 for root, dirs, files in os.walk(folder): folders = root[start:].split(os.sep) # subdir = dict.fromkeys(files) subdir = {} for f in files: # Create an entry for every markdown file if os.path.splitext(f)[1] == '.md': with open(os.path.abspath(os.path.join(root, f)), encoding='utf-8') as fp: try: _, meta = self.mrk.extract_meta(fp.read()) except: print("Skipping indexing " + f + "; Could not parse metadata") meta = {'title': f} pass # Value of the entry (the key) is its metadata subdir[f] = meta parent = nested_dir for fold in folders[:-1]: parent = parent.get(fold) # Attach the config of all children nodes onto the parent parent[folders[-1]] = subdir return nested_dir
[ "def", "index_dir", "(", "self", ",", "folder", ")", ":", "folder_path", "=", "folder", "print", "(", "'Indexing folder: '", "+", "folder_path", ")", "nested_dir", "=", "{", "}", "folder", "=", "folder_path", ".", "rstrip", "(", "os", ".", "sep", ")", "start", "=", "folder", ".", "rfind", "(", "os", ".", "sep", ")", "+", "1", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "folder", ")", ":", "folders", "=", "root", "[", "start", ":", "]", ".", "split", "(", "os", ".", "sep", ")", "# subdir = dict.fromkeys(files)", "subdir", "=", "{", "}", "for", "f", "in", "files", ":", "# Create an entry for every markdown file", "if", "os", ".", "path", ".", "splitext", "(", "f", ")", "[", "1", "]", "==", "'.md'", ":", "with", "open", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", ")", ",", "encoding", "=", "'utf-8'", ")", "as", "fp", ":", "try", ":", "_", ",", "meta", "=", "self", ".", "mrk", ".", "extract_meta", "(", "fp", ".", "read", "(", ")", ")", "except", ":", "print", "(", "\"Skipping indexing \"", "+", "f", "+", "\"; Could not parse metadata\"", ")", "meta", "=", "{", "'title'", ":", "f", "}", "pass", "# Value of the entry (the key) is it's metadata", "subdir", "[", "f", "]", "=", "meta", "parent", "=", "nested_dir", "for", "fold", "in", "folders", "[", ":", "-", "1", "]", ":", "parent", "=", "parent", ".", "get", "(", "fold", ")", "# Attach the config of all children nodes onto the parent", "parent", "[", "folders", "[", "-", "1", "]", "]", "=", "subdir", "return", "nested_dir" ]
Creates a nested dictionary that represents the folder structure of folder. Also extracts metadata from all markdown posts and adds to the dictionary.
[ "Creates", "a", "nested", "dictionary", "that", "represents", "the", "folder", "structure", "of", "folder", ".", "Also", "extracts", "metadata", "from", "all", "markdown", "posts", "and", "adds", "to", "the", "dictionary", "." ]
python
train
44.78125
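A reduced, runnable sketch of the same walk: collect markdown file names per directory, with the metadata extraction omitted:

import os

index = {}
for root, dirs, files in os.walk('.'):
    md = [f for f in files if os.path.splitext(f)[1] == '.md']
    if md:
        index[root] = md  # real code stores parsed metadata per file instead
print(index)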
edx/edx-django-utils
edx_django_utils/monitoring/utils.py
https://github.com/edx/edx-django-utils/blob/16cb4ac617e53c572bf68ccd19d24afeff1ca769/edx_django_utils/monitoring/utils.py#L103-L113
def function_trace(function_name): """ Wraps a chunk of code that we want to appear as a separate, explicit segment in our monitoring tools. """ if newrelic: nr_transaction = newrelic.agent.current_transaction() with newrelic.agent.FunctionTrace(nr_transaction, function_name): yield else: yield
[ "def", "function_trace", "(", "function_name", ")", ":", "if", "newrelic", ":", "nr_transaction", "=", "newrelic", ".", "agent", ".", "current_transaction", "(", ")", "with", "newrelic", ".", "agent", ".", "FunctionTrace", "(", "nr_transaction", ",", "function_name", ")", ":", "yield", "else", ":", "yield" ]
Wraps a chunk of code that we want to appear as a separate, explicit segment in our monitoring tools.
[ "Wraps", "a", "chunk", "of", "code", "that", "we", "want", "to", "appear", "as", "a", "separate", "explicit", "segment", "in", "our", "monitoring", "tools", "." ]
python
train
31.454545
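A hypothetical usage sketch; the import path is inferred from the module path in this record, and the block is a no-op when the newrelic agent is absent:

from edx_django_utils.monitoring.utils import function_trace  # path assumed from above

with function_trace('expensive-computation'):
    total = sum(range(10000))  # stand-in workload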
tanghaibao/goatools
goatools/rpt/write_hierarchy_base.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/rpt/write_hierarchy_base.py#L97-L102
def _init_item_marks(item_marks): """Initialize the marked item dict.""" if isinstance(item_marks, dict): return item_marks if item_marks: return {item_id:'>' for item_id in item_marks}
[ "def", "_init_item_marks", "(", "item_marks", ")", ":", "if", "isinstance", "(", "item_marks", ",", "dict", ")", ":", "return", "item_marks", "if", "item_marks", ":", "return", "{", "item_id", ":", "'>'", "for", "item_id", "in", "item_marks", "}" ]
Initialize the marked item dict.
[ "Initialize", "the", "marked", "item", "dict", "." ]
python
train
38
tensorflow/probability
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L454-L541
def backward_smoothing_pass(self, filtered_means, filtered_covs, predicted_means, predicted_covs): """Run the backward pass in Kalman smoother. The backward smoothing uses the Rauch, Tung and Striebel smoother, as discussed in section 18.3.2 of Kevin P. Murphy, 2012, Machine Learning: A Probabilistic Perspective, The MIT Press. The inputs are returned by the `forward_filter` function. Args: filtered_means: Means of the per-timestep filtered marginal distributions p(z_t | x_{:t}), as a Tensor of shape `sample_shape(x) + batch_shape + [num_timesteps, latent_size]`. filtered_covs: Covariances of the per-timestep filtered marginal distributions p(z_t | x_{:t}), as a Tensor of shape `batch_shape + [num_timesteps, latent_size, latent_size]`. predicted_means: Means of the per-timestep predictive distributions over latent states, p(z_{t+1} | x_{:t}), as a Tensor of shape `sample_shape(x) + batch_shape + [num_timesteps, latent_size]`. predicted_covs: Covariances of the per-timestep predictive distributions over latent states, p(z_{t+1} | x_{:t}), as a Tensor of shape `batch_shape + [num_timesteps, latent_size, latent_size]`. Returns: posterior_means: Means of the smoothed marginal distributions p(z_t | x_{1:T}), as a Tensor of shape `sample_shape(x) + batch_shape + [num_timesteps, latent_size]`, which is of the same shape as filtered_means. posterior_covs: Covariances of the smoothed marginal distributions p(z_t | x_{1:T}), as a Tensor of shape `batch_shape + [num_timesteps, latent_size, latent_size]`, which is of the same shape as filtered_covs. """ with tf.name_scope("backward_pass"): filtered_means = tf.convert_to_tensor( value=filtered_means, name="filtered_means") filtered_covs = tf.convert_to_tensor( value=filtered_covs, name="filtered_covs") predicted_means = tf.convert_to_tensor( value=predicted_means, name="predicted_means") predicted_covs = tf.convert_to_tensor( value=predicted_covs, name="predicted_covs") # To scan over time dimension, we need to move 'num_timesteps' from the # event shape to the initial dimension of the tensor. filtered_means = distribution_util.move_dimension(filtered_means, -2, 0) filtered_covs = distribution_util.move_dimension(filtered_covs, -3, 0) predicted_means = distribution_util.move_dimension(predicted_means, -2, 0) predicted_covs = distribution_util.move_dimension(predicted_covs, -3, 0) # The means are assumed to be vectors. Adding a dummy index to # ensure the `matmul` op working smoothly. filtered_means = filtered_means[..., tf.newaxis] predicted_means = predicted_means[..., tf.newaxis] initial_backward_mean = predicted_means[-1, ...] initial_backward_cov = predicted_covs[-1, ...] num_timesteps = tf.shape(input=filtered_means)[0] initial_state = BackwardPassState( backward_mean=initial_backward_mean, backward_cov=initial_backward_cov, timestep=self.initial_step + num_timesteps - 1) update_step_fn = build_backward_pass_step( self.get_transition_matrix_for_timestep) # For backward pass, it scans the `elems` from last to first. posterior_states = tf.scan(update_step_fn, elems=(filtered_means, filtered_covs, predicted_means, predicted_covs), initializer=initial_state, reverse=True) # Move the time dimension back into the event shape. posterior_means = distribution_util.move_dimension( posterior_states.backward_mean[..., 0], 0, -2) posterior_covs = distribution_util.move_dimension( posterior_states.backward_cov, 0, -3) return (posterior_means, posterior_covs)
[ "def", "backward_smoothing_pass", "(", "self", ",", "filtered_means", ",", "filtered_covs", ",", "predicted_means", ",", "predicted_covs", ")", ":", "with", "tf", ".", "name_scope", "(", "\"backward_pass\"", ")", ":", "filtered_means", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "filtered_means", ",", "name", "=", "\"filtered_means\"", ")", "filtered_covs", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "filtered_covs", ",", "name", "=", "\"filtered_covs\"", ")", "predicted_means", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "predicted_means", ",", "name", "=", "\"predicted_means\"", ")", "predicted_covs", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "predicted_covs", ",", "name", "=", "\"predicted_covs\"", ")", "# To scan over time dimension, we need to move 'num_timesteps' from the", "# event shape to the initial dimension of the tensor.", "filtered_means", "=", "distribution_util", ".", "move_dimension", "(", "filtered_means", ",", "-", "2", ",", "0", ")", "filtered_covs", "=", "distribution_util", ".", "move_dimension", "(", "filtered_covs", ",", "-", "3", ",", "0", ")", "predicted_means", "=", "distribution_util", ".", "move_dimension", "(", "predicted_means", ",", "-", "2", ",", "0", ")", "predicted_covs", "=", "distribution_util", ".", "move_dimension", "(", "predicted_covs", ",", "-", "3", ",", "0", ")", "# The means are assumed to be vectors. Adding a dummy index to", "# ensure the `matmul` op working smoothly.", "filtered_means", "=", "filtered_means", "[", "...", ",", "tf", ".", "newaxis", "]", "predicted_means", "=", "predicted_means", "[", "...", ",", "tf", ".", "newaxis", "]", "initial_backward_mean", "=", "predicted_means", "[", "-", "1", ",", "...", "]", "initial_backward_cov", "=", "predicted_covs", "[", "-", "1", ",", "...", "]", "num_timesteps", "=", "tf", ".", "shape", "(", "input", "=", "filtered_means", ")", "[", "0", "]", "initial_state", "=", "BackwardPassState", "(", "backward_mean", "=", "initial_backward_mean", ",", "backward_cov", "=", "initial_backward_cov", ",", "timestep", "=", "self", ".", "initial_step", "+", "num_timesteps", "-", "1", ")", "update_step_fn", "=", "build_backward_pass_step", "(", "self", ".", "get_transition_matrix_for_timestep", ")", "# For backward pass, it scans the `elems` from last to first.", "posterior_states", "=", "tf", ".", "scan", "(", "update_step_fn", ",", "elems", "=", "(", "filtered_means", ",", "filtered_covs", ",", "predicted_means", ",", "predicted_covs", ")", ",", "initializer", "=", "initial_state", ",", "reverse", "=", "True", ")", "# Move the time dimension back into the event shape.", "posterior_means", "=", "distribution_util", ".", "move_dimension", "(", "posterior_states", ".", "backward_mean", "[", "...", ",", "0", "]", ",", "0", ",", "-", "2", ")", "posterior_covs", "=", "distribution_util", ".", "move_dimension", "(", "posterior_states", ".", "backward_cov", ",", "0", ",", "-", "3", ")", "return", "(", "posterior_means", ",", "posterior_covs", ")" ]
Run the backward pass in Kalman smoother. The backward smoothing uses the Rauch, Tung and Striebel smoother, as discussed in section 18.3.2 of Kevin P. Murphy, 2012, Machine Learning: A Probabilistic Perspective, The MIT Press. The inputs are returned by the `forward_filter` function. Args: filtered_means: Means of the per-timestep filtered marginal distributions p(z_t | x_{:t}), as a Tensor of shape `sample_shape(x) + batch_shape + [num_timesteps, latent_size]`. filtered_covs: Covariances of the per-timestep filtered marginal distributions p(z_t | x_{:t}), as a Tensor of shape `batch_shape + [num_timesteps, latent_size, latent_size]`. predicted_means: Means of the per-timestep predictive distributions over latent states, p(z_{t+1} | x_{:t}), as a Tensor of shape `sample_shape(x) + batch_shape + [num_timesteps, latent_size]`. predicted_covs: Covariances of the per-timestep predictive distributions over latent states, p(z_{t+1} | x_{:t}), as a Tensor of shape `batch_shape + [num_timesteps, latent_size, latent_size]`. Returns: posterior_means: Means of the smoothed marginal distributions p(z_t | x_{1:T}), as a Tensor of shape `sample_shape(x) + batch_shape + [num_timesteps, latent_size]`, which is of the same shape as filtered_means. posterior_covs: Covariances of the smoothed marginal distributions p(z_t | x_{1:T}), as a Tensor of shape `batch_shape + [num_timesteps, latent_size, latent_size]`, which is of the same shape as filtered_covs.
[ "Run", "the", "backward", "pass", "in", "Kalman", "smoother", "." ]
python
test
47.340909
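A hedged usage sketch: the unpacking below assumes the documented seven-tuple return value of LinearGaussianStateSpaceModel.forward_filter in TensorFlow Probability; model and x are placeholders, not defined here:

# model: a tfp.distributions.LinearGaussianStateSpaceModel (placeholder)
# x: an observation Tensor compatible with the model (placeholder)
(_, filtered_means, filtered_covs,
 predicted_means, predicted_covs, _, _) = model.forward_filter(x)
posterior_means, posterior_covs = model.backward_smoothing_pass(
    filtered_means, filtered_covs, predicted_means, predicted_covs)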
StagPython/StagPy
stagpy/misc.py
https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/misc.py#L15-L31
def out_name(stem, timestep=None): """Return StagPy out file name. Args: stem (str): short description of file content. timestep (int): timestep if relevant. Returns: str: the output file name. Other Parameters: conf.core.outname (str): the generic name stem, defaults to ``'stagpy'``. """ if timestep is not None: stem = (stem + INT_FMT).format(timestep) return conf.core.outname + '_' + stem
[ "def", "out_name", "(", "stem", ",", "timestep", "=", "None", ")", ":", "if", "timestep", "is", "not", "None", ":", "stem", "=", "(", "stem", "+", "INT_FMT", ")", ".", "format", "(", "timestep", ")", "return", "conf", ".", "core", ".", "outname", "+", "'_'", "+", "stem" ]
Return StagPy out file name. Args: stem (str): short description of file content. timestep (int): timestep if relevant. Returns: str: the output file name. Other Parameters: conf.core.outname (str): the generic name stem, defaults to ``'stagpy'``.
[ "Return", "StagPy", "out", "file", "name", "." ]
python
train
27.058824
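A standalone sketch of the naming scheme, assuming INT_FMT is a zero-padded integer format such as '{:05d}' and conf.core.outname carries its default 'stagpy':

INT_FMT = '{:05d}'  # assumed; the real constant lives in the stagpy package
stem = ('t' + INT_FMT).format(42)
print('stagpy' + '_' + stem)  # stagpy_t00042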
buildinspace/peru
peru/cache.py
https://github.com/buildinspace/peru/blob/76e4012c6c34e85fb53a4c6d85f4ac3633d93f77/peru/cache.py#L440-L512
async def modify_tree(self, tree, modifications): '''The modifications are a map of the form, {path: TreeEntry}. The tree can be None to indicate an empty starting tree. The entries can be either blobs or trees, or None to indicate a deletion. The return value is either the hash of the resulting tree, or None if the resulting tree is empty. Modifications in parent directories are done before modifications in subdirectories below them, so for example you can insert a tree at a given path and also insert more new stuff beneath that path, without fear of overwriting the new stuff.''' # Read the original contents of the base tree. if tree is None: entries = {} else: entries = await self.ls_tree(tree, '.') # Separate the modifications into two groups, those that refer to # entries at the base of this tree (e.g. 'foo'), and those that refer # to entries in subtrees (e.g. 'foo/bar'). modifications_at_base = dict() modifications_in_subtrees = collections.defaultdict(dict) for path_str, entry in modifications.items(): # Canonicalize paths to get rid of duplicate/trailing slashes. path = pathlib.PurePosixPath(path_str) # Check for nonsense paths. # TODO: Maybe stop recursive calls from repeating these checks. if len(path.parts) == 0: raise ModifyTreeError('Cannot modify an empty path.') elif path.parts[0] == '/': raise ModifyTreeError('Cannot modify an absolute path.') elif '..' in path.parts: raise ModifyTreeError('.. is not allowed in tree paths.') if len(path.parts) == 1: modifications_at_base[str(path)] = entry else: first_dir = path.parts[0] rest = str(pathlib.PurePosixPath(*path.parts[1:])) modifications_in_subtrees[first_dir][rest] = entry # Insert or delete entries in the base tree. Note that this happens # before any subtree operations. for name, entry in modifications_at_base.items(): if entry is None: entries.pop(name, None) else: entries[name] = entry # Recurse to compute modified subtrees. Note how we handle deletions: # If 'a' is a file, inserting a new file at 'a/b' will implicitly # delete 'a', but trying to delete 'a/b' will be a no-op and will not # delete 'a'. empty_tree = (await self.get_empty_tree()) for name, sub_modifications in modifications_in_subtrees.items(): subtree_base = None if name in entries and entries[name].type == TREE_TYPE: subtree_base = entries[name].hash new_subtree = await self.modify_tree(subtree_base, sub_modifications) if new_subtree != empty_tree: entries[name] = TreeEntry(TREE_MODE, TREE_TYPE, new_subtree) # Delete an empty tree if it was actually a tree to begin with. elif name in entries and entries[name].type == TREE_TYPE: del entries[name] # Return the resulting tree, or None if empty. if entries: session = self.no_index_git_session() tree = await session.make_tree_from_entries(entries) return tree else: return empty_tree
[ "async", "def", "modify_tree", "(", "self", ",", "tree", ",", "modifications", ")", ":", "# Read the original contents of the base tree.", "if", "tree", "is", "None", ":", "entries", "=", "{", "}", "else", ":", "entries", "=", "await", "self", ".", "ls_tree", "(", "tree", ",", "'.'", ")", "# Separate the modifications into two groups, those that refer to", "# entries at the base of this tree (e.g. 'foo'), and those that refer", "# to entries in subtrees (e.g. 'foo/bar').", "modifications_at_base", "=", "dict", "(", ")", "modifications_in_subtrees", "=", "collections", ".", "defaultdict", "(", "dict", ")", "for", "path_str", ",", "entry", "in", "modifications", ".", "items", "(", ")", ":", "# Canonicalize paths to get rid of duplicate/trailing slashes.", "path", "=", "pathlib", ".", "PurePosixPath", "(", "path_str", ")", "# Check for nonsense paths.", "# TODO: Maybe stop recursive calls from repeating these checks.", "if", "len", "(", "path", ".", "parts", ")", "==", "0", ":", "raise", "ModifyTreeError", "(", "'Cannot modify an empty path.'", ")", "elif", "path", ".", "parts", "[", "0", "]", "==", "'/'", ":", "raise", "ModifyTreeError", "(", "'Cannot modify an absolute path.'", ")", "elif", "'..'", "in", "path", ".", "parts", ":", "raise", "ModifyTreeError", "(", "'.. is not allowed in tree paths.'", ")", "if", "len", "(", "path", ".", "parts", ")", "==", "1", ":", "modifications_at_base", "[", "str", "(", "path", ")", "]", "=", "entry", "else", ":", "first_dir", "=", "path", ".", "parts", "[", "0", "]", "rest", "=", "str", "(", "pathlib", ".", "PurePosixPath", "(", "*", "path", ".", "parts", "[", "1", ":", "]", ")", ")", "modifications_in_subtrees", "[", "first_dir", "]", "[", "rest", "]", "=", "entry", "# Insert or delete entries in the base tree. Note that this happens", "# before any subtree operations.", "for", "name", ",", "entry", "in", "modifications_at_base", ".", "items", "(", ")", ":", "if", "entry", "is", "None", ":", "entries", ".", "pop", "(", "name", ",", "None", ")", "else", ":", "entries", "[", "name", "]", "=", "entry", "# Recurse to compute modified subtrees. Note how we handle deletions:", "# If 'a' is a file, inserting a new file at 'a/b' will implicitly", "# delete 'a', but trying to delete 'a/b' will be a no-op and will not", "# delete 'a'.", "empty_tree", "=", "(", "await", "self", ".", "get_empty_tree", "(", ")", ")", "for", "name", ",", "sub_modifications", "in", "modifications_in_subtrees", ".", "items", "(", ")", ":", "subtree_base", "=", "None", "if", "name", "in", "entries", "and", "entries", "[", "name", "]", ".", "type", "==", "TREE_TYPE", ":", "subtree_base", "=", "entries", "[", "name", "]", ".", "hash", "new_subtree", "=", "await", "self", ".", "modify_tree", "(", "subtree_base", ",", "sub_modifications", ")", "if", "new_subtree", "!=", "empty_tree", ":", "entries", "[", "name", "]", "=", "TreeEntry", "(", "TREE_MODE", ",", "TREE_TYPE", ",", "new_subtree", ")", "# Delete an empty tree if it was actually a tree to begin with.", "elif", "name", "in", "entries", "and", "entries", "[", "name", "]", ".", "type", "==", "TREE_TYPE", ":", "del", "entries", "[", "name", "]", "# Return the resulting tree, or None if empty.", "if", "entries", ":", "session", "=", "self", ".", "no_index_git_session", "(", ")", "tree", "=", "await", "session", ".", "make_tree_from_entries", "(", "entries", ")", "return", "tree", "else", ":", "return", "empty_tree" ]
The modifications are a map of the form, {path: TreeEntry}. The tree can be None to indicate an empty starting tree. The entries can be either blobs or trees, or None to indicate a deletion. The return value is either the hash of the resulting tree, or None if the resulting tree is empty. Modifications in parent directories are done before modifications in subdirectories below them, so for example you can insert a tree at a given path and also insert more new stuff beneath that path, without fear of overwriting the new stuff.
[ "The", "modifications", "are", "a", "map", "of", "the", "form", "{", "path", ":", "TreeEntry", "}", ".", "The", "tree", "can", "be", "None", "to", "indicate", "an", "empty", "starting", "tree", ".", "The", "entries", "can", "be", "either", "blobs", "or", "trees", "or", "None", "to", "indicate", "a", "deletion", ".", "The", "return", "value", "is", "either", "the", "hash", "of", "the", "resulting", "tree", "or", "None", "if", "the", "resulting", "tree", "is", "empty", ".", "Modifications", "in", "parent", "directories", "are", "done", "before", "modifications", "in", "subdirectories", "below", "them", "so", "for", "example", "you", "can", "insert", "a", "tree", "at", "a", "given", "path", "and", "also", "insert", "more", "new", "stuff", "beneath", "that", "path", "without", "fear", "of", "overwriting", "the", "new", "stuff", "." ]
python
train
47.876712
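A small runnable sketch of the path-splitting step that routes modifications into subtrees:

import pathlib

path = pathlib.PurePosixPath('foo/bar/baz')
first_dir = path.parts[0]
rest = str(pathlib.PurePosixPath(*path.parts[1:]))
print(first_dir, rest)  # foo bar/baz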
collectiveacuity/labPack
labpack/databases/couchbase.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/databases/couchbase.py#L1038-L1081
def purge(self, doc_ids): ''' a method to remove docs from the collection :param doc_ids: string or list of strings with document ids to purge :return: list of strings of doc ids purged ''' # https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/document/post__db___purge title = '%s.purge' % self.__class__.__name__ # ingest arguments if isinstance(doc_ids, str): doc_ids = [ doc_ids ] # validate inputs input_fields = { 'doc_ids': doc_ids } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # construct request fields url = self.bucket_url + '/_purge' json_body = {} for doc in doc_ids: json_body[doc] = [ "*" ] # send request response = requests.post(url, json=json_body) # construct output from response purged_list = [] purged_map = {} response_details = response.json() if 'purged' in response_details.keys(): purged_map = response_details['purged'] for key in purged_map.keys(): purged_list.append(key) return purged_list
[ "def", "purge", "(", "self", ",", "doc_ids", ")", ":", "# https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/document/post__db___purge", "title", "=", "'%s.purge'", "%", "self", ".", "__class__", ".", "__name__", "# ingest arguments", "if", "isinstance", "(", "doc_ids", ",", "str", ")", ":", "doc_ids", "=", "[", "doc_ids", "]", "# validate inputs", "input_fields", "=", "{", "'doc_ids'", ":", "doc_ids", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# construct request fields", "url", "=", "self", ".", "bucket_url", "+", "'/_purge'", "json_body", "=", "{", "}", "for", "doc", "in", "doc_ids", ":", "json_body", "[", "doc", "]", "=", "[", "\"*\"", "]", "# send request ", "response", "=", "requests", ".", "post", "(", "url", ",", "json", "=", "json_body", ")", "# construct output from response", "purged_list", "=", "[", "]", "purged_map", "=", "{", "}", "response_details", "=", "response", ".", "json", "(", ")", "if", "'purged'", "in", "response_details", ".", "keys", "(", ")", ":", "purged_map", "=", "response_details", "[", "'purged'", "]", "for", "key", "in", "purged_map", ".", "keys", "(", ")", ":", "purged_list", ".", "append", "(", "key", ")", "return", "purged_list" ]
a method to remove docs from the collection :param doc_ids: string or list of strings with document ids to purge :return: list of strings of doc ids purged
[ "a", "method", "to", "remove", "docs", "from", "the", "collection", ":", "param", "doc_ids", ":", "string", "or", "list", "of", "strings", "with", "document", "ids", "to", "purge", ":", "return", ":", "list", "of", "strings", "of", "doc", "ids", "purged" ]
python
train
31.409091
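A sketch of the JSON body purge() posts to the Sync Gateway _purge endpoint for two hypothetical document ids:

doc_ids = ['doc-1', 'doc-2']
json_body = {doc: ['*'] for doc in doc_ids}  # dict-comp form of the loop above
print(json_body)  # {'doc-1': ['*'], 'doc-2': ['*']}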
ynop/audiomate
audiomate/processing/pipeline/base.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/processing/pipeline/base.py#L113-L137
def update(self, data, offset, is_last, buffer_index=0): """ Update the buffer at the given index. Args: data (np.ndarray): The frames. offset (int): The index of the first frame in `data` within the sequence. is_last (bool): Whether this is the last block of frames in the sequence. buffer_index (int): The index of the buffer to update (< self.num_buffers). """ if buffer_index >= self.num_buffers: raise ValueError('Expected buffer index < {} but got index {}.'.format(self.num_buffers, buffer_index)) if self.buffers[buffer_index] is not None and self.buffers[buffer_index].shape[0] > 0: expected_next_frame = self.current_frame + self.buffers[buffer_index].shape[0] if expected_next_frame != offset: raise ValueError( 'There are missing frames. Last frame in buffer is {}. The passed frames start at {}.'.format( expected_next_frame, offset)) self.buffers[buffer_index] = np.vstack([self.buffers[buffer_index], data]) else: self.buffers[buffer_index] = data self.buffers_full[buffer_index] = is_last
[ "def", "update", "(", "self", ",", "data", ",", "offset", ",", "is_last", ",", "buffer_index", "=", "0", ")", ":", "if", "buffer_index", ">=", "self", ".", "num_buffers", ":", "raise", "ValueError", "(", "'Expected buffer index < {} but got index {}.'", ".", "format", "(", "self", ".", "num_buffers", ",", "buffer_index", ")", ")", "if", "self", ".", "buffers", "[", "buffer_index", "]", "is", "not", "None", "and", "self", ".", "buffers", "[", "buffer_index", "]", ".", "shape", "[", "0", "]", ">", "0", ":", "expected_next_frame", "=", "self", ".", "current_frame", "+", "self", ".", "buffers", "[", "buffer_index", "]", ".", "shape", "[", "0", "]", "if", "expected_next_frame", "!=", "offset", ":", "raise", "ValueError", "(", "'There are missing frames. Last frame in buffer is {}. The passed frames start at {}.'", ".", "format", "(", "expected_next_frame", ",", "offset", ")", ")", "self", ".", "buffers", "[", "buffer_index", "]", "=", "np", ".", "vstack", "(", "[", "self", ".", "buffers", "[", "buffer_index", "]", ",", "data", "]", ")", "else", ":", "self", ".", "buffers", "[", "buffer_index", "]", "=", "data", "self", ".", "buffers_full", "[", "buffer_index", "]", "=", "is_last" ]
Update the buffer at the given index. Args: data (np.ndarray): The frames. offset (int): The index of the first frame in `data` within the sequence. is_last (bool): Whether this is the last block of frames in the sequence. buffer_index (int): The index of the buffer to update (< self.num_buffers).
[ "Update", "the", "buffer", "at", "the", "given", "index", "." ]
python
train
48.64
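A minimal numpy sketch of the stacking behaviour: successive frame blocks are appended so the buffer stays contiguous in frame order (13 is an arbitrary feature dimension):

import numpy as np

buf = np.zeros((0, 13))
for block in (np.ones((4, 13)), np.ones((2, 13))):
    buf = np.vstack([buf, block])
print(buf.shape)  # (6, 13)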
rosenbrockc/acorn
acorn/analyze/sklearn.py
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/analyze/sklearn.py#L124-L143
def predict(fqdn, result, *argl, **argd): """Analyzes the result of a generic predict operation performed by `sklearn`. Args: fqdn (str): fully-qualified name of the method that was called. result: result of calling the method with `fqdn`. argl (tuple): positional arguments passed to the method call. argd (dict): keyword arguments passed to the method call. """ #Check the arguments to see what kind of data we are working with, then #choose the appropriate function below to return the analysis dictionary. out = None if len(argl) > 0: machine = argl[0] if isclassifier(machine): out = classify_predict(fqdn, result, None, *argl, **argd) elif isregressor(machine): out = regress_predict(fqdn, result, None, *argl, **argd) return out
[ "def", "predict", "(", "fqdn", ",", "result", ",", "*", "argl", ",", "*", "*", "argd", ")", ":", "#Check the arguments to see what kind of data we are working with, then", "#choose the appropriate function below to return the analysis dictionary.", "out", "=", "None", "if", "len", "(", "argl", ")", ">", "0", ":", "machine", "=", "argl", "[", "0", "]", "if", "isclassifier", "(", "machine", ")", ":", "out", "=", "classify_predict", "(", "fqdn", ",", "result", ",", "None", ",", "*", "argl", ",", "*", "*", "argd", ")", "elif", "isregressor", "(", "machine", ")", ":", "out", "=", "regress_predict", "(", "fqdn", ",", "result", ",", "None", ",", "*", "argl", ",", "*", "*", "argd", ")", "return", "out" ]
Analyzes the result of a generic predict operation performed by `sklearn`. Args: fqdn (str): fully-qualified name of the method that was called. result: result of calling the method with `fqdn`. argl (tuple): positional arguments passed to the method call. argd (dict): keyword arguments passed to the method call.
[ "Analyzes", "the", "result", "of", "a", "generic", "predict", "operation", "performed", "by", "sklearn", "." ]
python
train
41.6
COLORFULBOARD/revision
revision/data.py
https://github.com/COLORFULBOARD/revision/blob/2f22e72cce5b60032a80c002ac45c2ecef0ed987/revision/data.py#L88-L110
def parse(self, rev_string): """ :param rev_string: :type rev_string: str """ elements = rev_string.split(MESSAGE_LINE_SEPARATOR) heading = elements[0] heading_elements = heading.split(" ") self.revision_id = heading_elements[2] datetime_str = "{} {}".format( heading_elements[0], heading_elements[1] ) self.release_date = datetime.datetime.strptime( datetime_str, DATETIME_FORMAT ) self.description = elements[1] self.message = elements[2]
[ "def", "parse", "(", "self", ",", "rev_string", ")", ":", "elements", "=", "rev_string", ".", "split", "(", "MESSAGE_LINE_SEPARATOR", ")", "heading", "=", "elements", "[", "0", "]", "heading_elements", "=", "heading", ".", "split", "(", "\" \"", ")", "self", ".", "revision_id", "=", "heading_elements", "[", "2", "]", "datetime_str", "=", "\"{} {}\"", ".", "format", "(", "heading_elements", "[", "0", "]", ",", "heading_elements", "[", "1", "]", ")", "self", ".", "release_date", "=", "datetime", ".", "datetime", ".", "strptime", "(", "datetime_str", ",", "DATETIME_FORMAT", ")", "self", ".", "description", "=", "elements", "[", "1", "]", "self", ".", "message", "=", "elements", "[", "2", "]" ]
:param rev_string: :type rev_string: str
[ ":", "param", "rev_string", ":", ":", "type", "rev_string", ":", "str" ]
python
train
25.304348
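A hypothetical round-trip sketch; the newline separator and '%Y-%m-%d %H:%M:%S' format are assumptions standing in for MESSAGE_LINE_SEPARATOR and DATETIME_FORMAT:

import datetime

# Assumed layout: '<date> <time> <revision_id>\n<description>\n<message>'
rev_string = '2019-01-01 12:00:00 abc123\nshort description\nfull message'
heading, description, message = rev_string.split('\n')
date_part, time_part, revision_id = heading.split(' ')
release_date = datetime.datetime.strptime(
    date_part + ' ' + time_part, '%Y-%m-%d %H:%M:%S')
print(revision_id, release_date, description)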
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L6044-L6049
def getBoneName(self, action, nBoneIndex, pchBoneName, unNameBufferSize): """Fills the given buffer with the name of the bone at the given index in the skeleton associated with the given action""" fn = self.function_table.getBoneName result = fn(action, nBoneIndex, pchBoneName, unNameBufferSize) return result
[ "def", "getBoneName", "(", "self", ",", "action", ",", "nBoneIndex", ",", "pchBoneName", ",", "unNameBufferSize", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getBoneName", "result", "=", "fn", "(", "action", ",", "nBoneIndex", ",", "pchBoneName", ",", "unNameBufferSize", ")", "return", "result" ]
Fills the given buffer with the name of the bone at the given index in the skeleton associated with the given action
[ "Fills", "the", "given", "buffer", "with", "the", "name", "of", "the", "bone", "at", "the", "given", "index", "in", "the", "skeleton", "associated", "with", "the", "given", "action" ]
python
train
56.333333
twisted/txaws
txaws/ec2/client.py
https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L414-L420
def delete_snapshot(self, snapshot_id): """Remove a previously created snapshot.""" query = self.query_factory( action="DeleteSnapshot", creds=self.creds, endpoint=self.endpoint, other_params={"SnapshotId": snapshot_id}) d = query.submit() return d.addCallback(self.parser.truth_return)
[ "def", "delete_snapshot", "(", "self", ",", "snapshot_id", ")", ":", "query", "=", "self", ".", "query_factory", "(", "action", "=", "\"DeleteSnapshot\"", ",", "creds", "=", "self", ".", "creds", ",", "endpoint", "=", "self", ".", "endpoint", ",", "other_params", "=", "{", "\"SnapshotId\"", ":", "snapshot_id", "}", ")", "d", "=", "query", ".", "submit", "(", ")", "return", "d", ".", "addCallback", "(", "self", ".", "parser", ".", "truth_return", ")" ]
Remove a previously created snapshot.
[ "Remove", "a", "previously", "created", "snapshot", "." ]
python
train
48
benfred/implicit
setup.py
https://github.com/benfred/implicit/blob/6b16c50d1d514a814f2e5b8cf2a829ff23dbba63/setup.py#L101-L116
def set_gcc(): """Try to use GCC on OSX for OpenMP support.""" # For macports and homebrew if 'darwin' in platform.platform().lower(): gcc = extract_gcc_binaries() if gcc is not None: os.environ["CC"] = gcc os.environ["CXX"] = gcc else: global use_openmp use_openmp = False logging.warning('No GCC available. Install gcc from Homebrew ' 'using brew install gcc.')
[ "def", "set_gcc", "(", ")", ":", "# For macports and homebrew", "if", "'darwin'", "in", "platform", ".", "platform", "(", ")", ".", "lower", "(", ")", ":", "gcc", "=", "extract_gcc_binaries", "(", ")", "if", "gcc", "is", "not", "None", ":", "os", ".", "environ", "[", "\"CC\"", "]", "=", "gcc", "os", ".", "environ", "[", "\"CXX\"", "]", "=", "gcc", "else", ":", "global", "use_openmp", "use_openmp", "=", "False", "logging", ".", "warning", "(", "'No GCC available. Install gcc from Homebrew '", "'using brew install gcc.'", ")" ]
Try to use GCC on OSX for OpenMP support.
[ "Try", "to", "use", "GCC", "on", "OSX", "for", "OpenMP", "support", "." ]
python
train
29.6875
rameshg87/pyremotevbox
pyremotevbox/ZSI/schema.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/schema.py#L228-L250
def getSubstitutionElement(self, elt, ps): '''if elt matches a member of the head substitutionGroup, return the GED typecode representation of the member. head -- ElementDeclaration typecode, elt -- the DOM element being parsed ps -- ParsedSoap instance ''' nsuri,ncname = _get_element_nsuri_name(elt) typecode = GED(nsuri,ncname) if typecode is None: return try: nsuri,ncname = typecode.substitutionGroup except (AttributeError, TypeError): return if (ncname == self.pname) and (nsuri == self.nspname or (not nsuri and not self.nspname)): return typecode return
[ "def", "getSubstitutionElement", "(", "self", ",", "elt", ",", "ps", ")", ":", "nsuri", ",", "ncname", "=", "_get_element_nsuri_name", "(", "elt", ")", "typecode", "=", "GED", "(", "nsuri", ",", "ncname", ")", "if", "typecode", "is", "None", ":", "return", "try", ":", "nsuri", ",", "ncname", "=", "typecode", ".", "substitutionGroup", "except", "(", "AttributeError", ",", "TypeError", ")", ":", "return", "if", "(", "ncname", "==", "self", ".", "pname", ")", "and", "(", "nsuri", "==", "self", ".", "nspname", "or", "(", "not", "nsuri", "and", "not", "self", ".", "nspname", ")", ")", ":", "return", "typecode", "return" ]
if elt matches a member of the head substitutionGroup, return the GED typecode representation of the member. head -- ElementDeclaration typecode, elt -- the DOM element being parsed ps -- ParsedSoap instance
[ "if", "elt", "matches", "a", "member", "of", "the", "head", "substitutionGroup", "return", "the", "GED", "typecode", "representation", "of", "the", "member", "." ]
python
train
31.217391
Nekmo/simple-monitor-alert
simple_monitor_alert/management.py
https://github.com/Nekmo/simple-monitor-alert/blob/11d6dbd3c0b3b9a210d6435208066f5636f1f44e/simple_monitor_alert/management.py#L36-L61
def set_default_subparser(self, name, args=None): """default subparser selection. Call after setup, just before parse_args() name: is the name of the subparser to call by default args: if set is the argument list handed to parse_args() Tested with 2.7, 3.2, 3.3 and 3.4; it works with 2.6 assuming argparse is installed. """ subparser_found = False for arg in sys.argv[1:]: if arg in ['-h', '--help']: # global help if no subparser break else: for x in self._subparsers._actions: if not isinstance(x, argparse._SubParsersAction): continue for sp_name in x._name_parser_map.keys(): if sp_name in sys.argv[1:]: subparser_found = True if not subparser_found: # insert default in first position, this implies no # global options without a sub_parsers specified if args is None: sys.argv.insert(1, name) else: args.insert(0, name)
[ "def", "set_default_subparser", "(", "self", ",", "name", ",", "args", "=", "None", ")", ":", "subparser_found", "=", "False", "for", "arg", "in", "sys", ".", "argv", "[", "1", ":", "]", ":", "if", "arg", "in", "[", "'-h'", ",", "'--help'", "]", ":", "# global help if no subparser", "break", "else", ":", "for", "x", "in", "self", ".", "_subparsers", ".", "_actions", ":", "if", "not", "isinstance", "(", "x", ",", "argparse", ".", "_SubParsersAction", ")", ":", "continue", "for", "sp_name", "in", "x", ".", "_name_parser_map", ".", "keys", "(", ")", ":", "if", "sp_name", "in", "sys", ".", "argv", "[", "1", ":", "]", ":", "subparser_found", "=", "True", "if", "not", "subparser_found", ":", "# insert default in first position, this implies no", "# global options without a sub_parsers specified", "if", "args", "is", "None", ":", "sys", ".", "argv", ".", "insert", "(", "1", ",", "name", ")", "else", ":", "args", ".", "insert", "(", "0", ",", "name", ")" ]
default subparser selection. Call after setup, just before parse_args() name: is the name of the subparser to call by default args: if set is the argument list handed to parse_args() Tested with 2.7, 3.2, 3.3 and 3.4; it works with 2.6 assuming argparse is installed.
[ "default", "subparser", "selection", ".", "Call", "after", "setup", "just", "before", "parse_args", "()", "name", ":", "is", "the", "name", "of", "the", "subparser", "to", "call", "by", "default", "args", ":", "if", "set", "is", "the", "argument", "list", "handed", "to", "parse_args", "()" ]
python
valid
39.5
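An illustrative wiring of the technique; patching the function onto ArgumentParser is this sketch's own choice, and it assumes the process's sys.argv carries no subcommand of its own:

import argparse

argparse.ArgumentParser.set_default_subparser = set_default_subparser
parser = argparse.ArgumentParser()
sub = parser.add_subparsers(dest='command')
sub.add_parser('run')
sub.add_parser('status')

args = []  # simulate a bare command line
parser.set_default_subparser('run', args)  # inserts 'run' into args
print(parser.parse_args(args).command)  # run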
Genida/django-meerkat
src/meerkat/logs/models.py
https://github.com/Genida/django-meerkat/blob/486502a75bb0800266db785fd32717d8c0eb8deb/src/meerkat/logs/models.py#L648-L662
def start_daemon():
    """
    Start a thread to continuously read log files and append lines in DB.

    Work in progress. Currently the thread doesn't append anything;
    it only prints the information parsed from each line read.

    Returns:
        thread: the started thread.
    """
    if RequestLog.daemon is None:
        parser = get_nginx_parser()
        RequestLog.daemon = RequestLog.ParseToDBThread(parser, daemon=True)
        RequestLog.daemon.start()
    return RequestLog.daemon
[ "def", "start_daemon", "(", ")", ":", "if", "RequestLog", ".", "daemon", "is", "None", ":", "parser", "=", "get_nginx_parser", "(", ")", "RequestLog", ".", "daemon", "=", "RequestLog", ".", "ParseToDBThread", "(", "parser", ",", "daemon", "=", "True", ")", "RequestLog", ".", "daemon", ".", "start", "(", ")", "return", "RequestLog", ".", "daemon" ]
Start a thread to continuously read log files and append lines in DB. Work in progress. Currently the thread doesn't append anything; it only prints the information parsed from each line read. Returns: thread: the started thread.
[ "Start", "a", "thread", "to", "continuously", "read", "log", "files", "and", "append", "lines", "in", "DB", "." ]
python
train
35.266667
yunpian/yunpian-python-sdk
yunpian_python_sdk/ypclient.py
https://github.com/yunpian/yunpian-python-sdk/blob/405a1196ec83fdf29ff454f74ef036974be11970/yunpian_python_sdk/ypclient.py#L73-L75
def conf(self, key): '''get config''' return self.__conf[key] if key in self.__conf else _YunpianConf.YP_CONF.get(key)
[ "def", "conf", "(", "self", ",", "key", ")", ":", "return", "self", ".", "__conf", "[", "key", "]", "if", "key", "in", "self", ".", "__conf", "else", "_YunpianConf", ".", "YP_CONF", ".", "get", "(", "key", ")" ]
get config
[ "get", "config" ]
python
train
44
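The chained lookup in the conf record above uses a membership test rather than truthiness, so an explicitly stored falsy value still shadows the fallback table. A standalone sketch of the same pattern (the config keys are invented):

user_conf = {'timeout': 0}               # an explicit 0 must win
defaults = {'timeout': 30, 'retries': 3}

def conf(key):
    # Membership test, not `user_conf.get(key) or default`, so falsy values survive.
    return user_conf[key] if key in user_conf else defaults.get(key)

assert conf('timeout') == 0   # explicit falsy value is kept
assert conf('retries') == 3   # missing key falls back to the defaults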
bitesofcode/projexui
projexui/widgets/xwalkthroughwidget/xwalkthroughscene.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xwalkthroughwidget/xwalkthroughscene.py#L46-L56
def autoLayout(self, size=None): """ Updates the layout for the graphics within this scene. """ if size is None: size = self._view.size() self.setSceneRect(0, 0, size.width(), size.height()) for item in self.items(): if isinstance(item, XWalkthroughGraphic): item.autoLayout(size)
[ "def", "autoLayout", "(", "self", ",", "size", "=", "None", ")", ":", "if", "size", "is", "None", ":", "size", "=", "self", ".", "_view", ".", "size", "(", ")", "self", ".", "setSceneRect", "(", "0", ",", "0", ",", "size", ".", "width", "(", ")", ",", "size", ".", "height", "(", ")", ")", "for", "item", "in", "self", ".", "items", "(", ")", ":", "if", "isinstance", "(", "item", ",", "XWalkthroughGraphic", ")", ":", "item", ".", "autoLayout", "(", "size", ")" ]
Updates the layout for the graphics within this scene.
[ "Updates", "the", "layout", "for", "the", "graphics", "within", "this", "scene", "." ]
python
train
34.272727
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L4267-L4272
def elemDump(self, f, cur): """Dump an XML/HTML node, recursive behaviour, children are printed too. """ if cur is None: cur__o = None else: cur__o = cur._o libxml2mod.xmlElemDump(f, self._o, cur__o)
[ "def", "elemDump", "(", "self", ",", "f", ",", "cur", ")", ":", "if", "cur", "is", "None", ":", "cur__o", "=", "None", "else", ":", "cur__o", "=", "cur", ".", "_o", "libxml2mod", ".", "xmlElemDump", "(", "f", ",", "self", ".", "_o", ",", "cur__o", ")" ]
Dump an XML/HTML node, recursive behaviour, children are printed too.
[ "Dump", "an", "XML", "/", "HTML", "node", "recursive", "behaviour", "children", "are", "printed", "too", "." ]
python
train
39.5
djordon/queueing-tool
queueing_tool/network/queue_network.py
https://github.com/djordon/queueing-tool/blob/ccd418cf647ac03a54f78ba5e3725903f541b808/queueing_tool/network/queue_network.py#L1138-L1183
def show_active(self, **kwargs): """Draws the network, highlighting active queues. The colored vertices represent vertices that have at least one queue on an in-edge that is active. Dark edges represent queues that are active, light edges represent queues that are inactive. Parameters ---------- **kwargs Any additional parameters to pass to :meth:`.draw`, and :meth:`.QueueNetworkDiGraph.draw_graph`. Notes ----- Active queues are :class:`QueueServers<.QueueServer>` that accept arrivals from outside the network. The colors are defined by the class attribute ``colors``. The relevant keys are ``vertex_active``, ``vertex_inactive``, ``edge_active``, and ``edge_inactive``. """ g = self.g for v in g.nodes(): self.g.set_vp(v, 'vertex_color', [0, 0, 0, 0.9]) is_active = False my_iter = g.in_edges(v) if g.is_directed() else g.out_edges(v) for e in my_iter: ei = g.edge_index[e] if self.edge2queue[ei]._active: is_active = True break if is_active: self.g.set_vp(v, 'vertex_fill_color', self.colors['vertex_active']) else: self.g.set_vp(v, 'vertex_fill_color', self.colors['vertex_inactive']) for e in g.edges(): ei = g.edge_index[e] if self.edge2queue[ei]._active: self.g.set_ep(e, 'edge_color', self.colors['edge_active']) else: self.g.set_ep(e, 'edge_color', self.colors['edge_inactive']) self.draw(update_colors=False, **kwargs) self._update_all_colors()
[ "def", "show_active", "(", "self", ",", "*", "*", "kwargs", ")", ":", "g", "=", "self", ".", "g", "for", "v", "in", "g", ".", "nodes", "(", ")", ":", "self", ".", "g", ".", "set_vp", "(", "v", ",", "'vertex_color'", ",", "[", "0", ",", "0", ",", "0", ",", "0.9", "]", ")", "is_active", "=", "False", "my_iter", "=", "g", ".", "in_edges", "(", "v", ")", "if", "g", ".", "is_directed", "(", ")", "else", "g", ".", "out_edges", "(", "v", ")", "for", "e", "in", "my_iter", ":", "ei", "=", "g", ".", "edge_index", "[", "e", "]", "if", "self", ".", "edge2queue", "[", "ei", "]", ".", "_active", ":", "is_active", "=", "True", "break", "if", "is_active", ":", "self", ".", "g", ".", "set_vp", "(", "v", ",", "'vertex_fill_color'", ",", "self", ".", "colors", "[", "'vertex_active'", "]", ")", "else", ":", "self", ".", "g", ".", "set_vp", "(", "v", ",", "'vertex_fill_color'", ",", "self", ".", "colors", "[", "'vertex_inactive'", "]", ")", "for", "e", "in", "g", ".", "edges", "(", ")", ":", "ei", "=", "g", ".", "edge_index", "[", "e", "]", "if", "self", ".", "edge2queue", "[", "ei", "]", ".", "_active", ":", "self", ".", "g", ".", "set_ep", "(", "e", ",", "'edge_color'", ",", "self", ".", "colors", "[", "'edge_active'", "]", ")", "else", ":", "self", ".", "g", ".", "set_ep", "(", "e", ",", "'edge_color'", ",", "self", ".", "colors", "[", "'edge_inactive'", "]", ")", "self", ".", "draw", "(", "update_colors", "=", "False", ",", "*", "*", "kwargs", ")", "self", ".", "_update_all_colors", "(", ")" ]
Draws the network, highlighting active queues. The colored vertices represent vertices that have at least one queue on an in-edge that is active. Dark edges represent queues that are active, light edges represent queues that are inactive. Parameters ---------- **kwargs Any additional parameters to pass to :meth:`.draw`, and :meth:`.QueueNetworkDiGraph.draw_graph`. Notes ----- Active queues are :class:`QueueServers<.QueueServer>` that accept arrivals from outside the network. The colors are defined by the class attribute ``colors``. The relevant keys are ``vertex_active``, ``vertex_inactive``, ``edge_active``, and ``edge_inactive``.
[ "Draws", "the", "network", "highlighting", "active", "queues", "." ]
python
valid
38.130435
common-workflow-language/schema_salad
schema_salad/avro/schema.py
https://github.com/common-workflow-language/schema_salad/blob/608ba207b9058fe0a9c3db161058ab3782eef015/schema_salad/avro/schema.py#L215-L235
def add_name(self, name_attr, space_attr, new_schema): # type: (Text, Optional[Text], NamedSchema) -> Name """ Add a new schema object to the name set. @arg name_attr: name value read in schema @arg space_attr: namespace value read in schema. @return: the Name that was just added. """ to_add = Name(name_attr, space_attr, self.default_namespace) if to_add.fullname in VALID_TYPES: fail_msg = '%s is a reserved type name.' % to_add.fullname raise SchemaParseException(fail_msg) elif to_add.fullname in self.names: fail_msg = 'The name "%s" is already in use.' % to_add.fullname raise SchemaParseException(fail_msg) self.names[to_add.fullname] = new_schema return to_add
[ "def", "add_name", "(", "self", ",", "name_attr", ",", "space_attr", ",", "new_schema", ")", ":", "# type: (Text, Optional[Text], NamedSchema) -> Name", "to_add", "=", "Name", "(", "name_attr", ",", "space_attr", ",", "self", ".", "default_namespace", ")", "if", "to_add", ".", "fullname", "in", "VALID_TYPES", ":", "fail_msg", "=", "'%s is a reserved type name.'", "%", "to_add", ".", "fullname", "raise", "SchemaParseException", "(", "fail_msg", ")", "elif", "to_add", ".", "fullname", "in", "self", ".", "names", ":", "fail_msg", "=", "'The name \"%s\" is already in use.'", "%", "to_add", ".", "fullname", "raise", "SchemaParseException", "(", "fail_msg", ")", "self", ".", "names", "[", "to_add", ".", "fullname", "]", "=", "new_schema", "return", "to_add" ]
Add a new schema object to the name set. @arg name_attr: name value read in schema @arg space_attr: namespace value read in schema. @return: the Name that was just added.
[ "Add", "a", "new", "schema", "object", "to", "the", "name", "set", "." ]
python
train
38.238095
genialis/resolwe
resolwe/flow/expression_engines/jinja/filters.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/expression_engines/jinja/filters.py#L117-L135
def descriptor(obj, path=''): """Return descriptor of given object. If ``path`` is specified, only the content on that path is returned. """ if isinstance(obj, dict): # Current object is hydrated, so we need to get descriptor from # dict representation. desc = obj['__descriptor'] else: desc = obj.descriptor resp = dict_dot(desc, path) if isinstance(resp, list) or isinstance(resp, dict): return json.dumps(resp) return resp
[ "def", "descriptor", "(", "obj", ",", "path", "=", "''", ")", ":", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "# Current object is hydrated, so we need to get descriptor from", "# dict representation.", "desc", "=", "obj", "[", "'__descriptor'", "]", "else", ":", "desc", "=", "obj", ".", "descriptor", "resp", "=", "dict_dot", "(", "desc", ",", "path", ")", "if", "isinstance", "(", "resp", ",", "list", ")", "or", "isinstance", "(", "resp", ",", "dict", ")", ":", "return", "json", ".", "dumps", "(", "resp", ")", "return", "resp" ]
Return descriptor of given object. If ``path`` is specified, only the content on that path is returned.
[ "Return", "descriptor", "of", "given", "object", "." ]
python
train
25.631579
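An illustrative call of the descriptor filter above on a hydrated (dict) object. The nested descriptor content and the dotted paths are invented, and the filter is assumed importable from the module in the record; dict_dot is assumed to resolve dotted paths:

hydrated = {'__descriptor': {'sample': {'organism': 'Homo sapiens', 'tags': ['rna']}}}
print(descriptor(hydrated, 'sample.organism'))   # Homo sapiens
print(descriptor(hydrated, 'sample.tags'))       # '["rna"]' -- lists come back JSON-encoded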
Microsoft/botbuilder-python
libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py
https://github.com/Microsoft/botbuilder-python/blob/274663dd91c811bae6ac4488915ba5880771b0a7/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py#L266-L276
async def get_conversations(self, service_url: str, continuation_token: str=None): """ Lists the Conversations in which this bot has participated for a given channel server. The channel server returns results in pages and each page will include a `continuationToken` that can be used to fetch the next page of results from the server. :param service_url: :param continuation_token: :return: """ client = self.create_connector_client(service_url) return await client.conversations.get_conversations(continuation_token)
[ "async", "def", "get_conversations", "(", "self", ",", "service_url", ":", "str", ",", "continuation_token", ":", "str", "=", "None", ")", ":", "client", "=", "self", ".", "create_connector_client", "(", "service_url", ")", "return", "await", "client", ".", "conversations", ".", "get_conversations", "(", "continuation_token", ")" ]
Lists the Conversations in which this bot has participated for a given channel server. The channel server returns results in pages and each page will include a `continuationToken` that can be used to fetch the next page of results from the server. :param service_url: :param continuation_token: :return:
[ "Lists", "the", "Conversations", "in", "which", "this", "bot", "has", "participated", "for", "a", "given", "channel", "server", ".", "The", "channel", "server", "returns", "results", "in", "pages", "and", "each", "page", "will", "include", "a", "continuationToken", "that", "can", "be", "used", "to", "fetch", "the", "next", "page", "of", "results", "from", "the", "server", ".", ":", "param", "service_url", ":", ":", "param", "continuation_token", ":", ":", "return", ":" ]
python
test
53.363636
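A hedged sketch of paging with the get_conversations coroutine above. The attribute names on the result object (conversations, continuation_token) follow the Bot Framework response shape but are assumptions here, as is the adapter instance:

import asyncio

async def list_all_conversations(adapter, service_url):
    token = None
    while True:
        page = await adapter.get_conversations(service_url, token)
        for conversation in page.conversations:   # assumed attribute
            print(conversation.id)
        token = page.continuation_token           # assumed attribute
        if not token:
            break

# asyncio.get_event_loop().run_until_complete(list_all_conversations(adapter, url))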
mardix/Yass
yass/publisher.py
https://github.com/mardix/Yass/blob/32f804c1a916f5b0a13d13fa750e52be3b6d666d/yass/publisher.py#L224-L240
def purge_files(self, exclude_files=["index.html", "error.html"]):
    """
    Delete files that are in the manifest
    :param exclude_files: list : files to not delete
    :return:
    """
    for chunk in utils.chunk_list(self._get_manifest_data(), 1000):
        try:
            self.s3.delete_objects(
                Bucket=self.sitename,
                Delete={
                    'Objects': [{"Key": f} for f in chunk if f not in exclude_files]
                }
            )
        except Exception as ex:
            pass
[ "def", "purge_files", "(", "self", ",", "exclude_files", "=", "[", "\"index.html\"", ",", "\"error.html\"", "]", ")", ":", "for", "chunk", "in", "utils", ".", "chunk_list", "(", "self", ".", "_get_manifest_data", "(", ")", ",", "1000", ")", ":", "try", ":", "self", ".", "s3", ".", "delete_objects", "(", "Bucket", "=", "self", ".", "sitename", ",", "Delete", "=", "{", "'Objects'", ":", "[", "{", "\"Key\"", ":", "f", "}", "for", "f", "in", "chunk", "if", "f", "not", "in", "exclude_files", "]", "}", ")", "except", "Exception", "as", "ex", ":", "pass" ]
Delete files that are in the manifest :param exclude_files: list : files to not delete :return:
[ "To", "delete", "files", "that", "are", "in", "the", "manifest", ":", "param", "excludes_files", ":", "list", ":", "files", "to", "not", "delete", ":", "return", ":" ]
python
train
36.470588
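The 1000-item chunks in purge_files above line up with S3's delete_objects limit of 1000 keys per request. A self-contained sketch of the chunking, with a generic helper standing in for utils.chunk_list:

def chunk_list(items, size):
    # Stand-in for utils.chunk_list: yield fixed-size slices.
    for i in range(0, len(items), size):
        yield items[i:i + size]

keys = ['posts/%d.html' % i for i in range(2500)]
batches = [len(chunk) for chunk in chunk_list(keys, 1000)]
assert batches == [1000, 1000, 500]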
PyProphet/pyprophet
pyprophet/main.py
https://github.com/PyProphet/pyprophet/blob/f546ad171750cd7685afbde6785fe71f82cadb35/pyprophet/main.py#L144-L154
def peptide(infile, outfile, context, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps): """ Infer peptides and conduct error-rate estimation in different contexts. """ if outfile is None: outfile = infile else: outfile = outfile infer_peptides(infile, outfile, context, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps)
[ "def", "peptide", "(", "infile", ",", "outfile", ",", "context", ",", "parametric", ",", "pfdr", ",", "pi0_lambda", ",", "pi0_method", ",", "pi0_smooth_df", ",", "pi0_smooth_log_pi0", ",", "lfdr_truncate", ",", "lfdr_monotone", ",", "lfdr_transformation", ",", "lfdr_adj", ",", "lfdr_eps", ")", ":", "if", "outfile", "is", "None", ":", "outfile", "=", "infile", "else", ":", "outfile", "=", "outfile", "infer_peptides", "(", "infile", ",", "outfile", ",", "context", ",", "parametric", ",", "pfdr", ",", "pi0_lambda", ",", "pi0_method", ",", "pi0_smooth_df", ",", "pi0_smooth_log_pi0", ",", "lfdr_truncate", ",", "lfdr_monotone", ",", "lfdr_transformation", ",", "lfdr_adj", ",", "lfdr_eps", ")" ]
Infer peptides and conduct error-rate estimation in different contexts.
[ "Infer", "peptides", "and", "conduct", "error", "-", "rate", "estimation", "in", "different", "contexts", "." ]
python
test
49.818182
davidfokkema/artist
artist/multi_plot.py
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L482-L489
def get_subplot_at(self, row, column): """Return the subplot at row, column position. :param row,column: specify the subplot. """ idx = row * self.columns + column return self.subplots[idx]
[ "def", "get_subplot_at", "(", "self", ",", "row", ",", "column", ")", ":", "idx", "=", "row", "*", "self", ".", "columns", "+", "column", "return", "self", ".", "subplots", "[", "idx", "]" ]
Return the subplot at row, column position. :param row,column: specify the subplot.
[ "Return", "the", "subplot", "at", "row", "column", "position", "." ]
python
train
28
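get_subplot_at above assumes the subplots list was filled row-major. A tiny standalone check of that index arithmetic (the grid shape is invented):

columns = 3
subplots = [(r, c) for r in range(2) for c in range(columns)]  # row-major fill
row, column = 1, 2
assert subplots[row * columns + column] == (1, 2)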
PMEAL/porespy
porespy/tools/__funcs__.py
https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/tools/__funcs__.py#L510-L575
def randomize_colors(im, keep_vals=[0]):
    r'''
    Takes a greyscale image and randomly shuffles the greyscale values, so
    that all voxels labeled X will be labeled Y, and all voxels labeled Y will
    be labeled Z, where X, Y, Z and so on are randomly selected from the
    values in the input image.

    This function is useful for improving the visibility of images with
    neighboring regions that are only incrementally different from each
    other, such as those returned by `scipy.ndimage.label`.

    Parameters
    ----------
    im : array_like
        An ND image of greyscale values.

    keep_vals : array_like
        Indicate which voxel values should NOT be altered.  The default is
        `[0]` which is useful for leaving the background of the image
        untouched.

    Returns
    -------
    image : ND-array
        An image the same size and type as ``im`` but with the greyscale
        values reassigned.  The unique values in both the input and output
        images will be identical.

    Notes
    -----
    If the greyscale values in the input image are not contiguous then
    neither will they be in the output.

    Examples
    --------
    >>> import porespy as ps
    >>> import scipy as sp
    >>> sp.random.seed(0)
    >>> im = sp.random.randint(low=0, high=5, size=[4, 4])
    >>> print(im)
    [[4 0 3 3]
     [3 1 3 2]
     [4 0 0 4]
     [2 1 0 1]]
    >>> im_rand = ps.tools.randomize_colors(im)
    >>> print(im_rand)
    [[2 0 4 4]
     [4 1 4 3]
     [2 0 0 2]
     [3 1 0 1]]

    As can be seen, the 2's have become 3, 3's have become 4, and 4's have
    become 2.  1's remained 1 by random accident.  0's remain zeros by
    default, but this can be controlled using the `keep_vals` argument.
    '''
    im_flat = im.flatten()
    keep_vals = sp.array(keep_vals)
    swap_vals = ~sp.in1d(im_flat, keep_vals)
    im_vals = sp.unique(im_flat[swap_vals])
    new_vals = sp.random.permutation(im_vals)
    im_map = sp.zeros(shape=[sp.amax(im_vals) + 1, ], dtype=int)
    im_map[im_vals] = new_vals
    im_new = im_map[im_flat]
    im_new = sp.reshape(im_new, newshape=sp.shape(im))
    return im_new
[ "def", "randomize_colors", "(", "im", ",", "keep_vals", "=", "[", "0", "]", ")", ":", "im_flat", "=", "im", ".", "flatten", "(", ")", "keep_vals", "=", "sp", ".", "array", "(", "keep_vals", ")", "swap_vals", "=", "~", "sp", ".", "in1d", "(", "im_flat", ",", "keep_vals", ")", "im_vals", "=", "sp", ".", "unique", "(", "im_flat", "[", "swap_vals", "]", ")", "new_vals", "=", "sp", ".", "random", ".", "permutation", "(", "im_vals", ")", "im_map", "=", "sp", ".", "zeros", "(", "shape", "=", "[", "sp", ".", "amax", "(", "im_vals", ")", "+", "1", ",", "]", ",", "dtype", "=", "int", ")", "im_map", "[", "im_vals", "]", "=", "new_vals", "im_new", "=", "im_map", "[", "im_flat", "]", "im_new", "=", "sp", ".", "reshape", "(", "im_new", ",", "newshape", "=", "sp", ".", "shape", "(", "im", ")", ")", "return", "im_new" ]
r''' Takes a greyscale image and randomly shuffles the greyscale values, so that all voxels labeled X will be labeled Y, and all voxels labeled Y will be labeled Z, where X, Y, Z and so on are randomly selected from the values in the input image. This function is useful for improving the visibility of images with neighboring regions that are only incrementally different from each other, such as those returned by `scipy.ndimage.label`. Parameters ---------- im : array_like An ND image of greyscale values. keep_vals : array_like Indicate which voxel values should NOT be altered. The default is `[0]` which is useful for leaving the background of the image untouched. Returns ------- image : ND-array An image the same size and type as ``im`` but with the greyscale values reassigned. The unique values in both the input and output images will be identical. Notes ----- If the greyscale values in the input image are not contiguous then neither will they be in the output. Examples -------- >>> import porespy as ps >>> import scipy as sp >>> sp.random.seed(0) >>> im = sp.random.randint(low=0, high=5, size=[4, 4]) >>> print(im) [[4 0 3 3] [3 1 3 2] [4 0 0 4] [2 1 0 1]] >>> im_rand = ps.tools.randomize_colors(im) >>> print(im_rand) [[2 0 4 4] [4 1 4 3] [2 0 0 2] [3 1 0 1]] As can be seen, the 2's have become 3, 3's have become 4, and 4's have become 2. 1's remained 1 by random accident. 0's remain zeros by default, but this can be controlled using the `keep_vals` argument.
[ "r", "Takes", "a", "greyscale", "image", "and", "randomly", "shuffles", "the", "greyscale", "values", "so", "that", "all", "voxels", "labeled", "X", "will", "be", "labelled", "Y", "and", "all", "voxels", "labeled", "Y", "will", "be", "labeled", "Z", "where", "X", "Y", "Z", "and", "so", "on", "are", "randomly", "selected", "from", "the", "values", "in", "the", "input", "image", "." ]
python
train
31.787879
gem/oq-engine
openquake/hazardlib/gsim/zhao_2016.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/zhao_2016.py#L82-L102
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ # extracting dictionary of coefficients specific to required # intensity measure type. C = self.COEFFS[imt] C_SITE = self.SITE_COEFFS[imt] s_c, idx = self._get_site_classification(sites.vs30) sa_rock = (self.get_magnitude_scaling_term(C, rup) + self.get_sof_term(C, rup) + self.get_depth_term(C, rup) + self.get_distance_term(C, dists, rup)) sa_soil = self.add_site_amplification(C, C_SITE, sites, sa_rock, idx, rup) stddevs = self.get_stddevs(C, sites.vs30.shape, idx, stddev_types) return sa_soil, stddevs
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "# extracting dictionary of coefficients specific to required", "# intensity measure type.", "C", "=", "self", ".", "COEFFS", "[", "imt", "]", "C_SITE", "=", "self", ".", "SITE_COEFFS", "[", "imt", "]", "s_c", ",", "idx", "=", "self", ".", "_get_site_classification", "(", "sites", ".", "vs30", ")", "sa_rock", "=", "(", "self", ".", "get_magnitude_scaling_term", "(", "C", ",", "rup", ")", "+", "self", ".", "get_sof_term", "(", "C", ",", "rup", ")", "+", "self", ".", "get_depth_term", "(", "C", ",", "rup", ")", "+", "self", ".", "get_distance_term", "(", "C", ",", "dists", ",", "rup", ")", ")", "sa_soil", "=", "self", ".", "add_site_amplification", "(", "C", ",", "C_SITE", ",", "sites", ",", "sa_rock", ",", "idx", ",", "rup", ")", "stddevs", "=", "self", ".", "get_stddevs", "(", "C", ",", "sites", ".", "vs30", ".", "shape", ",", "idx", ",", "stddev_types", ")", "return", "sa_soil", ",", "stddevs" ]
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
[ "See", ":", "meth", ":", "superclass", "method", "<", ".", "base", ".", "GroundShakingIntensityModel", ".", "get_mean_and_stddevs", ">", "for", "spec", "of", "input", "and", "result", "values", "." ]
python
train
43.142857
LuminosoInsight/luminoso-api-client-python
luminoso_api/v5_upload.py
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v5_upload.py#L35-L47
def _simplify_doc(doc): """ Limit a document to just the three fields we should upload. """ # Mutate a copy of the document to fill in missing fields doc = dict(doc) if 'text' not in doc: raise ValueError("The document {!r} has no text field".format(doc)) return { 'text': doc['text'], 'metadata': doc.get('metadata', []), 'title': doc.get('title', '') }
[ "def", "_simplify_doc", "(", "doc", ")", ":", "# Mutate a copy of the document to fill in missing fields", "doc", "=", "dict", "(", "doc", ")", "if", "'text'", "not", "in", "doc", ":", "raise", "ValueError", "(", "\"The document {!r} has no text field\"", ".", "format", "(", "doc", ")", ")", "return", "{", "'text'", ":", "doc", "[", "'text'", "]", ",", "'metadata'", ":", "doc", ".", "get", "(", "'metadata'", ",", "[", "]", ")", ",", "'title'", ":", "doc", ".", "get", "(", "'title'", ",", "''", ")", "}" ]
Limit a document to just the three fields we should upload.
[ "Limit", "a", "document", "to", "just", "the", "three", "fields", "we", "should", "upload", "." ]
python
test
31.230769
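An illustrative round trip through _simplify_doc above (assuming the helper is importable from luminoso_api.v5_upload; the document contents are invented):

doc = {'text': 'Great seats', 'extra': 'silently dropped'}
print(_simplify_doc(doc))
# {'text': 'Great seats', 'metadata': [], 'title': ''}

try:
    _simplify_doc({'title': 'no text field here'})
except ValueError as err:
    print(err)   # The document {...} has no text field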
gatkin/declxml
declxml.py
https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L1202-L1214
def parse_at_element( self, element, # type: ET.Element state # type: _ProcessorState ): # type: (...) -> Any """Parse the primitive value at the XML element.""" if self._attribute: parsed_value = self._parse_attribute(element, self._attribute, state) else: parsed_value = self._parser_func(element.text, state) return _hooks_apply_after_parse(self._hooks, state, parsed_value)
[ "def", "parse_at_element", "(", "self", ",", "element", ",", "# type: ET.Element", "state", "# type: _ProcessorState", ")", ":", "# type: (...) -> Any", "if", "self", ".", "_attribute", ":", "parsed_value", "=", "self", ".", "_parse_attribute", "(", "element", ",", "self", ".", "_attribute", ",", "state", ")", "else", ":", "parsed_value", "=", "self", ".", "_parser_func", "(", "element", ".", "text", ",", "state", ")", "return", "_hooks_apply_after_parse", "(", "self", ".", "_hooks", ",", "state", ",", "parsed_value", ")" ]
Parse the primitive value at the XML element.
[ "Parse", "the", "primitive", "value", "at", "the", "XML", "element", "." ]
python
train
36.307692
pyblish/pyblish-qml
pyblish_qml/ipc/service.py
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/ipc/service.py#L109-L118
def _dispatch(self, method, params): """Customise exception handling""" self._count += 1 func = getattr(self, method) try: return func(*params) except Exception as e: traceback.print_exc() raise e
[ "def", "_dispatch", "(", "self", ",", "method", ",", "params", ")", ":", "self", ".", "_count", "+=", "1", "func", "=", "getattr", "(", "self", ",", "method", ")", "try", ":", "return", "func", "(", "*", "params", ")", "except", "Exception", "as", "e", ":", "traceback", ".", "print_exc", "(", ")", "raise", "e" ]
Customise exception handling
[ "Customise", "exception", "handling" ]
python
train
26.4
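The _dispatch record above is a small method-dispatch pattern: resolve the handler by name, count the call, print the traceback, re-raise. A self-contained rendering of the same shape (class and method names invented):

import traceback

class Service(object):
    def __init__(self):
        self._count = 0

    def ping(self, value):
        return value

    def _dispatch(self, method, params):
        # Same shape as the record above: lookup by name, count, re-raise.
        self._count += 1
        func = getattr(self, method)
        try:
            return func(*params)
        except Exception as e:
            traceback.print_exc()
            raise e

svc = Service()
assert svc._dispatch('ping', ['pong']) == 'pong'
assert svc._count == 1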
happyleavesaoc/python-limitlessled
limitlessled/group/__init__.py
https://github.com/happyleavesaoc/python-limitlessled/blob/70307c2bf8c91430a99579d2ad18b228ec7a8488/limitlessled/group/__init__.py#L139-L153
def _scale_steps(self, duration, commands, *steps): """ Scale steps :param duration: Total time (in seconds) :param commands: Number of commands to be executed. :param steps: Steps for one or many properties to take. :return: Steps scaled to time and total. """ factor = duration / ((self.wait * self.reps * commands) - \ (self.wait * self.reps * self._bridge.active)) steps = [math.ceil(factor * step) for step in steps] if len(steps) == 1: return steps[0] else: return steps
[ "def", "_scale_steps", "(", "self", ",", "duration", ",", "commands", ",", "*", "steps", ")", ":", "factor", "=", "duration", "/", "(", "(", "self", ".", "wait", "*", "self", ".", "reps", "*", "commands", ")", "-", "(", "self", ".", "wait", "*", "self", ".", "reps", "*", "self", ".", "_bridge", ".", "active", ")", ")", "steps", "=", "[", "math", ".", "ceil", "(", "factor", "*", "step", ")", "for", "step", "in", "steps", "]", "if", "len", "(", "steps", ")", "==", "1", ":", "return", "steps", "[", "0", "]", "else", ":", "return", "steps" ]
Scale steps :param duration: Total time (in seconds) :param commands: Number of commands to be executed. :param steps: Steps for one or many properties to take. :return: Steps scaled to time and total.
[ "Scale", "steps" ]
python
train
38.933333
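Worked numbers for the _scale_steps factor above (all values invented): with wait=0.1 s, reps=1, commands=2 and one active bridge, a 3-second transition gives

wait, reps, commands, active = 0.1, 1, 2, 1
duration = 3.0
factor = duration / ((wait * reps * commands) - (wait * reps * active))
assert abs(factor - 30.0) < 1e-9
# Each property step is then stretched to math.ceil(factor * step) sub-steps.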
vrtsystems/pyat
pyat/sync.py
https://github.com/vrtsystems/pyat/blob/23f87904e5f9f6902665bbf825e65e7eddd64995/pyat/sync.py#L120-L131
def result(self): ''' The result from the executed task. Raises NotExecutedYet if not yet executed. ''' if self.cancelled or (self._fn is not None): raise NotExecutedYet() if self._fn_exc is not None: six.reraise(*self._fn_exc) else: return self._fn_res
[ "def", "result", "(", "self", ")", ":", "if", "self", ".", "cancelled", "or", "(", "self", ".", "_fn", "is", "not", "None", ")", ":", "raise", "NotExecutedYet", "(", ")", "if", "self", ".", "_fn_exc", "is", "not", "None", ":", "six", ".", "reraise", "(", "*", "self", ".", "_fn_exc", ")", "else", ":", "return", "self", ".", "_fn_res" ]
The result from the executed task. Raises NotExecutedYet if not yet executed.
[ "The", "result", "from", "the", "executed", "task", ".", "Raises", "NotExecutedYet", "if", "not", "yet", "executed", "." ]
python
train
28
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/XMLSchema.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/XMLSchema.py#L126-L141
def loadFromURL(self, url, schema=None): """Return an XMLSchema instance loaded from the given url. url -- URL to dereference schema -- Optional XMLSchema instance. """ reader = self.__readerClass() if self.__base_url: url = basejoin(self.__base_url,url) reader.loadFromURL(url) schema = schema or XMLSchema() schema.setBaseUrl(url) schema.load(reader) self.__setIncludes(schema) self.__setImports(schema) return schema
[ "def", "loadFromURL", "(", "self", ",", "url", ",", "schema", "=", "None", ")", ":", "reader", "=", "self", ".", "__readerClass", "(", ")", "if", "self", ".", "__base_url", ":", "url", "=", "basejoin", "(", "self", ".", "__base_url", ",", "url", ")", "reader", ".", "loadFromURL", "(", "url", ")", "schema", "=", "schema", "or", "XMLSchema", "(", ")", "schema", ".", "setBaseUrl", "(", "url", ")", "schema", ".", "load", "(", "reader", ")", "self", ".", "__setIncludes", "(", "schema", ")", "self", ".", "__setImports", "(", "schema", ")", "return", "schema" ]
Return an XMLSchema instance loaded from the given url. url -- URL to dereference schema -- Optional XMLSchema instance.
[ "Return", "an", "XMLSchema", "instance", "loaded", "from", "the", "given", "url", ".", "url", "--", "URL", "to", "dereference", "schema", "--", "Optional", "XMLSchema", "instance", "." ]
python
train
32.9375
atztogo/phonopy
phonopy/harmonic/force_constants.py
https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/harmonic/force_constants.py#L298-L349
def set_tensor_symmetry_old(force_constants,
                            lattice, # column vectors
                            positions,
                            symmetry):
    """Full force constants are symmetrized using crystal symmetry.

    This method extracts symmetrically equivalent sets of atomic pairs,
    takes the sum of their force constants, and averages the sum.

    Since get_force_constants_disps may include crystal symmetry, this method
    is usually meaningless.

    """
    rotations = symmetry.get_symmetry_operations()['rotations']
    translations = symmetry.get_symmetry_operations()['translations']
    symprec = symmetry.get_symmetry_tolerance()

    fc_bak = force_constants.copy()

    # Create mapping table between an atom and the symmetry operated atom
    # map[ i, j ]
    # i: atom index
    # j: operation index
    mapping = []
    for pos_i in positions:
        map_local = []
        for rot, trans in zip(rotations, translations):
            rot_pos = np.dot(pos_i, rot.T) + trans
            for j, pos_j in enumerate(positions):
                diff = pos_j - rot_pos
                diff -= np.rint(diff)
                diff = np.dot(diff, lattice.T)
                if np.linalg.norm(diff) < symprec:
                    map_local.append(j)
                    break
        mapping.append(map_local)
    mapping = np.array(mapping)

    # Look for the symmetrically equivalent force constant tensors
    for i, pos_i in enumerate(positions):
        for j, pos_j in enumerate(positions):
            tmp_fc = np.zeros((3, 3), dtype='double')
            for k, rot in enumerate(rotations):
                cart_rot = similarity_transformation(lattice, rot)

                # Reverse rotation of force constant is summed
                tmp_fc += similarity_transformation(cart_rot.T,
                                                    fc_bak[mapping[i, k],
                                                           mapping[j, k]])
            # Take average and set to new force constants
            force_constants[i, j] = tmp_fc / len(rotations)
[ "def", "set_tensor_symmetry_old", "(", "force_constants", ",", "lattice", ",", "# column vectors", "positions", ",", "symmetry", ")", ":", "rotations", "=", "symmetry", ".", "get_symmetry_operations", "(", ")", "[", "'rotations'", "]", "translations", "=", "symmetry", ".", "get_symmetry_operations", "(", ")", "[", "'translations'", "]", "symprec", "=", "symmetry", ".", "get_symmetry_tolerance", "(", ")", "fc_bak", "=", "force_constants", ".", "copy", "(", ")", "# Create mapping table between an atom and the symmetry operated atom", "# map[ i, j ]", "# i: atom index", "# j: operation index", "mapping", "=", "[", "]", "for", "pos_i", "in", "positions", ":", "map_local", "=", "[", "]", "for", "rot", ",", "trans", "in", "zip", "(", "rotations", ",", "translations", ")", ":", "rot_pos", "=", "np", ".", "dot", "(", "pos_i", ",", "rot", ".", "T", ")", "+", "trans", "for", "j", ",", "pos_j", "in", "enumerate", "(", "positions", ")", ":", "diff", "=", "pos_j", "-", "rot_pos", "diff", "-=", "np", ".", "rint", "(", "diff", ")", "diff", "=", "np", ".", "dot", "(", "diff", ",", "lattice", ".", "T", ")", "if", "np", ".", "linalg", ".", "norm", "(", "diff", ")", "<", "symprec", ":", "map_local", ".", "append", "(", "j", ")", "break", "mapping", ".", "append", "(", "map_local", ")", "mapping", "=", "np", ".", "array", "(", "mapping", ")", "# Look for the symmetrically equivalent force constant tensors", "for", "i", ",", "pos_i", "in", "enumerate", "(", "positions", ")", ":", "for", "j", ",", "pos_j", "in", "enumerate", "(", "positions", ")", ":", "tmp_fc", "=", "np", ".", "zeros", "(", "(", "3", ",", "3", ")", ",", "dtype", "=", "'double'", ")", "for", "k", ",", "rot", "in", "enumerate", "(", "rotations", ")", ":", "cart_rot", "=", "similarity_transformation", "(", "lattice", ",", "rot", ")", "# Reverse rotation of force constant is summed", "tmp_fc", "+=", "similarity_transformation", "(", "cart_rot", ".", "T", ",", "fc_bak", "[", "mapping", "[", "i", ",", "k", "]", ",", "mapping", "[", "j", ",", "k", "]", "]", ")", "# Take average and set to new force cosntants", "force_constants", "[", "i", ",", "j", "]", "=", "tmp_fc", "/", "len", "(", "rotations", ")" ]
Full force constants are symmetrized using crystal symmetry. This method extracts symmetrically equivalent sets of atomic pairs, takes the sum of their force constants, and averages the sum. Since get_force_constants_disps may include crystal symmetry, this method is usually meaningless.
[ "Full", "force", "constants", "are", "symmetrized", "using", "crystal", "symmetry", "." ]
python
train
39.519231
coldfix/udiskie
udiskie/udisks2.py
https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/udisks2.py#L446-L454
def unmount(self, force=None, auth_no_user_interaction=None): """Unmount filesystem.""" return self._M.Filesystem.Unmount( '(a{sv})', filter_opt({ 'force': ('b', force), 'auth.no_user_interaction': ('b', auth_no_user_interaction), }) )
[ "def", "unmount", "(", "self", ",", "force", "=", "None", ",", "auth_no_user_interaction", "=", "None", ")", ":", "return", "self", ".", "_M", ".", "Filesystem", ".", "Unmount", "(", "'(a{sv})'", ",", "filter_opt", "(", "{", "'force'", ":", "(", "'b'", ",", "force", ")", ",", "'auth.no_user_interaction'", ":", "(", "'b'", ",", "auth_no_user_interaction", ")", ",", "}", ")", ")" ]
Unmount filesystem.
[ "Unmount", "filesystem", "." ]
python
train
35.444444
suurjaak/InputScope
inputscope/webui.py
https://github.com/suurjaak/InputScope/blob/245ff045163a1995e8cd5ac558d0a93024eb86eb/inputscope/webui.py#L52-L64
def keyboard(table, day=None): """Handler for showing the keyboard statistics page.""" cols, group = "realkey AS key, COUNT(*) AS count", "realkey" where = (("day", day),) if day else () counts_display = counts = db.fetch(table, cols, where, group, "count DESC") if "combos" == table: counts_display = db.fetch(table, "key, COUNT(*) AS count", where, "key", "count DESC") events = db.fetch(table, where=where, order="stamp") for e in events: e["dt"] = datetime.datetime.fromtimestamp(e["stamp"]) stats, collatedevents = stats_keyboard(events, table) days, input = db.fetch("counts", order="day", type=table), "keyboard" return bottle.template("heatmap.tpl", locals(), conf=conf)
[ "def", "keyboard", "(", "table", ",", "day", "=", "None", ")", ":", "cols", ",", "group", "=", "\"realkey AS key, COUNT(*) AS count\"", ",", "\"realkey\"", "where", "=", "(", "(", "\"day\"", ",", "day", ")", ",", ")", "if", "day", "else", "(", ")", "counts_display", "=", "counts", "=", "db", ".", "fetch", "(", "table", ",", "cols", ",", "where", ",", "group", ",", "\"count DESC\"", ")", "if", "\"combos\"", "==", "table", ":", "counts_display", "=", "db", ".", "fetch", "(", "table", ",", "\"key, COUNT(*) AS count\"", ",", "where", ",", "\"key\"", ",", "\"count DESC\"", ")", "events", "=", "db", ".", "fetch", "(", "table", ",", "where", "=", "where", ",", "order", "=", "\"stamp\"", ")", "for", "e", "in", "events", ":", "e", "[", "\"dt\"", "]", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "e", "[", "\"stamp\"", "]", ")", "stats", ",", "collatedevents", "=", "stats_keyboard", "(", "events", ",", "table", ")", "days", ",", "input", "=", "db", ".", "fetch", "(", "\"counts\"", ",", "order", "=", "\"day\"", ",", "type", "=", "table", ")", ",", "\"keyboard\"", "return", "bottle", ".", "template", "(", "\"heatmap.tpl\"", ",", "locals", "(", ")", ",", "conf", "=", "conf", ")" ]
Handler for showing the keyboard statistics page.
[ "Handler", "for", "showing", "the", "keyboard", "statistics", "page", "." ]
python
train
58.461538
andrewramsay/sk8-drivers
pysk8/calibration/sk8_calibration_gui.py
https://github.com/andrewramsay/sk8-drivers/blob/67347a71762fb421f5ae65a595def5c7879e8b0c/pysk8/calibration/sk8_calibration_gui.py#L309-L316
def mag_calibration(self): """Perform magnetometer calibration for current IMU.""" self.calibration_state = self.CAL_MAG self.mag_dialog = SK8MagDialog(self.sk8.get_imu(self.spinIMU.value()), self) if self.mag_dialog.exec_() == QDialog.Rejected: return self.calculate_mag_calibration(self.mag_dialog.samples)
[ "def", "mag_calibration", "(", "self", ")", ":", "self", ".", "calibration_state", "=", "self", ".", "CAL_MAG", "self", ".", "mag_dialog", "=", "SK8MagDialog", "(", "self", ".", "sk8", ".", "get_imu", "(", "self", ".", "spinIMU", ".", "value", "(", ")", ")", ",", "self", ")", "if", "self", ".", "mag_dialog", ".", "exec_", "(", ")", "==", "QDialog", ".", "Rejected", ":", "return", "self", ".", "calculate_mag_calibration", "(", "self", ".", "mag_dialog", ".", "samples", ")" ]
Perform magnetometer calibration for current IMU.
[ "Perform", "magnetometer", "calibration", "for", "current", "IMU", "." ]
python
train
44.25
seatgeek/fuzzywuzzy
benchmarks.py
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/benchmarks.py#L47-L57
def print_result_from_timeit(stmt='pass', setup='pass', number=1000000):
    """
    Helper function to report how much time the execution of one statement took
    """
    units = ["s", "ms", "us", "ns"]
    duration = timeit(stmt, setup, number=int(number))
    avg_duration = duration / float(number)
    thousands = int(math.floor(math.log(avg_duration, 1000)))

    print("Total time: %fs. Average run: %.3f%s." % (
        duration, avg_duration * (1000 ** -thousands), units[-thousands]))
[ "def", "print_result_from_timeit", "(", "stmt", "=", "'pass'", ",", "setup", "=", "'pass'", ",", "number", "=", "1000000", ")", ":", "units", "=", "[", "\"s\"", ",", "\"ms\"", ",", "\"us\"", ",", "\"ns\"", "]", "duration", "=", "timeit", "(", "stmt", ",", "setup", ",", "number", "=", "int", "(", "number", ")", ")", "avg_duration", "=", "duration", "/", "float", "(", "number", ")", "thousands", "=", "int", "(", "math", ".", "floor", "(", "math", ".", "log", "(", "avg_duration", ",", "1000", ")", ")", ")", "print", "(", "\"Total time: %fs. Average run: %.3f%s.\"", "%", "(", "duration", ",", "avg_duration", "*", "(", "1000", "**", "-", "thousands", ")", ",", "units", "[", "-", "thousands", "]", ")", ")" ]
Helper function to report how much time the execution of one statement took
[ "Clean", "function", "to", "know", "how", "much", "time", "took", "the", "execution", "of", "one", "statement" ]
python
train
43.818182
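A direct usage sketch of print_result_from_timeit above; the statement and setup strings are arbitrary examples:

# Time sorting 100 reversed integers, averaged over 10,000 runs.
print_result_from_timeit(
    stmt='sorted(xs)',
    setup='xs = list(range(100, 0, -1))',
    number=10000)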
gbiggs/rtctree
rtctree/component.py
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/component.py#L554-L574
def deactivate_in_ec(self, ec_index):
    '''Deactivate this component in an execution context.

    @param ec_index The index of the execution context to deactivate in.
                    This index is into the total array of contexts, that is
                    both owned and participating contexts. If the value of
                    ec_index is greater than or equal to the length of
                    @ref owned_ecs, that length is subtracted from ec_index
                    and the result used as an index into
                    @ref participating_ecs.

    '''
    with self._mutex:
        if ec_index >= len(self.owned_ecs):
            ec_index -= len(self.owned_ecs)
            if ec_index >= len(self.participating_ecs):
                raise exceptions.BadECIndexError(ec_index)
            ec = self.participating_ecs[ec_index]
        else:
            ec = self.owned_ecs[ec_index]
        ec.deactivate_component(self._obj)
[ "def", "deactivate_in_ec", "(", "self", ",", "ec_index", ")", ":", "with", "self", ".", "_mutex", ":", "if", "ec_index", ">=", "len", "(", "self", ".", "owned_ecs", ")", ":", "ec_index", "-=", "len", "(", "self", ".", "owned_ecs", ")", "if", "ec_index", ">=", "len", "(", "self", ".", "participating_ecs", ")", ":", "raise", "exceptions", ".", "BadECIndexError", "(", "ec_index", ")", "ec", "=", "self", ".", "participating_ecs", "[", "ec_index", "]", "else", ":", "ec", "=", "self", ".", "owned_ecs", "[", "ec_index", "]", "ec", ".", "deactivate_component", "(", "self", ".", "_obj", ")" ]
Deactivate this component in an execution context. @param ec_index The index of the execution context to deactivate in. This index is into the total array of contexts, that is both owned and participating contexts. If the value of ec_index is greater than or equal to the length of @ref owned_ecs, that length is subtracted from ec_index and the result used as an index into @ref participating_ecs.
[ "Deactivate", "this", "component", "in", "an", "execution", "context", "." ]
python
train
47.190476
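The deactivate_in_ec record above indexes owned and participating execution contexts as one flat list. A standalone illustration of that mapping (context names invented):

owned_ecs = ['own0', 'own1']            # combined indices 0-1
participating_ecs = ['part0', 'part1']  # combined indices 2-3

def resolve(ec_index):
    # Same arithmetic as the record above.
    if ec_index >= len(owned_ecs):
        ec_index -= len(owned_ecs)
        if ec_index >= len(participating_ecs):
            raise IndexError(ec_index)
        return participating_ecs[ec_index]
    return owned_ecs[ec_index]

assert resolve(1) == 'own1'
assert resolve(3) == 'part1'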
gabstopper/smc-python
smc/core/collection.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/collection.py#L533-L599
def add_layer3_cluster_interface(self, interface_id, cluster_virtual=None,
        network_value=None, macaddress=None, nodes=None,
        cvi_mode='packetdispatch', zone_ref=None, comment=None, **kw):
    """
    Add cluster virtual interface. A "CVI" interface is used as
    a VIP address for clustered engines. Providing 'nodes' will create
    the node specific interfaces. You can also add a cluster address
    with only a CVI, or only NDI's.

    Add CVI only::

        engine.physical_interface.add_cluster_virtual_interface(
            interface_id=30,
            cluster_virtual='30.30.30.1',
            network_value='30.30.30.0/24',
            macaddress='02:02:02:02:02:06')

    Add NDI's only::

        engine.physical_interface.add_cluster_virtual_interface(
            interface_id=30, nodes=nodes)

    Add CVI and NDI's::

        engine.physical_interface.add_cluster_virtual_interface(
            cluster_virtual='5.5.5.1',
            network_value='5.5.5.0/24',
            macaddress='02:03:03:03:03:03',
            nodes=[{'address':'5.5.5.2', 'network_value':'5.5.5.0/24', 'nodeid':1},
                   {'address':'5.5.5.3', 'network_value':'5.5.5.0/24', 'nodeid':2}])

    .. versionchanged:: 0.6.1
        Renamed from add_cluster_virtual_interface

    :param str,int interface_id: physical interface identifier
    :param str cluster_virtual: CVI address (VIP) for this interface
    :param str network_value: network value for VIP; format: 10.10.10.0/24
    :param str macaddress: mandatory mac address if cluster_virtual and
        network_value provided
    :param list nodes: list of dictionary items identifying cluster nodes
    :param str cvi_mode: packetdispatch is recommended setting
    :param str zone_ref: zone reference, can be name, href or Zone
    :param kw: keyword arguments are valid NodeInterface sub-interface
        settings passed in during create time. For example, 'backup_mgt=True'
        to enable this interface as the management backup.
    :raises EngineCommandFailed: failure creating interface
    :return: None
    """
    interfaces = [{'nodes': nodes if nodes else [],
                   'cluster_virtual': cluster_virtual, 'network_value': network_value}]
    try:
        interface = self._engine.interface.get(interface_id)
        interface._add_interface(interface_id, interfaces=interfaces)
        return interface.update()
    except InterfaceNotFound:
        interface = ClusterPhysicalInterface(
            engine=self._engine,
            interface_id=interface_id,
            interfaces=interfaces,
            cvi_mode=cvi_mode if macaddress else 'none',
            macaddress=macaddress,
            zone_ref=zone_ref, comment=comment, **kw)

        return self._engine.add_interface(interface)
[ "def", "add_layer3_cluster_interface", "(", "self", ",", "interface_id", ",", "cluster_virtual", "=", "None", ",", "network_value", "=", "None", ",", "macaddress", "=", "None", ",", "nodes", "=", "None", ",", "cvi_mode", "=", "'packetdispatch'", ",", "zone_ref", "=", "None", ",", "comment", "=", "None", ",", "*", "*", "kw", ")", ":", "interfaces", "=", "[", "{", "'nodes'", ":", "nodes", "if", "nodes", "else", "[", "]", ",", "'cluster_virtual'", ":", "cluster_virtual", ",", "'network_value'", ":", "network_value", "}", "]", "try", ":", "interface", "=", "self", ".", "_engine", ".", "interface", ".", "get", "(", "interface_id", ")", "interface", ".", "_add_interface", "(", "interface_id", ",", "interfaces", "=", "interfaces", ")", "return", "interface", ".", "update", "(", ")", "except", "InterfaceNotFound", ":", "interface", "=", "ClusterPhysicalInterface", "(", "engine", "=", "self", ".", "_engine", ",", "interface_id", "=", "interface_id", ",", "interfaces", "=", "interfaces", ",", "cvi_mode", "=", "cvi_mode", "if", "macaddress", "else", "'none'", ",", "macaddress", "=", "macaddress", ",", "zone_ref", "=", "zone_ref", ",", "comment", "=", "comment", ",", "*", "*", "kw", ")", "return", "self", ".", "_engine", ".", "add_interface", "(", "interface", ")" ]
Add cluster virtual interface. A "CVI" interface is used as a VIP address for clustered engines. Providing 'nodes' will create the node specific interfaces. You can also add a cluster address with only a CVI, or only NDI's. Add CVI only:: engine.physical_interface.add_cluster_virtual_interface( interface_id=30, cluster_virtual='30.30.30.1', network_value='30.30.30.0/24', macaddress='02:02:02:02:02:06') Add NDI's only:: engine.physical_interface.add_cluster_virtual_interface( interface_id=30, nodes=nodes) Add CVI and NDI's:: engine.physical_interface.add_cluster_virtual_interface( cluster_virtual='5.5.5.1', network_value='5.5.5.0/24', macaddress='02:03:03:03:03:03', nodes=[{'address':'5.5.5.2', 'network_value':'5.5.5.0/24', 'nodeid':1}, {'address':'5.5.5.3', 'network_value':'5.5.5.0/24', 'nodeid':2}]) .. versionchanged:: 0.6.1 Renamed from add_cluster_virtual_interface :param str,int interface_id: physical interface identifier :param str cluster_virtual: CVI address (VIP) for this interface :param str network_value: network value for VIP; format: 10.10.10.0/24 :param str macaddress: mandatory mac address if cluster_virtual and network_value provided :param list nodes: list of dictionary items identifying cluster nodes :param str cvi_mode: packetdispatch is recommended setting :param str zone_ref: zone reference, can be name, href or Zone :param kw: keyword arguments are valid NodeInterface sub-interface settings passed in during create time. For example, 'backup_mgt=True' to enable this interface as the management backup. :raises EngineCommandFailed: failure creating interface :return: None
[ "Add", "cluster", "virtual", "interface", ".", "A", "CVI", "interface", "is", "used", "as", "a", "VIP", "address", "for", "clustered", "engines", ".", "Providing", "nodes", "will", "create", "the", "node", "specific", "interfaces", ".", "You", "can", "also", "add", "a", "cluster", "address", "with", "only", "a", "CVI", "or", "only", "NDI", "s", ".", "Add", "CVI", "only", "::", "engine", ".", "physical_interface", ".", "add_cluster_virtual_interface", "(", "interface_id", "=", "30", "cluster_virtual", "=", "30", ".", "30", ".", "30", ".", "1", "network_value", "=", "30", ".", "30", ".", "30", ".", "0", "/", "24", "macaddress", "=", "02", ":", "02", ":", "02", ":", "02", ":", "02", ":", "06", ")", "Add", "NDI", "s", "only", "::", "engine", ".", "physical_interface", ".", "add_cluster_virtual_interface", "(", "interface_id", "=", "30", "nodes", "=", "nodes", ")", "Add", "CVI", "and", "NDI", "s", "::", "engine", ".", "physical_interface", ".", "add_cluster_virtual_interface", "(", "cluster_virtual", "=", "5", ".", "5", ".", "5", ".", "1", "network_value", "=", "5", ".", "5", ".", "5", ".", "0", "/", "24", "macaddress", "=", "02", ":", "03", ":", "03", ":", "03", ":", "03", ":", "03", "nodes", "=", "[", "{", "address", ":", "5", ".", "5", ".", "5", ".", "2", "network_value", ":", "5", ".", "5", ".", "5", ".", "0", "/", "24", "nodeid", ":", "1", "}", "{", "address", ":", "5", ".", "5", ".", "5", ".", "3", "network_value", ":", "5", ".", "5", ".", "5", ".", "0", "/", "24", "nodeid", ":", "2", "}", "]", ")" ]
python
train
45.134328
ultrabug/py3status
py3status/modules/battery_level.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/modules/battery_level.py#L266-L325
def _extract_battery_info_from_sys(self):
    """
    Extract the percent charged, charging state, time remaining, and
    capacity for a battery, using Linux's kernel /sys interface

    Only available in kernel 2.6.24(?) and newer. Before that, the kernel
    provided a similar, yet incompatible interface in /proc
    """
    if not os.listdir(self.sys_battery_path):
        return []

    def _parse_battery_info(sys_path):
        """
        Extract battery information from uevent file, already convert to
        int if necessary
        """
        raw_values = {}
        with open(os.path.join(sys_path, u"uevent")) as f:
            for var in f.read().splitlines():
                k, v = var.split("=")
                try:
                    raw_values[k] = int(v)
                except ValueError:
                    raw_values[k] = v
        return raw_values

    battery_list = []
    for path in iglob(os.path.join(self.sys_battery_path, "BAT*")):
        r = _parse_battery_info(path)

        capacity = r.get(
            "POWER_SUPPLY_ENERGY_FULL", r.get("POWER_SUPPLY_CHARGE_FULL")
        )
        present_rate = r.get(
            "POWER_SUPPLY_POWER_NOW",
            r.get("POWER_SUPPLY_CURRENT_NOW", r.get("POWER_SUPPLY_VOLTAGE_NOW")),
        )
        remaining_energy = r.get(
            "POWER_SUPPLY_ENERGY_NOW", r.get("POWER_SUPPLY_CHARGE_NOW")
        )

        battery = {}
        battery["capacity"] = capacity
        battery["charging"] = "Charging" in r["POWER_SUPPLY_STATUS"]
        battery["percent_charged"] = int(
            math.floor(remaining_energy / capacity * 100)
        )
        try:
            if battery["charging"]:
                time_in_secs = (capacity - remaining_energy) / present_rate * 3600
            else:
                time_in_secs = remaining_energy / present_rate * 3600
            battery["time_remaining"] = self._seconds_to_hms(time_in_secs)
        except ZeroDivisionError:
            # Battery is either full charged or is not discharging
            battery["time_remaining"] = FULLY_CHARGED

        battery_list.append(battery)
    return battery_list
[ "def", "_extract_battery_info_from_sys", "(", "self", ")", ":", "if", "not", "os", ".", "listdir", "(", "self", ".", "sys_battery_path", ")", ":", "return", "[", "]", "def", "_parse_battery_info", "(", "sys_path", ")", ":", "\"\"\"\n Extract battery information from uevent file, already convert to\n int if necessary\n \"\"\"", "raw_values", "=", "{", "}", "with", "open", "(", "os", ".", "path", ".", "join", "(", "sys_path", ",", "u\"uevent\"", ")", ")", "as", "f", ":", "for", "var", "in", "f", ".", "read", "(", ")", ".", "splitlines", "(", ")", ":", "k", ",", "v", "=", "var", ".", "split", "(", "\"=\"", ")", "try", ":", "raw_values", "[", "k", "]", "=", "int", "(", "v", ")", "except", "ValueError", ":", "raw_values", "[", "k", "]", "=", "v", "return", "raw_values", "battery_list", "=", "[", "]", "for", "path", "in", "iglob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "sys_battery_path", ",", "\"BAT*\"", ")", ")", ":", "r", "=", "_parse_battery_info", "(", "path", ")", "capacity", "=", "r", ".", "get", "(", "\"POWER_SUPPLY_ENERGY_FULL\"", ",", "r", ".", "get", "(", "\"POWER_SUPPLY_CHARGE_FULL\"", ")", ")", "present_rate", "=", "r", ".", "get", "(", "\"POWER_SUPPLY_POWER_NOW\"", ",", "r", ".", "get", "(", "\"POWER_SUPPLY_CURRENT_NOW\"", ",", "r", ".", "get", "(", "\"POWER_SUPPLY_VOLTAGE_NOW\"", ")", ")", ",", ")", "remaining_energy", "=", "r", ".", "get", "(", "\"POWER_SUPPLY_ENERGY_NOW\"", ",", "r", ".", "get", "(", "\"POWER_SUPPLY_CHARGE_NOW\"", ")", ")", "battery", "=", "{", "}", "battery", "[", "\"capacity\"", "]", "=", "capacity", "battery", "[", "\"charging\"", "]", "=", "\"Charging\"", "in", "r", "[", "\"POWER_SUPPLY_STATUS\"", "]", "battery", "[", "\"percent_charged\"", "]", "=", "int", "(", "math", ".", "floor", "(", "remaining_energy", "/", "capacity", "*", "100", ")", ")", "try", ":", "if", "battery", "[", "\"charging\"", "]", ":", "time_in_secs", "=", "(", "capacity", "-", "remaining_energy", ")", "/", "present_rate", "*", "3600", "else", ":", "time_in_secs", "=", "remaining_energy", "/", "present_rate", "*", "3600", "battery", "[", "\"time_remaining\"", "]", "=", "self", ".", "_seconds_to_hms", "(", "time_in_secs", ")", "except", "ZeroDivisionError", ":", "# Battery is either full charged or is not discharging", "battery", "[", "\"time_remaining\"", "]", "=", "FULLY_CHARGED", "battery_list", ".", "append", "(", "battery", ")", "return", "battery_list" ]
Extract the percent charged, charging state, time remaining, and capacity for a battery, using Linux's kernel /sys interface Only available in kernel 2.6.24(?) and newer. Before that, the kernel provided a similar, yet incompatible interface in /proc
[ "Extract", "the", "percent", "charged", "charging", "state", "time", "remaining", "and", "capacity", "for", "a", "battery", "using", "Linux", "s", "kernel", "/", "sys", "interface" ]
python
train
38.15
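The sysfs reader above reduces each uevent file to key=value pairs with opportunistic int conversion. A self-contained sketch on an invented uevent payload:

sample_uevent = (
    'POWER_SUPPLY_STATUS=Discharging\n'
    'POWER_SUPPLY_CHARGE_FULL=5000000\n'
    'POWER_SUPPLY_CHARGE_NOW=2500000\n')

raw = {}
for line in sample_uevent.splitlines():
    k, v = line.split('=')
    try:
        raw[k] = int(v)      # numeric fields become ints
    except ValueError:
        raw[k] = v           # everything else stays a string

percent = int(raw['POWER_SUPPLY_CHARGE_NOW'] / raw['POWER_SUPPLY_CHARGE_FULL'] * 100)
assert percent == 50 and raw['POWER_SUPPLY_STATUS'] == 'Discharging'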