column            dtype    values
------            -----    ------
repo              string   lengths 7 - 55
path              string   lengths 4 - 223
url               string   lengths 87 - 315
code              string   lengths 75 - 104k
code_tokens       list
docstring         string   lengths 1 - 46.9k
docstring_tokens  list
language          string   1 distinct value
partition         string   3 distinct values
avg_line_len      float64  7.91 - 980
praw-dev/prawtools
prawtools/mod.py
https://github.com/praw-dev/prawtools/blob/571d5c28c2222f6f8dbbca8c815b8da0a776ab85/prawtools/mod.py#L180-L198
def output_flair_stats(self):
    """Display statistics (number of users) for each unique flair item."""
    css_counter = Counter()
    text_counter = Counter()
    for flair in self.current_flair():
        if flair['flair_css_class']:
            css_counter[flair['flair_css_class']] += 1
        if flair['flair_text']:
            text_counter[flair['flair_text']] += 1

    print('Flair CSS Statistics')
    for flair, count in sorted(css_counter.items(),
                               key=lambda x: (x[1], x[0])):
        print('{0:3} {1}'.format(count, flair))

    print('Flair Text Statistics')
    for flair, count in sorted(text_counter.items(),
                               key=lambda x: (x[1], x[0]), reverse=True):
        print('{0:3} {1}'.format(count, flair))
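The tallying above is plain collections.Counter work; a minimal standalone sketch with made-up flair dicts standing in for self.current_flair() (all data below is illustrative, not part of prawtools):

from collections import Counter

flairs = [                                   # stand-in for current_flair()
    {'flair_css_class': 'mod', 'flair_text': 'Moderator'},
    {'flair_css_class': 'mod', 'flair_text': None},
    {'flair_css_class': None, 'flair_text': 'Helper'},
]

css_counter = Counter()
for flair in flairs:
    if flair['flair_css_class']:             # skip None / empty classes
        css_counter[flair['flair_css_class']] += 1

# sort by (count, name) so ties break alphabetically
for name, count in sorted(css_counter.items(), key=lambda x: (x[1], x[0])):
    print('{0:3} {1}'.format(count, name))   # prints "  2 mod"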
python
train
43.526316
j0ack/flask-codemirror
flask_codemirror/__init__.py
https://github.com/j0ack/flask-codemirror/blob/81ad831ff849b60bb34de5db727ad626ff3c9bdc/flask_codemirror/__init__.py#L92-L127
def include_codemirror(self):
    """Include resources in pages"""
    contents = []
    # base
    js = self._get_tag('codemirror.js', 'script')
    css = self._get_tag('codemirror.css', 'stylesheet')
    if js and css:
        contents.append(js)
        contents.append(css)
    # languages
    for language in self.languages:
        url = self.__class__.LANGUAGE_REL_URL.format(language)
        js = self._get_tag(url, 'script')
        if js:
            contents.append(js)
    # theme
    if self.theme:
        url = self.__class__.THEME_REL_URL.format(self.theme)
        css = self._get_tag(url, 'stylesheet')
        if css:
            contents.append(css)
    # addons
    if self.addons:
        # add to list
        for addon_type, name in self.addons:
            url = self.__class__.ADDON_REL_URL.format(addon_type, name)
            js = self._get_tag(url, 'script')
            if js:
                contents.append(js)
            # if there is a css file relative to this addon
            url = self.__class__.ADDON_CSS_REL_URL.format(addon_type, name)
            css = self._get_tag(url, 'stylesheet', False)
            if css:
                contents.append(css)
    # return html
    return Markup('\n'.join(contents))
python
train
37.666667
nerdvegas/rez
src/rez/vendor/amqp/connection.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/amqp/connection.py#L886-L915
def heartbeat_tick(self, rate=2):
    """Send heartbeat packets, if necessary, and fail if none have been
    received recently.  This should be called frequently, on the order
    of once per second.

    :keyword rate: Ignored
    """
    if not self.heartbeat:
        return

    # treat actual data exchange in either direction as a heartbeat
    sent_now = self.method_writer.bytes_sent
    recv_now = self.method_reader.bytes_recv
    if self.prev_sent is None or self.prev_sent != sent_now:
        self.last_heartbeat_sent = monotonic()
    if self.prev_recv is None or self.prev_recv != recv_now:
        self.last_heartbeat_received = monotonic()
    self.prev_sent, self.prev_recv = sent_now, recv_now

    # send a heartbeat if it's time to do so
    if monotonic() > self.last_heartbeat_sent + self.heartbeat:
        self.send_heartbeat()
        self.last_heartbeat_sent = monotonic()

    # if we've missed two intervals' heartbeats, fail; this gives the
    # server enough time to send heartbeats a little late
    if (self.last_heartbeat_received and
            self.last_heartbeat_received + 2 * self.heartbeat < monotonic()):
        raise ConnectionForced('Too many heartbeats missed')
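The bookkeeping here is independent of AMQP. A minimal sketch of the same policy (any traffic counts as a heartbeat; missing two intervals is fatal), with the class and parameter names invented for illustration and the builtin ConnectionError standing in for ConnectionForced:

from time import monotonic

class HeartbeatMonitor:
    """Illustrative stand-in for the connection's heartbeat state."""

    def __init__(self, interval):
        self.interval = interval            # negotiated heartbeat, seconds
        self.last_sent = monotonic()
        self.last_received = monotonic()
        self.prev_sent = self.prev_recv = None

    def tick(self, bytes_sent, bytes_recv, send_heartbeat):
        # any byte movement in either direction counts as a heartbeat
        if self.prev_sent != bytes_sent:
            self.last_sent = monotonic()
        if self.prev_recv != bytes_recv:
            self.last_received = monotonic()
        self.prev_sent, self.prev_recv = bytes_sent, bytes_recv

        # our turn to send?
        if monotonic() > self.last_sent + self.interval:
            send_heartbeat()
            self.last_sent = monotonic()

        # two missed intervals lets the peer be a little late before we fail
        if self.last_received + 2 * self.interval < monotonic():
            raise ConnectionError('Too many heartbeats missed')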
python
train
43.066667
openego/ding0
ding0/core/__init__.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/__init__.py#L327-L362
def build_mv_grid_district(self, poly_id, subst_id, grid_district_geo_data,
                           station_geo_data):
    """Initiates single MV grid_district including station and grid

    Parameters
    ----------
    poly_id: int
        ID of grid_district according to database table. Also used as ID for
        created grid #TODO: check type
    subst_id: int
        ID of station according to database table #TODO: check type
    grid_district_geo_data: :shapely:`Shapely Polygon object<polygons>`
        Polygon of grid district
    station_geo_data: :shapely:`Shapely Point object<points>`
        Point of station

    Returns
    -------
    :shapely:`Shapely Polygon object<polygons>`
        Description of return #TODO: check
    """
    mv_station = MVStationDing0(id_db=subst_id, geo_data=station_geo_data)

    mv_grid = MVGridDing0(network=self,
                          id_db=poly_id,
                          station=mv_station)
    mv_grid_district = MVGridDistrictDing0(id_db=poly_id,
                                           mv_grid=mv_grid,
                                           geo_data=grid_district_geo_data)
    mv_grid.grid_district = mv_grid_district
    mv_station.grid = mv_grid

    self.add_mv_grid_district(mv_grid_district)

    return mv_grid_district
python
train
38.361111
diamondman/proteusisc
proteusisc/drivers/digilentdriver.py
https://github.com/diamondman/proteusisc/blob/7622b7b04e63f9dc0f5a04429ff78d9a490c9c5c/proteusisc/drivers/digilentdriver.py#L263-L283
def jtag_disable(self):
    """
    Disables JTAG output on the controller. JTAG operations executed
    immediately after this function will return useless data or fail.

    Usage:
        >>> from proteusisc import getAttachedControllers, bitarray
        >>> c = getAttachedControllers()[0]
        >>> c.jtag_enable()
        >>> c.write_tms_bits(bitarray("001011111"), return_tdo=True)
        >>> c.jtag_disable()
    """
    if not self._jtagon:
        return
    status, _ = self.bulkCommand(_BMSG_DISABLE_JTAG)
    if status == 0:
        self._jtagon = False
    elif status == 3:
        raise JTAGControlError("Error Code %s" % status)

    self.close_handle()
python
train
33.952381
geertj/gruvi
lib/gruvi/stream.py
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/stream.py#L295-L320
def readlines(self, hint=-1):
    """Read lines until EOF, and return them as a list.

    If *hint* is specified, then stop reading lines as soon as the total
    size of all lines exceeds *hint*.
    """
    self._check_readable()
    lines = []
    chunks = []
    bytes_read = 0
    while True:
        chunk = self._buffer.get_chunk(-1, b'\n')
        if not chunk:
            break
        chunks.append(chunk)
        if chunk.endswith(b'\n'):
            lines.append(b''.join(chunks))
            del chunks[:]
            bytes_read += len(lines[-1])
            if hint >= 0 and bytes_read > hint:
                break
    if chunks:
        lines.append(b''.join(chunks))
    if not lines and not self._buffer.eof and self._buffer.error:
        raise compat.saved_exc(self._buffer.error)
    return lines
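The *hint* cutoff can be exercised without a stream; a rough standalone equivalent over an in-memory bytes object (not gruvi's API, just the same bookkeeping: the line that crosses the threshold is still kept):

def readlines_from_bytes(data, hint=-1):
    lines, bytes_read = [], 0
    for line in data.splitlines(keepends=True):
        lines.append(line)
        bytes_read += len(line)
        if hint >= 0 and bytes_read > hint:
            break
    return lines

print(readlines_from_bytes(b'a\nbb\nccc\n', hint=3))   # [b'a\n', b'bb\n']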
python
train
33.923077
priestc/giotto
giotto/__init__.py
https://github.com/priestc/giotto/blob/d4c26380caefa7745bb27135e315de830f7254d3/giotto/__init__.py#L11-L77
def initialize(module_name=None):
    """
    Build the giotto settings object. This function gets called
    at the very begining of every request cycle.
    """
    import giotto
    from giotto.utils import random_string, switchout_keyvalue
    from django.conf import settings

    setattr(giotto, '_config', GiottoSettings())

    if not module_name:
        # For testing. No settings will be set.
        return

    project_module = importlib.import_module(module_name)
    project_path = os.path.dirname(project_module.__file__)
    setattr(giotto._config, 'project_path', project_path)

    try:
        secrets = importlib.import_module("%s.controllers.secrets" % module_name)
    except ImportError:
        secrets = None

    try:
        machine = importlib.import_module("%s.controllers.machine" % module_name)
    except ImportError:
        machine = None

    config = importlib.import_module("%s.controllers.config" % module_name)

    if config:
        for item in dir(config):
            setting_value = getattr(config, item)
            setattr(giotto._config, item, setting_value)

    if secrets:
        for item in dir(secrets):
            setting_value = getattr(secrets, item)
            setattr(giotto._config, item, setting_value)
    else:
        logging.warning("No secrets.py found")

    if machine:
        for item in dir(machine):
            setting_value = getattr(machine, item)
            setattr(giotto._config, item, setting_value)
    else:
        logging.warning("No machine.py found")

    settings.configure(
        SECRET_KEY=random_string(32),
        DATABASES=get_config('DATABASES'),
        INSTALLED_APPS=(module_name, 'giotto')
    )

    ss = get_config('session_store', None)
    if ss:
        class_ = switchout_keyvalue(ss)
        setattr(giotto._config, "session_store", class_())

    cache_engine = get_config("cache", None)
    if hasattr(cache_engine, 'lower'):
        # session engine was passed in as string, exchange for engine object.
        class_ = switchout_keyvalue(cache_engine)
        e = class_(host=get_config("cache_host", "localhost"))
        setattr(giotto._config, "cache_engine", e)
python
train
31.522388
ManiacalLabs/BiblioPixelAnimations
BiblioPixelAnimations/matrix/TicTacToe.py
https://github.com/ManiacalLabs/BiblioPixelAnimations/blob/fba81f6b94f5265272a53f462ef013df1ccdb426/BiblioPixelAnimations/matrix/TicTacToe.py#L32-L38
def complete(self):
    """is the game over?"""
    if None not in [v for v in self.squares]:
        return True
    if self.winner() is not None:
        return True
    return False
python
train
28.857143
CalebBell/fluids
fluids/fittings.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/fittings.py#L1129-L1189
def bend_rounded_Crane(Di, angle, rc=None, bend_diameters=None):
    r'''Calculates the loss coefficient for any rounded bend in a pipe
    according to the Crane TP 410M [1]_ method. This method effectively uses
    an interpolation from tabulated values in [1]_ for friction factor
    multipliers vs. curvature radius.

    .. figure:: fittings/bend_rounded.png
       :scale: 30 %
       :alt: rounded bend; after [1]_

    Parameters
    ----------
    Di : float
        Inside diameter of pipe, [m]
    angle : float
        Angle of bend, [degrees]
    rc : float, optional
        Radius of curvature of the entrance, optional [m]
    bend_diameters : float, optional (used if rc not provided)
        Number of diameters of pipe making up the bend radius [-]

    Returns
    -------
    K : float
        Loss coefficient [-]

    Notes
    -----
    The Crane method does match the trend of increased pressure drop as
    roughness increases.

    The points in [1]_ are extrapolated to other angles via a well-fitting
    Chebyshev approximation, whose accuracy can be seen in the below plot.

    .. plot:: plots/bend_rounded_Crane.py

    Examples
    --------
    >>> bend_rounded_Crane(Di=.4020, rc=.4*5, angle=30)
    0.09321910015613409

    References
    ----------
    .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe.
       Crane, 2009.
    '''
    if not rc:
        if bend_diameters is None:
            bend_diameters = 5.0
        rc = Di*bend_diameters

    fd = ft_Crane(Di)

    radius_ratio = rc/Di
    if radius_ratio < 1.0:
        radius_ratio = 1.0
    elif radius_ratio > 20.0:
        radius_ratio = 20.0
    factor = horner(bend_rounded_Crane_coeffs,
                    0.105263157894736836*(radius_ratio - 10.5))
    K = fd*factor
    K = (angle/90.0 - 1.0)*(0.25*pi*fd*radius_ratio + 0.5*K) + K
    return K
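The constant 0.105263157894736836 appears to be 2/19 written out to double precision: together with the 10.5 shift it maps the clamped radius_ratio range [1, 20] onto [-1, 1], the interval Chebyshev approximations are fitted on. A quick check:

scale = 2.0 / 19.0                         # 0.105263157894736836...
for r in (1.0, 10.5, 20.0):
    print(round(scale * (r - 10.5), 12))   # -1.0, 0.0, 1.0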
python
train
29.491803
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L658-L788
def post(self):
    '''
    :ref:`Authenticate <rest_tornado-auth>` against Salt's eauth system

    .. http:post:: /login

        :reqheader X-Auth-Token: |req_token|
        :reqheader Accept: |req_accept|
        :reqheader Content-Type: |req_ct|

        :form eauth: the eauth backend configured for the user
        :form username: username
        :form password: password

        :status 200: |200|
        :status 400: |400|
        :status 401: |401|
        :status 406: |406|
        :status 500: |500|

    **Example request:**

    .. code-block:: bash

        curl -si localhost:8000/login \\
                -H "Accept: application/json" \\
                -d username='saltuser' \\
                -d password='saltpass' \\
                -d eauth='pam'

    .. code-block:: text

        POST / HTTP/1.1
        Host: localhost:8000
        Content-Length: 42
        Content-Type: application/x-www-form-urlencoded
        Accept: application/json

        username=saltuser&password=saltpass&eauth=pam

    **Example response:**

    .. code-block:: text

        HTTP/1.1 200 OK
        Content-Type: application/json
        Content-Length: 206
        X-Auth-Token: 6d1b722e
        Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/

        {"return": {
            "token": "6d1b722e",
            "start": 1363805943.776223,
            "expire": 1363849143.776224,
            "user": "saltuser",
            "eauth": "pam",
            "perms": [
                "grains.*",
                "status.*",
                "sys.*",
                "test.*"
            ]
        }}
    '''
    try:
        if not isinstance(self.request_payload, dict):
            self.send_error(400)
            return

        creds = {'username': self.request_payload['username'],
                 'password': self.request_payload['password'],
                 'eauth': self.request_payload['eauth'],
                 }
    # if any of the args are missing, its a bad request
    except KeyError:
        self.send_error(400)
        return

    token = self.application.auth.mk_token(creds)
    if 'token' not in token:
        # TODO: nicer error message
        # 'Could not authenticate using provided credentials')
        self.send_error(401)
        # return since we don't want to execute any more
        return

    # Grab eauth config for the current backend for the current user
    try:
        eauth = self.application.opts['external_auth'][token['eauth']]
        # Get sum of '*' perms, user-specific perms, and group-specific perms
        _perms = eauth.get(token['name'], [])
        _perms.extend(eauth.get('*', []))

        if 'groups' in token and token['groups']:
            user_groups = set(token['groups'])
            eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])

            for group in user_groups & eauth_groups:
                _perms.extend(eauth['{0}%'.format(group)])

        # dedup. perm can be a complex dict, so we cant use set
        perms = []
        for perm in _perms:
            if perm not in perms:
                perms.append(perm)

    # If we can't find the creds, then they aren't authorized
    except KeyError:
        self.send_error(401)
        return

    except (AttributeError, IndexError):
        log.debug(
            "Configuration for external_auth malformed for eauth '%s', "
            "and user '%s'.", token.get('eauth'), token.get('name'),
            exc_info=True
        )
        # TODO better error -- 'Configuration for external_auth could not be read.'
        self.send_error(500)
        return

    ret = {'return': [{
        'token': token['token'],
        'expire': token['expire'],
        'start': token['start'],
        'user': token['name'],
        'eauth': token['eauth'],
        'perms': perms,
    }]}

    self.write(self.serialize(ret))
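The group handling hinges on a trailing '%' marking group entries in the eauth config; a standalone sketch of just that merge step, using made-up data:

eauth = {
    '*': ['test.ping'],
    'saltuser': ['grains.*'],
    'admins%': ['.*'],        # trailing '%' marks a group entry
}
token = {'name': 'saltuser', 'groups': ['admins', 'other']}

perms = eauth.get(token['name'], []) + eauth.get('*', [])
eauth_groups = {k.rstrip('%') for k in eauth if k.endswith('%')}
for group in set(token['groups']) & eauth_groups:
    perms.extend(eauth[group + '%'])

print(perms)   # ['grains.*', 'test.ping', '.*']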
python
train
32.175573
apache/incubator-heron
heron/instance/src/python/utils/misc/communicator.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/instance/src/python/utils/misc/communicator.py#L64-L77
def poll(self):
    """Poll from the buffer

    It is a non-blocking operation, and when the buffer is empty, it raises
    Queue.Empty exception
    """
    try:
        # non-blocking
        ret = self._buffer.get(block=False)
        if self._producer_callback is not None:
            self._producer_callback()
        return ret
    except Queue.Empty:
        Log.debug("%s: Empty in poll()" % str(self))
        raise Queue.Empty
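The same non-blocking pattern with the standard library (Python 3 spells the module queue):

import queue

buf = queue.Queue()
buf.put('item')
print(buf.get(block=False))   # 'item'
try:
    buf.get(block=False)      # buffer is empty now
except queue.Empty:
    print('buffer drained')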
python
valid
28.928571
chrippa/python-librtmp
librtmp/rtmp.py
https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L379-L403
def call(self, method, *args, **params):
    """Calls a method on the server."""
    transaction_id = params.get("transaction_id")

    if not transaction_id:
        self.transaction_id += 1
        transaction_id = self.transaction_id

    obj = params.get("obj")

    args = [method, transaction_id, obj] + list(args)
    args_encoded = map(lambda x: encode_amf(x), args)

    body = b"".join(args_encoded)

    format = params.get("format", PACKET_SIZE_MEDIUM)
    channel = params.get("channel", 0x03)

    packet = RTMPPacket(type=PACKET_TYPE_INVOKE,
                        format=format,
                        channel=channel,
                        body=body)

    self.send_packet(packet)

    return RTMPCall(self, transaction_id)
python
train
30.4
greenbone/ospd
ospd/xml.py
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/xml.py#L47-L69
def simple_response_str(command, status, status_text, content=""):
    """ Creates an OSP response XML string.

    Arguments:
        command (str): OSP Command to respond to.
        status (int): Status of the response.
        status_text (str): Status text of the response.
        content (str): Text part of the response XML element.

    Return:
        String of response in xml format.
    """
    response = Element('%s_response' % command)
    for name, value in [('status', str(status)),
                        ('status_text', status_text)]:
        response.set(name, str(value))
    if isinstance(content, list):
        for elem in content:
            response.append(elem)
    elif isinstance(content, Element):
        response.append(content)
    else:
        response.text = content
    return tostring(response)
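Everything here is standard library; a runnable, self-contained version of the string-content path (the command name below is invented for illustration):

from xml.etree.ElementTree import Element, tostring

response = Element('%s_response' % 'start_scan')   # command name is made up
for name, value in [('status', 200), ('status_text', 'OK')]:
    response.set(name, str(value))
response.text = 'scan-id-here'
print(tostring(response))
# b'<start_scan_response status="200" status_text="OK">scan-id-here</start_scan_response>'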
python
train
34.304348
apache/incubator-superset
superset/viz.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/viz.py#L472-L480
def data(self):
    """This is the data object serialized to the js layer"""
    content = {
        'form_data': self.form_data,
        'token': self.token,
        'viz_name': self.viz_type,
        'filter_select_enabled': self.datasource.filter_select_enabled,
    }
    return content
python
train
34.888889
datacats/datacats
datacats/userprofile.py
https://github.com/datacats/datacats/blob/e4bae503efa997660fb3f34fe166699569653157/datacats/userprofile.py#L60-L75
def save(self):
    """
    Save profile settings into user profile directory
    """
    config = self.profiledir + '/config'
    if not isdir(self.profiledir):
        makedirs(self.profiledir)
    cp = SafeConfigParser()
    cp.add_section('ssh')
    cp.set('ssh', 'private_key', self.ssh_private_key)
    cp.set('ssh', 'public_key', self.ssh_public_key)
    with open(config, 'w') as cfile:
        cp.write(cfile)
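SafeConfigParser was deprecated in Python 3.2 (renamed to ConfigParser) and removed in 3.12; a runnable sketch of the same write with the modern class, using illustrative paths in place of the profile's real values:

from configparser import ConfigParser
from os import makedirs
from os.path import isdir

profiledir = '/tmp/example-profile'        # illustrative path
if not isdir(profiledir):
    makedirs(profiledir)
cp = ConfigParser()
cp.add_section('ssh')
cp.set('ssh', 'private_key', '/home/user/.ssh/id_rsa')       # made-up values
cp.set('ssh', 'public_key', '/home/user/.ssh/id_rsa.pub')
with open(profiledir + '/config', 'w') as cfile:
    cp.write(cfile)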
python
train
28.375
rbarrois/xworkflows
src/xworkflows/base.py
https://github.com/rbarrois/xworkflows/blob/4a94b04ba83cb43f61d4b0f7db6964a667c86b5b/src/xworkflows/base.py#L993-L1004
def _find_workflows(mcs, attrs):
    """Finds all occurrences of a workflow in the attributes definitions.

    Returns:
        dict(str => StateField): maps an attribute name to a StateField
            describing the related Workflow.
    """
    workflows = {}
    for attribute, value in attrs.items():
        if isinstance(value, Workflow):
            workflows[attribute] = StateField(value)
    return workflows
python
train
37.5
tanghaibao/jcvi
jcvi/utils/progressbar.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/progressbar.py#L293-L306
def update(self, pbar, width):
    'Updates the progress bar and its subcomponents'

    left, marker, right = (format_updatable(i, pbar)
                           for i in (self.left, self.marker, self.right))

    width -= len(left) + len(right)
    # Marker must *always* have length of 1
    marker *= int(pbar.currval / pbar.maxval * width)

    if self.fill_left:
        return '%s%s%s' % (left, marker.ljust(width, self.fill), right)
    else:
        return '%s%s%s' % (left, marker.rjust(width, self.fill), right)
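The marker arithmetic is easy to check in isolation; with made-up widget values:

left, marker, right, fill = '|', '#', '|', ' '
currval, maxval, width = 30, 100, 22

inner = width - len(left) - len(right)        # 20 columns left for the bar
bar = marker * int(currval / maxval * inner)  # 6 markers at 30%
print('%s%s%s' % (left, bar.ljust(inner, fill), right))
# prints: |######              |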
python
train
39.357143
ericmjl/nxviz
nxviz/geometry.py
https://github.com/ericmjl/nxviz/blob/6ea5823a8030a686f165fbe37d7a04d0f037ecc9/nxviz/geometry.py#L101-L117
def circos_radius(n_nodes, node_r):
    """
    Automatically computes the origin-to-node centre radius of the Circos
    plot using the triangle equality sine rule.

    a / sin(A) = b / sin(B) = c / sin(C)

    :param n_nodes: the number of nodes in the plot.
    :type n_nodes: int
    :param node_r: the radius of each node.
    :type node_r: float
    :returns: Origin-to-node centre radius.
    """
    A = 2 * np.pi / n_nodes  # noqa
    B = (np.pi - A) / 2  # noqa
    a = 2 * node_r
    return a * np.sin(B) / np.sin(A)
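A sanity check: for six unit-radius nodes, A and B both come out to 60 degrees, so the origin-to-centre radius is 2. Adjacent node centres then sit exactly 2 * node_r apart, meaning neighbouring nodes just touch:

import numpy as np

n_nodes, node_r = 6, 1.0
A = 2 * np.pi / n_nodes
B = (np.pi - A) / 2
print(2 * node_r * np.sin(B) / np.sin(A))   # ~2.0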
python
train
30.117647
koszullab/instaGRAAL
instagraal/leastsqbound.py
https://github.com/koszullab/instaGRAAL/blob/1c02ca838e57d8178eec79f223644b2acd0153dd/instagraal/leastsqbound.py#L81-L103
def external2internal(xe, bounds):
    """ Convert a series of external variables to internal variables"""

    xi = np.empty_like(xe)

    for i, (v, bound) in enumerate(zip(xe, bounds)):

        a = bound[0]  # minimum
        b = bound[1]  # maximum

        if a == None and b == None:  # No constraints
            xi[i] = v

        elif b == None:  # only min
            xi[i] = np.sqrt((v - a + 1.) ** 2. - 1)

        elif a == None:  # only max
            xi[i] = np.sqrt((b - v + 1.) ** 2. - 1)

        else:  # both min and max
            xi[i] = np.arcsin((2. * (v - a) / (b - a)) - 1.)

    return xi
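One value per bounded branch makes the transform concrete; the expected internal values follow directly from the formulas above (numpy assumed imported as np, as in the source module):

import numpy as np

# external value 2.0 with min bound 1.0, value 2.0 with max bound 3.0,
# and value 0.5 with bounds (0.0, 1.0):
print(np.sqrt((2.0 - 1.0 + 1.0) ** 2 - 1))        # min-only:  sqrt(3) ~ 1.732
print(np.sqrt((3.0 - 2.0 + 1.0) ** 2 - 1))        # max-only:  sqrt(3) ~ 1.732
print(np.arcsin(2.0 * (0.5 - 0.0) / 1.0 - 1.0))   # min+max:   arcsin(0) = 0.0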
python
train
25.956522
pandas-dev/pandas
pandas/core/arrays/categorical.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2119-L2155
def _reverse_indexer(self):
    """
    Compute the inverse of a categorical, returning
    a dict of categories -> indexers.

    *This is an internal function*

    Returns
    -------
    dict of categories -> indexers

    Example
    -------
    In [1]: c = pd.Categorical(list('aabca'))

    In [2]: c
    Out[2]:
    [a, a, b, c, a]
    Categories (3, object): [a, b, c]

    In [3]: c.categories
    Out[3]: Index(['a', 'b', 'c'], dtype='object')

    In [4]: c.codes
    Out[4]: array([0, 0, 1, 2, 0], dtype=int8)

    In [5]: c._reverse_indexer()
    Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}

    """
    categories = self.categories
    r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
                                           categories.size)
    counts = counts.cumsum()
    result = (r[start:end] for start, end in zip(counts, counts[1:]))
    result = dict(zip(categories, result))
    return result
python
train
27.972973
craffel/mir_eval
mir_eval/segment.py
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L663-L701
def _mutual_info_score(reference_indices, estimated_indices, contingency=None):
    """Compute the mutual information between two sequence labelings.

    Parameters
    ----------
    reference_indices : np.ndarray
        Array of reference indices
    estimated_indices : np.ndarray
        Array of estimated indices
    contingency : np.ndarray
        Pre-computed contingency matrix.  If None, one will be computed.
        (Default value = None)

    Returns
    -------
    mi : float
        Mutual information

    .. note:: Based on sklearn.metrics.cluster.mutual_info_score

    """
    if contingency is None:
        contingency = _contingency_matrix(reference_indices,
                                          estimated_indices).astype(float)
    contingency_sum = np.sum(contingency)
    pi = np.sum(contingency, axis=1)
    pj = np.sum(contingency, axis=0)
    outer = np.outer(pi, pj)
    nnz = contingency != 0.0
    # normalized contingency
    contingency_nm = contingency[nnz]
    log_contingency_nm = np.log(contingency_nm)
    contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) for
    # possible loss of precision
    log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())
    mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) +
          contingency_nm * log_outer)
    return mi.sum()
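With the contingency matrix fixed by hand (the _contingency_matrix helper is not shown here), the vectorized steps reduce to the familiar sum of p(i,j) * log(p(i,j) / (p(i) * p(j))) in nats; a check on perfectly dependent labels, where the mutual information should be log 2:

import numpy as np

contingency = np.array([[2.0, 0.0],
                        [0.0, 2.0]])
pij = contingency / contingency.sum()
pi = pij.sum(axis=1)
pj = pij.sum(axis=0)
nz = pij > 0
mi = np.sum(pij[nz] * (np.log(pij[nz]) - np.log(np.outer(pi, pj)[nz])))
print(mi, np.log(2))   # both ~0.6931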
python
train
34.615385
wonambi-python/wonambi
wonambi/detect/spindle.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/detect/spindle.py#L1652-L1697
def merge_close(events, min_interval, merge_to_longer=False):
    """Merge events that are separated by a less than a minimum interval.

    Parameters
    ----------
    events : list of dict
        events with 'start' and 'end' times, from one or several channels.
        **Events must be sorted by their start time.**
    min_interval : float
        minimum delay between consecutive events, in seconds
    merge_to_longer : bool (default: False)
        If True, info (chan, peak, etc.) from the longer of the 2 events is
        kept. Otherwise, info from the earlier onset spindle is kept.

    Returns
    -------
    list of dict
        original events list with close events merged.
    """
    half_iv = min_interval / 2
    merged = []

    for higher in events:
        if not merged:
            merged.append(higher)
        else:
            lower = merged[-1]

            if higher['start'] - half_iv <= lower['end'] + half_iv:
                if merge_to_longer and (higher['end'] - higher['start'] >
                                        lower['end'] - lower['start']):
                    start = min(lower['start'], higher['start'])
                    higher.update({'start': start})
                    merged[-1] = higher
                else:
                    end = max(lower['end'], higher['end'])
                    merged[-1].update({'end': end})
            else:
                merged.append(higher)

    return merged
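Given the function above, a quick run: half of min_interval is added to each event's edge, so a 0.2 s gap merges under min_interval=0.5:

events = [{'start': 0.0, 'end': 1.0}, {'start': 1.2, 'end': 2.0}]
print(merge_close(events, min_interval=0.5))
# [{'start': 0.0, 'end': 2.0}]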
python
train
30.326087
apple/turicreate
src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py#L289-L332
def evaluate(self, dataset, metric='auto', **kwargs):
    """
    Evaluate the model by making predictions of target values and comparing
    these to actual values.

    Parameters
    ----------
    dataset : SFrame
        An SFrame having the same feature columns as provided when creating
        the model.

    metric : str, optional
        Name of the evaluation metric.  Possible values are:

        - 'auto'             : Returns all available metrics.
        - 'accuracy'         : Classification accuracy (micro average).
        - 'auc'              : Area under the ROC curve (macro average)
        - 'precision'        : Precision score (macro average)
        - 'recall'           : Recall score (macro average)
        - 'f1_score'         : F1 score (macro average)
        - 'log_loss'         : Log loss
        - 'confusion_matrix' : An SFrame with counts of possible
                               prediction/true label combinations.
        - 'roc_curve'        : An SFrame containing information needed for an
                               ROC curve

    For more flexibility in calculating evaluation metrics, use the
    :class:`~turicreate.evaluation` module.

    Returns
    -------
    out : dict
        Dictionary of evaluation results where the key is the name of the
        evaluation metric (e.g. `accuracy`) and the value is the evaluation
        score.

    See Also
    ----------
    create, predict, classify
    """
    m = self.__proxy__['classifier']
    target = self.__proxy__['target']
    f = _BOW_FEATURE_EXTRACTOR
    test = f(dataset, target)
    return m.evaluate(test, metric, **kwargs)
python
train
38.477273
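As a usage sketch for the evaluate() call above (not from the source: the SFrame column names and the tiny toy corpus are illustrative, and turicreate must be installed):

import turicreate as tc

# Build a toy labeled text dataset and train a bag-of-words classifier.
data = tc.SFrame({'text': ['great product', 'awful service', 'really great'],
                  'label': [1, 0, 1]})
model = tc.text_classifier.create(data, target='label', features=['text'])

# evaluate() accepts any SFrame with the same feature and target columns.
results = model.evaluate(data, metric='accuracy')
print(results['accuracy'])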
RJT1990/pyflux
pyflux/ensembles/mixture_of_experts.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ensembles/mixture_of_experts.py#L283-L304
def plot_weights(self, h, **kwargs): """ Plot the weights from the aggregating algorithm Parameters ---------- h : int How many steps to run the aggregating algorithm on Returns ---------- - A plot of the weights for each model constituent over time """ import matplotlib.pyplot as plt import seaborn as sns figsize = kwargs.get('figsize',(10,7)) weights, _, _ = self.run(h=h) plt.figure(figsize=figsize) plt.plot(self.index[-h:],weights) plt.legend(self.model_names) plt.show()
[ "def", "plot_weights", "(", "self", ",", "h", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "seaborn", "as", "sns", "figsize", "=", "kwargs", ".", "get", "(", "'figsize'", ",", "(", "10", ",", "7", ")", ")", "weights", ",", "_", ",", "_", "=", "self", ".", "run", "(", "h", "=", "h", ")", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "plt", ".", "plot", "(", "self", ".", "index", "[", "-", "h", ":", "]", ",", "weights", ")", "plt", ".", "legend", "(", "self", ".", "model_names", ")", "plt", ".", "show", "(", ")" ]
Plot the weights from the aggregating algorithm Parameters ---------- h : int How many steps to run the aggregating algorithm on Returns ---------- - A plot of the weights for each model constituent over time
[ "Plot", "the", "weights", "from", "the", "aggregating", "algorithm", "Parameters", "----------", "h", ":", "int", "How", "many", "steps", "to", "run", "the", "aggregating", "algorithm", "on" ]
python
train
27.727273
dhermes/bezier
src/bezier/_algebraic_intersection.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_algebraic_intersection.py#L549-L576
def polynomial_norm(coeffs): r"""Computes :math:`L_2` norm of polynomial on :math:`\left[0, 1\right]`. We have .. math:: \left\langle f, f \right\rangle = \sum_{i, j} \int_0^1 c_i c_j x^{i + j} \, dx = \sum_{i, j} \frac{c_i c_j}{i + j + 1} = \sum_{i} \frac{c_i^2}{2 i + 1} + 2 \sum_{j > i} \frac{c_i c_j}{i + j + 1}. Args: coeffs (numpy.ndarray): ``d + 1``-array of coefficients in monomial / power basis. Returns: float: The :math:`L_2` norm of the polynomial. """ num_coeffs, = coeffs.shape result = 0.0 for i in six.moves.xrange(num_coeffs): coeff_i = coeffs[i] result += coeff_i * coeff_i / (2.0 * i + 1.0) for j in six.moves.xrange(i + 1, num_coeffs): coeff_j = coeffs[j] result += 2.0 * coeff_i * coeff_j / (i + j + 1.0) return np.sqrt(result)
[ "def", "polynomial_norm", "(", "coeffs", ")", ":", "num_coeffs", ",", "=", "coeffs", ".", "shape", "result", "=", "0.0", "for", "i", "in", "six", ".", "moves", ".", "xrange", "(", "num_coeffs", ")", ":", "coeff_i", "=", "coeffs", "[", "i", "]", "result", "+=", "coeff_i", "*", "coeff_i", "/", "(", "2.0", "*", "i", "+", "1.0", ")", "for", "j", "in", "six", ".", "moves", ".", "xrange", "(", "i", "+", "1", ",", "num_coeffs", ")", ":", "coeff_j", "=", "coeffs", "[", "j", "]", "result", "+=", "2.0", "*", "coeff_i", "*", "coeff_j", "/", "(", "i", "+", "j", "+", "1.0", ")", "return", "np", ".", "sqrt", "(", "result", ")" ]
r"""Computes :math:`L_2` norm of polynomial on :math:`\left[0, 1\right]`. We have .. math:: \left\langle f, f \right\rangle = \sum_{i, j} \int_0^1 c_i c_j x^{i + j} \, dx = \sum_{i, j} \frac{c_i c_j}{i + j + 1} = \sum_{i} \frac{c_i^2}{2 i + 1} + 2 \sum_{j > i} \frac{c_i c_j}{i + j + 1}. Args: coeffs (numpy.ndarray): ``d + 1``-array of coefficients in monomial / power basis. Returns: float: The :math:`L_2` norm of the polynomial.
[ "r", "Computes", ":", "math", ":", "L_2", "norm", "of", "polynomial", "on", ":", "math", ":", "\\", "left", "[", "0", "1", "\\", "right", "]", "." ]
python
train
31.535714
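A quick numerical check of the closed form above (this example is ours, not from the source, and assumes polynomial_norm and its numpy/six imports are in scope): for p(x) = 1 + x the inner product on [0, 1] is 1/1 + 2 * (1 * 1)/2 + 1/3 = 7/3, so the norm should be sqrt(7/3).

import numpy as np

coeffs = np.array([1.0, 1.0])    # p(x) = 1 + x in the power basis
expected = np.sqrt(7.0 / 3.0)    # sqrt of the integral of (1 + x)^2 on [0, 1]
assert np.isclose(polynomial_norm(coeffs), expected)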
wuher/devil
devil/docs/resource.py
https://github.com/wuher/devil/blob/a8834d4f88d915a21754c6b96f99d0ad9123ad4d/devil/docs/resource.py#L53-L60
def _validate_output_data( self, original_res, serialized_res, formatted_res, request): """ Override to not validate doc output. """ if self._is_doc_request(request): return else: return super(DocumentedResource, self)._validate_output_data( original_res, serialized_res, formatted_res, request)
[ "def", "_validate_output_data", "(", "self", ",", "original_res", ",", "serialized_res", ",", "formatted_res", ",", "request", ")", ":", "if", "self", ".", "_is_doc_request", "(", "request", ")", ":", "return", "else", ":", "return", "super", "(", "DocumentedResource", ",", "self", ")", ".", "_validate_output_data", "(", "original_res", ",", "serialized_res", ",", "formatted_res", ",", "request", ")" ]
Override to not validate doc output.
[ "Override", "to", "not", "validate", "doc", "output", "." ]
python
train
45
globality-corp/microcosm-flask
microcosm_flask/formatting/csv_formatter.py
https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/formatting/csv_formatter.py#L41-L68
def format(self, response_data): """ Make Flask `Response` object, with data returned as a generator for the CSV content The CSV is built from JSON-like object (Python `dict` or list of `dicts`) """ if "items" in response_data: list_response_data = response_data["items"] else: list_response_data = [response_data] write_column_names = type(list_response_data[0]) not in (tuple, list) output = StringIO() csv_writer = writer(output, quoting=QUOTE_MINIMAL) if write_column_names: column_names = self.get_column_names(list_response_data) csv_writer.writerow(column_names) for item in list_response_data: csv_writer.writerow( [item[column] for column in column_names] if write_column_names else list(item) ) # Ideally we'd want to `yield` each line to stream the content # But something downstream seems to break streaming yield output.getvalue()
[ "def", "format", "(", "self", ",", "response_data", ")", ":", "if", "\"items\"", "in", "response_data", ":", "list_response_data", "=", "response_data", "[", "\"items\"", "]", "else", ":", "list_response_data", "=", "[", "response_data", "]", "write_column_names", "=", "type", "(", "list_response_data", "[", "0", "]", ")", "not", "in", "(", "tuple", ",", "list", ")", "output", "=", "StringIO", "(", ")", "csv_writer", "=", "writer", "(", "output", ",", "quoting", "=", "QUOTE_MINIMAL", ")", "if", "write_column_names", ":", "column_names", "=", "self", ".", "get_column_names", "(", "list_response_data", ")", "csv_writer", ".", "writerow", "(", "column_names", ")", "for", "item", "in", "list_response_data", ":", "csv_writer", ".", "writerow", "(", "[", "item", "[", "column", "]", "for", "column", "in", "column_names", "]", "if", "write_column_names", "else", "list", "(", "item", ")", ")", "# Ideally we'd want to `yield` each line to stream the content", "# But something downstream seems to break streaming", "yield", "output", ".", "getvalue", "(", ")" ]
Make Flask `Response` object, with data returned as a generator for the CSV content. The CSV is built from a JSON-like object (Python `dict` or list of `dicts`)
[ "Make", "Flask", "Response", "object", "with", "data", "returned", "as", "a", "generator", "for", "the", "CSV", "content", "The", "CSV", "is", "built", "from", "JSON", "-", "like", "object", "(", "Python", "dict", "or", "list", "of", "dicts", ")" ]
python
train
36.5
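The same dict-to-CSV approach can be sketched with only the standard library (the payload below is made up for illustration; the real formatter also handles tuple/list rows and a bare dict without an "items" key):

from csv import writer, QUOTE_MINIMAL
from io import StringIO

response_data = {'items': [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]}
rows = response_data['items']

output = StringIO()
csv_writer = writer(output, quoting=QUOTE_MINIMAL)
column_names = sorted(rows[0])    # stable header order for this sketch
csv_writer.writerow(column_names)
for item in rows:
    csv_writer.writerow([item[col] for col in column_names])
print(output.getvalue())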
Jammy2211/PyAutoLens
autolens/plotters/array_plotters.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/plotters/array_plotters.py#L552-L578
def plot_points(points_arcsec, array, units, kpc_per_arcsec, pointsize, zoom_offset_arcsec): """Plot a set of points over the array of data on the figure. Parameters ----------- positions : [[]] Lists of (y,x) coordinates on the image which are plotted as colored dots, to highlight specific pixels. array : data.array.scaled_array.ScaledArray The 2D array of data which is plotted. units : str The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc'). kpc_per_arcsec : float or None The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc. pointsize : int The size of the points plotted to show the input positions. """ if points_arcsec is not None: points_arcsec = list(map(lambda position_set: np.asarray(position_set), points_arcsec)) point_colors = itertools.cycle(["m", "y", "r", "w", "c", "b", "g", "k"]) for point_set_arcsec in points_arcsec: if zoom_offset_arcsec is not None: point_set_arcsec -= zoom_offset_arcsec point_set_units = convert_grid_units(array=array, grid_arcsec=point_set_arcsec, units=units, kpc_per_arcsec=kpc_per_arcsec) plt.scatter(y=point_set_units[:,0], x=point_set_units[:,1], color=next(point_colors), s=pointsize)
[ "def", "plot_points", "(", "points_arcsec", ",", "array", ",", "units", ",", "kpc_per_arcsec", ",", "pointsize", ",", "zoom_offset_arcsec", ")", ":", "if", "points_arcsec", "is", "not", "None", ":", "points_arcsec", "=", "list", "(", "map", "(", "lambda", "position_set", ":", "np", ".", "asarray", "(", "position_set", ")", ",", "points_arcsec", ")", ")", "point_colors", "=", "itertools", ".", "cycle", "(", "[", "\"m\"", ",", "\"y\"", ",", "\"r\"", ",", "\"w\"", ",", "\"c\"", ",", "\"b\"", ",", "\"g\"", ",", "\"k\"", "]", ")", "for", "point_set_arcsec", "in", "points_arcsec", ":", "if", "zoom_offset_arcsec", "is", "not", "None", ":", "point_set_arcsec", "-=", "zoom_offset_arcsec", "point_set_units", "=", "convert_grid_units", "(", "array", "=", "array", ",", "grid_arcsec", "=", "point_set_arcsec", ",", "units", "=", "units", ",", "kpc_per_arcsec", "=", "kpc_per_arcsec", ")", "plt", ".", "scatter", "(", "y", "=", "point_set_units", "[", ":", ",", "0", "]", ",", "x", "=", "point_set_units", "[", ":", ",", "1", "]", ",", "color", "=", "next", "(", "point_colors", ")", ",", "s", "=", "pointsize", ")" ]
Plot a set of points over the array of data on the figure. Parameters ----------- points_arcsec : [[]] Lists of (y,x) coordinates on the image which are plotted as colored dots, to highlight specific pixels. array : data.array.scaled_array.ScaledArray The 2D array of data which is plotted. units : str The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc'). kpc_per_arcsec : float or None The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc. pointsize : int The size of the points plotted to show the input positions.
[ "Plot", "a", "set", "of", "points", "over", "the", "array", "of", "data", "on", "the", "figure", "." ]
python
valid
51.851852
sony/nnabla
python/src/nnabla/monitor.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/monitor.py#L222-L263
def add(self, index, var): """Add a minibatch of images to the monitor. Args: index (int): Index. var (:obj:`~nnabla.Variable`, :obj:`~nnabla.NdArray`, or :obj:`~numpy.ndarray`): A minibatch of images with ``(N, ..., C, H, W)`` format. If C == 2, blue channel is appended with ones. If C > 3, the array will be sliced to remove C > 3 sub-array. """ import nnabla as nn from nnabla.utils.image_utils import imsave if index != 0 and (index + 1) % self.interval != 0: return if isinstance(var, nn.Variable): data = var.d.copy() elif isinstance(var, nn.NdArray): data = var.data.copy() else: assert isinstance(var, np.ndarray) data = var.copy() assert data.ndim > 2 channels = data.shape[-3] data = data.reshape(-1, *data.shape[-3:]) data = data[:min(data.shape[0], self.num_images)] data = self.normalize_method(data) if channels > 3: data = data[:, :3] elif channels == 2: data = np.concatenate( [data, np.ones((data.shape[0], 1) + data.shape[-2:])], axis=1) path_tmpl = os.path.join(self.save_dir, '{:06d}-{}.png') for j in range(min(self.num_images, data.shape[0])): img = data[j].transpose(1, 2, 0) if img.shape[-1] == 1: img = img[..., 0] path = path_tmpl.format(index, '{:03d}'.format(j)) imsave(path, img) if self.verbose: logger.info("iter={} {{{}}} are written to {}.".format( index, self.name, path_tmpl.format(index, '*')))
[ "def", "add", "(", "self", ",", "index", ",", "var", ")", ":", "import", "nnabla", "as", "nn", "from", "nnabla", ".", "utils", ".", "image_utils", "import", "imsave", "if", "index", "!=", "0", "and", "(", "index", "+", "1", ")", "%", "self", ".", "interval", "!=", "0", ":", "return", "if", "isinstance", "(", "var", ",", "nn", ".", "Variable", ")", ":", "data", "=", "var", ".", "d", ".", "copy", "(", ")", "elif", "isinstance", "(", "var", ",", "nn", ".", "NdArray", ")", ":", "data", "=", "var", ".", "data", ".", "copy", "(", ")", "else", ":", "assert", "isinstance", "(", "var", ",", "np", ".", "ndarray", ")", "data", "=", "var", ".", "copy", "(", ")", "assert", "data", ".", "ndim", ">", "2", "channels", "=", "data", ".", "shape", "[", "-", "3", "]", "data", "=", "data", ".", "reshape", "(", "-", "1", ",", "*", "data", ".", "shape", "[", "-", "3", ":", "]", ")", "data", "=", "data", "[", ":", "min", "(", "data", ".", "shape", "[", "0", "]", ",", "self", ".", "num_images", ")", "]", "data", "=", "self", ".", "normalize_method", "(", "data", ")", "if", "channels", ">", "3", ":", "data", "=", "data", "[", ":", ",", ":", "3", "]", "elif", "channels", "==", "2", ":", "data", "=", "np", ".", "concatenate", "(", "[", "data", ",", "np", ".", "ones", "(", "(", "data", ".", "shape", "[", "0", "]", ",", "1", ")", "+", "data", ".", "shape", "[", "-", "2", ":", "]", ")", "]", ",", "axis", "=", "1", ")", "path_tmpl", "=", "os", ".", "path", ".", "join", "(", "self", ".", "save_dir", ",", "'{:06d}-{}.png'", ")", "for", "j", "in", "range", "(", "min", "(", "self", ".", "num_images", ",", "data", ".", "shape", "[", "0", "]", ")", ")", ":", "img", "=", "data", "[", "j", "]", ".", "transpose", "(", "1", ",", "2", ",", "0", ")", "if", "img", ".", "shape", "[", "-", "1", "]", "==", "1", ":", "img", "=", "img", "[", "...", ",", "0", "]", "path", "=", "path_tmpl", ".", "format", "(", "index", ",", "'{:03d}'", ".", "format", "(", "j", ")", ")", "imsave", "(", "path", ",", "img", ")", "if", "self", ".", "verbose", ":", "logger", ".", "info", "(", "\"iter={} {{{}}} are written to {}.\"", ".", "format", "(", "index", ",", "self", ".", "name", ",", "path_tmpl", ".", "format", "(", "index", ",", "'*'", ")", ")", ")" ]
Add a minibatch of images to the monitor. Args: index (int): Index. var (:obj:`~nnabla.Variable`, :obj:`~nnabla.NdArray`, or :obj:`~numpy.ndarray`): A minibatch of images with ``(N, ..., C, H, W)`` format. If C == 2, blue channel is appended with ones. If C > 3, the array will be sliced to remove C > 3 sub-array.
[ "Add", "a", "minibatch", "of", "images", "to", "the", "monitor", "." ]
python
train
40.666667
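A hedged usage sketch for the add() method above, assuming it belongs to nnabla's MonitorImage (the output directory and the random batch are illustrative):

import numpy as np
from nnabla.monitor import Monitor, MonitorImage

monitor = Monitor('/tmp/nnabla_monitor')
image_monitor = MonitorImage('images', monitor, interval=1, num_images=4)

batch = np.random.rand(4, 3, 32, 32)   # (N, C, H, W) minibatch in [0, 1]
image_monitor.add(0, batch)            # writes PNGs under /tmp/nnabla_monitor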
lehins/python-wepay
wepay/calls/subscription_plan.py
https://github.com/lehins/python-wepay/blob/414d25a1a8d0ecb22a3ddd1f16c60b805bb52a1f/wepay/calls/subscription_plan.py#L118-L141
def __get_button(self, account_id, button_type, **kwargs): """Call documentation: `/subscription_plan/get_button <https://www.wepay.com/developer/reference/subscription_plan#get_button>`_, plus extra keyword parameters: :keyword str access_token: will be used instead of instance's ``access_token``, with ``batch_mode=True`` will set `authorization` param to it's value. :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay` """ params = { 'account_id': account_id, 'button_type': button_type } return self.make_call(self.__get_button, params, kwargs)
[ "def", "__get_button", "(", "self", ",", "account_id", ",", "button_type", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'account_id'", ":", "account_id", ",", "'button_type'", ":", "button_type", "}", "return", "self", ".", "make_call", "(", "self", ".", "__get_button", ",", "params", ",", "kwargs", ")" ]
Call documentation: `/subscription_plan/get_button <https://www.wepay.com/developer/reference/subscription_plan#get_button>`_, plus extra keyword parameters: :keyword str access_token: will be used instead of instance's ``access_token``, with ``batch_mode=True`` will set `authorization` param to its value. :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay`
[ "Call", "documentation", ":", "/", "subscription_plan", "/", "get_button", "<https", ":", "//", "www", ".", "wepay", ".", "com", "/", "developer", "/", "reference", "/", "subscription_plan#get_button", ">", "_", "plus", "extra", "keyword", "parameters", ":", ":", "keyword", "str", "access_token", ":", "will", "be", "used", "instead", "of", "instance", "s", "access_token", "with", "batch_mode", "=", "True", "will", "set", "authorization", "param", "to", "its", "value", "." ]
python
train
37.958333
edx/edx-enterprise
enterprise/utils.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/utils.py#L446-L467
def update_query_parameters(url, query_parameters): """ Return url with updated query parameters. Arguments: url (str): Original url whose query parameters need to be updated. query_parameters (dict): A dictionary containing query parameters to be added to course selection url. Returns: (slug): slug identifier for the identity provider that can be used for identity verification of users associated the enterprise customer of the given user. """ scheme, netloc, path, query_string, fragment = urlsplit(url) url_params = parse_qs(query_string) # Update url query parameters url_params.update(query_parameters) return urlunsplit( (scheme, netloc, path, urlencode(sorted(url_params.items()), doseq=True), fragment), )
[ "def", "update_query_parameters", "(", "url", ",", "query_parameters", ")", ":", "scheme", ",", "netloc", ",", "path", ",", "query_string", ",", "fragment", "=", "urlsplit", "(", "url", ")", "url_params", "=", "parse_qs", "(", "query_string", ")", "# Update url query parameters", "url_params", ".", "update", "(", "query_parameters", ")", "return", "urlunsplit", "(", "(", "scheme", ",", "netloc", ",", "path", ",", "urlencode", "(", "sorted", "(", "url_params", ".", "items", "(", ")", ")", ",", "doseq", "=", "True", ")", ",", "fragment", ")", ",", ")" ]
Return url with updated query parameters. Arguments: url (str): Original url whose query parameters need to be updated. query_parameters (dict): A dictionary containing query parameters to be added to course selection url. Returns: (str): Url with the given query parameters added or updated.
[ "Return", "url", "with", "updated", "query", "parameters", "." ]
python
valid
35.818182
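A small usage sketch (the URL and parameters are made up). Because the function merges into a parse_qs-style dict, new values should be passed as lists:

url = 'https://example.com/courses?page=1'
print(update_query_parameters(url, {'page': ['2'], 'search': ['math']}))
# -> https://example.com/courses?page=2&search=math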
henzk/ape
ape/__init__.py
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/__init__.py#L119-L127
def get_tasks(self): """ Return tasks as list of (name, function) tuples. """ def predicate(item): return (inspect.isfunction(item) and item.__name__ not in self._helper_names) return inspect.getmembers(self._tasks, predicate)
[ "def", "get_tasks", "(", "self", ")", ":", "def", "predicate", "(", "item", ")", ":", "return", "(", "inspect", ".", "isfunction", "(", "item", ")", "and", "item", ".", "__name__", "not", "in", "self", ".", "_helper_names", ")", "return", "inspect", ".", "getmembers", "(", "self", ".", "_tasks", ",", "predicate", ")" ]
Return tasks as list of (name, function) tuples.
[ "Return", "tasks", "as", "list", "of", "(", "name", "function", ")", "tuples", "." ]
python
train
32.333333
MacHu-GWU/angora-project
angora/bot/anjian.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/bot/anjian.py#L91-L98
def _delay(self, ms): """Implement default delay mechanism. """ if ms: self.Delay(ms) else: if self.default_delay: self.Delay(self.default_delay)
[ "def", "_delay", "(", "self", ",", "ms", ")", ":", "if", "ms", ":", "self", ".", "Delay", "(", "ms", ")", "else", ":", "if", "self", ".", "default_delay", ":", "self", ".", "Delay", "(", "self", ".", "default_delay", ")" ]
Implement default delay mechanism.
[ "Implement", "default", "delay", "mechanism", "." ]
python
train
26.25
jtwhite79/pyemu
pyemu/en.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/en.py#L168-L196
def plot(self,bins=10,facecolor='0.5',plot_cols=None, filename="ensemble.pdf",func_dict = None, **kwargs): """plot ensemble histograms to multipage pdf Parameters ---------- bins : int number of bins facecolor : str color plot_cols : list of str subset of ensemble columns to plot. If None, all are plotted. Default is None filename : str pdf filename. Default is "ensemble.pdf" func_dict : dict a dict of functions to apply to specific columns (e.g., np.log10) **kwargs : dict keyword args to pass to plot_utils.ensemble_helper() Returns ------- None """ ensemble_helper(self,bins=bins,facecolor=facecolor,plot_cols=plot_cols, filename=filename)
[ "def", "plot", "(", "self", ",", "bins", "=", "10", ",", "facecolor", "=", "'0.5'", ",", "plot_cols", "=", "None", ",", "filename", "=", "\"ensemble.pdf\"", ",", "func_dict", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ensemble_helper", "(", "self", ",", "bins", "=", "bins", ",", "facecolor", "=", "facecolor", ",", "plot_cols", "=", "plot_cols", ",", "filename", "=", "filename", ")" ]
plot ensemble histograms to multipage pdf Parameters ---------- bins : int number of bins facecolor : str color plot_cols : list of str subset of ensemble columns to plot. If None, all are plotted. Default is None filename : str pdf filename. Default is "ensemble.pdf" func_dict : dict a dict of functions to apply to specific columns (e.g., np.log10) **kwargs : dict keyword args to pass to plot_utils.ensemble_helper() Returns ------- None
[ "plot", "ensemble", "histograms", "to", "multipage", "pdf" ]
python
train
30.448276
MisterWil/abodepy
abodepy/devices/camera.py
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/camera.py#L50-L89
def update_image_location(self, timeline_json):
        """Update the image location."""
        if not timeline_json:
            return False

        # If we get a list of objects back (likely)
        # then we just want the first one as it should be the "newest"
        if isinstance(timeline_json, (tuple, list)):
            timeline_json = timeline_json[0]

        # Verify that the event code is of the "CAPTURE IMAGE" event
        event_code = timeline_json.get('event_code')

        if event_code != TIMELINE.CAPTURE_IMAGE['event_code']:
            raise AbodeException((ERROR.CAM_TIMELINE_EVENT_INVALID))

        # The timeline response has an entry for "file_path" that acts as the
        # location of the image within the Abode servers.
        file_path = timeline_json.get('file_path')

        if not file_path:
            raise AbodeException((ERROR.CAM_IMAGE_REFRESH_NO_FILE))

        # Perform a "head" request for the image and look for a
        # 302 Found response
        url = CONST.BASE_URL + file_path
        response = self._abode.send_request("head", url)

        if response.status_code != 302:
            _LOGGER.warning("Unexpected response code %s with body: %s",
                            str(response.status_code), response.text)
            raise AbodeException((ERROR.CAM_IMAGE_UNEXPECTED_RESPONSE))

        # The response should have a location header that is the actual
        # location of the image stored on AWS
        location = response.headers.get('location')

        if not location:
            raise AbodeException((ERROR.CAM_IMAGE_NO_LOCATION_HEADER))

        self._image_url = location

        return True
[ "def", "update_image_location", "(", "self", ",", "timeline_json", ")", ":", "if", "not", "timeline_json", ":", "return", "False", "# If we get a list of objects back (likely)", "# then we just want the first one as it should be the \"newest\"", "if", "isinstance", "(", "timeline_json", ",", "(", "tuple", ",", "list", ")", ")", ":", "timeline_json", "=", "timeline_json", "[", "0", "]", "# Verify that the event code is of the \"CAPTURE IMAGE\" event", "event_code", "=", "timeline_json", ".", "get", "(", "'event_code'", ")", "if", "event_code", "!=", "TIMELINE", ".", "CAPTURE_IMAGE", "[", "'event_code'", "]", ":", "raise", "AbodeException", "(", "(", "ERROR", ".", "CAM_TIMELINE_EVENT_INVALID", ")", ")", "# The timeline response has an entry for \"file_path\" that acts as the", "# location of the image within the Abode servers.", "file_path", "=", "timeline_json", ".", "get", "(", "'file_path'", ")", "if", "not", "file_path", ":", "raise", "AbodeException", "(", "(", "ERROR", ".", "CAM_IMAGE_REFRESH_NO_FILE", ")", ")", "# Perform a \"head\" request for the image and look for a", "# 302 Found response", "url", "=", "CONST", ".", "BASE_URL", "+", "file_path", "response", "=", "self", ".", "_abode", ".", "send_request", "(", "\"head\"", ",", "url", ")", "if", "response", ".", "status_code", "!=", "302", ":", "_LOGGER", ".", "warning", "(", "\"Unexected response code %s with body: %s\"", ",", "str", "(", "response", ".", "status_code", ")", ",", "response", ".", "text", ")", "raise", "AbodeException", "(", "(", "ERROR", ".", "CAM_IMAGE_UNEXPECTED_RESPONSE", ")", ")", "# The response should have a location header that is the actual", "# location of the image stored on AWS", "location", "=", "response", ".", "headers", ".", "get", "(", "'location'", ")", "if", "not", "location", ":", "raise", "AbodeException", "(", "(", "ERROR", ".", "CAM_IMAGE_NO_LOCATION_HEADER", ")", ")", "self", ".", "_image_url", "=", "location", "return", "True" ]
Update the image location.
[ "Update", "the", "image", "location", "." ]
python
train
40.85
vinta/haul
haul/extenders/pipeline/google.py
https://github.com/vinta/haul/blob/234024ab8452ea2f41b18561377295cf2879fb20/haul/extenders/pipeline/google.py#L33-L57
def ggpht_s1600_extender(pipeline_index, finder_image_urls, extender_image_urls=[], *args, **kwargs): """ Example: http://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s640/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg to http://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s1600/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg """ now_extender_image_urls = [] search_re = re.compile(r'/s\d+/', re.IGNORECASE) for image_url in finder_image_urls: if 'ggpht.com/' in image_url.lower(): if search_re.search(image_url): extender_image_url = search_re.sub('/s1600/', image_url) now_extender_image_urls.append(extender_image_url) output = {} output['extender_image_urls'] = extender_image_urls + now_extender_image_urls return output
[ "def", "ggpht_s1600_extender", "(", "pipeline_index", ",", "finder_image_urls", ",", "extender_image_urls", "=", "[", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "now_extender_image_urls", "=", "[", "]", "search_re", "=", "re", ".", "compile", "(", "r'/s\\d+/'", ",", "re", ".", "IGNORECASE", ")", "for", "image_url", "in", "finder_image_urls", ":", "if", "'ggpht.com/'", "in", "image_url", ".", "lower", "(", ")", ":", "if", "search_re", ".", "search", "(", "image_url", ")", ":", "extender_image_url", "=", "search_re", ".", "sub", "(", "'/s1600/'", ",", "image_url", ")", "now_extender_image_urls", ".", "append", "(", "extender_image_url", ")", "output", "=", "{", "}", "output", "[", "'extender_image_urls'", "]", "=", "extender_image_urls", "+", "now_extender_image_urls", "return", "output" ]
Example: http://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s640/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg to http://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s1600/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg
[ "Example", ":", "http", ":", "//", "lh4", ".", "ggpht", ".", "com", "/", "-", "fFi", "-", "qJRuxeY", "/", "UjwHSOTHGOI", "/", "AAAAAAAArgE", "/", "SWTMT", "-", "hXzB4", "/", "s640", "/", "Celeber", "-", "ru", "-", "Emma", "-", "Watson", "-", "Net", "-", "A", "-", "Porter", "-", "The", "-", "Edit", "-", "Magazine", "-", "Photoshoot", "-", "2013", "-", "01", ".", "jpg", "to", "http", ":", "//", "lh4", ".", "ggpht", ".", "com", "/", "-", "fFi", "-", "qJRuxeY", "/", "UjwHSOTHGOI", "/", "AAAAAAAArgE", "/", "SWTMT", "-", "hXzB4", "/", "s1600", "/", "Celeber", "-", "ru", "-", "Emma", "-", "Watson", "-", "Net", "-", "A", "-", "Porter", "-", "The", "-", "Edit", "-", "Magazine", "-", "Photoshoot", "-", "2013", "-", "01", ".", "jpg" ]
python
valid
39.12
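The size-segment rewrite at the heart of this extender, shown in isolation (the URL below is a shortened, illustrative variant of the one in the docstring):

import re

search_re = re.compile(r'/s\d+/', re.IGNORECASE)
url = 'http://lh4.ggpht.com/-fFi-qJRuxeY/s640/photo.jpg'
print(search_re.sub('/s1600/', url))
# -> http://lh4.ggpht.com/-fFi-qJRuxeY/s1600/photo.jpg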
signalfx/signalfx-python
signalfx/rest.py
https://github.com/signalfx/signalfx-python/blob/650eb9a2b301bcc795e4e3a8c031574ade69849d/signalfx/rest.py#L296-L308
def get_tag(self, tag_name, **kwargs): """get a tag by name Args: tag_name (string): name of tag to get Returns: dictionary of the response """ return self._get_object_by_name(self._TAG_ENDPOINT_SUFFIX, tag_name, **kwargs)
[ "def", "get_tag", "(", "self", ",", "tag_name", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_object_by_name", "(", "self", ".", "_TAG_ENDPOINT_SUFFIX", ",", "tag_name", ",", "*", "*", "kwargs", ")" ]
get a tag by name Args: tag_name (string): name of tag to get Returns: dictionary of the response
[ "get", "a", "tag", "by", "name" ]
python
train
27.461538
sendgrid/sendgrid-python
sendgrid/helpers/mail/mail.py
https://github.com/sendgrid/sendgrid-python/blob/266c2abde7a35dfcce263e06bedc6a0bbdebeac9/sendgrid/helpers/mail/mail.py#L656-L666
def from_email(self, value): """The email address of the sender :param value: The email address of the sender :type value: From, str, tuple """ if isinstance(value, str): value = From(value, None) if isinstance(value, tuple): value = From(value[0], value[1]) self._from_email = value
[ "def", "from_email", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "value", "=", "From", "(", "value", ",", "None", ")", "if", "isinstance", "(", "value", ",", "tuple", ")", ":", "value", "=", "From", "(", "value", "[", "0", "]", ",", "value", "[", "1", "]", ")", "self", ".", "_from_email", "=", "value" ]
The email address of the sender :param value: The email address of the sender :type value: From, str, tuple
[ "The", "email", "address", "of", "the", "sender" ]
python
train
32.181818
google/grr
grr/core/grr_response_core/lib/type_info.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/type_info.py#L253-L260
def Add(self, other): """Returns a copy of this set with a new element added.""" new_descriptors = [] for desc in self.descriptors + other.descriptors: if desc not in new_descriptors: new_descriptors.append(desc) return TypeDescriptorSet(*new_descriptors)
[ "def", "Add", "(", "self", ",", "other", ")", ":", "new_descriptors", "=", "[", "]", "for", "desc", "in", "self", ".", "descriptors", "+", "other", ".", "descriptors", ":", "if", "desc", "not", "in", "new_descriptors", ":", "new_descriptors", ".", "append", "(", "desc", ")", "return", "TypeDescriptorSet", "(", "*", "new_descriptors", ")" ]
Returns a copy of this set with a new element added.
[ "Returns", "a", "copy", "of", "this", "set", "with", "a", "new", "element", "added", "." ]
python
train
34.875
apache/incubator-heron
heron/instance/src/python/utils/misc/custom_grouping_helper.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/instance/src/python/utils/misc/custom_grouping_helper.py#L52-L60
def choose_tasks(self, stream_id, values): """Choose tasks for a given stream_id and values and Returns a list of target tasks""" if stream_id not in self.targets: return [] ret = [] for target in self.targets[stream_id]: ret.extend(target.choose_tasks(values)) return ret
[ "def", "choose_tasks", "(", "self", ",", "stream_id", ",", "values", ")", ":", "if", "stream_id", "not", "in", "self", ".", "targets", ":", "return", "[", "]", "ret", "=", "[", "]", "for", "target", "in", "self", ".", "targets", "[", "stream_id", "]", ":", "ret", ".", "extend", "(", "target", ".", "choose_tasks", "(", "values", ")", ")", "return", "ret" ]
Choose tasks for a given stream_id and values and return a list of target tasks
[ "Choose", "tasks", "for", "a", "given", "stream_id", "and", "values", "and", "return", "a", "list", "of", "target", "tasks" ]
python
valid
33
bcbio/bcbio-nextgen
bcbio/variation/vfilter.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vfilter.py#L135-L147
def _average_called_depth(in_file): """Retrieve the average depth of called reads in the provided VCF. """ import cyvcf2 depths = [] for rec in cyvcf2.VCF(str(in_file)): d = rec.INFO.get("DP") if d is not None: depths.append(int(d)) if len(depths) > 0: return int(math.ceil(numpy.mean(depths))) else: return 0
[ "def", "_average_called_depth", "(", "in_file", ")", ":", "import", "cyvcf2", "depths", "=", "[", "]", "for", "rec", "in", "cyvcf2", ".", "VCF", "(", "str", "(", "in_file", ")", ")", ":", "d", "=", "rec", ".", "INFO", ".", "get", "(", "\"DP\"", ")", "if", "d", "is", "not", "None", ":", "depths", ".", "append", "(", "int", "(", "d", ")", ")", "if", "len", "(", "depths", ")", ">", "0", ":", "return", "int", "(", "math", ".", "ceil", "(", "numpy", ".", "mean", "(", "depths", ")", ")", ")", "else", ":", "return", "0" ]
Retrieve the average depth of called reads in the provided VCF.
[ "Retrieve", "the", "average", "depth", "of", "called", "reads", "in", "the", "provided", "VCF", "." ]
python
train
28.384615
jorisroovers/pymarkdownlint
pymarkdownlint/filefinder.py
https://github.com/jorisroovers/pymarkdownlint/blob/c1044e25e18afd78b3fda8fd9b00a4f67cfbbc65/pymarkdownlint/filefinder.py#L7-L17
def find_files(path, filter="*.md"): """ Finds files with an (optional) given extension in a given path. """ if os.path.isfile(path): return [path] if os.path.isdir(path): matches = [] for root, dirnames, filenames in os.walk(path): for filename in fnmatch.filter(filenames, filter): matches.append(os.path.join(root, filename)) return matches
[ "def", "find_files", "(", "path", ",", "filter", "=", "\"*.md\"", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "return", "[", "path", "]", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "matches", "=", "[", "]", "for", "root", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "filename", "in", "fnmatch", ".", "filter", "(", "filenames", ",", "filter", ")", ":", "matches", ".", "append", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", "return", "matches" ]
Finds files matching an (optional) glob filter in a given path.
[ "Finds", "files", "matching", "an", "(", "optional", ")", "glob", "filter", "in", "a", "given", "path", "." ]
python
train
40.181818
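A usage sketch (the paths are illustrative): find_files accepts either a single file or a directory to walk recursively.

print(find_files('README.md'))             # -> ['README.md'] if the file exists
print(find_files('docs/', filter='*.md'))  # all .md files anywhere under docs/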
workforce-data-initiative/skills-utils
skills_utils/s3.py
https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/s3.py#L44-L63
def upload_dict(s3_conn, s3_prefix, data_to_sync): """Syncs a dictionary to an S3 bucket, serializing each value in the dictionary as a JSON file with the key as its name. Args: s3_conn: (boto.s3.connection) an s3 connection s3_prefix: (str) the destination prefix data_to_sync: (dict) """ bucket_name, prefix = split_s3_path(s3_prefix) bucket = s3_conn.get_bucket(bucket_name) for key, value in data_to_sync.items(): full_name = '{}/{}.json'.format(prefix, key) s3_key = boto.s3.key.Key( bucket=bucket, name=full_name ) logging.info('uploading key %s', full_name) s3_key.set_contents_from_string(json.dumps(value))
[ "def", "upload_dict", "(", "s3_conn", ",", "s3_prefix", ",", "data_to_sync", ")", ":", "bucket_name", ",", "prefix", "=", "split_s3_path", "(", "s3_prefix", ")", "bucket", "=", "s3_conn", ".", "get_bucket", "(", "bucket_name", ")", "for", "key", ",", "value", "in", "data_to_sync", ".", "items", "(", ")", ":", "full_name", "=", "'{}/{}.json'", ".", "format", "(", "prefix", ",", "key", ")", "s3_key", "=", "boto", ".", "s3", ".", "key", ".", "Key", "(", "bucket", "=", "bucket", ",", "name", "=", "full_name", ")", "logging", ".", "info", "(", "'uploading key %s'", ",", "full_name", ")", "s3_key", ".", "set_contents_from_string", "(", "json", ".", "dumps", "(", "value", ")", ")" ]
Syncs a dictionary to an S3 bucket, serializing each value in the dictionary as a JSON file with the key as its name. Args: s3_conn: (boto.s3.connection) an s3 connection s3_prefix: (str) the destination prefix data_to_sync: (dict)
[ "Syncs", "a", "dictionary", "to", "an", "S3", "bucket", "serializing", "each", "value", "in", "the", "dictionary", "as", "a", "JSON", "file", "with", "the", "key", "as", "its", "name", "." ]
python
train
35.7
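A hedged usage sketch with the legacy boto (v2) API this helper expects; the bucket name and payload are made up, and we assume split_s3_path accepts an s3:// URL:

import boto

s3_conn = boto.connect_s3()
upload_dict(s3_conn, 's3://my-bucket/configs',
            {'model': {'version': 1}, 'labels': ['a', 'b']})
# writes s3://my-bucket/configs/model.json and .../labels.json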
authomatic/authomatic
authomatic/providers/oauth1.py
https://github.com/authomatic/authomatic/blob/90a9ce60cc405ae8a2bf5c3713acd5d78579a04e/authomatic/providers/oauth1.py#L95-L102
def _create_base_string(method, base, params): """ Returns base string for HMAC-SHA1 signature as specified in: http://oauth.net/core/1.0a/#rfc.section.9.1.3. """ normalized_qs = _normalize_params(params) return _join_by_ampersand(method, base, normalized_qs)
[ "def", "_create_base_string", "(", "method", ",", "base", ",", "params", ")", ":", "normalized_qs", "=", "_normalize_params", "(", "params", ")", "return", "_join_by_ampersand", "(", "method", ",", "base", ",", "normalized_qs", ")" ]
Returns base string for HMAC-SHA1 signature as specified in: http://oauth.net/core/1.0a/#rfc.section.9.1.3.
[ "Returns", "base", "string", "for", "HMAC", "-", "SHA1", "signature", "as", "specified", "in", ":", "http", ":", "//", "oauth", ".", "net", "/", "core", "/", "1", ".", "0a", "/", "#rfc", ".", "section", ".", "9", ".", "1", ".", "3", "." ]
python
test
34.625
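The two helpers it calls are module-private, so here is a minimal standalone sketch of the same base-string construction (our simplification of RFC 5849 section 3.4.1: parameters are percent-encoded and sorted into a query string, then method, base URL, and query are percent-encoded and joined by ampersands):

from urllib.parse import quote

def base_string(method, base_url, params):
    # Normalize parameters: percent-encode keys/values, sort, join with '&'.
    normalized = '&'.join(
        '{0}={1}'.format(quote(str(k), safe=''), quote(str(v), safe=''))
        for k, v in sorted(params.items()))
    # Join the three percent-encoded parts with ampersands.
    return '&'.join(quote(part, safe='')
                    for part in (method.upper(), base_url, normalized))

print(base_string('GET', 'http://example.com/request',
                  {'oauth_nonce': 'abc', 'oauth_timestamp': '137131201'}))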
MartinThoma/hwrt
hwrt/preprocessing.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/preprocessing.py#L43-L58
def get_preprocessing_queue(preprocessing_list): """Get preprocessing queue from a list of dictionaries >>> l = [{'RemoveDuplicateTime': None}, {'ScaleAndShift': [{'center': True}]} ] >>> get_preprocessing_queue(l) [RemoveDuplicateTime, ScaleAndShift - center: True - max_width: 1 - max_height: 1 ] """ return utils.get_objectlist(preprocessing_list, config_key='preprocessing', module=sys.modules[__name__])
[ "def", "get_preprocessing_queue", "(", "preprocessing_list", ")", ":", "return", "utils", ".", "get_objectlist", "(", "preprocessing_list", ",", "config_key", "=", "'preprocessing'", ",", "module", "=", "sys", ".", "modules", "[", "__name__", "]", ")" ]
Get preprocessing queue from a list of dictionaries >>> l = [{'RemoveDuplicateTime': None}, {'ScaleAndShift': [{'center': True}]} ] >>> get_preprocessing_queue(l) [RemoveDuplicateTime, ScaleAndShift - center: True - max_width: 1 - max_height: 1 ]
[ "Get", "preprocessing", "queue", "from", "a", "list", "of", "dictionaries" ]
python
train
32.875
nanoporetech/ont_fast5_api
ont_fast5_api/fast5_file.py
https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_file.py#L288-L313
def add_chain(self, group_name, component_map): """ Adds the component chain to ``group_name`` in the fast5. These are added as attributes to the group. :param group_name: The group name you wish to add chaining data to, e.g. ``Test_000`` :param component_map: The set of components and corresponding group names or group paths that contribute data to the analysis. If group names are provided, these will be converted into group paths. If ``Test_000`` uses data from the results of ``first_component`` stored at ``Analyses/First_000/`` the component_map could be ``{'first_component': 'First_000'}`` or ``{'first_component': 'Analyses/First_000'}``. """ self.assert_writeable() for component, path in component_map.items(): if not path.startswith('Analyses/'): path = 'Analyses/{}'.format(path) component_map[component] = path self.add_analysis_attributes(group_name, component_map)
[ "def", "add_chain", "(", "self", ",", "group_name", ",", "component_map", ")", ":", "self", ".", "assert_writeable", "(", ")", "for", "component", ",", "path", "in", "component_map", ".", "items", "(", ")", ":", "if", "not", "path", ".", "startswith", "(", "'Analyses/'", ")", ":", "path", "=", "'Analyses/{}'", ".", "format", "(", "path", ")", "component_map", "[", "component", "]", "=", "path", "self", ".", "add_analysis_attributes", "(", "group_name", ",", "component_map", ")" ]
Adds the component chain to ``group_name`` in the fast5. These are added as attributes to the group. :param group_name: The group name you wish to add chaining data to, e.g. ``Test_000`` :param component_map: The set of components and corresponding group names or group paths that contribute data to the analysis. If group names are provided, these will be converted into group paths. If ``Test_000`` uses data from the results of ``first_component`` stored at ``Analyses/First_000/`` the component_map could be ``{'first_component': 'First_000'}`` or ``{'first_component': 'Analyses/First_000'}``.
[ "Adds", "the", "component", "chain", "to", "group_name", "in", "the", "fast5", ".", "These", "are", "added", "as", "attributes", "to", "the", "group", "." ]
python
train
41.192308
keans/lmnotify
lmnotify/lmnotify.py
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L616-L622
def alarm_disable(self): """ disable the alarm """ log.debug("alarm => disable...") params = {"enabled": False} self._app_exec("com.lametric.clock", "clock.alarm", params=params)
[ "def", "alarm_disable", "(", "self", ")", ":", "log", ".", "debug", "(", "\"alarm => disable...\"", ")", "params", "=", "{", "\"enabled\"", ":", "False", "}", "self", ".", "_app_exec", "(", "\"com.lametric.clock\"", ",", "\"clock.alarm\"", ",", "params", "=", "params", ")" ]
disable the alarm
[ "disable", "the", "alarm" ]
python
train
31.428571
mikedh/trimesh
trimesh/path/path.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/path.py#L1136-L1146
def length(self): """ The total discretized length of every entity. Returns -------- length: float, summed length of every entity """ length = float(sum(i.length(self.vertices) for i in self.entities)) return length
[ "def", "length", "(", "self", ")", ":", "length", "=", "float", "(", "sum", "(", "i", ".", "length", "(", "self", ".", "vertices", ")", "for", "i", "in", "self", ".", "entities", ")", ")", "return", "length" ]
The total discretized length of every entity. Returns -------- length: float, summed length of every entity
[ "The", "total", "discretized", "length", "of", "every", "entity", "." ]
python
train
27
tensorflow/tensor2tensor
tensor2tensor/trax/trax.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L218-L228
def log_metrics(metrics, summ_writer, log_prefix, step, history=None): """Log metrics to summary writer and history.""" rjust_len = max([len(name) for name in metrics]) for name, value in six.iteritems(metrics): step_log(step, "%s %s | % .8f" % ( log_prefix.ljust(5), name.rjust(rjust_len), value)) full_name = "metrics/" + name if history: history.append(log_prefix, full_name, step, value) if summ_writer: summ_writer.scalar(full_name, value, step)
[ "def", "log_metrics", "(", "metrics", ",", "summ_writer", ",", "log_prefix", ",", "step", ",", "history", "=", "None", ")", ":", "rjust_len", "=", "max", "(", "[", "len", "(", "name", ")", "for", "name", "in", "metrics", "]", ")", "for", "name", ",", "value", "in", "six", ".", "iteritems", "(", "metrics", ")", ":", "step_log", "(", "step", ",", "\"%s %s | % .8f\"", "%", "(", "log_prefix", ".", "ljust", "(", "5", ")", ",", "name", ".", "rjust", "(", "rjust_len", ")", ",", "value", ")", ")", "full_name", "=", "\"metrics/\"", "+", "name", "if", "history", ":", "history", ".", "append", "(", "log_prefix", ",", "full_name", ",", "step", ",", "value", ")", "if", "summ_writer", ":", "summ_writer", ".", "scalar", "(", "full_name", ",", "value", ",", "step", ")" ]
Log metrics to summary writer and history.
[ "Log", "metrics", "to", "summary", "writer", "and", "history", "." ]
python
train
43.818182
n1analytics/python-paillier
phe/paillier.py
https://github.com/n1analytics/python-paillier/blob/955f8c0bfa9623be15b75462b121d28acf70f04b/phe/paillier.py#L144-L174
def encrypt(self, value, precision=None, r_value=None): """Encode and Paillier encrypt a real number *value*. Args: value: an int or float to be encrypted. If int, it must satisfy abs(*value*) < :attr:`n`/3. If float, it must satisfy abs(*value* / *precision*) << :attr:`n`/3 (i.e. if a float is near the limit then detectable overflow may still occur) precision (float): Passed to :meth:`EncodedNumber.encode`. If *value* is a float then *precision* is the maximum **absolute** error allowed when encoding *value*. Defaults to encoding *value* exactly. r_value (int): obfuscator for the ciphertext; by default (i.e. if *r_value* is None), a random value is used. Returns: EncryptedNumber: An encryption of *value*. Raises: ValueError: if *value* is out of range or *precision* is so high that *value* is rounded to zero. """ if isinstance(value, EncodedNumber): encoding = value else: encoding = EncodedNumber.encode(self, value, precision) return self.encrypt_encoded(encoding, r_value)
[ "def", "encrypt", "(", "self", ",", "value", ",", "precision", "=", "None", ",", "r_value", "=", "None", ")", ":", "if", "isinstance", "(", "value", ",", "EncodedNumber", ")", ":", "encoding", "=", "value", "else", ":", "encoding", "=", "EncodedNumber", ".", "encode", "(", "self", ",", "value", ",", "precision", ")", "return", "self", ".", "encrypt_encoded", "(", "encoding", ",", "r_value", ")" ]
Encode and Paillier encrypt a real number *value*. Args: value: an int or float to be encrypted. If int, it must satisfy abs(*value*) < :attr:`n`/3. If float, it must satisfy abs(*value* / *precision*) << :attr:`n`/3 (i.e. if a float is near the limit then detectable overflow may still occur) precision (float): Passed to :meth:`EncodedNumber.encode`. If *value* is a float then *precision* is the maximum **absolute** error allowed when encoding *value*. Defaults to encoding *value* exactly. r_value (int): obfuscator for the ciphertext; by default (i.e. if *r_value* is None), a random value is used. Returns: EncryptedNumber: An encryption of *value*. Raises: ValueError: if *value* is out of range or *precision* is so high that *value* is rounded to zero.
[ "Encode", "and", "Paillier", "encrypt", "a", "real", "number", "*", "value", "*", "." ]
python
train
39.387097
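A usage sketch via the public phe API that wraps this method (the values are illustrative):

from phe import paillier

public_key, private_key = paillier.generate_paillier_keypair()
encrypted = public_key.encrypt(3.141592653589793, precision=1e-10)
# Decryption should recover the value to within the requested precision.
assert abs(private_key.decrypt(encrypted) - 3.141592653589793) < 1e-9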
incf-nidash/nidmresults
nidmresults/objects/contrast.py
https://github.com/incf-nidash/nidmresults/blob/438f7cce6abc4a4379b629bd76f4d427891e033f/nidmresults/objects/contrast.py#L181-L203
def export(self, nidm_version, export_dir): """ Create prov graph. """ # Contrast Map entity atts = ( (PROV['type'], NIDM_CONTRAST_MAP), (NIDM_CONTRAST_NAME, self.name)) if not self.isderfrommap: atts = atts + ( (NIDM_IN_COORDINATE_SPACE, self.coord_space.id),) if self.label is not None: atts = atts + ( (PROV['label'], self.label),) if self.name is not None: atts = atts + ( (NIDM_CONTRAST_NAME, self.name),) # Parameter estimate entity self.add_attributes(atts)
[ "def", "export", "(", "self", ",", "nidm_version", ",", "export_dir", ")", ":", "# Contrast Map entity", "atts", "=", "(", "(", "PROV", "[", "'type'", "]", ",", "NIDM_CONTRAST_MAP", ")", ",", "(", "NIDM_CONTRAST_NAME", ",", "self", ".", "name", ")", ")", "if", "not", "self", ".", "isderfrommap", ":", "atts", "=", "atts", "+", "(", "(", "NIDM_IN_COORDINATE_SPACE", ",", "self", ".", "coord_space", ".", "id", ")", ",", ")", "if", "self", ".", "label", "is", "not", "None", ":", "atts", "=", "atts", "+", "(", "(", "PROV", "[", "'label'", "]", ",", "self", ".", "label", ")", ",", ")", "if", "self", ".", "name", "is", "not", "None", ":", "atts", "=", "atts", "+", "(", "(", "NIDM_CONTRAST_NAME", ",", "self", ".", "name", ")", ",", ")", "# Parameter estimate entity", "self", ".", "add_attributes", "(", "atts", ")" ]
Create prov graph.
[ "Create", "prov", "graph", "." ]
python
train
27.565217
openstax/cnx-publishing
cnxpublishing/publish.py
https://github.com/openstax/cnx-publishing/blob/f55b4a2c45d8618737288f1b74b4139d5ac74154/cnxpublishing/publish.py#L236-L252
def _insert_file(cursor, file, media_type): """Upsert the ``file`` and ``media_type`` into the files table. Returns the ``fileid`` and ``sha1`` of the upserted file. """ resource_hash = _get_file_sha1(file) cursor.execute("SELECT fileid FROM files WHERE sha1 = %s", (resource_hash,)) try: fileid = cursor.fetchone()[0] except (IndexError, TypeError): cursor.execute("INSERT INTO files (file, media_type) " "VALUES (%s, %s)" "RETURNING fileid", (psycopg2.Binary(file.read()), media_type,)) fileid = cursor.fetchone()[0] return fileid, resource_hash
[ "def", "_insert_file", "(", "cursor", ",", "file", ",", "media_type", ")", ":", "resource_hash", "=", "_get_file_sha1", "(", "file", ")", "cursor", ".", "execute", "(", "\"SELECT fileid FROM files WHERE sha1 = %s\"", ",", "(", "resource_hash", ",", ")", ")", "try", ":", "fileid", "=", "cursor", ".", "fetchone", "(", ")", "[", "0", "]", "except", "(", "IndexError", ",", "TypeError", ")", ":", "cursor", ".", "execute", "(", "\"INSERT INTO files (file, media_type) \"", "\"VALUES (%s, %s)\"", "\"RETURNING fileid\"", ",", "(", "psycopg2", ".", "Binary", "(", "file", ".", "read", "(", ")", ")", ",", "media_type", ",", ")", ")", "fileid", "=", "cursor", ".", "fetchone", "(", ")", "[", "0", "]", "return", "fileid", ",", "resource_hash" ]
Upsert the ``file`` and ``media_type`` into the files table. Returns the ``fileid`` and ``sha1`` of the upserted file.
[ "Upsert", "the", "file", "and", "media_type", "into", "the", "files", "table", ".", "Returns", "the", "fileid", "and", "sha1", "of", "the", "upserted", "file", "." ]
python
valid
39.764706
gwastro/pycbc
pycbc/waveform/compress.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/compress.py#L70-L106
def mchirp_compression(m1, m2, fmin, fmax, min_seglen=0.02, df_multiple=None): """Return the frequencies needed to compress a waveform with the given chirp mass. This is based on the estimate in rough_time_estimate. Parameters ---------- m1: float mass of first component object in solar masses m2: float mass of second component object in solar masses fmin : float The starting frequency of the compressed waveform. fmax : float The ending frequency of the compressed waveform. min_seglen : float The inverse of this gives the maximum frequency step that is used. df_multiple : {None, float} Make the compressed sampling frequencies a multiple of the given value. If None provided, the returned sample points can have any floating point value. Returns ------- array The frequencies at which to evaluate the compressed waveform. """ sample_points = [] f = fmin while f < fmax: if df_multiple is not None: f = int(f/df_multiple)*df_multiple sample_points.append(f) f += 1.0 / rough_time_estimate(m1, m2, f, fudge_min=min_seglen) # add the last point if sample_points[-1] < fmax: sample_points.append(fmax) return numpy.array(sample_points)
[ "def", "mchirp_compression", "(", "m1", ",", "m2", ",", "fmin", ",", "fmax", ",", "min_seglen", "=", "0.02", ",", "df_multiple", "=", "None", ")", ":", "sample_points", "=", "[", "]", "f", "=", "fmin", "while", "f", "<", "fmax", ":", "if", "df_multiple", "is", "not", "None", ":", "f", "=", "int", "(", "f", "/", "df_multiple", ")", "*", "df_multiple", "sample_points", ".", "append", "(", "f", ")", "f", "+=", "1.0", "/", "rough_time_estimate", "(", "m1", ",", "m2", ",", "f", ",", "fudge_min", "=", "min_seglen", ")", "# add the last point", "if", "sample_points", "[", "-", "1", "]", "<", "fmax", ":", "sample_points", ".", "append", "(", "fmax", ")", "return", "numpy", ".", "array", "(", "sample_points", ")" ]
Return the frequencies needed to compress a waveform with the given chirp mass. This is based on the estimate in rough_time_estimate. Parameters ---------- m1: float mass of first component object in solar masses m2: float mass of second component object in solar masses fmin : float The starting frequency of the compressed waveform. fmax : float The ending frequency of the compressed waveform. min_seglen : float The inverse of this gives the maximum frequency step that is used. df_multiple : {None, float} Make the compressed sampling frequencies a multiple of the given value. If None provided, the returned sample points can have any floating point value. Returns ------- array The frequencies at which to evaluate the compressed waveform.
[ "Return", "the", "frequencies", "needed", "to", "compress", "a", "waveform", "with", "the", "given", "chirp", "mass", ".", "This", "is", "based", "on", "the", "estimate", "in", "rough_time_estimate", "." ]
python
train
35.135135
KelSolaar/Umbra
umbra/ui/widgets/codeEditor_QPlainTextEdit.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/widgets/codeEditor_QPlainTextEdit.py#L171-L182
def separator_width(self, value): """ Setter for **self.__separator_width** attribute. :param value: Attribute value. :type value: int """ if value is not None: assert type(value) is int, "'{0}' attribute: '{1}' type is not 'int'!".format("separator_width", value) assert value > 0, "'{0}' attribute: '{1}' need to be exactly positive!".format("separator_width", value) self.__separator_width = value
[ "def", "separator_width", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "int", ",", "\"'{0}' attribute: '{1}' type is not 'int'!\"", ".", "format", "(", "\"separator_width\"", ",", "value", ")", "assert", "value", ">", "0", ",", "\"'{0}' attribute: '{1}' need to be exactly positive!\"", ".", "format", "(", "\"separator_width\"", ",", "value", ")", "self", ".", "__separator_width", "=", "value" ]
Setter for **self.__separator_width** attribute. :param value: Attribute value. :type value: int
[ "Setter", "for", "**", "self", ".", "__separator_width", "**", "attribute", "." ]
python
train
39.25
IDSIA/sacred
sacred/dependencies.py
https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/dependencies.py#L91-L99
def get_py_file_if_possible(pyc_name): """Try to retrieve a X.py file for a given X.py[c] file.""" if pyc_name.endswith(('.py', '.so', '.pyd')): return pyc_name assert pyc_name.endswith('.pyc') non_compiled_file = pyc_name[:-1] if os.path.exists(non_compiled_file): return non_compiled_file return pyc_name
[ "def", "get_py_file_if_possible", "(", "pyc_name", ")", ":", "if", "pyc_name", ".", "endswith", "(", "(", "'.py'", ",", "'.so'", ",", "'.pyd'", ")", ")", ":", "return", "pyc_name", "assert", "pyc_name", ".", "endswith", "(", "'.pyc'", ")", "non_compiled_file", "=", "pyc_name", "[", ":", "-", "1", "]", "if", "os", ".", "path", ".", "exists", "(", "non_compiled_file", ")", ":", "return", "non_compiled_file", "return", "pyc_name" ]
Try to retrieve a X.py file for a given X.py[c] file.
[ "Try", "to", "retrieve", "a", "X", ".", "py", "file", "for", "a", "given", "X", ".", "py", "[", "c", "]", "file", "." ]
python
train
37.555556
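A simple usage sketch (the file names are illustrative):

print(get_py_file_if_possible('pkg/module.py'))    # -> 'pkg/module.py' unchanged
print(get_py_file_if_possible('pkg/module.pyc'))   # -> 'pkg/module.py' if that
                                                   #    file exists, else the .pyc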
Rockhopper-Technologies/enlighten
enlighten/_terminal.py
https://github.com/Rockhopper-Technologies/enlighten/blob/857855f940e6c1bb84d0be849b999a18fff5bf5a/enlighten/_terminal.py#L63-L68
def move_to(self, xpos, ypos): """ Move cursor to specified position """ self.stream.write(self.move(ypos, xpos))
[ "def", "move_to", "(", "self", ",", "xpos", ",", "ypos", ")", ":", "self", ".", "stream", ".", "write", "(", "self", ".", "move", "(", "ypos", ",", "xpos", ")", ")" ]
Move cursor to specified position
[ "Move", "cursor", "to", "specified", "position" ]
python
train
23.5
YosaiProject/yosai
yosai/core/subject/subject.py
https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/subject/subject.py#L260-L274
def is_permitted(self, permission_s): """ :param permission_s: a collection of 1..N permissions :type permission_s: List of authz_abcs.Permission object(s) or String(s) :returns: a List of tuple(s), containing the authz_abcs.Permission and a Boolean indicating whether the permission is granted """ if self.authorized: self.check_security_manager() return (self.security_manager.is_permitted( self.identifiers, permission_s)) msg = 'Cannot check permission when user isn\'t authenticated nor remembered' raise ValueError(msg)
[ "def", "is_permitted", "(", "self", ",", "permission_s", ")", ":", "if", "self", ".", "authorized", ":", "self", ".", "check_security_manager", "(", ")", "return", "(", "self", ".", "security_manager", ".", "is_permitted", "(", "self", ".", "identifiers", ",", "permission_s", ")", ")", "msg", "=", "'Cannot check permission when user isn\\'t authenticated nor remembered'", "raise", "ValueError", "(", "msg", ")" ]
:param permission_s: a collection of 1..N permissions :type permission_s: List of authz_abcs.Permission object(s) or String(s) :returns: a List of tuple(s), containing the authz_abcs.Permission and a Boolean indicating whether the permission is granted
[ ":", "param", "permission_s", ":", "a", "collection", "of", "1", "..", "N", "permissions", ":", "type", "permission_s", ":", "List", "of", "authz_abcs", ".", "Permission", "object", "(", "s", ")", "or", "String", "(", "s", ")" ]
python
train
42.6
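A call sketch for is_permitted; the subject object and permission strings are illustrative placeholders, and the full Yosai security-manager setup is omitted:

    # 'subject' stands in for an authenticated yosai subject (hypothetical)
    for permission, granted in subject.is_permitted(['wiki:read', 'wiki:write']):
        print(permission, granted)   # each entry pairs the permission with a Boolean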
urda/nistbeacon
scripts/version_manager.py
https://github.com/urda/nistbeacon/blob/43e0c3d1e186e71387f072daf98911abb14469dd/scripts/version_manager.py#L58-L83
def get_version(self) -> str: """ Open the file referenced in this object, and scrape the version. :return: The version as a string, an empty string if there is no match to the magic_line, or any file exception messages encountered. """ try: f = open(self.file_path, 'r') lines = f.readlines() f.close() except Exception as e: return str(e) result = '' for line in lines: if self.magic_line in line: start = len(self.magic_line) end = len(line) - self.strip_end_chars result = line[start:end] break return result
[ "def", "get_version", "(", "self", ")", "->", "str", ":", "try", ":", "f", "=", "open", "(", "self", ".", "file_path", ",", "'r'", ")", "lines", "=", "f", ".", "readlines", "(", ")", "f", ".", "close", "(", ")", "except", "Exception", "as", "e", ":", "return", "str", "(", "e", ")", "result", "=", "''", "for", "line", "in", "lines", ":", "if", "self", ".", "magic_line", "in", "line", ":", "start", "=", "len", "(", "self", ".", "magic_line", ")", "end", "=", "len", "(", "line", ")", "-", "self", ".", "strip_end_chars", "result", "=", "line", "[", "start", ":", "end", "]", "break", "return", "result" ]
Open the file referenced in this object, and scrape the version. :return: The version as a string, an empty string if there is no match to the magic_line, or any file exception messages encountered.
[ "Open", "the", "file", "referenced", "in", "this", "object", "and", "scrape", "the", "version", "." ]
python
test
27.384615
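A standalone rehearsal of the scraping step inside get_version, with plain values standing in for the object's attributes (the magic_line value is illustrative, and the file handling is elided):

    magic_line = "__version__ = '"
    strip_end_chars = 2                  # trailing quote plus newline
    line = "__version__ = '1.2.3'\n"
    if magic_line in line:
        start = len(magic_line)
        end = len(line) - strip_end_chars
        print(line[start:end])           # -> 1.2.3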
collectiveacuity/labPack
labpack/events/meetup.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/events/meetup.py#L1296-L1346
def list_member_groups(self, member_id): ''' a method to retrieve a list of meetup groups member belongs to :param member_id: integer with meetup member id :return: dictionary with list of group details in [json] group_details = self.objects.group_profile.schema ''' # https://www.meetup.com/meetup_api/docs/members/:member_id/#get title = '%s.list_member_groups' % self.__class__.__name__ # validate inputs input_fields = { 'member_id': member_id } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # construct member id if not member_id: raise IndexError('%s requires member id argument.' % title) # compose request fields url = '%s/members/%s' % (self.endpoint, str(member_id)) params = { 'fields': 'memberships' } # send requests response_details = self._get_request(url, params=params) # construct method output dictionary member_groups = { 'json': [] } for key, value in response_details.items(): if not key == 'json': member_groups[key] = value # parse response if response_details['json']: if 'memberships' in response_details['json'].keys(): for group in response_details['json']['memberships']['member']: member_groups['json'].append(self.objects.group_profile.ingest(**group)) return member_groups
[ "def", "list_member_groups", "(", "self", ",", "member_id", ")", ":", "# https://www.meetup.com/meetup_api/docs/members/:member_id/#get\r", "title", "=", "'%s.list_member_groups'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs\r", "input_fields", "=", "{", "'member_id'", ":", "member_id", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# construct member id\r", "if", "not", "member_id", ":", "raise", "IndexError", "(", "'%s requires member id argument.'", "%", "title", ")", "# compose request fields\r", "url", "=", "'%s/members/%s'", "%", "(", "self", ".", "endpoint", ",", "str", "(", "member_id", ")", ")", "params", "=", "{", "'fields'", ":", "'memberships'", "}", "# send requests\r", "response_details", "=", "self", ".", "_get_request", "(", "url", ",", "params", "=", "params", ")", "# construct method output dictionary\r", "member_groups", "=", "{", "'json'", ":", "[", "]", "}", "for", "key", ",", "value", "in", "response_details", ".", "items", "(", ")", ":", "if", "not", "key", "==", "'json'", ":", "member_groups", "[", "key", "]", "=", "value", "# parse response\r", "if", "response_details", "[", "'json'", "]", ":", "if", "'memberships'", "in", "response_details", "[", "'json'", "]", ".", "keys", "(", ")", ":", "for", "group", "in", "response_details", "[", "'json'", "]", "[", "'memberships'", "]", "[", "'member'", "]", ":", "member_groups", "[", "'json'", "]", ".", "append", "(", "self", ".", "objects", ".", "group_profile", ".", "ingest", "(", "*", "*", "group", ")", ")", "return", "member_groups" ]
a method to retrieve a list of meetup groups member belongs to :param member_id: integer with meetup member id :return: dictionary with list of group details in [json] group_details = self.objects.group_profile.schema
[ "a", "method", "to", "retrieve", "a", "list", "of", "meetup", "groups", "member", "belongs", "to", ":", "param", "member_id", ":", "integer", "with", "meetup", "member", "id", ":", "return", ":", "dictionary", "with", "list", "of", "group", "details", "in", "[", "json", "]", "group_details", "=", "self", ".", "objects", ".", "group_profile", ".", "schema" ]
python
train
32.882353
ViiSiX/FlaskRedislite
flask_redislite.py
https://github.com/ViiSiX/FlaskRedislite/blob/01bc9fbbeb415aac621c7a9cc091a666e728e651/flask_redislite.py#L150-L158
def collection(self): """Return the redis-collection instance.""" if not self.include_collections: return None ctx = stack.top if ctx is not None: if not hasattr(ctx, 'redislite_collection'): ctx.redislite_collection = Collection(redis=self.connection) return ctx.redislite_collection
[ "def", "collection", "(", "self", ")", ":", "if", "not", "self", ".", "include_collections", ":", "return", "None", "ctx", "=", "stack", ".", "top", "if", "ctx", "is", "not", "None", ":", "if", "not", "hasattr", "(", "ctx", ",", "'redislite_collection'", ")", ":", "ctx", ".", "redislite_collection", "=", "Collection", "(", "redis", "=", "self", ".", "connection", ")", "return", "ctx", ".", "redislite_collection" ]
Return the redis-collection instance.
[ "Return", "the", "redis", "-", "collection", "instance", "." ]
python
train
40
OSLL/jabba
jabba/dep_extractor.py
https://github.com/OSLL/jabba/blob/71c1d008ab497020fba6ffa12a600721eb3f5ef7/jabba/dep_extractor.py#L47-L56
def get_calls(self, job_name): ''' Reads file by given name and returns CallEdge array ''' config = self.file_index.get_by_name(job_name).yaml calls = self.get_calls_from_dict(config, from_name=job_name) return calls
[ "def", "get_calls", "(", "self", ",", "job_name", ")", ":", "config", "=", "self", ".", "file_index", ".", "get_by_name", "(", "job_name", ")", ".", "yaml", "calls", "=", "self", ".", "get_calls_from_dict", "(", "config", ",", "from_name", "=", "job_name", ")", "return", "calls" ]
Reads file by given name and returns CallEdge array
[ "Reads", "file", "by", "given", "name", "and", "returns", "CallEdge", "array" ]
python
train
25.8
lago-project/lago
lago/virt.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/virt.py#L232-L293
def generate_init(self, dst, out_format, vms_to_include, filters=None): """ Generate an init file which represents this env and can be used with the images created by self.export_vms Args: dst (str): path and name of the new init file out_format (plugins.output.OutFormatPlugin): formatter for the output (the default is yaml) filters (list): list of paths to keys that should be removed from the init file vms_to_include (list of :class:lago.plugins.vm.VMPlugin): list of vms to include in the init file Returns: None """ # todo: move this logic to PrefixExportManager with LogTask('Exporting init file to: {}'.format(dst)): # Set the default formatter to yaml. The default formatter # doesn't generate a valid init file, so it's not reasonable # to use it if isinstance(out_format, plugins.output.DefaultOutFormatPlugin): out_format = plugins.output.YAMLOutFormatPlugin() if not filters: filters = [ 'domains/*/disks/*/metadata', 'domains/*/metadata/deploy-scripts', 'domains/*/snapshots', 'domains/*/name', 'nets/*/mapping', 'nets/*/dns_records' ] spec = self.get_env_spec(filters) temp = {} for vm in vms_to_include: temp[vm.name()] = spec['domains'][vm.name()] spec['domains'] = temp for _, domain in spec['domains'].viewitems(): domain['disks'] = [ d for d in domain['disks'] if not d.get('skip-export') ] for disk in domain['disks']: if disk['type'] == 'template': disk['template_type'] = 'qcow2' elif disk['type'] == 'empty': disk['type'] = 'file' disk['make_a_copy'] = 'True' # Insert the relative path to the exported images disk['path'] = os.path.join( '$LAGO_INITFILE_PATH', os.path.basename(disk['path']) ) with open(dst, 'wt') as f: if isinstance(out_format, plugins.output.YAMLOutFormatPlugin): # Dump the yaml file without type tags # TODO: Allow passing parameters to output plugins f.write(yaml.safe_dump(spec)) else: f.write(out_format.format(spec))
[ "def", "generate_init", "(", "self", ",", "dst", ",", "out_format", ",", "vms_to_include", ",", "filters", "=", "None", ")", ":", "# todo: move this logic to PrefixExportManager", "with", "LogTask", "(", "'Exporting init file to: {}'", ".", "format", "(", "dst", ")", ")", ":", "# Set the default formatter to yaml. The default formatter", "# doesn't generate a valid init file, so it's not reasonable", "# to use it", "if", "isinstance", "(", "out_format", ",", "plugins", ".", "output", ".", "DefaultOutFormatPlugin", ")", ":", "out_format", "=", "plugins", ".", "output", ".", "YAMLOutFormatPlugin", "(", ")", "if", "not", "filters", ":", "filters", "=", "[", "'domains/*/disks/*/metadata'", ",", "'domains/*/metadata/deploy-scripts'", ",", "'domains/*/snapshots'", ",", "'domains/*/name'", ",", "'nets/*/mapping'", ",", "'nets/*/dns_records'", "]", "spec", "=", "self", ".", "get_env_spec", "(", "filters", ")", "temp", "=", "{", "}", "for", "vm", "in", "vms_to_include", ":", "temp", "[", "vm", ".", "name", "(", ")", "]", "=", "spec", "[", "'domains'", "]", "[", "vm", ".", "name", "(", ")", "]", "spec", "[", "'domains'", "]", "=", "temp", "for", "_", ",", "domain", "in", "spec", "[", "'domains'", "]", ".", "viewitems", "(", ")", ":", "domain", "[", "'disks'", "]", "=", "[", "d", "for", "d", "in", "domain", "[", "'disks'", "]", "if", "not", "d", ".", "get", "(", "'skip-export'", ")", "]", "for", "disk", "in", "domain", "[", "'disks'", "]", ":", "if", "disk", "[", "'type'", "]", "==", "'template'", ":", "disk", "[", "'template_type'", "]", "=", "'qcow2'", "elif", "disk", "[", "'type'", "]", "==", "'empty'", ":", "disk", "[", "'type'", "]", "=", "'file'", "disk", "[", "'make_a_copy'", "]", "=", "'True'", "# Insert the relative path to the exported images", "disk", "[", "'path'", "]", "=", "os", ".", "path", ".", "join", "(", "'$LAGO_INITFILE_PATH'", ",", "os", ".", "path", ".", "basename", "(", "disk", "[", "'path'", "]", ")", ")", "with", "open", "(", "dst", ",", "'wt'", ")", "as", "f", ":", "if", "isinstance", "(", "out_format", ",", "plugins", ".", "output", ".", "YAMLOutFormatPlugin", ")", ":", "# Dump the yaml file without type tags", "# TODO: Allow passing parameters to output plugins", "f", ".", "write", "(", "yaml", ".", "safe_dump", "(", "spec", ")", ")", "else", ":", "f", ".", "write", "(", "out_format", ".", "format", "(", "spec", ")", ")" ]
Generate an init file which represents this env and can be used with the images created by self.export_vms Args: dst (str): path and name of the new init file out_format (plugins.output.OutFormatPlugin): formatter for the output (the default is yaml) filters (list): list of paths to keys that should be removed from the init file vms_to_include (list of :class:lago.plugins.vm.VMPlugin): list of vms to include in the init file Returns: None
[ "Generate", "an", "init", "file", "which", "represents", "this", "env", "and", "can", "be", "used", "with", "the", "images", "created", "by", "self", ".", "export_vms", "Args", ":", "dst", "(", "str", ")", ":", "path", "and", "name", "of", "the", "new", "init", "file", "out_format", "(", "plugins", ".", "output", ".", "OutFormatPlugin", ")", ":", "formatter", "for", "the", "output", "(", "the", "default", "is", "yaml", ")", "filters", "(", "list", ")", ":", "list", "of", "paths", "to", "keys", "that", "should", "be", "removed", "from", "the", "init", "file", "vms_to_include", "(", "list", "of", ":", "class", ":", "lago", ".", "plugins", ".", "vm", ".", "VMPlugin", ")", ":", "list", "of", "vms", "to", "include", "in", "the", "init", "file", "Returns", ":", "None" ]
python
train
42.241935
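A runnable rehearsal of the exported-disk path rewrite performed near the end of generate_init (the disk dict is illustrative):

    import os

    disk = {'type': 'template', 'path': '/var/lib/lago/images/el7.qcow2'}
    # relative path under the init file's own directory, as in the record:
    disk['path'] = os.path.join('$LAGO_INITFILE_PATH', os.path.basename(disk['path']))
    print(disk['path'])    # -> $LAGO_INITFILE_PATH/el7.qcow2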
inveniosoftware/invenio-deposit
invenio_deposit/views/rest.py
https://github.com/inveniosoftware/invenio-deposit/blob/f243ea1d01ab0a3bc92ade3262d1abdd2bc32447/invenio_deposit/views/rest.py#L380-L403
def put(self, pid, record, key): """Handle the file rename through the PUT deposit file. Permission required: `update_permission_factory`. :param pid: Pid object (from url). :param record: Record object resolved from the pid. :param key: Unique identifier for the file in the deposit. """ try: data = json.loads(request.data.decode('utf-8')) new_key = data['filename'] except KeyError: raise WrongFile() new_key_secure = secure_filename(new_key) if not new_key_secure or new_key != new_key_secure: raise WrongFile() try: obj = record.files.rename(str(key), new_key_secure) except KeyError: abort(404) record.commit() db.session.commit() return self.make_response(obj=obj, pid=pid, record=record)
[ "def", "put", "(", "self", ",", "pid", ",", "record", ",", "key", ")", ":", "try", ":", "data", "=", "json", ".", "loads", "(", "request", ".", "data", ".", "decode", "(", "'utf-8'", ")", ")", "new_key", "=", "data", "[", "'filename'", "]", "except", "KeyError", ":", "raise", "WrongFile", "(", ")", "new_key_secure", "=", "secure_filename", "(", "new_key", ")", "if", "not", "new_key_secure", "or", "new_key", "!=", "new_key_secure", ":", "raise", "WrongFile", "(", ")", "try", ":", "obj", "=", "record", ".", "files", ".", "rename", "(", "str", "(", "key", ")", ",", "new_key_secure", ")", "except", "KeyError", ":", "abort", "(", "404", ")", "record", ".", "commit", "(", ")", "db", ".", "session", ".", "commit", "(", ")", "return", "self", ".", "make_response", "(", "obj", "=", "obj", ",", "pid", "=", "pid", ",", "record", "=", "record", ")" ]
Handle the file rename through the PUT deposit file. Permission required: `update_permission_factory`. :param pid: Pid object (from url). :param record: Record object resolved from the pid. :param key: Unique identifier for the file in the deposit.
[ "Handle", "the", "file", "rename", "through", "the", "PUT", "deposit", "file", "." ]
python
valid
36.083333
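A client-side sketch of the request this handler expects: a JSON body whose 'filename' must already survive secure_filename() unchanged, otherwise WrongFile is raised. The URL and any auth are placeholders, not the real invenio-deposit routes:

    import json
    import requests

    resp = requests.put(
        'https://example.org/api/deposits/1/files/old.txt',  # hypothetical endpoint
        data=json.dumps({'filename': 'new.txt'}),            # must be a secure filename
        headers={'Content-Type': 'application/json'},
    )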
f3at/feat
src/feat/agents/base/partners.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/agents/base/partners.py#L281-L297
def query_handler(cls, identifier, role=None): ''' Lookup the handler for the giving idetifier (descriptor_type) and role. In case it was not found return the default. Logic goes as follows: - First try to find exact match for identifier and role, - Try to find match for identifier and role=None, - Return default handler. ''' key = cls._key_for(identifier, role) handler = cls._handlers.get(key, None) if handler is None: default_for_identifier = cls._key_for(identifier, None) handler = cls._handlers.get(default_for_identifier, cls._handlers['_default']) return handler
[ "def", "query_handler", "(", "cls", ",", "identifier", ",", "role", "=", "None", ")", ":", "key", "=", "cls", ".", "_key_for", "(", "identifier", ",", "role", ")", "handler", "=", "cls", ".", "_handlers", ".", "get", "(", "key", ",", "None", ")", "if", "handler", "is", "None", ":", "default_for_identifier", "=", "cls", ".", "_key_for", "(", "identifier", ",", "None", ")", "handler", "=", "cls", ".", "_handlers", ".", "get", "(", "default_for_identifier", ",", "cls", ".", "_handlers", "[", "'_default'", "]", ")", "return", "handler" ]
Lookup the handler for the given identifier (descriptor_type) and role. In case it was not found return the default. Logic goes as follows: - First try to find exact match for identifier and role, - Try to find match for identifier and role=None, - Return default handler.
[ "Lookup", "the", "handler", "for", "the", "given", "identifier", "(", "descriptor_type", ")", "and", "role", ".", "In", "case", "it", "was", "not", "found", "return", "the", "default", "." ]
python
train
42.411765
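A runnable rehearsal of the fallback order in query_handler, with a plain tuple standing in for whatever _key_for builds (that helper is not shown, so the key shape is an assumption):

    handlers = {
        ('shard', 'worker'): 'exact handler',
        ('shard', None): 'identifier default',
        '_default': 'global default',
    }

    def lookup(identifier, role=None):
        handler = handlers.get((identifier, role))
        if handler is None:
            handler = handlers.get((identifier, None), handlers['_default'])
        return handler

    print(lookup('shard', 'worker'))    # -> exact handler
    print(lookup('shard', 'manager'))   # -> identifier default
    print(lookup('other'))              # -> global default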
saltstack/salt
salt/states/grafana_dashboard.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/grafana_dashboard.py#L306-L324
def _inherited_panel(panel, base_panels_from_pillar, ret): '''Return a panel with properties from parents.''' base_panels = [] for base_panel_from_pillar in base_panels_from_pillar: base_panel = __salt__['pillar.get'](base_panel_from_pillar) if base_panel: base_panels.append(base_panel) elif base_panel_from_pillar != _DEFAULT_PANEL_PILLAR: ret.setdefault('warnings', []) warning_message = 'Cannot find panel pillar "{0}".'.format( base_panel_from_pillar) if warning_message not in ret['warnings']: ret['warnings'].append(warning_message) base_panels.append(panel) result_panel = {} for panel in base_panels: result_panel.update(panel) return result_panel
[ "def", "_inherited_panel", "(", "panel", ",", "base_panels_from_pillar", ",", "ret", ")", ":", "base_panels", "=", "[", "]", "for", "base_panel_from_pillar", "in", "base_panels_from_pillar", ":", "base_panel", "=", "__salt__", "[", "'pillar.get'", "]", "(", "base_panel_from_pillar", ")", "if", "base_panel", ":", "base_panels", ".", "append", "(", "base_panel", ")", "elif", "base_panel_from_pillar", "!=", "_DEFAULT_PANEL_PILLAR", ":", "ret", ".", "setdefault", "(", "'warnings'", ",", "[", "]", ")", "warning_message", "=", "'Cannot find panel pillar \"{0}\".'", ".", "format", "(", "base_panel_from_pillar", ")", "if", "warning_message", "not", "in", "ret", "[", "'warnings'", "]", ":", "ret", "[", "'warnings'", "]", ".", "append", "(", "warning_message", ")", "base_panels", ".", "append", "(", "panel", ")", "result_panel", "=", "{", "}", "for", "panel", "in", "base_panels", ":", "result_panel", ".", "update", "(", "panel", ")", "return", "result_panel" ]
Return a panel with properties from parents.
[ "Return", "a", "panel", "with", "properties", "from", "parents", "." ]
python
train
41
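The merge at the bottom of _inherited_panel gives later dicts priority, and the concrete panel is appended last, so it overrides every base pulled from pillar. A runnable rehearsal with illustrative values:

    base_panels = [
        {'type': 'graph', 'span': 6},    # first base pillar
        {'span': 4},                     # later base pillar wins on 'span'
        {'title': 'CPU'},                # the panel itself, appended last
    ]
    result_panel = {}
    for panel in base_panels:
        result_panel.update(panel)
    print(result_panel)   # {'type': 'graph', 'span': 4, 'title': 'CPU'}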
ska-sa/purr
Purr/Render.py
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Render.py#L129-L148
def addOption(classobj, name, default, dtype=str, doc=None):
        """Adds a renderer option named 'name', with the given default value.
        'dtype' must be a callable to convert a string to an option.
        'doc' is a doc string.
        Options will be initialized from config file here.
        """
        # make a config object
        if not hasattr(classobj, '_config'):
            classobj._config = Kittens.config.SectionParser(ConfigFile, "render-" + classobj.renderer_id)
        # make a class-specific copy of the current option set
        if classobj._options_owner is not classobj:
            classobj.options = dict(DefaultRenderer.options)
            classobj._options_owner = classobj
        # override default value from config file
        if dtype is bool:
            value = classobj._config.getbool(name, default)
        else:
            value = dtype(classobj._config.get(name, default))
        # insert into dict
        classobj.options[name] = (value, default, dtype, doc)
[ "def", "addOption", "(", "classobj", ",", "name", ",", "default", ",", "dtype", "=", "str", ",", "doc", "=", "None", ")", ":", "# make a config object", "if", "not", "hasattr", "(", "classobj", ",", "'_config'", ")", ":", "classobj", ".", "_config", "=", "Kittens", ".", "config", ".", "SectionParser", "(", "ConfigFile", ",", "\"render-\"", "+", "classobj", ".", "renderer_id", ")", "# make a class-specific copy of the current option set", "if", "classobj", ".", "_options_owner", "is", "not", "classobj", ":", "classobj", ".", "options", "=", "dict", "(", "DefaultRenderer", ".", "options", ")", "classobj", ".", "_options_owner", "=", "classobj", "# override default value from config file", "if", "dtype", "is", "bool", ":", "value", "=", "classobj", ".", "_config", ".", "getbool", "(", "name", ",", "default", ")", "else", ":", "value", "=", "dtype", "(", "classobj", ".", "_config", ".", "get", "(", "name", ",", "default", ")", ")", "# insert into dict", "classobj", ".", "options", "[", "name", "]", "=", "(", "value", ",", "default", ",", "dtype", ",", "doc", ")" ]
Adds a renderer option named 'name', with the given default value. 'dtype' must be a callable to convert a string to an option. 'doc' is a doc string. Options will be initialized from config file here.
[ "Adds", "a", "renderer", "option", "named", "name", "with", "the", "given", "default", "value", ".", "dtype", "must", "be", "a", "callable", "to", "convert", "a", "string", "to", "an", "option", ".", "doc", "is", "a", "doc", "string", ".", "Options", "will", "be", "initialized", "from", "config", "file", "here", "." ]
python
train
49.8
AnthonyBloomer/daftlistings
daftlistings/daft.py
https://github.com/AnthonyBloomer/daftlistings/blob/f6c1b52425bc740f443b5efe6632a4bf18ee997f/daftlistings/daft.py#L342-L347
def set_public_transport_route(self, public_transport_route): """ Set the public transport route. :param public_transport_route: TransportRoute """ self._query_params += str(QueryParam.ROUTE_ID) + str(public_transport_route)
[ "def", "set_public_transport_route", "(", "self", ",", "public_transport_route", ")", ":", "self", ".", "_query_params", "+=", "str", "(", "QueryParam", ".", "ROUTE_ID", ")", "+", "str", "(", "public_transport_route", ")" ]
Set the public transport route. :param public_transport_route: TransportRoute
[ "Set", "the", "public", "transport", "route", ".", ":", "param", "public_transport_route", ":", "TransportRoute" ]
python
train
43.166667
saltstack/salt
salt/modules/aptpkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aptpkg.py#L980-L1066
def upgrade(refresh=True, dist_upgrade=False, **kwargs): ''' .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing any apt-get/dpkg commands spawned by Salt when the ``salt-minion`` service is restarted. (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Upgrades all packages via ``apt-get upgrade`` or ``apt-get dist-upgrade`` if ``dist_upgrade`` is ``True``. Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} dist_upgrade Whether to perform the upgrade using dist-upgrade vs upgrade. Default is to use upgrade. .. versionadded:: 2014.7.0 cache_valid_time .. versionadded:: 2016.11.0 Skip refreshing the package database if refresh has already occurred within <value> seconds download_only Only download the packages, don't unpack or install them .. versionadded:: 2018.3.0 force_conf_new Always install the new version of any configuration files. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' cache_valid_time = kwargs.pop('cache_valid_time', 0) if salt.utils.data.is_true(refresh): refresh_db(cache_valid_time) old = list_pkgs() if 'force_conf_new' in kwargs and kwargs['force_conf_new']: force_conf = '--force-confnew' else: force_conf = '--force-confold' cmd = ['apt-get', '-q', '-y', '-o', 'DPkg::Options::={0}'.format(force_conf), '-o', 'DPkg::Options::=--force-confdef'] if kwargs.get('force_yes', False): cmd.append('--force-yes') if kwargs.get('skip_verify', False): cmd.append('--allow-unauthenticated') if kwargs.get('download_only', False): cmd.append('--download-only') cmd.append('dist-upgrade' if dist_upgrade else 'upgrade') result = _call_apt(cmd, env=DPKG_ENV_VARS.copy()) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'changes': ret, 'result': result} ) return ret
[ "def", "upgrade", "(", "refresh", "=", "True", ",", "dist_upgrade", "=", "False", ",", "*", "*", "kwargs", ")", ":", "cache_valid_time", "=", "kwargs", ".", "pop", "(", "'cache_valid_time'", ",", "0", ")", "if", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "refresh", ")", ":", "refresh_db", "(", "cache_valid_time", ")", "old", "=", "list_pkgs", "(", ")", "if", "'force_conf_new'", "in", "kwargs", "and", "kwargs", "[", "'force_conf_new'", "]", ":", "force_conf", "=", "'--force-confnew'", "else", ":", "force_conf", "=", "'--force-confold'", "cmd", "=", "[", "'apt-get'", ",", "'-q'", ",", "'-y'", ",", "'-o'", ",", "'DPkg::Options::={0}'", ".", "format", "(", "force_conf", ")", ",", "'-o'", ",", "'DPkg::Options::=--force-confdef'", "]", "if", "kwargs", ".", "get", "(", "'force_yes'", ",", "False", ")", ":", "cmd", ".", "append", "(", "'--force-yes'", ")", "if", "kwargs", ".", "get", "(", "'skip_verify'", ",", "False", ")", ":", "cmd", ".", "append", "(", "'--allow-unauthenticated'", ")", "if", "kwargs", ".", "get", "(", "'download_only'", ",", "False", ")", ":", "cmd", ".", "append", "(", "'--download-only'", ")", "cmd", ".", "append", "(", "'dist-upgrade'", "if", "dist_upgrade", "else", "'upgrade'", ")", "result", "=", "_call_apt", "(", "cmd", ",", "env", "=", "DPKG_ENV_VARS", ".", "copy", "(", ")", ")", "__context__", ".", "pop", "(", "'pkg.list_pkgs'", ",", "None", ")", "new", "=", "list_pkgs", "(", ")", "ret", "=", "salt", ".", "utils", ".", "data", ".", "compare_dicts", "(", "old", ",", "new", ")", "if", "result", "[", "'retcode'", "]", "!=", "0", ":", "raise", "CommandExecutionError", "(", "'Problem encountered upgrading packages'", ",", "info", "=", "{", "'changes'", ":", "ret", ",", "'result'", ":", "result", "}", ")", "return", "ret" ]
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing any apt-get/dpkg commands spawned by Salt when the ``salt-minion`` service is restarted. (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Upgrades all packages via ``apt-get upgrade`` or ``apt-get dist-upgrade`` if ``dist_upgrade`` is ``True``. Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} dist_upgrade Whether to perform the upgrade using dist-upgrade vs upgrade. Default is to use upgrade. .. versionadded:: 2014.7.0 cache_valid_time .. versionadded:: 2016.11.0 Skip refreshing the package database if refresh has already occurred within <value> seconds download_only Only download the packages, don't unpack or install them .. versionadded:: 2018.3.0 force_conf_new Always install the new version of any configuration files. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' pkg.upgrade
[ "..", "versionchanged", "::", "2015", ".", "8", ".", "12", "2016", ".", "3", ".", "3", "2016", ".", "11", ".", "0", "On", "minions", "running", "systemd", ">", "=", "205", "systemd", "-", "run", "(", "1", ")", "_", "is", "now", "used", "to", "isolate", "commands", "which", "modify", "installed", "packages", "from", "the", "salt", "-", "minion", "daemon", "s", "control", "group", ".", "This", "is", "done", "to", "keep", "systemd", "from", "killing", "any", "apt", "-", "get", "/", "dpkg", "commands", "spawned", "by", "Salt", "when", "the", "salt", "-", "minion", "service", "is", "restarted", ".", "(", "see", "KillMode", "in", "the", "systemd", ".", "kill", "(", "5", ")", "_", "manpage", "for", "more", "information", ")", ".", "If", "desired", "usage", "of", "systemd", "-", "run", "(", "1", ")", "_", "can", "be", "suppressed", "by", "setting", "a", ":", "mod", ":", "config", "option", "<salt", ".", "modules", ".", "config", ".", "get", ">", "called", "systemd", ".", "scope", "with", "a", "value", "of", "False", "(", "no", "quotes", ")", "." ]
python
train
33.413793
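A rehearsal of the command this function assembles for upgrade(dist_upgrade=True, force_conf_new=True), taken straight from the branches shown above (no force_yes, skip_verify, or download_only flags set):

    cmd = ['apt-get', '-q', '-y',
           '-o', 'DPkg::Options::=--force-confnew',
           '-o', 'DPkg::Options::=--force-confdef',
           'dist-upgrade']
    print(' '.join(cmd))
    # apt-get -q -y -o DPkg::Options::=--force-confnew -o DPkg::Options::=--force-confdef dist-upgrade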
AtomHash/evernode
evernode/models/json_model.py
https://github.com/AtomHash/evernode/blob/b2fb91555fb937a3f3eba41db56dee26f9b034be/evernode/models/json_model.py#L21-L40
def __json(self): """ Using the exclude lists, convert fields to a string. """ if self.exclude_list is None: self.exclude_list = [] fields = {} for key, item in vars(self).items(): if hasattr(self, '_sa_instance_state'): # load only deferred objects if len(orm.attributes.instance_state(self).unloaded) > 0: mapper = inspect(self) for column in mapper.attrs: column.key column.value if str(key).startswith('_') or key in self.exclude_list: continue fields[key] = item obj = Json.safe_object(fields) return str(obj)
[ "def", "__json", "(", "self", ")", ":", "if", "self", ".", "exclude_list", "is", "None", ":", "self", ".", "exclude_list", "=", "[", "]", "fields", "=", "{", "}", "for", "key", ",", "item", "in", "vars", "(", "self", ")", ".", "items", "(", ")", ":", "if", "hasattr", "(", "self", ",", "'_sa_instance_state'", ")", ":", "# load only deferred objects\r", "if", "len", "(", "orm", ".", "attributes", ".", "instance_state", "(", "self", ")", ".", "unloaded", ")", ">", "0", ":", "mapper", "=", "inspect", "(", "self", ")", "for", "column", "in", "mapper", ".", "attrs", ":", "column", ".", "key", "column", ".", "value", "if", "str", "(", "key", ")", ".", "startswith", "(", "'_'", ")", "or", "key", "in", "self", ".", "exclude_list", ":", "continue", "fields", "[", "key", "]", "=", "item", "obj", "=", "Json", ".", "safe_object", "(", "fields", ")", "return", "str", "(", "obj", ")" ]
Using the exclude lists, convert fields to a string.
[ "Using", "the", "exclude", "lists", "convert", "fields", "to", "a", "string", "." ]
python
train
38.1
projectshift/shift-boiler
boiler/user/models.py
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L224-L232
def increment_failed_logins(self): """ Increment failed logins counter""" if not self.failed_logins: self.failed_logins = 1 elif not self.failed_login_limit_reached(): self.failed_logins += 1 else: self.reset_login_counter() self.lock_account(30)
[ "def", "increment_failed_logins", "(", "self", ")", ":", "if", "not", "self", ".", "failed_logins", ":", "self", ".", "failed_logins", "=", "1", "elif", "not", "self", ".", "failed_login_limit_reached", "(", ")", ":", "self", ".", "failed_logins", "+=", "1", "else", ":", "self", ".", "reset_login_counter", "(", ")", "self", ".", "lock_account", "(", "30", ")" ]
Increment failed logins counter
[ "Increment", "failed", "logins", "counter" ]
python
train
35.333333
knipknap/exscript
Exscript/protocols/protocol.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/protocols/protocol.py#L496-L510
def set_prompt(self, prompt=None): """ Defines a pattern that is waited for when calling the expect_prompt() method. If the set_prompt() method is not called, or if it is called with the prompt argument set to None, a default prompt is used that should work with many devices running Unix, IOS, IOS-XR, or Junos and others. :type prompt: RegEx :param prompt: The pattern that matches the prompt of the remote host. """ if prompt is None: self.manual_prompt_re = prompt else: self.manual_prompt_re = to_regexs(prompt)
[ "def", "set_prompt", "(", "self", ",", "prompt", "=", "None", ")", ":", "if", "prompt", "is", "None", ":", "self", ".", "manual_prompt_re", "=", "prompt", "else", ":", "self", ".", "manual_prompt_re", "=", "to_regexs", "(", "prompt", ")" ]
Defines a pattern that is waited for when calling the expect_prompt() method. If the set_prompt() method is not called, or if it is called with the prompt argument set to None, a default prompt is used that should work with many devices running Unix, IOS, IOS-XR, or Junos and others. :type prompt: RegEx :param prompt: The pattern that matches the prompt of the remote host.
[ "Defines", "a", "pattern", "that", "is", "waited", "for", "when", "calling", "the", "expect_prompt", "()", "method", ".", "If", "the", "set_prompt", "()", "method", "is", "not", "called", "or", "if", "it", "is", "called", "with", "the", "prompt", "argument", "set", "to", "None", "a", "default", "prompt", "is", "used", "that", "should", "work", "with", "many", "devices", "running", "Unix", "IOS", "IOS", "-", "XR", "or", "Junos", "and", "others", "." ]
python
train
41.066667
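A usage sketch for set_prompt; 'conn' stands in for an established Exscript protocol connection (setup omitted), and the pattern is illustrative:

    conn.set_prompt(r'router\d+[>#] ?$')   # waited for by expect_prompt()
    conn.set_prompt()                      # revert to the built-in default prompts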
Jajcus/pyxmpp2
pyxmpp2/ext/vcard.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/vcard.py#L1018-L1031
def as_xml(self,parent): """Create vcard-tmp XML representation of the field. :Parameters: - `parent`: parent node for the element :Types: - `parent`: `libxml2.xmlNode` :return: xml node with the field data. :returntype: `libxml2.xmlNode`""" n=parent.newChild(None,"CATEGORIES",None) for k in self.keywords: n.newTextChild(None,"KEYWORD",to_utf8(k)) return n
[ "def", "as_xml", "(", "self", ",", "parent", ")", ":", "n", "=", "parent", ".", "newChild", "(", "None", ",", "\"CATEGORIES\"", ",", "None", ")", "for", "k", "in", "self", ".", "keywords", ":", "n", ".", "newTextChild", "(", "None", ",", "\"KEYWORD\"", ",", "to_utf8", "(", "k", ")", ")", "return", "n" ]
Create vcard-tmp XML representation of the field. :Parameters: - `parent`: parent node for the element :Types: - `parent`: `libxml2.xmlNode` :return: xml node with the field data. :returntype: `libxml2.xmlNode`
[ "Create", "vcard", "-", "tmp", "XML", "representation", "of", "the", "field", "." ]
python
valid
31.928571
tulir/mautrix-python
mautrix_appservice/intent_api.py
https://github.com/tulir/mautrix-python/blob/21bb0870e4103dd03ecc61396ce02adb9301f382/mautrix_appservice/intent_api.py#L498-L527
async def invite(self, room_id: str, user_id: str, check_cache: bool = False ) -> Optional[dict]: """ Invite a user to participate in a particular room. See also: `API reference`_ Args: room_id: The room identifier (not alias) to which to invite the user. user_id: The fully qualified user ID of the invitee. check_cache: Whether or not to check the state cache before inviting. If true, the actual invite HTTP request will only be made if the user is not in the room according to local state caches. Returns: .. _API reference: https://matrix.org/docs/spec/client_server/r0.3.0.html#post-matrix-client-r0-createroom """ await self.ensure_joined(room_id) try: ok_states = {"invite", "join"} do_invite = (not check_cache or self.state_store.get_membership(room_id, user_id) not in ok_states) if do_invite: response = await self._invite_direct(room_id, user_id) self.state_store.invited(room_id, user_id) return response except MatrixRequestError as e: if e.errcode != "M_FORBIDDEN": raise IntentError(f"Failed to invite {user_id} to {room_id}", e) if "is already in the room" in e.message: self.state_store.joined(room_id, user_id)
[ "async", "def", "invite", "(", "self", ",", "room_id", ":", "str", ",", "user_id", ":", "str", ",", "check_cache", ":", "bool", "=", "False", ")", "->", "Optional", "[", "dict", "]", ":", "await", "self", ".", "ensure_joined", "(", "room_id", ")", "try", ":", "ok_states", "=", "{", "\"invite\"", ",", "\"join\"", "}", "do_invite", "=", "(", "not", "check_cache", "or", "self", ".", "state_store", ".", "get_membership", "(", "room_id", ",", "user_id", ")", "not", "in", "ok_states", ")", "if", "do_invite", ":", "response", "=", "await", "self", ".", "_invite_direct", "(", "room_id", ",", "user_id", ")", "self", ".", "state_store", ".", "invited", "(", "room_id", ",", "user_id", ")", "return", "response", "except", "MatrixRequestError", "as", "e", ":", "if", "e", ".", "errcode", "!=", "\"M_FORBIDDEN\"", ":", "raise", "IntentError", "(", "f\"Failed to invite {user_id} to {room_id}\"", ",", "e", ")", "if", "\"is already in the room\"", "in", "e", ".", "message", ":", "self", ".", "state_store", ".", "joined", "(", "room_id", ",", "user_id", ")" ]
Invite a user to participate in a particular room. See also: `API reference`_ Args: room_id: The room identifier (not alias) to which to invite the user. user_id: The fully qualified user ID of the invitee. check_cache: Whether or not to check the state cache before inviting. If true, the actual invite HTTP request will only be made if the user is not in the room according to local state caches. Returns: .. _API reference: https://matrix.org/docs/spec/client_server/r0.3.0.html#post-matrix-client-r0-createroom
[ "Invite", "a", "user", "to", "participate", "in", "a", "particular", "room", ".", "See", "also", ":", "API", "reference", "_" ]
python
train
48
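An async call sketch for invite; 'intent' stands in for a configured mautrix-appservice IntentAPI object, and the room and user IDs are placeholders:

    async def invite_helper():
        # check_cache=True skips the HTTP call if local state
        # already shows the user as invited or joined
        await intent.invite('!room:example.org', '@friend:example.org',
                            check_cache=True)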
sdispater/orator
orator/query/builder.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/query/builder.py#L1314-L1341
def aggregate(self, func, *columns): """ Execute an aggregate function against the database :param func: The aggregate function :type func: str :param columns: The columns to execute the fnction for :type columns: tuple :return: The aggregate result :rtype: mixed """ if not columns: columns = ["*"] self.aggregate_ = {"function": func, "columns": columns} previous_columns = self.columns results = self.get(*columns).all() self.aggregate_ = None self.columns = previous_columns if len(results) > 0: return dict((k.lower(), v) for k, v in results[0].items())["aggregate"]
[ "def", "aggregate", "(", "self", ",", "func", ",", "*", "columns", ")", ":", "if", "not", "columns", ":", "columns", "=", "[", "\"*\"", "]", "self", ".", "aggregate_", "=", "{", "\"function\"", ":", "func", ",", "\"columns\"", ":", "columns", "}", "previous_columns", "=", "self", ".", "columns", "results", "=", "self", ".", "get", "(", "*", "columns", ")", ".", "all", "(", ")", "self", ".", "aggregate_", "=", "None", "self", ".", "columns", "=", "previous_columns", "if", "len", "(", "results", ")", ">", "0", ":", "return", "dict", "(", "(", "k", ".", "lower", "(", ")", ",", "v", ")", "for", "k", ",", "v", "in", "results", "[", "0", "]", ".", "items", "(", ")", ")", "[", "\"aggregate\"", "]" ]
Execute an aggregate function against the database

:param func: The aggregate function
:type func: str

:param columns: The columns to execute the function for
:type columns: tuple

:return: The aggregate result
:rtype: mixed
[ "Execute", "an", "aggregate", "function", "against", "the", "database" ]
python
train
25.178571
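A call sketch for aggregate; 'users_query' stands in for an orator query builder already scoped to a table (connection setup omitted). The result is read from the 'aggregate' key because the generated SQL aliases the function call to that name:

    # hypothetical builder obtained elsewhere
    row_count = users_query.aggregate('count', '*')   # SELECT COUNT(*) AS aggregate ...
    max_score = users_query.aggregate('max', 'score')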
guma44/GEOparse
GEOparse/GEOTypes.py
https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L484-L519
def download_SRA(self, email, directory='./', **kwargs): """Download RAW data as SRA file. The files will be downloaded to the sample directory created ad hoc or the directory specified by the parameter. The sample has to come from sequencing eg. mRNA-seq, CLIP etc. An important parameter is a filetype. By default an SRA is accessed by FTP and such file is downloaded. This does not require additional libraries. However in order to produce FASTA of FASTQ files one would need to use SRA-Toolkit. Thus, it is assumed that this library is already installed or it will be installed in the near future. One can immediately specify the download type to fasta or fastq. To see all possible ``**kwargs`` that could be passed to the function see the description of :class:`~GEOparse.sra_downloader.SRADownloader`. Args: email (:obj:`str`): an email (any) - Required by NCBI for access directory (:obj:`str`, optional): The directory to which download the data. Defaults to "./". **kwargs: Arbitrary keyword arguments, see description Returns: :obj:`dict`: A dictionary containing only one key (``SRA``) with the list of downloaded files. Raises: :obj:`TypeError`: Type to download unknown :obj:`NoSRARelationException`: No SRAToolkit :obj:`Exception`: Wrong e-mail :obj:`HTTPError`: Cannot access or connect to DB """ downloader = SRADownloader(self, email, directory, **kwargs) return {"SRA": downloader.download()}
[ "def", "download_SRA", "(", "self", ",", "email", ",", "directory", "=", "'./'", ",", "*", "*", "kwargs", ")", ":", "downloader", "=", "SRADownloader", "(", "self", ",", "email", ",", "directory", ",", "*", "*", "kwargs", ")", "return", "{", "\"SRA\"", ":", "downloader", ".", "download", "(", ")", "}" ]
Download RAW data as SRA file. The files will be downloaded to the sample directory created ad hoc or the directory specified by the parameter. The sample has to come from sequencing e.g. mRNA-seq, CLIP etc. An important parameter is a filetype. By default an SRA is accessed by FTP and such a file is downloaded. This does not require additional libraries. However in order to produce FASTA or FASTQ files one would need to use SRA-Toolkit. Thus, it is assumed that this library is already installed or it will be installed in the near future. One can immediately specify the download type as fasta or fastq. To see all possible ``**kwargs`` that could be passed to the function see the description of :class:`~GEOparse.sra_downloader.SRADownloader`. Args: email (:obj:`str`): an email (any) - Required by NCBI for access directory (:obj:`str`, optional): The directory to which download the data. Defaults to "./". **kwargs: Arbitrary keyword arguments, see description Returns: :obj:`dict`: A dictionary containing only one key (``SRA``) with the list of downloaded files. Raises: :obj:`TypeError`: Type to download unknown :obj:`NoSRARelationException`: No SRAToolkit :obj:`Exception`: Wrong e-mail :obj:`HTTPError`: Cannot access or connect to DB
[ "Download", "RAW", "data", "as", "SRA", "file", "." ]
python
train
46.166667
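A call sketch for download_SRA; 'gsm' stands in for a GEOparse sample object, the e-mail is a placeholder, and the fastq-selecting keyword name is an assumption passed through **kwargs to SRADownloader:

    result = gsm.download_SRA('you@example.org', directory='./sra',
                              filetype='fastq')   # keyword name assumed
    print(result['SRA'])                          # list of downloaded files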
FNNDSC/pfmisc
pfmisc/C_snode.py
https://github.com/FNNDSC/pfmisc/blob/960b4d6135fcc50bed0a8e55db2ab1ddad9b99d8/pfmisc/C_snode.py#L864-L893
def rm(self, name): """ Remove a data analog called 'name'. The 'name' can contain a path specifier. Warning: see http://stackoverflow.com/questions/5844672/delete-an-element-from-a-dictionary deleting from the snode_current changes dictionary contents for any other agents that have references to the same instance. This deletes either directories or files. """ b_OK = False str_here = self.cwd() l_path = name.split('/') if len(l_path) > 1: self.cd('/'.join(l_path[0:-1])) name = l_path[-1] if name in self.snode_current.d_data: del self.snode_current.d_data[name] b_OK = True if name in self.snode_current.d_nodes: del self.snode_current.d_nodes[name] b_OK = True self.cd(str_here) return b_OK
[ "def", "rm", "(", "self", ",", "name", ")", ":", "b_OK", "=", "False", "str_here", "=", "self", ".", "cwd", "(", ")", "l_path", "=", "name", ".", "split", "(", "'/'", ")", "if", "len", "(", "l_path", ")", ">", "1", ":", "self", ".", "cd", "(", "'/'", ".", "join", "(", "l_path", "[", "0", ":", "-", "1", "]", ")", ")", "name", "=", "l_path", "[", "-", "1", "]", "if", "name", "in", "self", ".", "snode_current", ".", "d_data", ":", "del", "self", ".", "snode_current", ".", "d_data", "[", "name", "]", "b_OK", "=", "True", "if", "name", "in", "self", ".", "snode_current", ".", "d_nodes", ":", "del", "self", ".", "snode_current", ".", "d_nodes", "[", "name", "]", "b_OK", "=", "True", "self", ".", "cd", "(", "str_here", ")", "return", "b_OK" ]
Remove a data analog called 'name'. The 'name' can contain a path specifier. Warning: see http://stackoverflow.com/questions/5844672/delete-an-element-from-a-dictionary deleting from the snode_current changes dictionary contents for any other agents that have references to the same instance. This deletes either directories or files.
[ "Remove", "a", "data", "analog", "called", "name", "." ]
python
train
32.966667
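A call sketch for rm; 'tree' stands in for a C_snode tree instance (construction omitted). The path form exercises the cd-then-delete behavior in the record:

    ok = tree.rm('/a/b/payload')   # descends to /a/b, removes 'payload' there
    print(ok)                      # True if a node or data entry was deleted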
push-things/django-th
th_reddit/my_reddit.py
https://github.com/push-things/django-th/blob/86c999d16bcf30b6224206e5b40824309834ac8c/th_reddit/my_reddit.py#L88-L114
def save_data(self, trigger_id, **data): """ let's save the data :param trigger_id: trigger ID from which to save data :param data: the data to check to be used and save :type trigger_id: int :type data: dict :return: the status of the save statement :rtype: boolean """ # convert the format to be released in Markdown status = False data['output_format'] = 'md' title, content = super(ServiceReddit, self).save_data(trigger_id, **data) if self.token: trigger = Reddit.objects.get(trigger_id=trigger_id) if trigger.share_link: status = self.reddit.subreddit(trigger.subreddit).submit(title=title, url=content) else: status = self.reddit.subreddit(trigger.subreddit).submit(title=title, selftext=content) sentence = str('reddit submission {} created').format(title) logger.debug(sentence) else: msg = "no token or link provided for trigger ID {} ".format(trigger_id) logger.critical(msg) update_result(trigger_id, msg=msg, status=False) return status
[ "def", "save_data", "(", "self", ",", "trigger_id", ",", "*", "*", "data", ")", ":", "# convert the format to be released in Markdown", "status", "=", "False", "data", "[", "'output_format'", "]", "=", "'md'", "title", ",", "content", "=", "super", "(", "ServiceReddit", ",", "self", ")", ".", "save_data", "(", "trigger_id", ",", "*", "*", "data", ")", "if", "self", ".", "token", ":", "trigger", "=", "Reddit", ".", "objects", ".", "get", "(", "trigger_id", "=", "trigger_id", ")", "if", "trigger", ".", "share_link", ":", "status", "=", "self", ".", "reddit", ".", "subreddit", "(", "trigger", ".", "subreddit", ")", ".", "submit", "(", "title", "=", "title", ",", "url", "=", "content", ")", "else", ":", "status", "=", "self", ".", "reddit", ".", "subreddit", "(", "trigger", ".", "subreddit", ")", ".", "submit", "(", "title", "=", "title", ",", "selftext", "=", "content", ")", "sentence", "=", "str", "(", "'reddit submission {} created'", ")", ".", "format", "(", "title", ")", "logger", ".", "debug", "(", "sentence", ")", "else", ":", "msg", "=", "\"no token or link provided for trigger ID {} \"", ".", "format", "(", "trigger_id", ")", "logger", ".", "critical", "(", "msg", ")", "update_result", "(", "trigger_id", ",", "msg", "=", "msg", ",", "status", "=", "False", ")", "return", "status" ]
let's save the data :param trigger_id: trigger ID from which to save data :param data: the data to check to be used and save :type trigger_id: int :type data: dict :return: the status of the save statement :rtype: boolean
[ "let", "s", "save", "the", "data", ":", "param", "trigger_id", ":", "trigger", "ID", "from", "which", "to", "save", "data", ":", "param", "data", ":", "the", "data", "to", "check", "to", "be", "used", "and", "save", ":", "type", "trigger_id", ":", "int", ":", "type", "data", ":", "dict", ":", "return", ":", "the", "status", "of", "the", "save", "statement", ":", "rtype", ":", "boolean" ]
python
train
44.740741
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L421-L437
def _create_variable(orig_v, step, variables): """Create a new output variable, potentially over-writing existing or creating new. """ # get current variable, and convert to be the output of our process step try: v = _get_variable(orig_v["id"], variables) except ValueError: v = copy.deepcopy(orig_v) if not isinstance(v["id"], six.string_types): v["id"] = _get_string_vid(v["id"]) for key, val in orig_v.items(): if key not in ["id", "type"]: v[key] = val if orig_v.get("type") != "null": v["type"] = orig_v["type"] v["id"] = "%s/%s" % (step.name, get_base_id(v["id"])) return v
[ "def", "_create_variable", "(", "orig_v", ",", "step", ",", "variables", ")", ":", "# get current variable, and convert to be the output of our process step", "try", ":", "v", "=", "_get_variable", "(", "orig_v", "[", "\"id\"", "]", ",", "variables", ")", "except", "ValueError", ":", "v", "=", "copy", ".", "deepcopy", "(", "orig_v", ")", "if", "not", "isinstance", "(", "v", "[", "\"id\"", "]", ",", "six", ".", "string_types", ")", ":", "v", "[", "\"id\"", "]", "=", "_get_string_vid", "(", "v", "[", "\"id\"", "]", ")", "for", "key", ",", "val", "in", "orig_v", ".", "items", "(", ")", ":", "if", "key", "not", "in", "[", "\"id\"", ",", "\"type\"", "]", ":", "v", "[", "key", "]", "=", "val", "if", "orig_v", ".", "get", "(", "\"type\"", ")", "!=", "\"null\"", ":", "v", "[", "\"type\"", "]", "=", "orig_v", "[", "\"type\"", "]", "v", "[", "\"id\"", "]", "=", "\"%s/%s\"", "%", "(", "step", ".", "name", ",", "get_base_id", "(", "v", "[", "\"id\"", "]", ")", ")", "return", "v" ]
Create a new output variable, potentially over-writing existing or creating new.
[ "Create", "a", "new", "output", "variable", "potentially", "over", "-", "writing", "existing", "or", "creating", "new", "." ]
python
train
39
jantman/awslimitchecker
awslimitchecker/trustedadvisor.py
https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/awslimitchecker/trustedadvisor.py#L248-L291
def _get_refreshed_check_result(self, check_id): """ Given the ``check_id``, return the dict of Trusted Advisor check results. This handles refreshing the Trusted Advisor check, if desired, according to ``self.refresh_mode`` and ``self.refresh_timeout``. :param check_id: the Trusted Advisor check ID :type check_id: str :returns: dict check result. The return value of :py:meth:`Support.Client.describe_trusted_advisor_check_result` :rtype: dict """ # handle a refresh_mode of None right off the bat if self.refresh_mode is None: logger.info("Not refreshing Trusted Advisor check (refresh mode " "is None)") return self._get_check_result(check_id)[0] logger.debug("Handling refresh of check: %s", check_id) # if we want to refresh, step 1 is to see if we can yet... if not self._can_refresh_check(check_id): return self._get_check_result(check_id)[0] # either it's not too soon to refresh, or we have no idea... if isinstance(self.refresh_mode, type(1)): # mode is an int, check the last refresh time and compare checks, check_datetime = self._get_check_result(check_id) logger.debug('ta_refresh_mode older; check last refresh: %s; ' 'threshold=%d seconds', check_datetime, self.refresh_mode) if check_datetime >= datetime.now(utc) - timedelta( seconds=self.refresh_mode): logger.warning('Trusted Advisor check %s last refresh time ' 'of %s is newer than refresh threshold of %d ' 'seconds.', check_id, check_datetime, self.refresh_mode) return self._get_check_result(check_id)[0] # do the refresh logger.info("Refreshing Trusted Advisor check: %s", check_id) self.conn.refresh_trusted_advisor_check(checkId=check_id) # if mode isn't trigger, wait for refresh up to timeout if self.refresh_mode == 'trigger': result = self._get_check_result(check_id)[0] else: result = self._poll_for_refresh(check_id) return result
[ "def", "_get_refreshed_check_result", "(", "self", ",", "check_id", ")", ":", "# handle a refresh_mode of None right off the bat", "if", "self", ".", "refresh_mode", "is", "None", ":", "logger", ".", "info", "(", "\"Not refreshing Trusted Advisor check (refresh mode \"", "\"is None)\"", ")", "return", "self", ".", "_get_check_result", "(", "check_id", ")", "[", "0", "]", "logger", ".", "debug", "(", "\"Handling refresh of check: %s\"", ",", "check_id", ")", "# if we want to refresh, step 1 is to see if we can yet...", "if", "not", "self", ".", "_can_refresh_check", "(", "check_id", ")", ":", "return", "self", ".", "_get_check_result", "(", "check_id", ")", "[", "0", "]", "# either it's not too soon to refresh, or we have no idea...", "if", "isinstance", "(", "self", ".", "refresh_mode", ",", "type", "(", "1", ")", ")", ":", "# mode is an int, check the last refresh time and compare", "checks", ",", "check_datetime", "=", "self", ".", "_get_check_result", "(", "check_id", ")", "logger", ".", "debug", "(", "'ta_refresh_mode older; check last refresh: %s; '", "'threshold=%d seconds'", ",", "check_datetime", ",", "self", ".", "refresh_mode", ")", "if", "check_datetime", ">=", "datetime", ".", "now", "(", "utc", ")", "-", "timedelta", "(", "seconds", "=", "self", ".", "refresh_mode", ")", ":", "logger", ".", "warning", "(", "'Trusted Advisor check %s last refresh time '", "'of %s is newer than refresh threshold of %d '", "'seconds.'", ",", "check_id", ",", "check_datetime", ",", "self", ".", "refresh_mode", ")", "return", "self", ".", "_get_check_result", "(", "check_id", ")", "[", "0", "]", "# do the refresh", "logger", ".", "info", "(", "\"Refreshing Trusted Advisor check: %s\"", ",", "check_id", ")", "self", ".", "conn", ".", "refresh_trusted_advisor_check", "(", "checkId", "=", "check_id", ")", "# if mode isn't trigger, wait for refresh up to timeout", "if", "self", ".", "refresh_mode", "==", "'trigger'", ":", "result", "=", "self", ".", "_get_check_result", "(", "check_id", ")", "[", "0", "]", "else", ":", "result", "=", "self", ".", "_poll_for_refresh", "(", "check_id", ")", "return", "result" ]
Given the ``check_id``, return the dict of Trusted Advisor check results. This handles refreshing the Trusted Advisor check, if desired, according to ``self.refresh_mode`` and ``self.refresh_timeout``. :param check_id: the Trusted Advisor check ID :type check_id: str :returns: dict check result. The return value of :py:meth:`Support.Client.describe_trusted_advisor_check_result` :rtype: dict
[ "Given", "the", "check_id", "return", "the", "dict", "of", "Trusted", "Advisor", "check", "results", ".", "This", "handles", "refreshing", "the", "Trusted", "Advisor", "check", "if", "desired", "according", "to", "self", ".", "refresh_mode", "and", "self", ".", "refresh_timeout", "." ]
python
train
52.159091
saltstack/salt
salt/output/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/__init__.py#L217-L222
def html_format(data, out, opts=None, **kwargs): ''' Return the formatted string as HTML. ''' ansi_escaped_string = string_format(data, out, opts, **kwargs) return ansi_escaped_string.replace(' ', '&nbsp;').replace('\n', '<br />')
[ "def", "html_format", "(", "data", ",", "out", ",", "opts", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ansi_escaped_string", "=", "string_format", "(", "data", ",", "out", ",", "opts", ",", "*", "*", "kwargs", ")", "return", "ansi_escaped_string", ".", "replace", "(", "' '", ",", "'&nbsp;'", ")", ".", "replace", "(", "'\\n'", ",", "'<br />'", ")" ]
Return the formatted string as HTML.
[ "Return", "the", "formatted", "string", "as", "HTML", "." ]
python
train
40.833333
proycon/pynlpl
pynlpl/textprocessors.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/textprocessors.py#L317-L386
def tokenize(text, regexps=TOKENIZERRULES): """Tokenizes a string and returns a list of tokens :param text: The text to tokenise :type text: string :param regexps: Regular expressions to use as tokeniser rules in tokenisation (default=_pynlpl.textprocessors.TOKENIZERRULES_) :type regexps: Tuple/list of regular expressions to use in tokenisation :rtype: Returns a list of tokens Examples: >>> for token in tokenize("This is a test."): ... print(token) This is a test . """ for i,regexp in list(enumerate(regexps)): if isstring(regexp): regexps[i] = re.compile(regexp) tokens = [] begin = 0 for i, c in enumerate(text): if begin > i: continue elif i == begin: m = False for regexp in regexps: m = regexp.findall(text[i:i+300]) if m: tokens.append(m[0]) begin = i + len(m[0]) break if m: continue if c in string.punctuation or c in WHITESPACE: prev = text[i-1] if i > 0 else "" next = text[i+1] if i < len(text)-1 else "" if (c == '.' or c == ',') and prev.isdigit() and next.isdigit(): #punctuation in between numbers, keep as one token pass elif (c == "'" or c == "`") and prev.isalpha() and next.isalpha(): #quote in between chars, keep... pass elif c not in WHITESPACE and next == c: #group clusters of identical punctuation together continue elif c == '\r' and prev == '\n': #ignore begin = i+1 continue else: token = text[begin:i] if token: tokens.append(token) if c not in WHITESPACE: tokens.append(c) #anything but spaces and newlines (i.e. punctuation) counts as a token too begin = i + 1 #set the begin cursor if begin <= len(text) - 1: token = text[begin:] tokens.append(token) return tokens
[ "def", "tokenize", "(", "text", ",", "regexps", "=", "TOKENIZERRULES", ")", ":", "for", "i", ",", "regexp", "in", "list", "(", "enumerate", "(", "regexps", ")", ")", ":", "if", "isstring", "(", "regexp", ")", ":", "regexps", "[", "i", "]", "=", "re", ".", "compile", "(", "regexp", ")", "tokens", "=", "[", "]", "begin", "=", "0", "for", "i", ",", "c", "in", "enumerate", "(", "text", ")", ":", "if", "begin", ">", "i", ":", "continue", "elif", "i", "==", "begin", ":", "m", "=", "False", "for", "regexp", "in", "regexps", ":", "m", "=", "regexp", ".", "findall", "(", "text", "[", "i", ":", "i", "+", "300", "]", ")", "if", "m", ":", "tokens", ".", "append", "(", "m", "[", "0", "]", ")", "begin", "=", "i", "+", "len", "(", "m", "[", "0", "]", ")", "break", "if", "m", ":", "continue", "if", "c", "in", "string", ".", "punctuation", "or", "c", "in", "WHITESPACE", ":", "prev", "=", "text", "[", "i", "-", "1", "]", "if", "i", ">", "0", "else", "\"\"", "next", "=", "text", "[", "i", "+", "1", "]", "if", "i", "<", "len", "(", "text", ")", "-", "1", "else", "\"\"", "if", "(", "c", "==", "'.'", "or", "c", "==", "','", ")", "and", "prev", ".", "isdigit", "(", ")", "and", "next", ".", "isdigit", "(", ")", ":", "#punctuation in between numbers, keep as one token", "pass", "elif", "(", "c", "==", "\"'\"", "or", "c", "==", "\"`\"", ")", "and", "prev", ".", "isalpha", "(", ")", "and", "next", ".", "isalpha", "(", ")", ":", "#quote in between chars, keep...", "pass", "elif", "c", "not", "in", "WHITESPACE", "and", "next", "==", "c", ":", "#group clusters of identical punctuation together", "continue", "elif", "c", "==", "'\\r'", "and", "prev", "==", "'\\n'", ":", "#ignore", "begin", "=", "i", "+", "1", "continue", "else", ":", "token", "=", "text", "[", "begin", ":", "i", "]", "if", "token", ":", "tokens", ".", "append", "(", "token", ")", "if", "c", "not", "in", "WHITESPACE", ":", "tokens", ".", "append", "(", "c", ")", "#anything but spaces and newlines (i.e. punctuation) counts as a token too", "begin", "=", "i", "+", "1", "#set the begin cursor", "if", "begin", "<=", "len", "(", "text", ")", "-", "1", ":", "token", "=", "text", "[", "begin", ":", "]", "tokens", ".", "append", "(", "token", ")", "return", "tokens" ]
Tokenizes a string and returns a list of tokens :param text: The text to tokenise :type text: string :param regexps: Regular expressions to use as tokeniser rules in tokenisation (default=_pynlpl.textprocessors.TOKENIZERRULES_) :type regexps: Tuple/list of regular expressions to use in tokenisation :rtype: Returns a list of tokens Examples: >>> for token in tokenize("This is a test."): ... print(token) This is a test .
[ "Tokenizes", "a", "string", "and", "returns", "a", "list", "of", "tokens" ]
python
train
30.4
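Beyond the doctest in the record above, the punctuation special cases are worth seeing in action. This sketch assumes `tokenize` is importable as shown in the record's path and that none of the default regexp rules fire on the sample sentence.

```python
from pynlpl.textprocessors import tokenize

# decimal point between digits stays inside the token; a quote between
# letters is kept; trailing punctuation becomes its own token
print(tokenize("It costs 3.14 euros, don't you think?"))
# expected under the default rules:
# ['It', 'costs', '3.14', 'euros', ',', "don't", 'you', 'think', '?']
```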
sys-git/certifiable
certifiable/complex.py
https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/complex.py#L285-L322
def certify_set(
        value,
        certifier=None,
        min_len=None,
        max_len=None,
        include_collections=False,
        required=True,
):
    """
    Certifier for a set.

    :param set value:
        The set to be certified.
    :param func certifier:
        A function to be called on each value in the set to check that it
        is valid.
    :param int min_len:
        The minimum acceptable length for the set. If None, the minimum
        length is not checked.
    :param int max_len:
        The maximum acceptable length for the set. If None, the maximum
        length is not checked.
    :param bool include_collections:
        Include types from collections.
    :param bool required:
        Whether the value can be `None`. Defaults to True.
    :return:
        The certified set.
    :rtype:
        set
    :raises CertifierTypeError:
        The type is invalid
    :raises CertifierValueError:
        The value is invalid
    """
    certify_bool(include_collections, required=True)
    certify_iterable(
        value=value,
        types=tuple([set, MutableSet, Set]) if include_collections else tuple([set]),
        certifier=certifier,
        min_len=min_len,
        max_len=max_len,
        schema=None,
        required=required,
    )
[ "def", "certify_set", "(", "value", ",", "certifier", "=", "None", ",", "min_len", "=", "None", ",", "max_len", "=", "None", ",", "include_collections", "=", "False", ",", "required", "=", "True", ",", ")", ":", "certify_bool", "(", "include_collections", ",", "required", "=", "True", ")", "certify_iterable", "(", "value", "=", "value", ",", "types", "=", "tuple", "(", "[", "set", ",", "MutableSet", ",", "Set", "]", ")", "if", "include_collections", "else", "tuple", "(", "[", "set", "]", ")", ",", "certifier", "=", "certifier", ",", "min_len", "=", "min_len", ",", "max_len", "=", "max_len", ",", "schema", "=", "None", ",", "required", "=", "required", ",", ")" ]
Certifier for a set.

:param set value:
    The set to be certified.
:param func certifier:
    A function to be called on each value in the set to check that it
    is valid.
:param int min_len:
    The minimum acceptable length for the set. If None, the minimum
    length is not checked.
:param int max_len:
    The maximum acceptable length for the set. If None, the maximum
    length is not checked.
:param bool include_collections:
    Include types from collections.
:param bool required:
    Whether the value can be `None`. Defaults to True.
:return:
    The certified set.
:rtype:
    set
:raises CertifierTypeError:
    The type is invalid
:raises CertifierValueError:
    The value is invalid
[ "Certifier", "for", "a", "set", "." ]
python
train
31.447368
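A usage sketch for the certifier above; the import path is inferred from the record's file location, and the per-value certifier is a hypothetical stand-in.

```python
from certifiable.complex import certify_set  # path inferred from the record above

def positive(value):
    # hypothetical per-value certifier: raise on invalid members
    if value <= 0:
        raise ValueError("value must be positive")

certify_set({1, 2, 3}, certifier=positive, min_len=1, max_len=5)  # passes silently

try:
    certify_set([1, 2, 3])  # a list is not a set
except Exception as exc:  # expect CertifierTypeError per the docstring
    print(type(exc).__name__)
```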
knipknap/exscript
Exscript/queue.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/queue.py#L554-L575
def destroy(self, force=False):
        """
        Like shutdown(), but also removes all accounts, hosts, etc., and
        does not restart the queue. In other words, the queue can no longer
        be used after calling this method.

        :type force: bool
        :param force: When True, do not wait until all jobs were processed.
        """
        try:
            if not force:
                self.join()
        finally:
            self._dbg(2, 'Destroying queue...')
            self.workqueue.destroy()
            self.account_manager.reset()
            self.completed = 0
            self.total = 0
            self.failed = 0
            self.status_bar_length = 0
            self._dbg(2, 'Queue destroyed.')
            self._del_status_bar()
[ "def", "destroy", "(", "self", ",", "force", "=", "False", ")", ":", "try", ":", "if", "not", "force", ":", "self", ".", "join", "(", ")", "finally", ":", "self", ".", "_dbg", "(", "2", ",", "'Destroying queue...'", ")", "self", ".", "workqueue", ".", "destroy", "(", ")", "self", ".", "account_manager", ".", "reset", "(", ")", "self", ".", "completed", "=", "0", "self", ".", "total", "=", "0", "self", ".", "failed", "=", "0", "self", ".", "status_bar_length", "=", "0", "self", ".", "_dbg", "(", "2", ",", "'Queue destroyed.'", ")", "self", ".", "_del_status_bar", "(", ")" ]
Like shutdown(), but also removes all accounts, hosts, etc., and
does not restart the queue. In other words, the queue can no longer
be used after calling this method.

:type force: bool
:param force: When True, do not wait until all jobs were processed.
[ "Like", "shutdown", "()", "but", "also", "removes", "all", "accounts", "hosts", "etc", ".", "and", "does", "not", "restart", "the", "queue", ".", "In", "other", "words", "the", "queue", "can", "no", "longer", "be", "used", "after", "calling", "this", "method", "." ]
python
train
33.545455
edublancas/sklearn-evaluation
sklearn_evaluation/plot/roc.py
https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/plot/roc.py#L72-L98
def _roc(y_true, y_score, ax=None):
    """
    Plot ROC curve for binary classification.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Correct target values (ground truth).
    y_score : array-like, shape = [n_samples]
        Target scores (estimator predictions).
    ax: matplotlib Axes
        Axes object to draw the plot onto (required; the body calls
        ax.plot directly and does not fall back to the current Axes)

    Returns
    -------
    ax: matplotlib Axes
        Axes containing the plot

    """
    # check dimensions
    fpr, tpr, _ = roc_curve(y_true, y_score)
    roc_auc = auc(fpr, tpr)

    ax.plot(fpr, tpr, label=('ROC curve (area = {0:0.2f})'.format(roc_auc)))
    _set_ax_settings(ax)
    return ax
[ "def", "_roc", "(", "y_true", ",", "y_score", ",", "ax", "=", "None", ")", ":", "# check dimensions", "fpr", ",", "tpr", ",", "_", "=", "roc_curve", "(", "y_true", ",", "y_score", ")", "roc_auc", "=", "auc", "(", "fpr", ",", "tpr", ")", "ax", ".", "plot", "(", "fpr", ",", "tpr", ",", "label", "=", "(", "'ROC curve (area = {0:0.2f})'", ".", "format", "(", "roc_auc", ")", ")", ")", "_set_ax_settings", "(", "ax", ")", "return", "ax" ]
Plot ROC curve for binary classification.

Parameters
----------
y_true : array-like, shape = [n_samples]
    Correct target values (ground truth).
y_score : array-like, shape = [n_samples]
    Target scores (estimator predictions).
ax: matplotlib Axes
    Axes object to draw the plot onto (required; the body calls
    ax.plot directly and does not fall back to the current Axes)

Returns
-------
ax: matplotlib Axes
    Axes containing the plot
[ "Plot", "ROC", "curve", "for", "binary", "classification", "." ]
python
train
25.111111
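A plotting sketch for the helper above. Since the body calls `ax.plot` directly, the sketch always supplies an Axes; it assumes `_roc` (a private helper in sklearn_evaluation.plot.roc) is in scope.

```python
import matplotlib.pyplot as plt

y_true = [0, 0, 1, 1]
y_score = [0.1, 0.4, 0.35, 0.8]

fig, ax = plt.subplots()
_roc(y_true, y_score, ax=ax)  # draws the ROC curve with its AUC in the legend
plt.show()
```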
gc3-uzh-ch/elasticluster
elasticluster/subcommands.py
https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/subcommands.py#L263-L282
def execute(self):
        """
        Stops the cluster if it's running.
        """
        cluster_name = self.params.cluster
        creator = make_creator(self.params.config,
                               storage_path=self.params.storage)
        try:
            cluster = creator.load_cluster(cluster_name)
        except (ClusterNotFound, ConfigurationError) as err:
            log.error("Cannot stop cluster `%s`: %s", cluster_name, err)
            return os.EX_NOINPUT

        if not self.params.yes:
            confirm_or_abort(
                "Do you really want to stop cluster `{cluster_name}`?"
                .format(cluster_name=cluster_name),
                msg="Aborting upon user request.")

        print("Destroying cluster `%s` ..." % cluster_name)
        cluster.stop(force=self.params.force, wait=self.params.wait)
[ "def", "execute", "(", "self", ")", ":", "cluster_name", "=", "self", ".", "params", ".", "cluster", "creator", "=", "make_creator", "(", "self", ".", "params", ".", "config", ",", "storage_path", "=", "self", ".", "params", ".", "storage", ")", "try", ":", "cluster", "=", "creator", ".", "load_cluster", "(", "cluster_name", ")", "except", "(", "ClusterNotFound", ",", "ConfigurationError", ")", "as", "err", ":", "log", ".", "error", "(", "\"Cannot stop cluster `%s`: %s\"", ",", "cluster_name", ",", "err", ")", "return", "os", ".", "EX_NOINPUT", "if", "not", "self", ".", "params", ".", "yes", ":", "confirm_or_abort", "(", "\"Do you want really want to stop cluster `{cluster_name}`?\"", ".", "format", "(", "cluster_name", "=", "cluster_name", ")", ",", "msg", "=", "\"Aborting upon user request.\"", ")", "print", "(", "\"Destroying cluster `%s` ...\"", "%", "cluster_name", ")", "cluster", ".", "stop", "(", "force", "=", "self", ".", "params", ".", "force", ",", "wait", "=", "self", ".", "params", ".", "wait", ")" ]
Stops the cluster if it's running.
[ "Stops", "the", "cluster", "if", "it", "s", "running", "." ]
python
train
41.65
fprimex/zdeskcfg
zdeskcfg.py
https://github.com/fprimex/zdeskcfg/blob/4283733123a62c0ab7679ca8aba0d4b02e6bb8d7/zdeskcfg.py#L165-L171
def get_ini_config(config=os.path.join(os.path.expanduser('~'), '.zdeskcfg'), default_section=None, section=None): """This is a convenience function for getting the zdesk configuration from an ini file without the need to decorate and call your own function. Handy when using zdesk and zdeskcfg from the interactive prompt.""" plac_ini.call(__placeholder__, config=config, default_section=default_section) return __placeholder__.getconfig(section)
[ "def", "get_ini_config", "(", "config", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", ",", "'.zdeskcfg'", ")", ",", "default_section", "=", "None", ",", "section", "=", "None", ")", ":", "plac_ini", ".", "call", "(", "__placeholder__", ",", "config", "=", "config", ",", "default_section", "=", "default_section", ")", "return", "__placeholder__", ".", "getconfig", "(", "section", ")" ]
This is a convenience function for getting the zdesk configuration from an ini file without the need to decorate and call your own function. Handy when using zdesk and zdeskcfg from the interactive prompt.
[ "This", "is", "a", "convenience", "function", "for", "getting", "the", "zdesk", "configuration", "from", "an", "ini", "file", "without", "the", "need", "to", "decorate", "and", "call", "your", "own", "function", ".", "Handy", "when", "using", "zdesk", "and", "zdeskcfg", "from", "the", "interactive", "prompt", "." ]
python
train
67
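A usage sketch per the docstring above; the returned keys depend on the contents of the ini file, and the section name shown is an assumption.

```python
import zdeskcfg

# read ~/.zdeskcfg and return the (assumed) 'zdesk' section as a dict
cfg = zdeskcfg.get_ini_config(section='zdesk')
print(cfg)
```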
biplap-sarkar/pylimit
pylimit/pyratelimit.py
https://github.com/biplap-sarkar/pylimit/blob/d2170a8c02a9be083f37c9e4ec1e28700a33d64e/pylimit/pyratelimit.py#L53-L83
def __can_attempt(self, namespace: str, add_attempt=True) -> bool:
        """
        Checks whether a namespace is rate limited, including or excluding the current call

        :param namespace: Rate limiting namespace
        :type namespace: str

        :param add_attempt: Boolean value indicating if the current call should be considered as an attempt or not
        :type add_attempt: bool

        :return: Returns true if attempt can go ahead under current rate limiting rules, false otherwise
        """
        can_attempt = False
        if not PyRateLimit.redis_helper:
            raise PyRateLimitException("redis connection information not provided")
        connection = PyRateLimit.redis_helper.get_atomic_connection()
        current_time = int(round(time.time() * 1000000))
        old_time_limit = current_time - (self.period * 1000000)
        connection.zremrangebyscore(namespace, 0, old_time_limit)
        connection.expire(namespace, self.period)
        if add_attempt:
            current_count = 0
            connection.zadd(namespace, current_time, current_time)
        else:
            current_count = 1    # initialize at 1 to compensate for this attempt not being counted
        connection.zcard(namespace)
        redis_result = connection.execute()
        current_count += redis_result[-1]

        if current_count <= self.limit:
            can_attempt = True
        return can_attempt
[ "def", "__can_attempt", "(", "self", ",", "namespace", ":", "str", ",", "add_attempt", "=", "True", ")", "->", "bool", ":", "can_attempt", "=", "False", "if", "not", "PyRateLimit", ".", "redis_helper", ":", "raise", "PyRateLimitException", "(", "\"redis connection information not provided\"", ")", "connection", "=", "PyRateLimit", ".", "redis_helper", ".", "get_atomic_connection", "(", ")", "current_time", "=", "int", "(", "round", "(", "time", ".", "time", "(", ")", "*", "1000000", ")", ")", "old_time_limit", "=", "current_time", "-", "(", "self", ".", "period", "*", "1000000", ")", "connection", ".", "zremrangebyscore", "(", "namespace", ",", "0", ",", "old_time_limit", ")", "connection", ".", "expire", "(", "namespace", ",", "self", ".", "period", ")", "if", "add_attempt", ":", "current_count", "=", "0", "connection", ".", "zadd", "(", "namespace", ",", "current_time", ",", "current_time", ")", "else", ":", "current_count", "=", "1", "# initialize at 1 to compensate the case that this attempt is not getting counted", "connection", ".", "zcard", "(", "namespace", ")", "redis_result", "=", "connection", ".", "execute", "(", ")", "current_count", "+=", "redis_result", "[", "-", "1", "]", "if", "current_count", "<=", "self", ".", "limit", ":", "can_attempt", "=", "True", "return", "can_attempt" ]
Checks whether a namespace is rate limited, including or excluding the current call

:param namespace: Rate limiting namespace
:type namespace: str

:param add_attempt: Boolean value indicating if the current call should be considered as an attempt or not
:type add_attempt: bool

:return: Returns true if attempt can go ahead under current rate limiting rules, false otherwise
[ "Checks", "if", "a", "namespace", "is", "rate", "limited", "or", "not", "with", "including", "/", "excluding", "the", "current", "call" ]
python
train
45.741935
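The Redis pipeline above implements a sliding-window log: trim old timestamps, optionally record the current one, then count what remains. A standalone in-memory sketch of the same idea (illustrative only; no Redis required):

```python
import time

class SlidingWindowLimiter:
    """In-memory stand-in for the Redis sorted set used above."""

    def __init__(self, period_s, limit):
        self.period_s = period_s
        self.limit = limit
        self.attempts = []  # timestamps, the analogue of the zset members

    def attempt(self):
        now = time.time()
        cutoff = now - self.period_s
        # drop entries outside the window, like zremrangebyscore(0, old_time_limit)
        self.attempts = [t for t in self.attempts if t > cutoff]
        self.attempts.append(now)  # like zadd(namespace, now, now)
        return len(self.attempts) <= self.limit  # like the zcard comparison

limiter = SlidingWindowLimiter(period_s=1, limit=3)
print([limiter.attempt() for _ in range(5)])  # [True, True, True, False, False]
```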
asmodehn/filefinder2
filefinder2/_fileloader2.py
https://github.com/asmodehn/filefinder2/blob/3f0b211ce11a34562e2a2160e039ae5290b68d6b/filefinder2/_fileloader2.py#L273-L306
def load_module(self, name): """Load a namespace module as if coming from an empty file. """ _verbose_message('namespace module loaded with path {!r}', self.path) # Adjusting code from LoaderBasics if name in sys.modules: mod = sys.modules[name] self.exec_module(mod) # In this case we do not want to remove the module in case of error # Ref : https://docs.python.org/3/reference/import.html#loaders else: try: # Building custom spec and loading as in _LoaderBasics... spec = ModuleSpec(name, self, origin='namespace', is_package=True) spec.submodule_search_locations = self.path # this will call create_module and also initialize the module properly (like for py3) mod = module_from_spec(spec) # as per https://docs.python.org/3/reference/import.html#loaders assert mod.__name__ in sys.modules self.exec_module(mod) # We don't ensure that the import-related module attributes get # set in the sys.modules replacement case. Such modules are on # their own. except: # as per https://docs.python.org/3/reference/import.html#loaders if name in sys.modules: del sys.modules[name] raise return sys.modules[name]
[ "def", "load_module", "(", "self", ",", "name", ")", ":", "_verbose_message", "(", "'namespace module loaded with path {!r}'", ",", "self", ".", "path", ")", "# Adjusting code from LoaderBasics", "if", "name", "in", "sys", ".", "modules", ":", "mod", "=", "sys", ".", "modules", "[", "name", "]", "self", ".", "exec_module", "(", "mod", ")", "# In this case we do not want to remove the module in case of error", "# Ref : https://docs.python.org/3/reference/import.html#loaders", "else", ":", "try", ":", "# Building custom spec and loading as in _LoaderBasics...", "spec", "=", "ModuleSpec", "(", "name", ",", "self", ",", "origin", "=", "'namespace'", ",", "is_package", "=", "True", ")", "spec", ".", "submodule_search_locations", "=", "self", ".", "path", "# this will call create_module and also initialize the module properly (like for py3)", "mod", "=", "module_from_spec", "(", "spec", ")", "# as per https://docs.python.org/3/reference/import.html#loaders", "assert", "mod", ".", "__name__", "in", "sys", ".", "modules", "self", ".", "exec_module", "(", "mod", ")", "# We don't ensure that the import-related module attributes get", "# set in the sys.modules replacement case. Such modules are on", "# their own.", "except", ":", "# as per https://docs.python.org/3/reference/import.html#loaders", "if", "name", "in", "sys", ".", "modules", ":", "del", "sys", ".", "modules", "[", "name", "]", "raise", "return", "sys", ".", "modules", "[", "name", "]" ]
Load a namespace module as if coming from an empty file.
[ "Load", "a", "namespace", "module", "as", "if", "coming", "from", "an", "empty", "file", "." ]
python
train
42.676471
boundary/pulse-api-cli
boundary/measurement_get.py
https://github.com/boundary/pulse-api-cli/blob/b01ca65b442eed19faac309c9d62bbc3cb2c098f/boundary/measurement_get.py#L48-L75
def add_arguments(self): """ Add specific command line arguments for this command """ # Call our parent to add the default arguments ApiCli.add_arguments(self) # Command specific arguments self.parser.add_argument('-f', '--format', dest='format', action='store', required=False, choices=['csv', 'json', 'raw', 'xml'], help='Output format. Default is raw') self.parser.add_argument('-n', '--name', dest='metric_name', action='store', required=True, metavar="metric_name", help='Metric identifier') self.parser.add_argument('-g', '--aggregate', dest='aggregate', action='store', required=False, choices=['sum', 'avg', 'max', 'min'], help='Metric default aggregate') self.parser.add_argument('-r', '--sample', dest='sample', action='store', type=int, metavar="sample", help='Down sample rate sample in seconds') self.parser.add_argument('-s', '--source', dest='source', action='store', metavar="source", required=True, help='Source of measurement') self.parser.add_argument('-b', '--start', dest='start', action='store', required=True, metavar="start", help='Start of time range as ISO 8601 string or epoch seconds') self.parser.add_argument('-d', '--end', dest='end', action='store', metavar="end", required=False, help='End of time range as ISO 8601 string or epoch seconds') self.parser.add_argument('-o', '--date-format', dest='date_format', action='store', metavar="format", required=False, help='For CSV, JSON, and XML output formats dates (see Python date.strftime). ' + 'Default format is %%s')
[ "def", "add_arguments", "(", "self", ")", ":", "# Call our parent to add the default arguments", "ApiCli", ".", "add_arguments", "(", "self", ")", "# Command specific arguments", "self", ".", "parser", ".", "add_argument", "(", "'-f'", ",", "'--format'", ",", "dest", "=", "'format'", ",", "action", "=", "'store'", ",", "required", "=", "False", ",", "choices", "=", "[", "'csv'", ",", "'json'", ",", "'raw'", ",", "'xml'", "]", ",", "help", "=", "'Output format. Default is raw'", ")", "self", ".", "parser", ".", "add_argument", "(", "'-n'", ",", "'--name'", ",", "dest", "=", "'metric_name'", ",", "action", "=", "'store'", ",", "required", "=", "True", ",", "metavar", "=", "\"metric_name\"", ",", "help", "=", "'Metric identifier'", ")", "self", ".", "parser", ".", "add_argument", "(", "'-g'", ",", "'--aggregate'", ",", "dest", "=", "'aggregate'", ",", "action", "=", "'store'", ",", "required", "=", "False", ",", "choices", "=", "[", "'sum'", ",", "'avg'", ",", "'max'", ",", "'min'", "]", ",", "help", "=", "'Metric default aggregate'", ")", "self", ".", "parser", ".", "add_argument", "(", "'-r'", ",", "'--sample'", ",", "dest", "=", "'sample'", ",", "action", "=", "'store'", ",", "type", "=", "int", ",", "metavar", "=", "\"sample\"", ",", "help", "=", "'Down sample rate sample in seconds'", ")", "self", ".", "parser", ".", "add_argument", "(", "'-s'", ",", "'--source'", ",", "dest", "=", "'source'", ",", "action", "=", "'store'", ",", "metavar", "=", "\"source\"", ",", "required", "=", "True", ",", "help", "=", "'Source of measurement'", ")", "self", ".", "parser", ".", "add_argument", "(", "'-b'", ",", "'--start'", ",", "dest", "=", "'start'", ",", "action", "=", "'store'", ",", "required", "=", "True", ",", "metavar", "=", "\"start\"", ",", "help", "=", "'Start of time range as ISO 8601 string or epoch seconds'", ")", "self", ".", "parser", ".", "add_argument", "(", "'-d'", ",", "'--end'", ",", "dest", "=", "'end'", ",", "action", "=", "'store'", ",", "metavar", "=", "\"end\"", ",", "required", "=", "False", ",", "help", "=", "'End of time range as ISO 8601 string or epoch seconds'", ")", "self", ".", "parser", ".", "add_argument", "(", "'-o'", ",", "'--date-format'", ",", "dest", "=", "'date_format'", ",", "action", "=", "'store'", ",", "metavar", "=", "\"format\"", ",", "required", "=", "False", ",", "help", "=", "'For CSV, JSON, and XML output formats dates (see Python date.strftime). '", "+", "'Default format is %%s'", ")" ]
Add specific command line arguments for this command
[ "Add", "specific", "command", "line", "arguments", "for", "this", "command" ]
python
test
68.75
FlorianLudwig/rueckenwind
rw/cli.py
https://github.com/FlorianLudwig/rueckenwind/blob/47fec7af05ea10b3cf6d59b9f7bf4d12c02dddea/rw/cli.py#L53-L71
def serv(args): """Serve a rueckenwind application""" if not args.no_debug: tornado.autoreload.start() extra = [] if sys.stdout.isatty(): # set terminal title sys.stdout.write('\x1b]2;rw: {}\x07'.format(' '.join(sys.argv[2:]))) if args.cfg: extra.append(os.path.abspath(args.cfg)) listen = (int(args.port), args.address) ioloop = tornado.ioloop.IOLoop.instance() setup_app(app=args.MODULE, extra_configs=extra, ioloop=ioloop, listen=listen) ioloop.start()
[ "def", "serv", "(", "args", ")", ":", "if", "not", "args", ".", "no_debug", ":", "tornado", ".", "autoreload", ".", "start", "(", ")", "extra", "=", "[", "]", "if", "sys", ".", "stdout", ".", "isatty", "(", ")", ":", "# set terminal title", "sys", ".", "stdout", ".", "write", "(", "'\\x1b]2;rw: {}\\x07'", ".", "format", "(", "' '", ".", "join", "(", "sys", ".", "argv", "[", "2", ":", "]", ")", ")", ")", "if", "args", ".", "cfg", ":", "extra", ".", "append", "(", "os", ".", "path", ".", "abspath", "(", "args", ".", "cfg", ")", ")", "listen", "=", "(", "int", "(", "args", ".", "port", ")", ",", "args", ".", "address", ")", "ioloop", "=", "tornado", ".", "ioloop", ".", "IOLoop", ".", "instance", "(", ")", "setup_app", "(", "app", "=", "args", ".", "MODULE", ",", "extra_configs", "=", "extra", ",", "ioloop", "=", "ioloop", ",", "listen", "=", "listen", ")", "ioloop", ".", "start", "(", ")" ]
Serve a rueckenwind application
[ "Serve", "a", "rueckenwind", "application" ]
python
train
27.526316
brmscheiner/ideogram
ideogram/converter.py
https://github.com/brmscheiner/ideogram/blob/422bf566c51fd56f7bbb6e75b16d18d52b4c7568/ideogram/converter.py#L239-L272
def matchImpObjStrs(fdefs,imp_obj_strs,cdefs): '''returns imp_funcs, a dictionary with filepath keys that contains lists of function definition nodes that were imported using from __ import __ style syntax. also returns imp_classes, which is the same for class definition nodes.''' imp_funcs=dict() imp_classes=dict() for source in imp_obj_strs: if not imp_obj_strs[source]: continue imp_funcs[source]=[] imp_classes[source]=[] for (mod,func) in imp_obj_strs[source]: if mod not in fdefs: #print(mod+" is not part of the project.") continue if func=='*': all_fns = [x for x in fdefs[mod] if x.name!='body'] imp_funcs[source] += all_fns all_cls = [x for x in cdefs[mod]] imp_classes[source] += all_cls else: fn_node = [x for x in fdefs[mod] if x.name==func] cls_node = [x for x in cdefs[mod] if x.name==func] #assert len(fn_node) in [1,0] #assert len(cls_node) in [1,0] if cls_node: imp_classes[source] += cls_node if fn_node: imp_funcs[source] += fn_node if not fn_node and not cls_node: pass #print(func+' not found in function and class definitions.') return imp_funcs,imp_classes
[ "def", "matchImpObjStrs", "(", "fdefs", ",", "imp_obj_strs", ",", "cdefs", ")", ":", "imp_funcs", "=", "dict", "(", ")", "imp_classes", "=", "dict", "(", ")", "for", "source", "in", "imp_obj_strs", ":", "if", "not", "imp_obj_strs", "[", "source", "]", ":", "continue", "imp_funcs", "[", "source", "]", "=", "[", "]", "imp_classes", "[", "source", "]", "=", "[", "]", "for", "(", "mod", ",", "func", ")", "in", "imp_obj_strs", "[", "source", "]", ":", "if", "mod", "not", "in", "fdefs", ":", "#print(mod+\" is not part of the project.\")", "continue", "if", "func", "==", "'*'", ":", "all_fns", "=", "[", "x", "for", "x", "in", "fdefs", "[", "mod", "]", "if", "x", ".", "name", "!=", "'body'", "]", "imp_funcs", "[", "source", "]", "+=", "all_fns", "all_cls", "=", "[", "x", "for", "x", "in", "cdefs", "[", "mod", "]", "]", "imp_classes", "[", "source", "]", "+=", "all_cls", "else", ":", "fn_node", "=", "[", "x", "for", "x", "in", "fdefs", "[", "mod", "]", "if", "x", ".", "name", "==", "func", "]", "cls_node", "=", "[", "x", "for", "x", "in", "cdefs", "[", "mod", "]", "if", "x", ".", "name", "==", "func", "]", "#assert len(fn_node) in [1,0]", "#assert len(cls_node) in [1,0]", "if", "cls_node", ":", "imp_classes", "[", "source", "]", "+=", "cls_node", "if", "fn_node", ":", "imp_funcs", "[", "source", "]", "+=", "fn_node", "if", "not", "fn_node", "and", "not", "cls_node", ":", "pass", "#print(func+' not found in function and class definitions.')", "return", "imp_funcs", ",", "imp_classes" ]
returns imp_funcs, a dictionary with filepath keys that contains lists of function definition nodes that were imported using from __ import __ style syntax. also returns imp_classes, which is the same for class definition nodes.
[ "returns", "imp_funcs", "a", "dictionary", "with", "filepath", "keys", "that", "contains", "lists", "of", "function", "definition", "nodes", "that", "were", "imported", "using", "from", "__", "import", "__", "style", "syntax", ".", "also", "returns", "imp_classes", "which", "is", "the", "same", "for", "class", "definition", "nodes", "." ]
python
train
42.705882
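The docstring above is dense; a shape sketch makes the expected inputs and outputs concrete. Real entries are AST definition nodes, so a namedtuple with a `.name` attribute stands in for them here, and the import path is inferred from the record.

```python
from collections import namedtuple
from ideogram.converter import matchImpObjStrs  # path inferred from the record above

Node = namedtuple('Node', 'name')  # stand-in for an AST def/class node

fdefs = {'pkg/mod.py': [Node('body'), Node('helper')]}   # functions per module
cdefs = {'pkg/mod.py': [Node('Thing')]}                  # classes per module
imp_obj_strs = {'pkg/main.py': [('pkg/mod.py', 'helper'),
                                ('pkg/mod.py', 'Thing')]}

imp_funcs, imp_classes = matchImpObjStrs(fdefs, imp_obj_strs, cdefs)
# imp_funcs   -> {'pkg/main.py': [Node(name='helper')]}
# imp_classes -> {'pkg/main.py': [Node(name='Thing')]}
```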
isogeo/isogeo-api-py-minsdk
isogeo_pysdk/checker.py
https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/checker.py#L413-L460
def _check_subresource(self, subresource: str):
        """Check if the subresource parameter is valid.

        :param str subresource: subresource to check.
        """
        warnings.warn(
            "subresource in URL is deprecated."
            " Use _include mechanism instead.",
            DeprecationWarning,
        )
        l_subresources = (
            "conditions",
            "contacts",
            "coordinate-system",
            "events",
            "feature-attributes",
            "keywords",
            "layers",
            "limitations",
            "links",
            "operations",
            "specifications",
        )

        if isinstance(subresource, str):
            if subresource in l_subresources:
                subresource = subresource
            elif subresource == "tags":
                subresource = "keywords"
                logging.debug(
                    "'tags' is an include not a subresource."
                    " Don't worry, it has been automatically renamed "
                    "into 'keywords' which is the correct subresource."
                )
            elif subresource == "serviceLayers":
                subresource = "layers"
                logging.debug(
                    "'serviceLayers' is an include not a subresource."
                    " Don't worry, it has been automatically renamed "
                    "into 'layers' which is the correct subresource."
                )
            else:
                raise ValueError(
                    "Invalid subresource. Must be one of: {}".format(
                        "|".join(l_subresources)
                    )
                )
        else:
            raise TypeError("'subresource' expects a str")
        return subresource
[ "def", "_check_subresource", "(", "self", ",", "subresource", ":", "str", ")", ":", "warnings", ".", "warn", "(", "\"subresource in URL is deprecated.\"", "\" Use _include mecanism instead.\"", ",", "DeprecationWarning", ",", ")", "l_subresources", "=", "(", "\"conditions\"", ",", "\"contacts\"", ",", "\"coordinate-system\"", ",", "\"events\"", ",", "\"feature-attributes\"", ",", "\"keywords\"", ",", "\"layers\"", ",", "\"limitations\"", ",", "\"links\"", ",", "\"operations\"", ",", "\"specifications\"", ",", ")", "if", "isinstance", "(", "subresource", ",", "str", ")", ":", "if", "subresource", "in", "l_subresources", ":", "subresource", "=", "subresource", "elif", "subresource", "==", "\"tags\"", ":", "subresource", "=", "\"keywords\"", "logging", ".", "debug", "(", "\"'tags' is an include not a subresource.\"", "\" Don't worry, it has be automatically renamed \"", "\"into 'keywords' which is the correct subresource.\"", ")", "elif", "subresource", "==", "\"serviceLayers\"", ":", "subresource", "=", "\"layers\"", "logging", ".", "debug", "(", "\"'serviceLayers' is an include not a subresource.\"", "\" Don't worry, it has be automatically renamed \"", "\"into 'layers' which is the correct subresource.\"", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid subresource. Must be one of: {}\"", ".", "format", "(", "\"|\"", ".", "join", "(", "l_subresources", ")", ")", ")", "else", ":", "raise", "TypeError", "(", "\"'subresource' expects a str\"", ")", "return", "subresource" ]
Check if the subresource parameter is valid.

:param str subresource: subresource to check.
[ "Check", "if", "specific_resources", "parameter", "is", "valid", "." ]
python
train
35.770833
saltstack/salt
salt/client/ssh/client.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/client.py#L120-L152
def cmd_sync(self, low):
        '''
        Execute a salt-ssh call synchronously.

        .. versionadded:: 2015.5.0

        WARNING: Eauth is **NOT** respected

        .. code-block:: python

            client.cmd_sync({
                'tgt': 'silver',
                'fun': 'test.ping',
                'arg': (),
                'tgt_type': 'glob',
                'kwarg': {}
                })
            {'silver': {'fun_args': [], 'jid': '20141202152721523072',
                        'return': True, 'retcode': 0, 'success': True,
                        'fun': 'test.ping', 'id': 'silver'}}
        '''
        kwargs = copy.deepcopy(low)
        for ignore in ['tgt', 'fun', 'arg', 'timeout', 'tgt_type', 'kwarg']:
            if ignore in kwargs:
                del kwargs[ignore]

        return self.cmd(low['tgt'],
                        low['fun'],
                        low.get('arg', []),
                        low.get('timeout'),
                        low.get('tgt_type'),
                        low.get('kwarg'),
                        **kwargs)
[ "def", "cmd_sync", "(", "self", ",", "low", ")", ":", "kwargs", "=", "copy", ".", "deepcopy", "(", "low", ")", "for", "ignore", "in", "[", "'tgt'", ",", "'fun'", ",", "'arg'", ",", "'timeout'", ",", "'tgt_type'", ",", "'kwarg'", "]", ":", "if", "ignore", "in", "kwargs", ":", "del", "kwargs", "[", "ignore", "]", "return", "self", ".", "cmd", "(", "low", "[", "'tgt'", "]", ",", "low", "[", "'fun'", "]", ",", "low", ".", "get", "(", "'arg'", ",", "[", "]", ")", ",", "low", ".", "get", "(", "'timeout'", ")", ",", "low", ".", "get", "(", "'tgt_type'", ")", ",", "low", ".", "get", "(", "'kwarg'", ")", ",", "*", "*", "kwargs", ")" ]
Execute a salt-ssh call synchronously.

.. versionadded:: 2015.5.0

WARNING: Eauth is **NOT** respected

.. code-block:: python

    client.cmd_sync({
        'tgt': 'silver',
        'fun': 'test.ping',
        'arg': (),
        'tgt_type': 'glob',
        'kwarg': {}
        })
    {'silver': {'fun_args': [], 'jid': '20141202152721523072',
                'return': True, 'retcode': 0, 'success': True,
                'fun': 'test.ping', 'id': 'silver'}}
[ "Execute", "a", "salt", "-", "ssh", "call", "synchronously", "." ]
python
train
30.424242
twilio/twilio-python
twilio/rest/preview/acc_security/service/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/acc_security/service/__init__.py#L276-L285
def verifications(self): """ Access the verifications :returns: twilio.rest.preview.acc_security.service.verification.VerificationList :rtype: twilio.rest.preview.acc_security.service.verification.VerificationList """ if self._verifications is None: self._verifications = VerificationList(self._version, service_sid=self._solution['sid'], ) return self._verifications
[ "def", "verifications", "(", "self", ")", ":", "if", "self", ".", "_verifications", "is", "None", ":", "self", ".", "_verifications", "=", "VerificationList", "(", "self", ".", "_version", ",", "service_sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", "return", "self", ".", "_verifications" ]
Access the verifications :returns: twilio.rest.preview.acc_security.service.verification.VerificationList :rtype: twilio.rest.preview.acc_security.service.verification.VerificationList
[ "Access", "the", "verifications" ]
python
train
42.7
saltstack/salt
salt/modules/zabbix.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zabbix.py#L943-L995
def usergroup_get(name=None, usrgrpids=None, userids=None, **kwargs): ''' .. versionadded:: 2016.3.0 Retrieve user groups according to the given parameters .. note:: This function accepts all usergroup_get properties: keyword argument names differ depending on your zabbix version, see here__. .. __: https://www.zabbix.com/documentation/2.4/manual/api/reference/usergroup/get :param name: names of the user groups :param usrgrpids: return only user groups with the given IDs :param userids: return only user groups that contain the given users :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: Array with convenient user groups details, False if no user group found or on failure. CLI Example: .. code-block:: bash salt '*' zabbix.usergroup_get Guests ''' conn_args = _login(**kwargs) zabbix_version = apiinfo_version(**kwargs) ret = {} try: if conn_args: method = 'usergroup.get' # Versions above 2.4 allow retrieving user group permissions if _LooseVersion(zabbix_version) > _LooseVersion("2.5"): params = {"selectRights": "extend", "output": "extend", "filter": {}} else: params = {"output": "extend", "filter": {}} if not name and not usrgrpids and not userids: return False if name: params['filter'].setdefault('name', name) if usrgrpids: params.setdefault('usrgrpids', usrgrpids) if userids: params.setdefault('userids', userids) params = _params_extend(params, **kwargs) ret = _query(method, params, conn_args['url'], conn_args['auth']) return False if not ret['result'] else ret['result'] else: raise KeyError except KeyError: return ret
[ "def", "usergroup_get", "(", "name", "=", "None", ",", "usrgrpids", "=", "None", ",", "userids", "=", "None", ",", "*", "*", "kwargs", ")", ":", "conn_args", "=", "_login", "(", "*", "*", "kwargs", ")", "zabbix_version", "=", "apiinfo_version", "(", "*", "*", "kwargs", ")", "ret", "=", "{", "}", "try", ":", "if", "conn_args", ":", "method", "=", "'usergroup.get'", "# Versions above 2.4 allow retrieving user group permissions", "if", "_LooseVersion", "(", "zabbix_version", ")", ">", "_LooseVersion", "(", "\"2.5\"", ")", ":", "params", "=", "{", "\"selectRights\"", ":", "\"extend\"", ",", "\"output\"", ":", "\"extend\"", ",", "\"filter\"", ":", "{", "}", "}", "else", ":", "params", "=", "{", "\"output\"", ":", "\"extend\"", ",", "\"filter\"", ":", "{", "}", "}", "if", "not", "name", "and", "not", "usrgrpids", "and", "not", "userids", ":", "return", "False", "if", "name", ":", "params", "[", "'filter'", "]", ".", "setdefault", "(", "'name'", ",", "name", ")", "if", "usrgrpids", ":", "params", ".", "setdefault", "(", "'usrgrpids'", ",", "usrgrpids", ")", "if", "userids", ":", "params", ".", "setdefault", "(", "'userids'", ",", "userids", ")", "params", "=", "_params_extend", "(", "params", ",", "*", "*", "kwargs", ")", "ret", "=", "_query", "(", "method", ",", "params", ",", "conn_args", "[", "'url'", "]", ",", "conn_args", "[", "'auth'", "]", ")", "return", "False", "if", "not", "ret", "[", "'result'", "]", "else", "ret", "[", "'result'", "]", "else", ":", "raise", "KeyError", "except", "KeyError", ":", "return", "ret" ]
.. versionadded:: 2016.3.0 Retrieve user groups according to the given parameters .. note:: This function accepts all usergroup_get properties: keyword argument names differ depending on your zabbix version, see here__. .. __: https://www.zabbix.com/documentation/2.4/manual/api/reference/usergroup/get :param name: names of the user groups :param usrgrpids: return only user groups with the given IDs :param userids: return only user groups that contain the given users :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: Array with convenient user groups details, False if no user group found or on failure. CLI Example: .. code-block:: bash salt '*' zabbix.usergroup_get Guests
[ "..", "versionadded", "::", "2016", ".", "3", ".", "0" ]
python
train
41
marl/jams
jams/core.py
https://github.com/marl/jams/blob/b16778399b9528efbd71434842a079f7691a7a66/jams/core.py#L1583-L1616
def slice(self, start_time, end_time, strict=False): ''' Slice every annotation contained in the annotation array using `Annotation.slice` and return as a new AnnotationArray See `Annotation.slice` for details about slicing. This function does not modify the annotations in the original annotation array. Parameters ---------- start_time : float The desired start time for slicing in seconds. end_time The desired end time for slicing in seconds. Must be greater than ``start_time``. strict : bool When ``False`` (default) observations that lie at the boundaries of the slicing range (see `Annotation.slice` for details) will have their time and/or duration adjusted such that only the part of the observation that lies within the trim range is kept. When ``True`` such observations are discarded and not included in the sliced annotation. Returns ------- sliced_array : AnnotationArray An annotation array where every annotation has been sliced. ''' sliced_array = AnnotationArray() for ann in self: sliced_array.append(ann.slice(start_time, end_time, strict=strict)) return sliced_array
[ "def", "slice", "(", "self", ",", "start_time", ",", "end_time", ",", "strict", "=", "False", ")", ":", "sliced_array", "=", "AnnotationArray", "(", ")", "for", "ann", "in", "self", ":", "sliced_array", ".", "append", "(", "ann", ".", "slice", "(", "start_time", ",", "end_time", ",", "strict", "=", "strict", ")", ")", "return", "sliced_array" ]
Slice every annotation contained in the annotation array using `Annotation.slice` and return as a new AnnotationArray See `Annotation.slice` for details about slicing. This function does not modify the annotations in the original annotation array. Parameters ---------- start_time : float The desired start time for slicing in seconds. end_time The desired end time for slicing in seconds. Must be greater than ``start_time``. strict : bool When ``False`` (default) observations that lie at the boundaries of the slicing range (see `Annotation.slice` for details) will have their time and/or duration adjusted such that only the part of the observation that lies within the trim range is kept. When ``True`` such observations are discarded and not included in the sliced annotation. Returns ------- sliced_array : AnnotationArray An annotation array where every annotation has been sliced.
[ "Slice", "every", "annotation", "contained", "in", "the", "annotation", "array", "using", "Annotation", ".", "slice", "and", "return", "as", "a", "new", "AnnotationArray" ]
python
valid
39.264706
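A usage sketch for the array-level slicing described above, assuming a JAMS file exists at the hypothetical path shown.

```python
import jams

jam = jams.load('example.jams')  # hypothetical file
# keep only the material between 5 s and 10 s, adjusting boundary observations
sliced = jam.annotations.slice(5.0, 10.0)
# strict=True would instead drop observations that straddle the boundaries
strict_sliced = jam.annotations.slice(5.0, 10.0, strict=True)
```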
google/pyringe
pyringe/payload/libpython.py
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L1240-L1251
def get_selected_python_frame(cls): '''Try to obtain the Frame for the python code in the selected frame, or None''' frame = cls.get_selected_frame() while frame: if frame.is_evalframeex(): return frame frame = frame.older() # Not found: return None
[ "def", "get_selected_python_frame", "(", "cls", ")", ":", "frame", "=", "cls", ".", "get_selected_frame", "(", ")", "while", "frame", ":", "if", "frame", ".", "is_evalframeex", "(", ")", ":", "return", "frame", "frame", "=", "frame", ".", "older", "(", ")", "# Not found:", "return", "None" ]
Try to obtain the Frame for the python code in the selected frame, or None
[ "Try", "to", "obtain", "the", "Frame", "for", "the", "python", "code", "in", "the", "selected", "frame", "or", "None" ]
python
train
27.333333
DLR-RM/RAFCON
source/rafcon/gui/mygaphas/tools.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/tools.py#L302-L352
def _filter_hovered_items(self, items, event): """Filters out items that cannot be hovered :param list items: Sorted list of items beneath the cursor :param Gtk.Event event: Motion event :return: filtered items :rtype: list """ items = self._filter_library_state(items) if not items: return items top_most_item = items[0] second_top_most_item = items[1] if len(items) > 1 else None # States/Names take precedence over connections if the connections are on the same hierarchy and if there is # a port beneath the cursor first_state_v = next(filter(lambda item: isinstance(item, (NameView, StateView)), items)) first_state_v = first_state_v.parent if isinstance(first_state_v, NameView) else first_state_v if first_state_v: # There can be several connections above the state/name skip those and find the first non-connection-item for item in items: if isinstance(item, ConnectionView): # connection is on the same hierarchy level as the state/name, thus we dismiss it if self.view.canvas.get_parent(top_most_item) is not first_state_v: continue break # Connections are only dismissed, if there is a port beneath the cursor. Search for ports here: port_beneath_cursor = False state_ports = first_state_v.get_all_ports() position = self.view.get_matrix_v2i(first_state_v).transform_point(event.x, event.y) i2v_matrix = self.view.get_matrix_i2v(first_state_v) for port_v in state_ports: item_distance = port_v.port.glue(position)[1] view_distance = i2v_matrix.transform_distance(item_distance, 0)[0] if view_distance == 0: port_beneath_cursor = True break if port_beneath_cursor: items = self.dismiss_upper_items(items, item) top_most_item = items[0] second_top_most_item = items[1] if len(items) > 1 else None # NameView can only be hovered if it or its parent state is selected if isinstance(top_most_item, NameView): state_v = second_top_most_item # second item in the list must be the parent state of the NameView if state_v not in self.view.selected_items and top_most_item not in self.view.selected_items: items = items[1:] return items
[ "def", "_filter_hovered_items", "(", "self", ",", "items", ",", "event", ")", ":", "items", "=", "self", ".", "_filter_library_state", "(", "items", ")", "if", "not", "items", ":", "return", "items", "top_most_item", "=", "items", "[", "0", "]", "second_top_most_item", "=", "items", "[", "1", "]", "if", "len", "(", "items", ")", ">", "1", "else", "None", "# States/Names take precedence over connections if the connections are on the same hierarchy and if there is", "# a port beneath the cursor", "first_state_v", "=", "next", "(", "filter", "(", "lambda", "item", ":", "isinstance", "(", "item", ",", "(", "NameView", ",", "StateView", ")", ")", ",", "items", ")", ")", "first_state_v", "=", "first_state_v", ".", "parent", "if", "isinstance", "(", "first_state_v", ",", "NameView", ")", "else", "first_state_v", "if", "first_state_v", ":", "# There can be several connections above the state/name skip those and find the first non-connection-item", "for", "item", "in", "items", ":", "if", "isinstance", "(", "item", ",", "ConnectionView", ")", ":", "# connection is on the same hierarchy level as the state/name, thus we dismiss it", "if", "self", ".", "view", ".", "canvas", ".", "get_parent", "(", "top_most_item", ")", "is", "not", "first_state_v", ":", "continue", "break", "# Connections are only dismissed, if there is a port beneath the cursor. Search for ports here:", "port_beneath_cursor", "=", "False", "state_ports", "=", "first_state_v", ".", "get_all_ports", "(", ")", "position", "=", "self", ".", "view", ".", "get_matrix_v2i", "(", "first_state_v", ")", ".", "transform_point", "(", "event", ".", "x", ",", "event", ".", "y", ")", "i2v_matrix", "=", "self", ".", "view", ".", "get_matrix_i2v", "(", "first_state_v", ")", "for", "port_v", "in", "state_ports", ":", "item_distance", "=", "port_v", ".", "port", ".", "glue", "(", "position", ")", "[", "1", "]", "view_distance", "=", "i2v_matrix", ".", "transform_distance", "(", "item_distance", ",", "0", ")", "[", "0", "]", "if", "view_distance", "==", "0", ":", "port_beneath_cursor", "=", "True", "break", "if", "port_beneath_cursor", ":", "items", "=", "self", ".", "dismiss_upper_items", "(", "items", ",", "item", ")", "top_most_item", "=", "items", "[", "0", "]", "second_top_most_item", "=", "items", "[", "1", "]", "if", "len", "(", "items", ")", ">", "1", "else", "None", "# NameView can only be hovered if it or its parent state is selected", "if", "isinstance", "(", "top_most_item", ",", "NameView", ")", ":", "state_v", "=", "second_top_most_item", "# second item in the list must be the parent state of the NameView", "if", "state_v", "not", "in", "self", ".", "view", ".", "selected_items", "and", "top_most_item", "not", "in", "self", ".", "view", ".", "selected_items", ":", "items", "=", "items", "[", "1", ":", "]", "return", "items" ]
Filters out items that cannot be hovered :param list items: Sorted list of items beneath the cursor :param Gtk.Event event: Motion event :return: filtered items :rtype: list
[ "Filters", "out", "items", "that", "cannot", "be", "hovered" ]
python
train
49.607843
pypa/pipenv
pipenv/vendor/requirementslib/models/cache.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requirementslib/models/cache.py#L266-L287
def as_cache_key(self, ireq): """Given a requirement, return its cache key. This behavior is a little weird in order to allow backwards compatibility with cache files. For a requirement without extras, this will return, for example:: ("ipython", "2.1.0") For a requirement with extras, the extras will be comma-separated and appended to the version, inside brackets, like so:: ("ipython", "2.1.0[nbconvert,notebook]") """ extras = tuple(sorted(ireq.extras)) if not extras: extras_string = "" else: extras_string = "[{}]".format(",".join(extras)) name = key_from_req(ireq.req) version = get_pinned_version(ireq) return name, "{}{}".format(version, extras_string)
[ "def", "as_cache_key", "(", "self", ",", "ireq", ")", ":", "extras", "=", "tuple", "(", "sorted", "(", "ireq", ".", "extras", ")", ")", "if", "not", "extras", ":", "extras_string", "=", "\"\"", "else", ":", "extras_string", "=", "\"[{}]\"", ".", "format", "(", "\",\"", ".", "join", "(", "extras", ")", ")", "name", "=", "key_from_req", "(", "ireq", ".", "req", ")", "version", "=", "get_pinned_version", "(", "ireq", ")", "return", "name", ",", "\"{}{}\"", ".", "format", "(", "version", ",", "extras_string", ")" ]
Given a requirement, return its cache key. This behavior is a little weird in order to allow backwards compatibility with cache files. For a requirement without extras, this will return, for example:: ("ipython", "2.1.0") For a requirement with extras, the extras will be comma-separated and appended to the version, inside brackets, like so:: ("ipython", "2.1.0[nbconvert,notebook]")
[ "Given", "a", "requirement", "return", "its", "cache", "key", "." ]
python
train
36.181818
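The key format above can be illustrated without pip objects; this pure-format sketch mirrors the extras handling (sorted, comma-joined, bracketed) described in the docstring.

```python
def format_cache_key(name, version, extras=()):
    # mirrors as_cache_key: extras are sorted, comma-joined, and bracketed
    extras_string = '[{}]'.format(','.join(sorted(extras))) if extras else ''
    return name, '{}{}'.format(version, extras_string)

print(format_cache_key('ipython', '2.1.0'))
# ('ipython', '2.1.0')
print(format_cache_key('ipython', '2.1.0', ('notebook', 'nbconvert')))
# ('ipython', '2.1.0[nbconvert,notebook]')
```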
ozgurgunes/django-manifest
manifest/core/templatetags/navigation.py
https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/core/templatetags/navigation.py#L61-L77
def active_url(context, urls, css=None):
    """
    Highlight menu item based on url tag.
    Returns a css class if ``request.get_full_path()`` is among the given ``urls``.

    :param urls: Space-separated Django url names to be reversed.

    :param css: Css class to be returned for highlighting.
                Return active if none set.
    """
    request = context['request']
    if request.get_full_path() in (reverse(url) for url in urls.split()):
        return css if css else 'active'
    return ''
[ "def", "active_url", "(", "context", ",", "urls", ",", "css", "=", "None", ")", ":", "request", "=", "context", "[", "'request'", "]", "if", "request", ".", "get_full_path", "in", "(", "reverse", "(", "url", ")", "for", "url", "in", "urls", ".", "split", "(", ")", ")", ":", "return", "css", "if", "css", "else", "'active'", "return", "''" ]
Highlight menu item based on url tag.
Returns a css class if ``request.get_full_path()`` is among the given ``urls``.

:param urls: Space-separated Django url names to be reversed.

:param css: Css class to be returned for highlighting.
            Return active if none set.
[ "Highlight", "menu", "item", "based", "on", "url", "tag", ".", "Returns", "a", "css", "class", "if", "request", ".", "path", "is", "in", "given", "url", ".", ":", "param", "url", ":", "Django", "url", "to", "be", "reversed", "." ]
python
train
27.235294