Dataset schema (one row per function; field name, type, observed size range):

  repo              string, 7 to 54 chars
  path              string, 4 to 192 chars
  url               string, 87 to 284 chars
  code              string, 78 to 104k chars
  code_tokens       sequence
  docstring         string, 1 to 46.9k chars
  docstring_tokens  sequence
  language          string, 1 distinct value
  partition         string, 3 distinct values
adafruit/Adafruit_Python_BluefruitLE
Adafruit_BluefruitLE/corebluetooth/adapter.py
https://github.com/adafruit/Adafruit_Python_BluefruitLE/blob/34fc6f596371b961628369d78ce836950514062f/Adafruit_BluefruitLE/corebluetooth/adapter.py#L59-L69
def _state_changed(self, state):
    """Called when the power state changes."""
    logger.debug('Adapter state change: {0}'.format(state))
    # Handle when powered on.
    if state == 5:
        self._powered_off.clear()
        self._powered_on.set()
    # Handle when powered off.
    elif state == 4:
        self._powered_on.clear()
        self._powered_off.set()
[ "def", "_state_changed", "(", "self", ",", "state", ")", ":", "logger", ".", "debug", "(", "'Adapter state change: {0}'", ".", "format", "(", "state", ")", ")", "# Handle when powered on.", "if", "state", "==", "5", ":", "self", ".", "_powered_off", ".", "clear", "(", ")", "self", ".", "_powered_on", ".", "set", "(", ")", "# Handle when powered off.", "elif", "state", "==", "4", ":", "self", ".", "_powered_on", ".", "clear", "(", ")", "self", ".", "_powered_off", ".", "set", "(", ")" ]
Called when the power state changes.
[ "Called", "when", "the", "power", "state", "changes", "." ]
python
valid
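The adapter above coordinates power state through a pair of threading events, so other threads can block until a state is reached. A minimal self-contained sketch of that pattern (the constants 5 and 4 mirror CoreBluetooth's powered-on/powered-off state values as used above; the class here is a stand-in for illustration, not the library's adapter):

import threading

class PowerStateTracker:
    def __init__(self):
        self._powered_on = threading.Event()
        self._powered_off = threading.Event()

    def _state_changed(self, state):
        # The two events are kept mutually exclusive.
        if state == 5:    # powered on
            self._powered_off.clear()
            self._powered_on.set()
        elif state == 4:  # powered off
            self._powered_on.clear()
            self._powered_off.set()

tracker = PowerStateTracker()
tracker._state_changed(5)
print(tracker._powered_on.wait(timeout=1))  # True: waiters wake up on power-on
tracker._state_changed(4)
print(tracker._powered_off.is_set())        # True
print(tracker._powered_on.is_set())         # False: cleared on power-off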
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/textio.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/textio.py#L238-L265
def string_list_file(cls, filename):
    """
    Read a list of string values from a file.

    The file format is:

     - # anywhere in the line begins a comment
     - leading and trailing spaces are ignored
     - empty lines are ignored
     - strings cannot span more than a single line

    @type  filename: str
    @param filename: Name of the file to read.

    @rtype:  list
    @return: List of strings read from the file.
    """
    result = list()
    # Use a context manager so the file is closed even on error.
    with open(filename, 'r') as fd:
        for line in fd:
            if '#' in line:
                line = line[:line.find('#')]
            line = line.strip()
            if line:
                result.append(line)
    return result
[ "def", "string_list_file", "(", "cls", ",", "filename", ")", ":", "count", "=", "0", "result", "=", "list", "(", ")", "fd", "=", "open", "(", "filename", ",", "'r'", ")", "for", "line", "in", "fd", ":", "count", "=", "count", "+", "1", "if", "'#'", "in", "line", ":", "line", "=", "line", "[", ":", "line", ".", "find", "(", "'#'", ")", "]", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ":", "result", ".", "append", "(", "line", ")", "return", "result" ]
Read a list of string values from a file. The file format is: - # anywhere in the line begins a comment - leading and trailing spaces are ignored - empty lines are ignored - strings cannot span more than a single line @type filename: str @param filename: Name of the file to read. @rtype: list @return: List of strings read from the file.
[ "Read", "a", "list", "of", "string", "values", "from", "a", "file", "." ]
python
train
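To illustrate the parsing rules in string_list_file (comments, blank lines, surrounding whitespace), here is a self-contained demo that applies the same logic to a temporary file; it re-implements the loop with the standard library rather than importing winappdbg:

import os
import tempfile

def parse_string_list(filename):
    # Same rules as string_list_file above: '#' starts a comment,
    # surrounding whitespace is stripped, empty lines are skipped.
    result = []
    with open(filename, 'r') as fd:
        for line in fd:
            if '#' in line:
                line = line[:line.find('#')]
            line = line.strip()
            if line:
                result.append(line)
    return result

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write("# full-line comment\n  kernel32.dll  \n\nntdll.dll # trailing comment\n")
    path = tmp.name

print(parse_string_list(path))  # ['kernel32.dll', 'ntdll.dll']
os.remove(path)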
PyMLGame/pymlgame
pymlgame/controller.py
https://github.com/PyMLGame/pymlgame/blob/450fe77d35f9a26c107586d6954f69c3895bf504/pymlgame/controller.py#L72-L85
def _del_controller(self, uid):
    """
    Remove controller from internal list and tell the game.

    :param uid: Unique id of the controller
    :type uid: str
    """
    try:
        self.controllers.pop(uid)
        e = Event(uid, E_DISCONNECT)
        self.queue.put_nowait(e)
    except KeyError:
        # There is no such controller, ignore the command
        pass
[ "def", "_del_controller", "(", "self", ",", "uid", ")", ":", "try", ":", "self", ".", "controllers", ".", "pop", "(", "uid", ")", "e", "=", "Event", "(", "uid", ",", "E_DISCONNECT", ")", "self", ".", "queue", ".", "put_nowait", "(", "e", ")", "except", "KeyError", ":", "# There is no such controller, ignore the command", "pass" ]
Remove controller from internal list and tell the game. :param uid: Unique id of the controller :type uid: str
[ "Remove", "controller", "from", "internal", "list", "and", "tell", "the", "game", "." ]
python
train
SmokinCaterpillar/pypet
pypet/utils/storagefactory.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/utils/storagefactory.py#L18-L25
def _create_storage(storage_service, trajectory=None, **kwargs):
    """Creates a service from a constructor and checks which kwargs are not used"""
    kwargs_copy = kwargs.copy()
    kwargs_copy['trajectory'] = trajectory
    matching_kwargs = get_matching_kwargs(storage_service, kwargs_copy)
    storage_service = storage_service(**matching_kwargs)
    unused_kwargs = set(kwargs.keys()) - set(matching_kwargs.keys())
    return storage_service, unused_kwargs
[ "def", "_create_storage", "(", "storage_service", ",", "trajectory", "=", "None", ",", "*", "*", "kwargs", ")", ":", "kwargs_copy", "=", "kwargs", ".", "copy", "(", ")", "kwargs_copy", "[", "'trajectory'", "]", "=", "trajectory", "matching_kwargs", "=", "get_matching_kwargs", "(", "storage_service", ",", "kwargs_copy", ")", "storage_service", "=", "storage_service", "(", "*", "*", "matching_kwargs", ")", "unused_kwargs", "=", "set", "(", "kwargs", ".", "keys", "(", ")", ")", "-", "set", "(", "matching_kwargs", ".", "keys", "(", ")", ")", "return", "storage_service", ",", "unused_kwargs" ]
Creates a service from a constructor and checks which kwargs are not used
[ "Creates", "a", "service", "from", "a", "constructor", "and", "checks", "which", "kwargs", "are", "not", "used" ]
python
test
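get_matching_kwargs is pypet internals, but the idea (pass a constructor only the kwargs its signature accepts, and report the rest as unused) can be sketched with the standard library. This is an illustrative equivalent under that assumption, not pypet's implementation:

import inspect

def create_service(constructor, **kwargs):
    # Keep only the kwargs that appear in the constructor's signature.
    params = inspect.signature(constructor).parameters
    matching = {k: v for k, v in kwargs.items() if k in params}
    unused = set(kwargs) - set(matching)
    return constructor(**matching), unused

class DummyStorage:
    def __init__(self, filename, trajectory=None):
        self.filename = filename
        self.trajectory = trajectory

service, unused = create_service(DummyStorage, filename='out.h5',
                                 trajectory='traj_1', compression=9)
print(service.filename)  # out.h5
print(unused)            # {'compression'}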
materialsproject/pymatgen
pymatgen/core/sites.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/sites.py#L488-L507
def is_periodic_image(self, other, tolerance=1e-8, check_lattice=True):
    """
    Returns True if sites are periodic images of each other.

    Args:
        other (PeriodicSite): Other site
        tolerance (float): Tolerance to compare fractional coordinates
        check_lattice (bool): Whether to check if the two sites have the
            same lattice.

    Returns:
        bool: True if sites are periodic images of each other.
    """
    if check_lattice and self.lattice != other.lattice:
        return False
    if self.species != other.species:
        return False

    frac_diff = pbc_diff(self.frac_coords, other.frac_coords)
    return np.allclose(frac_diff, [0, 0, 0], atol=tolerance)
[ "def", "is_periodic_image", "(", "self", ",", "other", ",", "tolerance", "=", "1e-8", ",", "check_lattice", "=", "True", ")", ":", "if", "check_lattice", "and", "self", ".", "lattice", "!=", "other", ".", "lattice", ":", "return", "False", "if", "self", ".", "species", "!=", "other", ".", "species", ":", "return", "False", "frac_diff", "=", "pbc_diff", "(", "self", ".", "frac_coords", ",", "other", ".", "frac_coords", ")", "return", "np", ".", "allclose", "(", "frac_diff", ",", "[", "0", ",", "0", ",", "0", "]", ",", "atol", "=", "tolerance", ")" ]
Returns True if sites are periodic images of each other. Args: other (PeriodicSite): Other site tolerance (float): Tolerance to compare fractional coordinates check_lattice (bool): Whether to check if the two sites have the same lattice. Returns: bool: True if sites are periodic images of each other.
[ "Returns", "True", "if", "sites", "are", "periodic", "images", "of", "each", "other", "." ]
python
train
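pbc_diff wraps a fractional-coordinate difference onto the nearest periodic image; a minimal numpy sketch of the comparison (illustrative, not pymatgen's implementation) looks like this:

import numpy as np

def pbc_diff_sketch(fcoords1, fcoords2):
    # Raw fractional difference, then subtract the rounded difference so
    # each component lands in [-0.5, 0.5): the nearest periodic image.
    fdiff = np.subtract(fcoords1, fcoords2)
    return fdiff - np.round(fdiff)

# (0.1, 0.1, 0.1) and (1.1, -0.9, 2.1) differ only by lattice translations.
diff = pbc_diff_sketch([0.1, 0.1, 0.1], [1.1, -0.9, 2.1])
print(np.allclose(diff, [0, 0, 0], atol=1e-8))  # True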
openid/python-openid
openid/consumer/consumer.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/consumer/consumer.py#L1326-L1362
def _getOpenID1SessionType(self, assoc_response):
    """Given an association response message, extract the OpenID 1.X
    session type.

    This function mostly takes care of the 'no-encryption' default
    behavior in OpenID 1.

    If the association type is plain-text, this function will return
    'no-encryption'

    @returns: The association type for this message
    @rtype: str

    @raises KeyError: when the session_type field is absent.
    """
    # If it's an OpenID 1 message, allow session_type to default
    # to None (which signifies "no-encryption")
    session_type = assoc_response.getArg(OPENID1_NS, 'session_type')

    # Handle the differences between no-encryption association
    # responses in OpenID 1 and 2:

    # no-encryption is not really a valid session type for OpenID 1,
    # but we'll accept it anyway, while issuing a warning.
    if session_type == 'no-encryption':
        logging.warning('OpenID server sent "no-encryption" '
                        'for OpenID 1.X')

    # Missing or empty session type is the way to flag a
    # 'no-encryption' response. Change the session type to
    # 'no-encryption' so that it can be handled in the same
    # way as OpenID 2 'no-encryption' responses.
    elif session_type == '' or session_type is None:
        session_type = 'no-encryption'

    return session_type
[ "def", "_getOpenID1SessionType", "(", "self", ",", "assoc_response", ")", ":", "# If it's an OpenID 1 message, allow session_type to default", "# to None (which signifies \"no-encryption\")", "session_type", "=", "assoc_response", ".", "getArg", "(", "OPENID1_NS", ",", "'session_type'", ")", "# Handle the differences between no-encryption association", "# respones in OpenID 1 and 2:", "# no-encryption is not really a valid session type for", "# OpenID 1, but we'll accept it anyway, while issuing a", "# warning.", "if", "session_type", "==", "'no-encryption'", ":", "logging", ".", "warn", "(", "'OpenID server sent \"no-encryption\"'", "'for OpenID 1.X'", ")", "# Missing or empty session type is the way to flag a", "# 'no-encryption' response. Change the session type to", "# 'no-encryption' so that it can be handled in the same", "# way as OpenID 2 'no-encryption' respones.", "elif", "session_type", "==", "''", "or", "session_type", "is", "None", ":", "session_type", "=", "'no-encryption'", "return", "session_type" ]
Given an association response message, extract the OpenID 1.X session type. This function mostly takes care of the 'no-encryption' default behavior in OpenID 1. If the association type is plain-text, this function will return 'no-encryption' @returns: The association type for this message @rtype: str @raises KeyError: when the session_type field is absent.
[ "Given", "an", "association", "response", "message", "extract", "the", "OpenID", "1", ".", "X", "session", "type", "." ]
python
train
decryptus/httpdis
httpdis/httpdis.py
https://github.com/decryptus/httpdis/blob/5d198cdc5558f416634602689b3df2c8aeb34984/httpdis/httpdis.py#L1018-L1112
def register(handler, op, safe_init=None, at_start=None, name=None,
             at_stop=None, static=False, root=None, replacement=None,
             charset=DEFAULT_CHARSET, content_type=None,
             to_auth=False, to_log=True):
    """
    Register a command
    @handler: function to execute when the command is received
    @op: http method(s)
    @safe_init: called by the safe_init() function of this module
    @at_start: called once just before the server starts
    @at_stop: called once just before the server stops
    @name: name of the command (if not name, handler.__name__ is used)
    @static: render static file
    @root: root path
    @replacement: rewrite path when name is regexp
    @charset: charset
    @content_type: content_type
    @to_auth: use basic authentication if True
    @to_log: log request if True

    prototypes:
      handler(args)
      safe_init(options)
      at_start(options)
      at_stop()
    """
    ref_cmd = _NCMD
    is_reg = False

    if isinstance(name, re._pattern_type):
        key = name.pattern
        ref_cmd = _RCMD
        is_reg = True
    elif name:
        key = name
        replacement = None
    else:
        key = handler.__name__
        name = handler.__name__
        replacement = None

    methods = []
    if not isinstance(op, (list, tuple)):
        op = [op.upper()]

    for x in op:
        x = x.upper()
        if x not in _METHODS:
            raise ValueError("unknown HTTP method: %r" % x)
        if static and x not in ('GET', 'HEAD'):
            raise ValueError("Static must be GET, HEAD command")
        methods.append(x)

    if not methods:
        raise ValueError("Missing HTTP method")

    if static and not root:
        raise ValueError("Missing root argument for static")

    cmd = Command(name, handler, methods, safe_init, at_start, at_stop,
                  static, root, replacement, charset, content_type,
                  to_auth, to_log)

    for method in methods:
        if not is_reg:
            mkey = "%s /%s" % (method, key)
        else:
            mkey = "%s %s" % (method, key)
        if mkey in _COMMANDS:
            raise ValueError("%s is already registered" % name)
        _COMMANDS[mkey] = cmd
        ref_cmd[mkey] = _COMMANDS[mkey]
[ "def", "register", "(", "handler", ",", "op", ",", "safe_init", "=", "None", ",", "at_start", "=", "None", ",", "name", "=", "None", ",", "at_stop", "=", "None", ",", "static", "=", "False", ",", "root", "=", "None", ",", "replacement", "=", "None", ",", "charset", "=", "DEFAULT_CHARSET", ",", "content_type", "=", "None", ",", "to_auth", "=", "False", ",", "to_log", "=", "True", ")", ":", "ref_cmd", "=", "_NCMD", "is_reg", "=", "False", "if", "isinstance", "(", "name", ",", "re", ".", "_pattern_type", ")", ":", "key", "=", "name", ".", "pattern", "ref_cmd", "=", "_RCMD", "is_reg", "=", "True", "elif", "name", ":", "key", "=", "name", "replacement", "=", "None", "else", ":", "key", "=", "handler", ".", "__name__", "name", "=", "handler", ".", "__name__", "replacement", "=", "None", "methods", "=", "[", "]", "if", "not", "isinstance", "(", "op", ",", "(", "list", ",", "tuple", ")", ")", ":", "op", "=", "[", "op", ".", "upper", "(", ")", "]", "for", "x", "in", "op", ":", "x", "=", "x", ".", "upper", "(", ")", "if", "x", "not", "in", "_METHODS", ":", "raise", "ValueError", "(", "\"unknown HTTP method: %r\"", "%", "x", ")", "if", "static", "and", "x", "not", "in", "(", "'GET'", ",", "'HEAD'", ")", ":", "raise", "ValueError", "(", "\"Static must be GET, HEAD command\"", ")", "methods", ".", "append", "(", "x", ")", "if", "not", "methods", ":", "raise", "ValueError", "(", "\"Missing HTTP method\"", ")", "if", "static", "and", "not", "root", ":", "raise", "ValueError", "(", "\"Missing root argument for static\"", ")", "cmd", "=", "Command", "(", "name", ",", "handler", ",", "methods", ",", "safe_init", ",", "at_start", ",", "at_stop", ",", "static", ",", "root", ",", "replacement", ",", "charset", ",", "content_type", ",", "to_auth", ",", "to_log", ")", "for", "method", "in", "methods", ":", "if", "not", "is_reg", ":", "mkey", "=", "\"%s /%s\"", "%", "(", "method", ",", "key", ")", "else", ":", "mkey", "=", "\"%s %s\"", "%", "(", "method", ",", "key", ")", "if", "mkey", "in", "_COMMANDS", ":", "raise", "ValueError", "(", "\"%s is already registred\"", "%", "name", ")", "_COMMANDS", "[", "mkey", "]", "=", "cmd", "ref_cmd", "[", "mkey", "]", "=", "_COMMANDS", "[", "mkey", "]" ]
Register a command @handler: function to execute when the command is received @op: http method(s) @safe_init: called by the safe_init() function of this module @at_start: called once just before the server starts @at_stop: called once just before the server stops @name: name of the command (if not name, handler.__name__ is used) @static: render static file @root: root path @replacement: rewrite path when name is regexp @charset: charset @content_type: content_type @to_auth: use basic authentication if True @to_log: log request if True prototypes: handler(args) safe_init(options) at_start(options) at_stop()
[ "Register", "a", "command", "@handler", ":", "function", "to", "execute", "when", "the", "command", "is", "received", "@op", ":", "http", "method", "(", "s", ")", "@safe_init", ":", "called", "by", "the", "safe_init", "()", "function", "of", "this", "module", "@at_start", ":", "called", "once", "just", "before", "the", "server", "starts", "@at_stop", ":", "called", "once", "just", "before", "the", "server", "stops", "@name", ":", "name", "of", "the", "command", "(", "if", "not", "name", "handler", ".", "__name__", "is", "used", ")", "@static", ":", "render", "static", "file", "@root", ":", "root", "path", "@replacement", ":", "rewrite", "path", "when", "name", "is", "regexp", "@charset", ":", "charset", "@content_type", ":", "content_type", "@to_auth", ":", "use", "basic", "authentification", "if", "True", "@to_log", ":", "log", "request", "if", "True" ]
python
train
googlefonts/fontbakery
Lib/fontbakery/reporters/html.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/reporters/html.py#L104-L108
def html_for_check(self, check) -> str:
    """Return HTML string for complete single check."""
    check["logs"].sort(key=lambda c: LOGLEVELS.index(c["status"]))
    logs = "<ul>" + "".join([self.log_html(log) for log in check["logs"]]) + "</ul>"
    return logs
[ "def", "html_for_check", "(", "self", ",", "check", ")", "->", "str", ":", "check", "[", "\"logs\"", "]", ".", "sort", "(", "key", "=", "lambda", "c", ":", "LOGLEVELS", ".", "index", "(", "c", "[", "\"status\"", "]", ")", ")", "logs", "=", "\"<ul>\"", "+", "\"\"", ".", "join", "(", "[", "self", ".", "log_html", "(", "log", ")", "for", "log", "in", "check", "[", "\"logs\"", "]", "]", ")", "+", "\"</ul>\"", "return", "logs" ]
Return HTML string for complete single check.
[ "Return", "HTML", "string", "for", "complete", "single", "check", "." ]
python
train
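The one non-obvious move in html_for_check is sorting log entries by the position of their status in a severity list. A self-contained sketch of that trick (the LOGLEVELS ordering here is invented for illustration, not fontbakery's actual list):

# Hypothetical severity ordering, most severe first.
LOGLEVELS = ["ERROR", "FAIL", "WARN", "INFO", "PASS"]

logs = [{"status": "PASS", "message": "ok"},
        {"status": "FAIL", "message": "bad glyph"},
        {"status": "WARN", "message": "check metrics"}]

# list.index gives each status its rank in LOGLEVELS, so sort() puts
# the most severe entries first.
logs.sort(key=lambda c: LOGLEVELS.index(c["status"]))
print([c["status"] for c in logs])  # ['FAIL', 'WARN', 'PASS']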
mitsei/dlkit
dlkit/json_/grading/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/sessions.py#L4756-L4779
def alias_gradebook(self, gradebook_id, alias_id):
    """Adds an ``Id`` to a ``Gradebook`` for the purpose of creating compatibility.

    The primary ``Id`` of the ``Gradebook`` is determined by the
    provider. The new ``Id`` performs as an alias to the primary ``Id``.
    If the alias is a pointer to another gradebook, it is reassigned to
    the given gradebook ``Id``.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of a ``Gradebook``
    arg:    alias_id (osid.id.Id): the alias ``Id``
    raise:  AlreadyExists - ``alias_id`` is already assigned
    raise:  NotFound - ``gradebook_id`` not found
    raise:  NullArgument - ``gradebook_id`` or ``alias_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinLookupSession.alias_bin_template
    if self._catalog_session is not None:
        return self._catalog_session.alias_catalog(catalog_id=gradebook_id,
                                                   alias_id=alias_id)
    self._alias_id(primary_id=gradebook_id, equivalent_id=alias_id)
[ "def", "alias_gradebook", "(", "self", ",", "gradebook_id", ",", "alias_id", ")", ":", "# Implemented from template for", "# osid.resource.BinLookupSession.alias_bin_template", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "alias_catalog", "(", "catalog_id", "=", "gradebook_id", ",", "alias_id", "=", "alias_id", ")", "self", ".", "_alias_id", "(", "primary_id", "=", "gradebook_id", ",", "equivalent_id", "=", "alias_id", ")" ]
Adds an ``Id`` to a ``Gradebook`` for the purpose of creating compatibility. The primary ``Id`` of the ``Gradebook`` is determined by the provider. The new ``Id`` performs as an alias to the primary ``Id`` . If the alias is a pointer to another gradebook, it is reassigned to the given gradebook ``Id``. arg: gradebook_id (osid.id.Id): the ``Id`` of a ``Gradebook`` arg: alias_id (osid.id.Id): the alias ``Id`` raise: AlreadyExists - ``alias_id`` is already assigned raise: NotFound - ``gradebook_id`` not found raise: NullArgument - ``gradebook_id`` or ``alias_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Adds", "an", "Id", "to", "a", "Gradebook", "for", "the", "purpose", "of", "creating", "compatibility", "." ]
python
train
amol-/depot
examples/turbogears/depotexample/model/auth.py
https://github.com/amol-/depot/blob/82104d2ae54f8ef55f05fb5a3f148cdc9f928959/examples/turbogears/depotexample/model/auth.py#L101-L103
def by_user_name(cls, username):
    """Return the user object whose user name is ``username``."""
    return DBSession.query(cls).filter_by(user_name=username).first()
[ "def", "by_user_name", "(", "cls", ",", "username", ")", ":", "return", "DBSession", ".", "query", "(", "cls", ")", ".", "filter_by", "(", "user_name", "=", "username", ")", ".", "first", "(", ")" ]
Return the user object whose user name is ``username``.
[ "Return", "the", "user", "object", "whose", "user", "name", "is", "username", "." ]
python
train
NuGrid/NuGridPy
nugridpy/mesa.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L3827-L3944
def abu_profiles(p, ifig=1, xlm=xlm, ylm=(-8, 0), show=False, abunds='All',
                 xaxis=xaxis_type, figsize1=(8, 8)):
    '''Four panels of abundance plots

    Parameters
    ----------
    p : instance
        mesa_profile instance
    xlm : tuple
        xlimits: mass_min, mass_max
    abus : 'All'
        plots many 'commonly used' isotopes up to Fe if they are in your
        mesa output, otherwise provide a list of lists of desired abus
    show : Boolean
        False for batch use, True for interactive use
    xaxis : character
        "Lagrangian": mass is the radial mass coordinate
        "Eulerian": radius is the radial coordinate, in Mm
    '''
    matplotlib.rc('figure', facecolor='white', figsize=figsize1)

    # create subplot structure
    f, ([ax1, ax2], [ax3, ax4]) = pl.subplots(2, 2, sharex=False,
                                              sharey=True, figsize=figsize1)

    # define 4 groups of elements, one for each of the 4 subplots
    all_isos = [['h1', 'he3', 'he4', 'li6', 'c12', 'c13', 'n13', 'n14',
                 'n15', 'o16', 'o17', 'o18', 'f19'],
                ['ne20', 'ne21', 'ne22', 'na22', 'na23', 'mg24', 'mg25',
                 'mg26', 'al26', 'al27', 'si28', 'si29', 'si30'],
                ['p31', 's32', 's33', 's34', 's36', 'cl35', 'cl37',
                 'ar36', 'ar38', 'ar40', 'k39', 'k40', 'k41'],
                ['ca40', 'ca42', 'ca48', 'sc45', 'ti46', 'ti48', 'ti50',
                 'v50', 'v51', 'cr52', 'cr54', 'mn55', 'fe56']]

    if abunds == 'All':
        abus = [[], [], [], []]
        j = 0
        for i, row in enumerate(all_isos):
            for iso in row:
                if iso in p.cols:
                    abus[i].append(iso)
                    j += 1

        abus1 = []
        abus2 = [[], [], [], []]
        for l in range(len(abus)):
            for k in range(len(abus[l])):
                abus1.append(abus[l][k])

        is_small_isos = False
        for i in range(len(abus)):
            if len(abus[i]) < 5:
                is_small_isos = True
                print("Missing isotopes from the default list. "
                      "Distributing the ones you have over the panels.")

        if is_small_isos:
            n = 4
            quo, rem = divmod(len(abus1), n)
            for i in range(len(abus2)):
                for k in range(i * quo, (i + 1) * quo + rem):
                    abus2[i].append(abus1[k])
            abus = abus2
        # print(abus)
    else:
        abus = abunds  # use the caller-provided list of lists

    ax = [ax1, ax2, ax3, ax4]

    xxx = p.get('radius') if xaxis == "Eulerian" else p.get('mass')
    mass = p.get('mass')                           # in units of Msun
    radius = p.get('radius') * ast.rsun_cm / 1.e8  # in units of Mm
    if xaxis == "Eulerian":
        xxx = radius
        if xlm[0] == 0 and xlm[1] == 0:
            indtop = 0
            indbot = len(mass) - 1
        else:
            indbot = np.where(radius >= xlm[0])[0][-1]
            indtop = np.where(radius < xlm[1])[0][0]
        xll = (radius[indbot], radius[indtop])
        xxlabel = "Radius (Mm)"
    elif xaxis == "Lagrangian":
        xxx = mass
        xll = xlm
        xxlabel = "$M / \mathrm{M_{sun}}$"
    else:
        print("Error: don't understand xaxis choice, must be Lagrangian or Eulerian")

    for i in range(4):
        for thing in abus[i]:
            ind = abus[i].index(thing)
            ax[i].plot(xxx, np.log10(p.get(thing)),
                       ls=u.linestylecb(ind, a, b)[0],
                       marker=u.linestylecb(ind, a, b)[1],
                       color=u.linestylecb(ind, a, b)[2],
                       markevery=50, label=thing)
        # set x and y lims and labels
        ax[i].set_ylim(ylm)
        ax[i].set_xlim(xll)
        ax[i].legend(loc=1)
        ax[i].set_xlabel(xxlabel)
        if i % 2 == 0:
            ax[i].set_ylabel('log X')
        # ax[i].set_aspect('equal')

    title_str = ("Abundance plot: " + 't =' + str(title_format % p.header_attr['star_age'])
                 + ' dt =' + str(title_format % p.header_attr['time_step'])
                 + ' model number = ' + str(int(p.header_attr['model_number'])))
    f.suptitle(title_str, fontsize=12)
    f.tight_layout()
    f.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.9, wspace=0, hspace=0.1)
    f.savefig('abuprof' + str(int(p.header_attr['model_number'])).zfill(6) + '.png')
[ "def", "abu_profiles", "(", "p", ",", "ifig", "=", "1", ",", "xlm", "=", "xlm", ",", "ylm", "=", "(", "-", "8", ",", "0", ")", ",", "show", "=", "False", ",", "abunds", "=", "'All'", ",", "xaxis", "=", "xaxis_type", ",", "figsize1", "=", "(", "8", ",", "8", ")", ")", ":", "matplotlib", ".", "rc", "(", "'figure'", ",", "facecolor", "=", "'white'", ",", "figsize", "=", "figsize1", ")", "# create subplot structure", "f", ",", "(", "[", "ax1", ",", "ax2", "]", ",", "[", "ax3", ",", "ax4", "]", ")", "=", "pl", ".", "subplots", "(", "2", ",", "2", ",", "sharex", "=", "False", ",", "sharey", "=", "True", ",", "figsize", "=", "figsize1", ")", "# define 4 groups of elements, one for each of the 4 subplots ", "all_isos", "=", "[", "[", "'h1'", ",", "'he3'", ",", "'he4'", ",", "'li6'", ",", "'c12'", ",", "'c13'", ",", "'n13'", ",", "'n14'", ",", "'n15'", ",", "'o16'", ",", "'o17'", ",", "'o18'", ",", "'f19'", "]", ",", "[", "'ne20'", ",", "'ne21'", ",", "'ne22'", ",", "'na22'", ",", "'na23'", ",", "'mg24'", ",", "'mg25'", ",", "'mg26'", ",", "'al26'", ",", "'al27'", ",", "'si28'", ",", "'si29'", ",", "'si30'", "]", ",", "[", "'p31'", ",", "'s32'", ",", "'s33'", ",", "'s34'", ",", "'s36'", ",", "'cl35'", ",", "'cl37'", ",", "'ar36'", ",", "'ar38'", ",", "'ar40'", ",", "'k39'", ",", "'k40'", ",", "'k41'", "]", ",", "[", "'ca40'", ",", "'ca42'", ",", "'ca48'", ",", "'sc45'", ",", "'ti46'", ",", "'ti48'", ",", "'ti50'", ",", "'v50'", ",", "'v51'", ",", "'cr52'", ",", "'cr54'", ",", "'mn55'", ",", "'fe56'", "]", "]", "if", "abunds", "==", "'All'", ":", "abus", "=", "[", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "]", "j", "=", "0", "for", "i", ",", "row", "in", "enumerate", "(", "all_isos", ")", ":", "for", "iso", "in", "row", ":", "if", "iso", "in", "p", ".", "cols", ":", "abus", "[", "i", "]", ".", "append", "(", "iso", ")", "j", "+=", "1", "abus1", "=", "[", "]", "abus2", "=", "[", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "]", "for", "l", "in", "range", "(", "len", "(", "abus", ")", ")", ":", "for", "k", "in", "range", "(", "len", "(", "abus", "[", "l", "]", ")", ")", ":", "abus1", ".", "append", "(", "abus", "[", "l", "]", "[", "k", "]", ")", "is_small_isos", "=", "False", "for", "i", "in", "range", "(", "len", "(", "abus", ")", ")", ":", "if", "len", "(", "abus", "[", "i", "]", ")", "<", "5", ":", "is_small_isos", "=", "True", "print", "(", "\"Missing isotopes from the default list. 
Distributing the ones you have over the panels.\"", ")", "if", "is_small_isos", ":", "n", "=", "4", "quo", ",", "rem", "=", "divmod", "(", "len", "(", "abus1", ")", ",", "n", ")", "for", "i", "in", "range", "(", "len", "(", "abus2", ")", ")", ":", "for", "k", "in", "range", "(", "i", "*", "quo", ",", "(", "i", "+", "1", ")", "*", "quo", "+", "rem", ")", ":", "abus2", "[", "i", "]", ".", "append", "(", "abus1", "[", "k", "]", ")", "abus", "=", "abus2", "#print(abus) ", "else", ":", "abus", "=", "abus", "ax", "=", "[", "ax1", ",", "ax2", ",", "ax3", ",", "ax4", "]", "xxx", "=", "p", ".", "get", "(", "'radius'", ")", "if", "xaxis", "is", "\"Eulerian\"", "else", "p", ".", "get", "(", "'mass'", ")", "mass", "=", "p", ".", "get", "(", "'mass'", ")", "# in units of Msun", "radius", "=", "p", ".", "get", "(", "'radius'", ")", "*", "ast", ".", "rsun_cm", "/", "1.e8", "# in units of Mm", "if", "xaxis", "is", "\"Eulerian\"", ":", "xxx", "=", "radius", "if", "xlm", "[", "0", "]", "==", "0", "and", "xlm", "[", "1", "]", "==", "0", ":", "indtop", "=", "0", "indbot", "=", "len", "(", "mass", ")", "-", "1", "else", ":", "indbot", "=", "np", ".", "where", "(", "radius", ">=", "xlm", "[", "0", "]", ")", "[", "0", "]", "[", "-", "1", "]", "indtop", "=", "np", ".", "where", "(", "radius", "<", "xlm", "[", "1", "]", ")", "[", "0", "]", "[", "0", "]", "xll", "=", "(", "radius", "[", "indbot", "]", ",", "radius", "[", "indtop", "]", ")", "xxlabel", "=", "\"Radius (Mm)\"", "elif", "xaxis", "is", "\"Lagrangian\"", ":", "xxx", "=", "mass", "xll", "=", "xlm", "xxlabel", "=", "\"$M / \\mathrm{M_{sun}}$\"", "else", ":", "print", "(", "\"Error: don't understand xaxis choice, must be Lagrangian or Eulerian\"", ")", "for", "i", "in", "range", "(", "4", ")", ":", "for", "thing", "in", "abus", "[", "i", "]", ":", "ind", "=", "abus", "[", "i", "]", ".", "index", "(", "thing", ")", "ax", "[", "i", "]", ".", "plot", "(", "xxx", ",", "np", ".", "log10", "(", "p", ".", "get", "(", "thing", ")", ")", ",", "ls", "=", "u", ".", "linestylecb", "(", "ind", ",", "a", ",", "b", ")", "[", "0", "]", ",", "marker", "=", "u", ".", "linestylecb", "(", "ind", ",", "a", ",", "b", ")", "[", "1", "]", ",", "color", "=", "u", ".", "linestylecb", "(", "ind", ",", "a", ",", "b", ")", "[", "2", "]", ",", "markevery", "=", "50", ",", "label", "=", "thing", ")", "# set x and y lims and labels", "ax", "[", "i", "]", ".", "set_ylim", "(", "ylm", ")", "ax", "[", "i", "]", ".", "set_xlim", "(", "xll", ")", "ax", "[", "i", "]", ".", "legend", "(", "loc", "=", "1", ")", "ax", "[", "i", "]", ".", "set_xlabel", "(", "xxlabel", ")", "if", "i", "%", "2", "==", "0", ":", "ax", "[", "i", "]", ".", "set_ylabel", "(", "'log X'", ")", "# ax[i].set_aspect('equal')", "title_str", "=", "\"Abundance plot: \"", "+", "'t ='", "+", "str", "(", "title_format", "%", "p", ".", "header_attr", "[", "'star_age'", "]", ")", "+", "' dt ='", "+", "str", "(", "title_format", "%", "p", ".", "header_attr", "[", "'time_step'", "]", ")", "+", "'model number = '", "+", "str", "(", "int", "(", "p", ".", "header_attr", "[", "'model_number'", "]", ")", ")", "f", ".", "suptitle", "(", "title_str", ",", "fontsize", "=", "12", ")", "f", ".", "tight_layout", "(", ")", "f", ".", "subplots_adjust", "(", "left", "=", "0.1", ",", "bottom", "=", "0.1", ",", "right", "=", "0.95", ",", "top", "=", "0.9", ",", "wspace", "=", "0", ",", "hspace", "=", "0.1", ")", "f", ".", "savefig", "(", "'abuprof'", "+", "str", "(", "int", "(", "p", ".", "header_attr", "[", "'model_number'", "]", ")", ")", ".", 
"zfill", "(", "6", ")", "+", "'.png'", ")" ]
Four panels of abundance plots Parameters ---------- p : instance mesa_profile instance xlm : tuple xlimits: mass_min, mass_max abus : 'All' plots many 'commonly used' isotopes up to Fe if they are in your mesa output, otherwise provide a list of lists of desired abus show : Boolean False for batch use, True for interactive use xaxis : character Lagrangian: mass is the radial mass coordinate; Eulerian: radius is the radial coordinate, in Mm
[ "Four", "panels", "of", "abundance", "plots" ]
python
train
CalebBell/ht
ht/boiling_nucleic.py
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/boiling_nucleic.py#L1059-L1114
def Serth_HEDH(D, sigma, Hvap, rhol, rhog):
    r'''Calculates critical heat flux for nucleic boiling of a tube bundle
    according to [2]_, citing [3]_, and using [1]_ as the original form.

    .. math::
        q_c = KH_{vap} \rho_g^{0.5}\left[\sigma g (\rho_L-\rho_g)\right]^{0.25}

        K = 0.123 (R^*)^{-0.25} \text{ for 0.12 < R* < 1.17}

        K = 0.118

        R^* = \frac{D}{2} \left[\frac{g(\rho_L-\rho_G)}{\sigma}\right]^{0.5}

    Parameters
    ----------
    D : float
        Diameter of tubes [m]
    sigma : float
        Surface tension of liquid [N/m]
    Hvap : float
        Heat of vaporization of the fluid at T, [J/kg]
    rhol : float
        Density of the liquid [kg/m^3]
    rhog : float
        Density of the produced gas [kg/m^3]

    Returns
    -------
    q : float
        Critical heat flux [W/m^2]

    Notes
    -----
    A further source for this would be nice.

    Examples
    --------
    >>> Serth_HEDH(D=0.0127, sigma=8.2E-3, Hvap=272E3, rhol=567, rhog=18.09)
    351867.46522901946

    References
    ----------
    .. [1] Zuber N. "On the stability of boiling heat transfer". Trans ASME
       1958 80:711-20.
    .. [2] Serth, R. W., Process Heat Transfer: Principles, Applications and
       Rules of Thumb. 2E. Amsterdam: Academic Press, 2014.
    .. [3] Schlünder, Ernst U, and International Center for Heat and Mass
       Transfer. Heat Exchanger Design Handbook. Washington: Hemisphere Pub.
       Corp., 1987.
    '''
    R = D/2*(g*(rhol - rhog)/sigma)**0.5
    if 0.12 <= R <= 1.17:
        K = 0.125*R**-0.25
    else:
        K = 0.118
    return K*Hvap*rhog**0.5*(g*sigma*(rhol - rhog))**0.25
[ "def", "Serth_HEDH", "(", "D", ",", "sigma", ",", "Hvap", ",", "rhol", ",", "rhog", ")", ":", "R", "=", "D", "/", "2", "*", "(", "g", "*", "(", "rhol", "-", "rhog", ")", "/", "sigma", ")", "**", "0.5", "if", "0.12", "<=", "R", "<=", "1.17", ":", "K", "=", "0.125", "*", "R", "**", "-", "0.25", "else", ":", "K", "=", "0.118", "return", "K", "*", "Hvap", "*", "rhog", "**", "0.5", "*", "(", "g", "*", "sigma", "*", "(", "rhol", "-", "rhog", ")", ")", "**", "0.25" ]
r'''Calculates critical heat flux for nucleic boiling of a tube bundle according to [2]_, citing [3]_, and using [1]_ as the original form. .. math:: q_c = KH_{vap} \rho_g^{0.5}\left[\sigma g (\rho_L-\rho_g)\right]^{0.25} K = 0.123 (R^*)^{-0.25} \text{ for 0.12 < R* < 1.17} K = 0.118 R^* = \frac{D}{2} \left[\frac{g(\rho_L-\rho_G)}{\sigma}\right]^{0.5} Parameters ---------- D : float Diameter of tubes [m] sigma : float Surface tension of liquid [N/m] Hvap : float Heat of vaporization of the fluid at T, [J/kg] rhol : float Density of the liquid [kg/m^3] rhog : float Density of the produced gas [kg/m^3] Returns ------- q: float Critical heat flux [W/m^2] Notes ----- A further source for this would be nice. Examples -------- >>> Serth_HEDH(D=0.0127, sigma=8.2E-3, Hvap=272E3, rhol=567, rhog=18.09) 351867.46522901946 References ---------- .. [1] Zuber N. "On the stability of boiling heat transfer". Trans ASME 1958 80:711-20. .. [2] Serth, R. W., Process Heat Transfer: Principles, Applications and Rules of Thumb. 2E. Amsterdam: Academic Press, 2014. .. [3] Schlünder, Ernst U, and International Center for Heat and Mass Transfer. Heat Exchanger Design Handbook. Washington: Hemisphere Pub. Corp., 1987.
[ "r", "Calculates", "critical", "heat", "flux", "for", "nucleic", "boiling", "of", "a", "tube", "bundle", "according", "to", "[", "2", "]", "_", "citing", "[", "3", "]", "_", "and", "using", "[", "1", "]", "_", "as", "the", "original", "form", "." ]
python
train
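The docstring example can be reproduced without importing ht, since the correlation only needs a gravity constant; g = 9.80665 m/s^2 is assumed here (ht takes it from scipy.constants). A standalone sketch:

g = 9.80665  # m/s^2, standard gravity (assumed)

def serth_hedh(D, sigma, Hvap, rhol, rhog):
    # Dimensionless bundle radius R*, then the K switch from the correlation.
    R = D / 2 * (g * (rhol - rhog) / sigma) ** 0.5
    K = 0.125 * R ** -0.25 if 0.12 <= R <= 1.17 else 0.118
    return K * Hvap * rhog ** 0.5 * (g * sigma * (rhol - rhog)) ** 0.25

# Matches the docstring example above:
print(serth_hedh(D=0.0127, sigma=8.2E-3, Hvap=272E3, rhol=567, rhog=18.09))
# 351867.46522901946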
dereneaton/ipyrad
ipyrad/analysis/sratools.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/sratools.py#L441-L462
def fields_checker(fields):
    """
    Returns a fields argument formatted as a list of strings.
    Does not allow zero.
    """
    ## make sure fields will work
    if isinstance(fields, int):
        fields = str(fields)
    if isinstance(fields, str):
        if "," in fields:
            fields = [str(i) for i in fields.split(",")]
        else:
            fields = [str(fields)]
    elif isinstance(fields, (tuple, list)):
        fields = [str(i) for i in fields]
    else:
        raise IPyradWarningExit("fields not properly formatted")

    ## do not allow zero in fields
    fields = [i for i in fields if i != '0']
    return fields
[ "def", "fields_checker", "(", "fields", ")", ":", "## make sure fields will work", "if", "isinstance", "(", "fields", ",", "int", ")", ":", "fields", "=", "str", "(", "fields", ")", "if", "isinstance", "(", "fields", ",", "str", ")", ":", "if", "\",\"", "in", "fields", ":", "fields", "=", "[", "str", "(", "i", ")", "for", "i", "in", "fields", ".", "split", "(", "\",\"", ")", "]", "else", ":", "fields", "=", "[", "str", "(", "fields", ")", "]", "elif", "isinstance", "(", "fields", ",", "(", "tuple", ",", "list", ")", ")", ":", "fields", "=", "[", "str", "(", "i", ")", "for", "i", "in", "fields", "]", "else", ":", "raise", "IPyradWarningExit", "(", "\"fields not properly formatted\"", ")", "## do not allow zero in fields", "fields", "=", "[", "i", "for", "i", "in", "fields", "if", "i", "!=", "'0'", "]", "return", "fields" ]
Returns a fields argument formatted as a list of strings; zero is not allowed.
[ "returns", "a", "fields", "argument", "formatted", "as", "a", "list", "of", "strings", ".", "and", "doesn", "t", "allow", "zero", "." ]
python
valid
truemped/tornadotools
tornadotools/mongrel2/handler.py
https://github.com/truemped/tornadotools/blob/d22632b83810afc353fa886fbc9e265bee78653f/tornadotools/mongrel2/handler.py#L59-L68
def _create_listening_stream(self, pull_addr):
    """
    Create a stream listening for Requests. The `self._recv_callback`
    method is associated with incoming requests.
    """
    sock = self._zmq_context.socket(zmq.PULL)
    sock.connect(pull_addr)
    stream = ZMQStream(sock, io_loop=self.io_loop)
    return stream
[ "def", "_create_listening_stream", "(", "self", ",", "pull_addr", ")", ":", "sock", "=", "self", ".", "_zmq_context", ".", "socket", "(", "zmq", ".", "PULL", ")", "sock", ".", "connect", "(", "pull_addr", ")", "stream", "=", "ZMQStream", "(", "sock", ",", "io_loop", "=", "self", ".", "io_loop", ")", "return", "stream" ]
Create a stream listening for Requests. The `self._recv_callback` method is associated with incoming requests.
[ "Create", "a", "stream", "listening", "for", "Requests", ".", "The", "self", ".", "_recv_callback", "method", "is", "asociated", "with", "incoming", "requests", "." ]
python
train
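For context, a minimal standalone version of wiring a PULL socket into a Tornado IO loop with pyzmq (assumes pyzmq and tornado are installed; the address is a placeholder):

import zmq
from zmq.eventloop.zmqstream import ZMQStream

def create_listening_stream(context, pull_addr, on_recv):
    # PULL socket connected to the sender's address; ZMQStream delivers
    # each multipart message to the callback on the current IO loop.
    sock = context.socket(zmq.PULL)
    sock.connect(pull_addr)
    stream = ZMQStream(sock)
    stream.on_recv(on_recv)
    return stream

ctx = zmq.Context.instance()
stream = create_listening_stream(ctx, 'tcp://127.0.0.1:9999',
                                 lambda msg: print('received:', msg))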
hyperboria/python-cjdns
setup.py
https://github.com/hyperboria/python-cjdns/blob/a9ae5d6d2e99c18f9fc50a9589729cd176efa900/setup.py#L20-L29
def readme(fname):
    """Reads a markdown file and returns the contents formatted as rst"""
    # Close the file promptly instead of relying on garbage collection.
    with open(os.path.join(os.path.dirname(__file__), fname)) as readme_file:
        md = readme_file.read()
    output = md
    try:
        import pypandoc
        output = pypandoc.convert(md, 'rst', format='md')
    except ImportError:
        pass
    return output
[ "def", "readme", "(", "fname", ")", ":", "md", "=", "open", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "fname", ")", ")", ".", "read", "(", ")", "output", "=", "md", "try", ":", "import", "pypandoc", "output", "=", "pypandoc", ".", "convert", "(", "md", ",", "'rst'", ",", "format", "=", "'md'", ")", "except", "ImportError", ":", "pass", "return", "output" ]
Reads a markdown file and returns the contents formatted as rst
[ "Reads", "a", "markdown", "file", "and", "returns", "the", "contents", "formatted", "as", "rst" ]
python
train
f3at/feat
src/feat/agencies/agency.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/agencies/agency.py#L1339-L1343
def snapshot_agents(self, force=False):
    '''Snapshot agents if the number of entries since the last snapshot
    is greater than 1000. Use force=True to override.'''
    for agent in self._agents:
        agent.check_if_should_snapshot(force)
[ "def", "snapshot_agents", "(", "self", ",", "force", "=", "False", ")", ":", "for", "agent", "in", "self", ".", "_agents", ":", "agent", ".", "check_if_should_snapshot", "(", "force", ")" ]
Snapshot agents if the number of entries since the last snapshot is greater than 1000. Use force=True to override.
[ "snapshot", "agents", "if", "number", "of", "entries", "from", "last", "snapshot", "if", "greater", "than", "1000", ".", "Use", "force", "=", "True", "to", "override", "." ]
python
train
foliant-docs/foliantcontrib.includes
foliant/preprocessors/includes.py
https://github.com/foliant-docs/foliantcontrib.includes/blob/4bd89f6d287c9e21246d984c90ad05c2ccd24fcc/foliant/preprocessors/includes.py#L53-L108
def _sync_repo(self, repo_url: str, revision: str or None = None) -> Path:
    '''Clone a Git repository to the cache dir. If it has been cloned before, update it.

    :param repo_url: Repository URL
    :param revision: Revision: branch, commit hash, or tag

    :returns: Path to the cloned repository
    '''
    repo_name = repo_url.split('/')[-1].rsplit('.', maxsplit=1)[0]
    repo_path = (self._cache_path / repo_name).resolve()

    self.logger.debug(f'Synchronizing with repo; URL: {repo_url}, revision: {revision}')

    try:
        self.logger.debug(f'Cloning repo {repo_url} to {repo_path}')
        run(
            f'git clone {repo_url} {repo_path}',
            shell=True,
            check=True,
            stdout=PIPE,
            stderr=STDOUT
        )
    except CalledProcessError as exception:
        if repo_path.exists():
            self.logger.debug('Repo already cloned; pulling from remote')
            try:
                run(
                    'git pull',
                    cwd=repo_path,
                    shell=True,
                    check=True,
                    stdout=PIPE,
                    stderr=STDOUT
                )
            except CalledProcessError as exception:
                self.logger.warning(str(exception))
        else:
            self.logger.error(str(exception))

    if revision:
        run(
            f'git checkout {revision}',
            cwd=repo_path,
            shell=True,
            check=True,
            stdout=PIPE,
            stderr=STDOUT
        )

    return repo_path
[ "def", "_sync_repo", "(", "self", ",", "repo_url", ":", "str", ",", "revision", ":", "str", "or", "None", "=", "None", ")", "->", "Path", ":", "repo_name", "=", "repo_url", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ".", "rsplit", "(", "'.'", ",", "maxsplit", "=", "1", ")", "[", "0", "]", "repo_path", "=", "(", "self", ".", "_cache_path", "/", "repo_name", ")", ".", "resolve", "(", ")", "self", ".", "logger", ".", "debug", "(", "f'Synchronizing with repo; URL: {repo_url}, revision: {revision}'", ")", "try", ":", "self", ".", "logger", ".", "debug", "(", "f'Cloning repo {repo_url} to {repo_path}'", ")", "run", "(", "f'git clone {repo_url} {repo_path}'", ",", "shell", "=", "True", ",", "check", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "STDOUT", ")", "except", "CalledProcessError", "as", "exception", ":", "if", "repo_path", ".", "exists", "(", ")", ":", "self", ".", "logger", ".", "debug", "(", "'Repo already cloned; pulling from remote'", ")", "try", ":", "run", "(", "'git pull'", ",", "cwd", "=", "repo_path", ",", "shell", "=", "True", ",", "check", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "STDOUT", ")", "except", "CalledProcessError", "as", "exception", ":", "self", ".", "logger", ".", "warning", "(", "str", "(", "exception", ")", ")", "else", ":", "self", ".", "logger", ".", "error", "(", "str", "(", "exception", ")", ")", "if", "revision", ":", "run", "(", "f'git checkout {revision}'", ",", "cwd", "=", "repo_path", ",", "shell", "=", "True", ",", "check", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "STDOUT", ")", "return", "repo_path" ]
Clone a Git repository to the cache dir. If it has been cloned before, update it. :param repo_url: Repository URL :param revision: Revision: branch, commit hash, or tag :returns: Path to the cloned repository
[ "Clone", "a", "Git", "repository", "to", "the", "cache", "dir", ".", "If", "it", "has", "been", "cloned", "before", "update", "it", "." ]
python
train
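The clone-then-pull fallback generalizes beyond foliant; here is a compact standalone sketch using only the standard library (list-form arguments instead of shell=True, a deliberate deviation from the code above):

from pathlib import Path
from subprocess import run, PIPE, STDOUT, CalledProcessError

def sync_repo(repo_url, cache_dir, revision=None):
    repo_name = repo_url.split('/')[-1].rsplit('.', maxsplit=1)[0]
    repo_path = (Path(cache_dir) / repo_name).resolve()
    try:
        # First attempt: fresh clone. Fails if repo_path already exists.
        run(['git', 'clone', repo_url, str(repo_path)],
            check=True, stdout=PIPE, stderr=STDOUT)
    except CalledProcessError:
        if repo_path.exists():
            # Already cloned earlier: update instead.
            run(['git', 'pull'], cwd=repo_path, check=True,
                stdout=PIPE, stderr=STDOUT)
    if revision:
        run(['git', 'checkout', revision], cwd=repo_path, check=True,
            stdout=PIPE, stderr=STDOUT)
    return repo_path

# Usage (URL is a placeholder):
# sync_repo('https://github.com/foliant-docs/foliant.git', '.cache')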
zeehio/parmap
parmap/parmap.py
https://github.com/zeehio/parmap/blob/368b77e1a49ff30aef9de2274ad430ad43a3f617/parmap/parmap.py#L220-L273
def _map_or_starmap(function, iterable, args, kwargs, map_or_starmap):
    """
    Shared function between parmap.map and parmap.starmap.
    Refer to those functions for details.
    """
    arg_newarg = (("parallel", "pm_parallel"), ("chunksize", "pm_chunksize"),
                  ("pool", "pm_pool"), ("processes", "pm_processes"),
                  ("parmap_progress", "pm_pbar"))
    kwargs = _deprecated_kwargs(kwargs, arg_newarg)
    chunksize = kwargs.pop("pm_chunksize", None)
    progress = kwargs.pop("pm_pbar", False)
    progress = progress and HAVE_TQDM
    parallel, pool, close_pool = _create_pool(kwargs)
    # Map:
    if parallel:
        func_star = _get_helper_func(map_or_starmap)
        try:
            if progress and close_pool:
                try:
                    num_tasks = len(iterable)
                    # get a chunksize (as multiprocessing does):
                    chunksize = _get_default_chunksize(chunksize,
                                                       pool, num_tasks)
                    # use map_async to get progress information
                    result = pool.map_async(func_star,
                                            izip(repeat(function),
                                                 iterable,
                                                 repeat(list(args)),
                                                 repeat(kwargs)),
                                            chunksize)
                finally:
                    pool.close()
                # Progress bar:
                try:
                    _do_pbar(result, num_tasks, chunksize)
                finally:
                    output = result.get()
            else:
                result = pool.map_async(func_star,
                                        izip(repeat(function),
                                             iterable,
                                             repeat(list(args)),
                                             repeat(kwargs)),
                                        chunksize)
                output = result.get()
        finally:
            if close_pool:
                if not progress:
                    pool.close()
                pool.join()
    else:
        output = _serial_map_or_starmap(function, iterable, args, kwargs,
                                        progress, map_or_starmap)
    return output
[ "def", "_map_or_starmap", "(", "function", ",", "iterable", ",", "args", ",", "kwargs", ",", "map_or_starmap", ")", ":", "arg_newarg", "=", "(", "(", "\"parallel\"", ",", "\"pm_parallel\"", ")", ",", "(", "\"chunksize\"", ",", "\"pm_chunksize\"", ")", ",", "(", "\"pool\"", ",", "\"pm_pool\"", ")", ",", "(", "\"processes\"", ",", "\"pm_processes\"", ")", ",", "(", "\"parmap_progress\"", ",", "\"pm_pbar\"", ")", ")", "kwargs", "=", "_deprecated_kwargs", "(", "kwargs", ",", "arg_newarg", ")", "chunksize", "=", "kwargs", ".", "pop", "(", "\"pm_chunksize\"", ",", "None", ")", "progress", "=", "kwargs", ".", "pop", "(", "\"pm_pbar\"", ",", "False", ")", "progress", "=", "progress", "and", "HAVE_TQDM", "parallel", ",", "pool", ",", "close_pool", "=", "_create_pool", "(", "kwargs", ")", "# Map:", "if", "parallel", ":", "func_star", "=", "_get_helper_func", "(", "map_or_starmap", ")", "try", ":", "if", "progress", "and", "close_pool", ":", "try", ":", "num_tasks", "=", "len", "(", "iterable", ")", "# get a chunksize (as multiprocessing does):", "chunksize", "=", "_get_default_chunksize", "(", "chunksize", ",", "pool", ",", "num_tasks", ")", "# use map_async to get progress information", "result", "=", "pool", ".", "map_async", "(", "func_star", ",", "izip", "(", "repeat", "(", "function", ")", ",", "iterable", ",", "repeat", "(", "list", "(", "args", ")", ")", ",", "repeat", "(", "kwargs", ")", ")", ",", "chunksize", ")", "finally", ":", "pool", ".", "close", "(", ")", "# Progress bar:", "try", ":", "_do_pbar", "(", "result", ",", "num_tasks", ",", "chunksize", ")", "finally", ":", "output", "=", "result", ".", "get", "(", ")", "else", ":", "result", "=", "pool", ".", "map_async", "(", "func_star", ",", "izip", "(", "repeat", "(", "function", ")", ",", "iterable", ",", "repeat", "(", "list", "(", "args", ")", ")", ",", "repeat", "(", "kwargs", ")", ")", ",", "chunksize", ")", "output", "=", "result", ".", "get", "(", ")", "finally", ":", "if", "close_pool", ":", "if", "not", "progress", ":", "pool", ".", "close", "(", ")", "pool", ".", "join", "(", ")", "else", ":", "output", "=", "_serial_map_or_starmap", "(", "function", ",", "iterable", ",", "args", ",", "kwargs", ",", "progress", ",", "map_or_starmap", ")", "return", "output" ]
Shared function between parmap.map and parmap.starmap. Refer to those functions for details.
[ "Shared", "function", "between", "parmap", ".", "map", "and", "parmap", ".", "starmap", ".", "Refer", "to", "those", "functions", "for", "details", "." ]
python
train
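Typical call sites for the public wrappers around _map_or_starmap look like the following sketch (assumes parmap is installed; the pm_* keyword names match the deprecation table above):

import parmap

def power(x, exponent, offset=0):
    return x ** exponent + offset

if __name__ == '__main__':
    # Each element of the iterable becomes the first argument; extra
    # positional and keyword args are broadcast to every call.
    results = parmap.map(power, [1, 2, 3, 4], 2, offset=1,
                         pm_processes=2, pm_chunksize=2)
    print(results)  # [2, 5, 10, 17]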
Nic30/hwt
hwt/hdl/types/arrayCast.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/hdl/types/arrayCast.py#L11-L49
def getBits_from_array(array, wordWidth, start, end, reinterpretElmToType=None):
    """
    Gets value of bits between selected range from memory

    :param start: bit address of the first selected bit
    :param end: bit address of first bit behind the selected bits
    :return: instance of BitsVal (derived from SimBits type)
        which contains copy of selected bits
    """
    inPartOffset = 0
    value = Bits(end - start, None).fromPy(None)

    while start != end:
        assert start < end, (start, end)

        dataWordIndex = start // wordWidth
        v = array[dataWordIndex]
        if reinterpretElmToType is not None:
            v = v._reinterpret_cast(reinterpretElmToType)

        endOfWord = (dataWordIndex + 1) * wordWidth
        width = min(end, endOfWord) - start
        offset = start % wordWidth
        val = selectBitRange(v.val, offset, width)
        vldMask = selectBitRange(v.vldMask, offset, width)
        updateTime = v.updateTime

        m = mask(width)
        value.val |= (val & m) << inPartOffset
        value.vldMask |= (vldMask & m) << inPartOffset
        # track the newest update time seen across the source words
        value.updateTime = max(value.updateTime, updateTime)

        inPartOffset += width
        start += width

    return value
[ "def", "getBits_from_array", "(", "array", ",", "wordWidth", ",", "start", ",", "end", ",", "reinterpretElmToType", "=", "None", ")", ":", "inPartOffset", "=", "0", "value", "=", "Bits", "(", "end", "-", "start", ",", "None", ")", ".", "fromPy", "(", "None", ")", "while", "start", "!=", "end", ":", "assert", "start", "<", "end", ",", "(", "start", ",", "end", ")", "dataWordIndex", "=", "start", "//", "wordWidth", "v", "=", "array", "[", "dataWordIndex", "]", "if", "reinterpretElmToType", "is", "not", "None", ":", "v", "=", "v", ".", "_reinterpret_cast", "(", "reinterpretElmToType", ")", "endOfWord", "=", "(", "dataWordIndex", "+", "1", ")", "*", "wordWidth", "width", "=", "min", "(", "end", ",", "endOfWord", ")", "-", "start", "offset", "=", "start", "%", "wordWidth", "val", "=", "selectBitRange", "(", "v", ".", "val", ",", "offset", ",", "width", ")", "vldMask", "=", "selectBitRange", "(", "v", ".", "vldMask", ",", "offset", ",", "width", ")", "updateTime", "=", "v", ".", "updateTime", "m", "=", "mask", "(", "width", ")", "value", ".", "val", "|=", "(", "val", "&", "m", ")", "<<", "inPartOffset", "value", ".", "vldMask", "|=", "(", "vldMask", "&", "m", ")", "<<", "inPartOffset", "value", ".", "updateMask", "=", "max", "(", "value", ".", "updateTime", ",", "updateTime", ")", "inPartOffset", "+=", "width", "start", "+=", "width", "return", "value" ]
Gets value of bits between selected range from memory :param start: bit address of the first selected bit :param end: bit address of first bit behind the selected bits :return: instance of BitsVal (derived from SimBits type) which contains copy of selected bits
[ "Gets", "value", "of", "bits", "between", "selected", "range", "from", "memory" ]
python
test
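The word-spanning bit extraction reduces to shift-and-mask arithmetic; a plain-integer sketch (no hwt types) of reading a bit range that crosses a word boundary:

def select_bit_range(value, offset, width):
    # Take `width` bits of `value` starting at bit `offset` (LSB = bit 0).
    return (value >> offset) & ((1 << width) - 1)

# Memory of 8-bit words; read 8 bits starting at bit address 4,
# which spans the boundary between words 0 and 1.
words = [0xAB, 0xCD]
word_width, start, end = 8, 4, 12

result, out_offset = 0, 0
while start != end:
    index = start // word_width
    end_of_word = (index + 1) * word_width
    width = min(end, end_of_word) - start
    part = select_bit_range(words[index], start % word_width, width)
    result |= part << out_offset
    out_offset += width
    start += width

print(hex(result))  # 0xda: low nibble of 0xCD joined with high nibble of 0xAB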
croscon/fleaker
fleaker/component.py
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/component.py#L222-L241
def clear_context(self, app=None):
    """Clear the component's context.

    Keyword Args:
        app (flask.Flask, optional):
            The app to clear this component's context for. If omitted,
            the value from ``Component.app`` is used.
    """
    if (app is None and self._context is _CONTEXT_MISSING
            and not in_app_context()):
        raise RuntimeError("Attempted to clear component context without"
                           " a bound app context or eager app set! Please"
                           " pass the related app you want to update the"
                           " context for!")

    if self._context is not _CONTEXT_MISSING:
        self._context = DEFAULT_DICT
    else:
        key = self._get_context_name(app=app)
        setattr(_CONTEXT_LOCALS, key, DEFAULT_DICT)
[ "def", "clear_context", "(", "self", ",", "app", "=", "None", ")", ":", "if", "(", "app", "is", "None", "and", "self", ".", "_context", "is", "_CONTEXT_MISSING", "and", "not", "in_app_context", "(", ")", ")", ":", "raise", "RuntimeError", "(", "\"Attempted to clear component context without\"", "\" a bound app context or eager app set! Please\"", "\" pass the related app you want to update the\"", "\" context for!\"", ")", "if", "self", ".", "_context", "is", "not", "_CONTEXT_MISSING", ":", "self", ".", "_context", "=", "DEFAULT_DICT", "else", ":", "key", "=", "self", ".", "_get_context_name", "(", "app", "=", "app", ")", "setattr", "(", "_CONTEXT_LOCALS", ",", "key", ",", "DEFAULT_DICT", ")" ]
Clear the component's context. Keyword Args: app (flask.Flask, optional): The app to clear this component's context for. If omitted, the value from ``Component.app`` is used.
[ "Clear", "the", "component", "s", "context", "." ]
python
train
KelSolaar/Foundations
foundations/namespace.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/namespace.py#L67-L95
def get_namespace(attribute, namespace_splitter=NAMESPACE_SPLITTER, root_only=False):
    """
    Returns given attribute foundations.namespace.

    Usage::

        >>> get_namespace("grandParent|parent|child")
        u'grandParent|parent'
        >>> get_namespace("grandParent|parent|child", root_only=True)
        u'grandParent'

    :param attribute: Attribute.
    :type attribute: unicode
    :param namespace_splitter: Namespace splitter character.
    :type namespace_splitter: unicode
    :param root_only: Return only root foundations.namespace.
    :type root_only: bool
    :return: Attribute foundations.namespace.
    :rtype: unicode
    """
    attribute_tokens = attribute.split(namespace_splitter)
    if len(attribute_tokens) == 1:
        LOGGER.debug("> Attribute: '{0}', namespace: '{1}'.".format(attribute, Constants.null_object))
    else:
        namespace = foundations.common.get_first_item(attribute_tokens) if root_only else \
            namespace_splitter.join(attribute_tokens[0:-1])
        LOGGER.debug("> Attribute: '{0}', namespace: '{1}'.".format(attribute, namespace))
        return namespace
[ "def", "get_namespace", "(", "attribute", ",", "namespace_splitter", "=", "NAMESPACE_SPLITTER", ",", "root_only", "=", "False", ")", ":", "attribute_tokens", "=", "attribute", ".", "split", "(", "namespace_splitter", ")", "if", "len", "(", "attribute_tokens", ")", "==", "1", ":", "LOGGER", ".", "debug", "(", "\"> Attribute: '{0}', namespace: '{1}'.\"", ".", "format", "(", "attribute", ",", "Constants", ".", "null_object", ")", ")", "else", ":", "namespace", "=", "foundations", ".", "common", ".", "get_first_item", "(", "attribute_tokens", ")", "if", "root_only", "else", "namespace_splitter", ".", "join", "(", "attribute_tokens", "[", "0", ":", "-", "1", "]", ")", "LOGGER", ".", "debug", "(", "\"> Attribute: '{0}', namespace: '{1}'.\"", ".", "format", "(", "attribute", ",", "namespace", ")", ")", "return", "namespace" ]
Returns given attribute foundations.namespace. Usage:: >>> get_namespace("grandParent|parent|child") u'grandParent|parent' >>> get_namespace("grandParent|parent|child", root_only=True) u'grandParent' :param attribute: Attribute. :type attribute: unicode :param namespace_splitter: Namespace splitter character. :type namespace_splitter: unicode :param root_only: Return only root foundations.namespace. :type root_only: bool :return: Attribute foundations.namespace. :rtype: unicode
[ "Returns", "given", "attribute", "foundations", ".", "namespace", "." ]
python
train
rigetti/pyquil
pyquil/paulis.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/paulis.py#L952-L995
def trotterize(first_pauli_term, second_pauli_term, trotter_order=1,
               trotter_steps=1):
    """
    Create a Quil program that approximates exp( (A + B)t) where A and B are
    PauliTerm operators.

    :param PauliTerm first_pauli_term: PauliTerm denoted `A`
    :param PauliTerm second_pauli_term: PauliTerm denoted `B`
    :param int trotter_order: Optional argument indicating the Suzuki-Trotter
                              approximation order--only accepts orders 1, 2, 3, 4.
    :param int trotter_steps: Optional argument indicating the number of products
                              to decompose the exponential into.

    :return: Quil program
    :rtype: Program
    """
    if not (1 <= trotter_order < 5):
        raise ValueError("trotterize only accepts trotter_order in {1, 2, 3, 4}.")

    commutator = (first_pauli_term * second_pauli_term) + \
                 (-1 * second_pauli_term * first_pauli_term)

    prog = Program()
    if is_zero(commutator):
        param_exp_prog_one = exponential_map(first_pauli_term)
        exp_prog = param_exp_prog_one(1)
        prog += exp_prog
        param_exp_prog_two = exponential_map(second_pauli_term)
        exp_prog = param_exp_prog_two(1)
        prog += exp_prog
        return prog

    order_slices = suzuki_trotter(trotter_order, trotter_steps)
    for coeff, operator in order_slices:
        if operator == 0:
            param_prog = exponential_map(coeff * first_pauli_term)
            exp_prog = param_prog(1)
            prog += exp_prog
        else:
            param_prog = exponential_map(coeff * second_pauli_term)
            exp_prog = param_prog(1)
            prog += exp_prog
    return prog
[ "def", "trotterize", "(", "first_pauli_term", ",", "second_pauli_term", ",", "trotter_order", "=", "1", ",", "trotter_steps", "=", "1", ")", ":", "if", "not", "(", "1", "<=", "trotter_order", "<", "5", ")", ":", "raise", "ValueError", "(", "\"trotterize only accepts trotter_order in {1, 2, 3, 4}.\"", ")", "commutator", "=", "(", "first_pauli_term", "*", "second_pauli_term", ")", "+", "(", "-", "1", "*", "second_pauli_term", "*", "first_pauli_term", ")", "prog", "=", "Program", "(", ")", "if", "is_zero", "(", "commutator", ")", ":", "param_exp_prog_one", "=", "exponential_map", "(", "first_pauli_term", ")", "exp_prog", "=", "param_exp_prog_one", "(", "1", ")", "prog", "+=", "exp_prog", "param_exp_prog_two", "=", "exponential_map", "(", "second_pauli_term", ")", "exp_prog", "=", "param_exp_prog_two", "(", "1", ")", "prog", "+=", "exp_prog", "return", "prog", "order_slices", "=", "suzuki_trotter", "(", "trotter_order", ",", "trotter_steps", ")", "for", "coeff", ",", "operator", "in", "order_slices", ":", "if", "operator", "==", "0", ":", "param_prog", "=", "exponential_map", "(", "coeff", "*", "first_pauli_term", ")", "exp_prog", "=", "param_prog", "(", "1", ")", "prog", "+=", "exp_prog", "else", ":", "param_prog", "=", "exponential_map", "(", "coeff", "*", "second_pauli_term", ")", "exp_prog", "=", "param_prog", "(", "1", ")", "prog", "+=", "exp_prog", "return", "prog" ]
Create a Quil program that approximates exp( (A + B)t) where A and B are PauliTerm operators. :param PauliTerm first_pauli_term: PauliTerm denoted `A` :param PauliTerm second_pauli_term: PauliTerm denoted `B` :param int trotter_order: Optional argument indicating the Suzuki-Trotter approximation order--only accepts orders 1, 2, 3, 4. :param int trotter_steps: Optional argument indicating the number of products to decompose the exponential into. :return: Quil program :rtype: Program
[ "Create", "a", "Quil", "program", "that", "approximates", "exp", "(", "(", "A", "+", "B", ")", "t", ")", "where", "A", "and", "B", "are", "PauliTerm", "operators", "." ]
python
train
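A short usage sketch with pyquil's Pauli constructors (sX and sZ live in pyquil.paulis alongside trotterize; exact gate output depends on the pyquil version):

from pyquil.paulis import sX, sZ, trotterize

# X and Z on the same qubit do not commute, so the Suzuki-Trotter
# product formula is actually exercised here.
prog = trotterize(sX(0), sZ(0), trotter_order=2, trotter_steps=2)
print(prog)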
Microsoft/nni
tools/nni_cmd/nnictl_utils.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_cmd/nnictl_utils.py#L304-L311
def log_internal(args, filetype):
    '''internal function to call get_log_content'''
    file_name = get_config_filename(args)
    if filetype == 'stdout':
        file_full_path = os.path.join(NNICTL_HOME_DIR, file_name, 'stdout')
    else:
        file_full_path = os.path.join(NNICTL_HOME_DIR, file_name, 'stderr')
    print(check_output_command(file_full_path, head=args.head, tail=args.tail))
[ "def", "log_internal", "(", "args", ",", "filetype", ")", ":", "file_name", "=", "get_config_filename", "(", "args", ")", "if", "filetype", "==", "'stdout'", ":", "file_full_path", "=", "os", ".", "path", ".", "join", "(", "NNICTL_HOME_DIR", ",", "file_name", ",", "'stdout'", ")", "else", ":", "file_full_path", "=", "os", ".", "path", ".", "join", "(", "NNICTL_HOME_DIR", ",", "file_name", ",", "'stderr'", ")", "print", "(", "check_output_command", "(", "file_full_path", ",", "head", "=", "args", ".", "head", ",", "tail", "=", "args", ".", "tail", ")", ")" ]
internal function to call get_log_content
[ "internal", "function", "to", "call", "get_log_content" ]
python
train
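`log_internal` only reads `head` and `tail` off its `args` object directly (plus whatever `get_config_filename` needs, presumably the experiment id), so a hedged sketch can fake that with a Namespace; every field name other than `head` and `tail` is an assumption:

from argparse import Namespace

# Print the last 20 lines of the experiment's stderr log.
args = Namespace(id='experiment-id', head=None, tail=20)  # 'id' is a guessed field
log_internal(args, 'stderr')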
eerimoq/bincopy
bincopy.py
https://github.com/eerimoq/bincopy/blob/5e02cd001c3e9b54729425db6bffad5f03e1beac/bincopy.py#L754-L773
def add_srec(self, records, overwrite=False):
        """Add given Motorola S-Records string. Set `overwrite` to ``True``
        to allow already added data to be overwritten.

        """
        for record in StringIO(records):
            type_, address, size, data = unpack_srec(record.strip())

            if type_ == '0':
                self._header = data
            elif type_ in '123':
                address *= self.word_size_bytes
                self._segments.add(_Segment(address,
                                            address + size,
                                            bytearray(data),
                                            self.word_size_bytes),
                                   overwrite)
            elif type_ in '789':
                self.execution_start_address = address
[ "def", "add_srec", "(", "self", ",", "records", ",", "overwrite", "=", "False", ")", ":", "for", "record", "in", "StringIO", "(", "records", ")", ":", "type_", ",", "address", ",", "size", ",", "data", "=", "unpack_srec", "(", "record", ".", "strip", "(", ")", ")", "if", "type_", "==", "'0'", ":", "self", ".", "_header", "=", "data", "elif", "type_", "in", "'123'", ":", "address", "*=", "self", ".", "word_size_bytes", "self", ".", "_segments", ".", "add", "(", "_Segment", "(", "address", ",", "address", "+", "size", ",", "bytearray", "(", "data", ")", ",", "self", ".", "word_size_bytes", ")", ",", "overwrite", ")", "elif", "type_", "in", "'789'", ":", "self", ".", "execution_start_address", "=", "address" ]
Add given Motorola S-Records string. Set `overwrite` to ``True`` to allow already added data to be overwritten.
[ "Add", "given", "Motorola", "S", "-", "Records", "string", ".", "Set", "overwrite", "to", "True", "to", "allow", "already", "added", "data", "to", "be", "overwritten", "." ]
python
train
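A sketch of `add_srec` through bincopy's public `BinFile` wrapper, which is assumed to expose this method; the records are the classic S-record sample, so treat the exact bytes as illustrative:

import bincopy

f = bincopy.BinFile()
f.add_srec('S00600004844521B\n'
           'S1130000285F245F2212226A000424290008237C2A\n'
           'S9030000FC\n')
# The header record hit the '0' branch, data '1', start address '9'.
print(f.execution_start_address)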
klmitch/turnstile
turnstile/limits.py
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/limits.py#L198-L229
def decode(cls, key):
        """
        Decode a bucket key into a BucketKey instance.

        :param key: The string form of a bucket key.

        :returns: A suitable instance of BucketKey corresponding to the
                  passed-in key.
        """

        # Determine bucket key version
        prefix, sep, param_str = key.partition(':')
        if sep != ':' or prefix not in cls._prefix_to_version:
            raise ValueError("%r is not a bucket key" % key)
        version = cls._prefix_to_version[prefix]

        # Take the parameters apart...
        parts = param_str.split('/')
        uuid = parts.pop(0)
        params = {}
        for part in parts:
            name, sep, value = part.partition('=')

            # Make sure it's well-formed
            if sep != '=':
                raise ValueError("Cannot interpret parameter expression %r" %
                                 part)

            params[name] = cls._decode(value)

        # Return a BucketKey
        return cls(uuid, params, version=version)
[ "def", "decode", "(", "cls", ",", "key", ")", ":", "# Determine bucket key version", "prefix", ",", "sep", ",", "param_str", "=", "key", ".", "partition", "(", "':'", ")", "if", "sep", "!=", "':'", "or", "prefix", "not", "in", "cls", ".", "_prefix_to_version", ":", "raise", "ValueError", "(", "\"%r is not a bucket key\"", "%", "key", ")", "version", "=", "cls", ".", "_prefix_to_version", "[", "prefix", "]", "# Take the parameters apart...", "parts", "=", "param_str", ".", "split", "(", "'/'", ")", "uuid", "=", "parts", ".", "pop", "(", "0", ")", "params", "=", "{", "}", "for", "part", "in", "parts", ":", "name", ",", "sep", ",", "value", "=", "part", ".", "partition", "(", "'='", ")", "# Make sure it's well-formed", "if", "sep", "!=", "'='", ":", "raise", "ValueError", "(", "\"Cannot interpret parameter expression %r\"", "%", "part", ")", "params", "[", "name", "]", "=", "cls", ".", "_decode", "(", "value", ")", "# Return a BucketKey", "return", "cls", "(", "uuid", ",", "params", ",", "version", "=", "version", ")" ]
Decode a bucket key into a BucketKey instance.

:param key: The string form of a bucket key.

:returns: A suitable instance of BucketKey corresponding to the
          passed-in key.
[ "Decode", "a", "bucket", "key", "into", "a", "BucketKey", "instance", "." ]
python
train
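A hedged sketch of `BucketKey.decode`; the `'bucket'` prefix and the attribute names stand in for whatever `_prefix_to_version` and the constructor actually define:

# A version-prefixed key has the shape '<prefix>:<uuid>/<name>=<value>/...'
key = BucketKey.decode('bucket:36a5c9e2/verb=GET/user=alice')
print(key.uuid)    # '36a5c9e2'  (attribute name assumed from the constructor call)
print(key.params)  # {'verb': 'GET', 'user': 'alice'}, values via cls._decode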
materialsproject/pymatgen
pymatgen/electronic_structure/boltztrap.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/electronic_structure/boltztrap.py#L1125-L1196
def get_thermal_conductivity(self, output='eigs', doping_levels=True,
                                 k_el=True, relaxation_time=1e-14):
        """
        Gives the electronic part of the thermal conductivity in either a
        full 3x3 tensor form, as 3 eigenvalues, or as the average value
        (trace/3.0) If doping_levels=True, the results are given at
        different p and n doping levels (given by self.doping), otherwise it
        is given as a series of electron chemical potential values

        Args:
            output (string): the type of output. 'tensor' gives the full 3x3
                tensor, 'eigs' its 3 eigenvalues and 'average' the average of
                the three eigenvalues
            doping_levels (boolean): True for the results to be given at
                different doping levels, False for results at different
                electron chemical potentials
            k_el (boolean): True for k_0-PF*T, False for k_0
            relaxation_time (float): constant relaxation time in secs

        Returns:
            If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The
            'p' links to thermal conductivity at p-type doping and 'n' to
            the thermal conductivity at n-type doping. Otherwise, returns a
            {temp:[]} dictionary. The result contains either the sorted
            three eigenvalues of the symmetric conductivity tensor
            (format='eigs') or a full tensor (3x3 array) (output='tensor')
            or as an average (output='average').
            The result includes a given constant relaxation time

            units are W/mK
        """
        result = None
        result_doping = None
        if doping_levels:
            result_doping = {doping: {t: [] for t in
                                      self._seebeck_doping[doping]}
                             for doping in self._seebeck_doping}

            for doping in result_doping:
                for t in result_doping[doping]:
                    for i in range(len(self.doping[doping])):
                        if k_el:
                            pf_tensor = np.dot(
                                self._cond_doping[doping][t][i],
                                np.dot(self._seebeck_doping[doping][t][i],
                                       self._seebeck_doping[doping][t][i]))
                            result_doping[doping][t].append(
                                (self._kappa_doping[doping][t][i] -
                                 pf_tensor * t))
                        else:
                            result_doping[doping][t].append(
                                (self._kappa_doping[doping][t][i]))
        else:
            result = {t: [] for t in self._seebeck}
            for t in result:
                for i in range(len(self.mu_steps)):
                    if k_el:
                        pf_tensor = np.dot(self._cond[t][i],
                                           np.dot(self._seebeck[t][i],
                                                  self._seebeck[t][i]))
                        result[t].append((self._kappa[t][i] -
                                          pf_tensor * t))
                    else:
                        result[t].append((self._kappa[t][i]))

        return BoltztrapAnalyzer._format_to_output(result, result_doping,
                                                   output, doping_levels,
                                                   multi=relaxation_time)
[ "def", "get_thermal_conductivity", "(", "self", ",", "output", "=", "'eigs'", ",", "doping_levels", "=", "True", ",", "k_el", "=", "True", ",", "relaxation_time", "=", "1e-14", ")", ":", "result", "=", "None", "result_doping", "=", "None", "if", "doping_levels", ":", "result_doping", "=", "{", "doping", ":", "{", "t", ":", "[", "]", "for", "t", "in", "self", ".", "_seebeck_doping", "[", "doping", "]", "}", "for", "doping", "in", "self", ".", "_seebeck_doping", "}", "for", "doping", "in", "result_doping", ":", "for", "t", "in", "result_doping", "[", "doping", "]", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "doping", "[", "doping", "]", ")", ")", ":", "if", "k_el", ":", "pf_tensor", "=", "np", ".", "dot", "(", "self", ".", "_cond_doping", "[", "doping", "]", "[", "t", "]", "[", "i", "]", ",", "np", ".", "dot", "(", "self", ".", "_seebeck_doping", "[", "doping", "]", "[", "t", "]", "[", "i", "]", ",", "self", ".", "_seebeck_doping", "[", "doping", "]", "[", "t", "]", "[", "i", "]", ")", ")", "result_doping", "[", "doping", "]", "[", "t", "]", ".", "append", "(", "(", "self", ".", "_kappa_doping", "[", "doping", "]", "[", "t", "]", "[", "i", "]", "-", "pf_tensor", "*", "t", ")", ")", "else", ":", "result_doping", "[", "doping", "]", "[", "t", "]", ".", "append", "(", "(", "self", ".", "_kappa_doping", "[", "doping", "]", "[", "t", "]", "[", "i", "]", ")", ")", "else", ":", "result", "=", "{", "t", ":", "[", "]", "for", "t", "in", "self", ".", "_seebeck", "}", "for", "t", "in", "result", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "mu_steps", ")", ")", ":", "if", "k_el", ":", "pf_tensor", "=", "np", ".", "dot", "(", "self", ".", "_cond", "[", "t", "]", "[", "i", "]", ",", "np", ".", "dot", "(", "self", ".", "_seebeck", "[", "t", "]", "[", "i", "]", ",", "self", ".", "_seebeck", "[", "t", "]", "[", "i", "]", ")", ")", "result", "[", "t", "]", ".", "append", "(", "(", "self", ".", "_kappa", "[", "t", "]", "[", "i", "]", "-", "pf_tensor", "*", "t", ")", ")", "else", ":", "result", "[", "t", "]", ".", "append", "(", "(", "self", ".", "_kappa", "[", "t", "]", "[", "i", "]", ")", ")", "return", "BoltztrapAnalyzer", ".", "_format_to_output", "(", "result", ",", "result_doping", ",", "output", ",", "doping_levels", ",", "multi", "=", "relaxation_time", ")" ]
Gives the electronic part of the thermal conductivity in either a full
3x3 tensor form, as 3 eigenvalues, or as the average value (trace/3.0)
If doping_levels=True, the results are given at different p and n doping
levels (given by self.doping), otherwise it is given as a series of
electron chemical potential values

Args:
    output (string): the type of output. 'tensor' gives the full 3x3
        tensor, 'eigs' its 3 eigenvalues and 'average' the average of the
        three eigenvalues
    doping_levels (boolean): True for the results to be given at
        different doping levels, False for results at different electron
        chemical potentials
    k_el (boolean): True for k_0-PF*T, False for k_0
    relaxation_time (float): constant relaxation time in secs

Returns:
    If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The 'p'
    links to thermal conductivity at p-type doping and 'n' to the thermal
    conductivity at n-type doping. Otherwise, returns a {temp:[]}
    dictionary. The result contains either the sorted three eigenvalues
    of the symmetric conductivity tensor (format='eigs') or a full tensor
    (3x3 array) (output='tensor') or as an average (output='average').
    The result includes a given constant relaxation time

    units are W/mK
[ "Gives", "the", "electronic", "part", "of", "the", "thermal", "conductivity", "in", "either", "a", "full", "3x3", "tensor", "form", "as", "3", "eigenvalues", "or", "as", "the", "average", "value", "(", "trace", "/", "3", ".", "0", ")", "If", "doping_levels", "=", "True", "the", "results", "are", "given", "at", "different", "p", "and", "n", "doping", "levels", "(", "given", "by", "self", ".", "doping", ")", "otherwise", "it", "is", "given", "as", "a", "series", "of", "electron", "chemical", "potential", "values" ]
python
train
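A usage sketch assuming BoltzTraP output files on disk; `from_files` is the analyzer's usual constructor, but the directory name and the 300 K key are illustrative:

from pymatgen.electronic_structure.boltztrap import BoltztrapAnalyzer

an = BoltztrapAnalyzer.from_files('boltztrap_run/')
kappa = an.get_thermal_conductivity(output='average', doping_levels=True)
# {temperature: {'p': [...], 'n': [...]}} -- electronic kappa in W/mK
print(kappa[300]['n'])  # assumes 300 K was among the computed temperatures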
watson-developer-cloud/python-sdk
ibm_watson/speech_to_text_v1_adapter.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/speech_to_text_v1_adapter.py#L28-L241
def recognize_using_websocket(self, audio, content_type, recognize_callback, model=None,
                                  language_customization_id=None, acoustic_customization_id=None,
                                  customization_weight=None, base_model_version=None,
                                  inactivity_timeout=None, interim_results=None, keywords=None,
                                  keywords_threshold=None, max_alternatives=None,
                                  word_alternatives_threshold=None, word_confidence=None,
                                  timestamps=None, profanity_filter=None, smart_formatting=None,
                                  speaker_labels=None, http_proxy_host=None, http_proxy_port=None,
                                  customization_id=None, grammar_name=None, redaction=None,
                                  **kwargs):
        """
        Sends audio for speech recognition using web sockets.

        :param AudioSource audio: The audio to transcribe in the format specified by the `Content-Type` header.
        :param str content_type: The type of the input: audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, or audio/webm;codecs=vorbis.
        :param RecognizeCallback recognize_callback: The callback method for the websocket.
        :param str model: The identifier of the model that is to be used for the recognition request or, for the **Create a session** method, with the new session.
        :param str language_customization_id: The customization ID (GUID) of a custom language model that is to be used with the recognition request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom language model is used. See [Custom models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). **Note:** Use this parameter instead of the deprecated `customization_id` parameter.
        :param str acoustic_customization_id: The customization ID (GUID) of a custom acoustic model that is to be used with the recognition request or, for the **Create a session** method, with the new session. The base model of the specified custom acoustic model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom acoustic model is used.
        :param float customization_weight: If you specify the customization ID (GUID) of a custom language model with the recognition request or, for sessions, with the **Create a session** method, the customization weight tells the service how much weight to give to words from the custom language model compared to those from the base model for the current request. Specify a value between 0.0 and 1.0. Unless a different customization weight was specified for the custom model when it was trained, the default value is 0.3. A customization weight that you specify overrides a weight that was specified when the custom model was trained. The default value yields the best performance in general. Assign a higher value if your audio makes frequent use of OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases.
        :param str base_model_version: The version of the specified base model that is to be used with recognition request or, for the **Create a session** method, with the new session. Multiple versions of a base model can exist when a model is updated for internal improvements. The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The default value depends on whether the parameter is used with or without a custom model. For more information, see [Base model version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version).
        :param int inactivity_timeout: The time in seconds after which, if only silence (no speech) is detected in submitted audio, the connection is closed with a 400 error. Useful for stopping audio submission from a live microphone when a user simply walks away. Use `-1` for infinity.
        :param list[str] keywords: An array of keyword strings to spot in the audio. Each keyword string can include one or more tokens. Keywords are spotted only in the final hypothesis, not in interim results. If you specify any keywords, you must also specify a keywords threshold. You can spot a maximum of 1000 keywords. Omit the parameter or specify an empty array if you do not need to spot keywords.
        :param float keywords_threshold: A confidence value that is the lower bound for spotting a keyword. A word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. No keyword spotting is performed if you omit the parameter. If you specify a threshold, you must also specify one or more keywords.
        :param int max_alternatives: The maximum number of alternative transcripts to be returned. By default, a single transcription is returned.
        :param float word_alternatives_threshold: A confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as \"Confusion Networks\"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. No alternative words are computed if you omit the parameter.
        :param bool word_confidence: If `true`, a confidence measure in the range of 0 to 1 is returned for each word. By default, no word confidence measures are returned.
        :param bool timestamps: If `true`, time alignment is returned for each word. By default, no timestamps are returned.
        :param bool profanity_filter: If `true` (the default), filters profanity from all output except for keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return results with no censoring. Applies to US English transcription only.
        :param bool smart_formatting: If `true`, converts dates, times, series of digits and numbers, phone numbers, currency values, and internet addresses into more readable, conventional representations in the final transcript of a recognition request. For US English, also converts certain keyword strings to punctuation symbols. By default, no smart formatting is performed. Applies to US English and Spanish transcription only.
        :param bool speaker_labels: If `true`, the response includes labels that identify which words were spoken by which participants in a multi-person exchange. By default, no speaker labels are returned. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. To determine whether a language model supports speaker labels, use the **Get models** method and check that the attribute `speaker_labels` is set to `true`. You can also refer to [Speaker labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels).
        :param str http_proxy_host: http proxy host name.
        :param str http_proxy_port: http proxy port. If not set, set to 80.
        :param str customization_id: **Deprecated.** Use the `language_customization_id` parameter to specify the customization ID (GUID) of a custom language model that is to be used with the recognition request. Do not specify both parameters with a request.
        :param str grammar_name: The name of a grammar that is to be used with the recognition request. If you specify a grammar, you must also use the `language_customization_id` parameter to specify the name of the custom language model for which the grammar is defined. The service recognizes only strings that are recognized by the specified grammar; it does not recognize other custom words from the model's words resource. See [Grammars](https://cloud.ibm.com/docs/services/speech-to-text/output.html).
        :param bool redaction: If `true`, the service redacts, or masks, numeric data from final transcripts. The feature redacts any number that has three or more consecutive digits by replacing each digit with an `X` character. It is intended to redact sensitive numeric data, such as credit card numbers. By default, the service performs no redaction. When you enable redaction, the service automatically enables smart formatting, regardless of whether you explicitly disable that feature. To ensure maximum security, the service also disables keyword spotting (ignores the `keywords` and `keywords_threshold` parameters) and returns only a single final transcript (forces the `max_alternatives` parameter to be `1`). **Note:** Applies to US English, Japanese, and Korean transcription only. See [Numeric redaction](https://cloud.ibm.com/docs/services/speech-to-text/output.html#redaction).
        :param dict headers: A `dict` containing the request headers
        :return: A `dict` containing the `SpeechRecognitionResults` response.
        :rtype: dict
        """
        if audio is None:
            raise ValueError('audio must be provided')
        if not isinstance(audio, AudioSource):
            raise Exception(
                'audio is not of type AudioSource. Import the class from ibm_watson.websocket')
        if content_type is None:
            raise ValueError('content_type must be provided')
        if recognize_callback is None:
            raise ValueError('recognize_callback must be provided')
        if not isinstance(recognize_callback, RecognizeCallback):
            raise Exception(
                'Callback is not a derived class of RecognizeCallback')

        headers = {}
        if self.default_headers is not None:
            headers = self.default_headers.copy()
        if 'headers' in kwargs:
            headers.update(kwargs.get('headers'))

        if self.token_manager:
            access_token = self.token_manager.get_token()
            headers['Authorization'] = '{0} {1}'.format(BEARER, access_token)
        else:
            authstring = "{0}:{1}".format(self.username, self.password)
            base64_authorization = base64.b64encode(
                authstring.encode('utf-8')).decode('utf-8')
            headers['Authorization'] = 'Basic {0}'.format(base64_authorization)

        url = self.url.replace('https:', 'wss:')
        params = {
            'model': model,
            'customization_id': customization_id,
            'acoustic_customization_id': acoustic_customization_id,
            'customization_weight': customization_weight,
            'base_model_version': base_model_version,
            'language_customization_id': language_customization_id
        }
        params = dict([(k, v) for k, v in params.items() if v is not None])
        url += '/v1/recognize?{0}'.format(urlencode(params))

        options = {
            'content_type': content_type,
            'inactivity_timeout': inactivity_timeout,
            'interim_results': interim_results,
            'keywords': keywords,
            'keywords_threshold': keywords_threshold,
            'max_alternatives': max_alternatives,
            'word_alternatives_threshold': word_alternatives_threshold,
            'word_confidence': word_confidence,
            'timestamps': timestamps,
            'profanity_filter': profanity_filter,
            'smart_formatting': smart_formatting,
            'speaker_labels': speaker_labels,
            'grammar_name': grammar_name,
            'redaction': redaction
        }
        options = dict([(k, v) for k, v in options.items() if v is not None])

        RecognizeListener(audio, options, recognize_callback, url, headers,
                          http_proxy_host, http_proxy_port, self.verify)
[ "def", "recognize_using_websocket", "(", "self", ",", "audio", ",", "content_type", ",", "recognize_callback", ",", "model", "=", "None", ",", "language_customization_id", "=", "None", ",", "acoustic_customization_id", "=", "None", ",", "customization_weight", "=", "None", ",", "base_model_version", "=", "None", ",", "inactivity_timeout", "=", "None", ",", "interim_results", "=", "None", ",", "keywords", "=", "None", ",", "keywords_threshold", "=", "None", ",", "max_alternatives", "=", "None", ",", "word_alternatives_threshold", "=", "None", ",", "word_confidence", "=", "None", ",", "timestamps", "=", "None", ",", "profanity_filter", "=", "None", ",", "smart_formatting", "=", "None", ",", "speaker_labels", "=", "None", ",", "http_proxy_host", "=", "None", ",", "http_proxy_port", "=", "None", ",", "customization_id", "=", "None", ",", "grammar_name", "=", "None", ",", "redaction", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "audio", "is", "None", ":", "raise", "ValueError", "(", "'audio must be provided'", ")", "if", "not", "isinstance", "(", "audio", ",", "AudioSource", ")", ":", "raise", "Exception", "(", "'audio is not of type AudioSource. Import the class from ibm_watson.websocket'", ")", "if", "content_type", "is", "None", ":", "raise", "ValueError", "(", "'content_type must be provided'", ")", "if", "recognize_callback", "is", "None", ":", "raise", "ValueError", "(", "'recognize_callback must be provided'", ")", "if", "not", "isinstance", "(", "recognize_callback", ",", "RecognizeCallback", ")", ":", "raise", "Exception", "(", "'Callback is not a derived class of RecognizeCallback'", ")", "headers", "=", "{", "}", "if", "self", ".", "default_headers", "is", "not", "None", ":", "headers", "=", "self", ".", "default_headers", ".", "copy", "(", ")", "if", "'headers'", "in", "kwargs", ":", "headers", ".", "update", "(", "kwargs", ".", "get", "(", "'headers'", ")", ")", "if", "self", ".", "token_manager", ":", "access_token", "=", "self", ".", "token_manager", ".", "get_token", "(", ")", "headers", "[", "'Authorization'", "]", "=", "'{0} {1}'", ".", "format", "(", "BEARER", ",", "access_token", ")", "else", ":", "authstring", "=", "\"{0}:{1}\"", ".", "format", "(", "self", ".", "username", ",", "self", ".", "password", ")", "base64_authorization", "=", "base64", ".", "b64encode", "(", "authstring", ".", "encode", "(", "'utf-8'", ")", ")", ".", "decode", "(", "'utf-8'", ")", "headers", "[", "'Authorization'", "]", "=", "'Basic {0}'", ".", "format", "(", "base64_authorization", ")", "url", "=", "self", ".", "url", ".", "replace", "(", "'https:'", ",", "'wss:'", ")", "params", "=", "{", "'model'", ":", "model", ",", "'customization_id'", ":", "customization_id", ",", "'acoustic_customization_id'", ":", "acoustic_customization_id", ",", "'customization_weight'", ":", "customization_weight", ",", "'base_model_version'", ":", "base_model_version", ",", "'language_customization_id'", ":", "language_customization_id", "}", "params", "=", "dict", "(", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "params", ".", "items", "(", ")", "if", "v", "is", "not", "None", "]", ")", "url", "+=", "'/v1/recognize?{0}'", ".", "format", "(", "urlencode", "(", "params", ")", ")", "options", "=", "{", "'content_type'", ":", "content_type", ",", "'inactivity_timeout'", ":", "inactivity_timeout", ",", "'interim_results'", ":", "interim_results", ",", "'keywords'", ":", "keywords", ",", "'keywords_threshold'", ":", "keywords_threshold", ",", "'max_alternatives'", ":", "max_alternatives", ",", 
"'word_alternatives_threshold'", ":", "word_alternatives_threshold", ",", "'word_confidence'", ":", "word_confidence", ",", "'timestamps'", ":", "timestamps", ",", "'profanity_filter'", ":", "profanity_filter", ",", "'smart_formatting'", ":", "smart_formatting", ",", "'speaker_labels'", ":", "speaker_labels", ",", "'grammar_name'", ":", "grammar_name", ",", "'redaction'", ":", "redaction", "}", "options", "=", "dict", "(", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "options", ".", "items", "(", ")", "if", "v", "is", "not", "None", "]", ")", "RecognizeListener", "(", "audio", ",", "options", ",", "recognize_callback", ",", "url", ",", "headers", ",", "http_proxy_host", ",", "http_proxy_port", ",", "self", ".", "verify", ")" ]
Sends audio for speech recognition using web sockets.

:param AudioSource audio: The audio to transcribe in the format specified by the `Content-Type` header.
:param str content_type: The type of the input: audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, or audio/webm;codecs=vorbis.
:param RecognizeCallback recognize_callback: The callback method for the websocket.
:param str model: The identifier of the model that is to be used for the recognition request or, for the **Create a session** method, with the new session.
:param str language_customization_id: The customization ID (GUID) of a custom language model that is to be used with the recognition request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom language model is used. See [Custom models](https://console.bluemix.net/docs/services/speech-to-text/input.html#custom). **Note:** Use this parameter instead of the deprecated `customization_id` parameter.
:param str acoustic_customization_id: The customization ID (GUID) of a custom acoustic model that is to be used with the recognition request or, for the **Create a session** method, with the new session. The base model of the specified custom acoustic model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom acoustic model is used.
:param float customization_weight: If you specify the customization ID (GUID) of a custom language model with the recognition request or, for sessions, with the **Create a session** method, the customization weight tells the service how much weight to give to words from the custom language model compared to those from the base model for the current request. Specify a value between 0.0 and 1.0. Unless a different customization weight was specified for the custom model when it was trained, the default value is 0.3. A customization weight that you specify overrides a weight that was specified when the custom model was trained. The default value yields the best performance in general. Assign a higher value if your audio makes frequent use of OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases.
:param str base_model_version: The version of the specified base model that is to be used with recognition request or, for the **Create a session** method, with the new session. Multiple versions of a base model can exist when a model is updated for internal improvements. The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The default value depends on whether the parameter is used with or without a custom model. For more information, see [Base model version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version).
:param int inactivity_timeout: The time in seconds after which, if only silence (no speech) is detected in submitted audio, the connection is closed with a 400 error. Useful for stopping audio submission from a live microphone when a user simply walks away. Use `-1` for infinity.
:param list[str] keywords: An array of keyword strings to spot in the audio. Each keyword string can include one or more tokens. Keywords are spotted only in the final hypothesis, not in interim results. If you specify any keywords, you must also specify a keywords threshold. You can spot a maximum of 1000 keywords. Omit the parameter or specify an empty array if you do not need to spot keywords.
:param float keywords_threshold: A confidence value that is the lower bound for spotting a keyword. A word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. No keyword spotting is performed if you omit the parameter. If you specify a threshold, you must also specify one or more keywords.
:param int max_alternatives: The maximum number of alternative transcripts to be returned. By default, a single transcription is returned.
:param float word_alternatives_threshold: A confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as \"Confusion Networks\"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. No alternative words are computed if you omit the parameter.
:param bool word_confidence: If `true`, a confidence measure in the range of 0 to 1 is returned for each word. By default, no word confidence measures are returned.
:param bool timestamps: If `true`, time alignment is returned for each word. By default, no timestamps are returned.
:param bool profanity_filter: If `true` (the default), filters profanity from all output except for keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return results with no censoring. Applies to US English transcription only.
:param bool smart_formatting: If `true`, converts dates, times, series of digits and numbers, phone numbers, currency values, and internet addresses into more readable, conventional representations in the final transcript of a recognition request. For US English, also converts certain keyword strings to punctuation symbols. By default, no smart formatting is performed. Applies to US English and Spanish transcription only.
:param bool speaker_labels: If `true`, the response includes labels that identify which words were spoken by which participants in a multi-person exchange. By default, no speaker labels are returned. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. To determine whether a language model supports speaker labels, use the **Get models** method and check that the attribute `speaker_labels` is set to `true`. You can also refer to [Speaker labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels).
:param str http_proxy_host: http proxy host name.
:param str http_proxy_port: http proxy port. If not set, set to 80.
:param str customization_id: **Deprecated.** Use the `language_customization_id` parameter to specify the customization ID (GUID) of a custom language model that is to be used with the recognition request. Do not specify both parameters with a request.
:param str grammar_name: The name of a grammar that is to be used with the recognition request. If you specify a grammar, you must also use the `language_customization_id` parameter to specify the name of the custom language model for which the grammar is defined. The service recognizes only strings that are recognized by the specified grammar; it does not recognize other custom words from the model's words resource. See [Grammars](https://cloud.ibm.com/docs/services/speech-to-text/output.html).
:param bool redaction: If `true`, the service redacts, or masks, numeric data from final transcripts. The feature redacts any number that has three or more consecutive digits by replacing each digit with an `X` character. It is intended to redact sensitive numeric data, such as credit card numbers. By default, the service performs no redaction. When you enable redaction, the service automatically enables smart formatting, regardless of whether you explicitly disable that feature. To ensure maximum security, the service also disables keyword spotting (ignores the `keywords` and `keywords_threshold` parameters) and returns only a single final transcript (forces the `max_alternatives` parameter to be `1`). **Note:** Applies to US English, Japanese, and Korean transcription only. See [Numeric redaction](https://cloud.ibm.com/docs/services/speech-to-text/output.html#redaction).
:param dict headers: A `dict` containing the request headers
:return: A `dict` containing the `SpeechRecognitionResults` response.
:rtype: dict
[ "Sends", "audio", "for", "speech", "recognition", "using", "web", "sockets", "." ]
python
train
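A minimal client sketch for `recognize_using_websocket`; the credential keyword reflects this SDK era and is an assumption, as are the audio file and model name:

from ibm_watson import SpeechToTextV1
from ibm_watson.websocket import RecognizeCallback, AudioSource

class PrintCallback(RecognizeCallback):
    def on_transcription(self, transcript):
        print(transcript)

stt = SpeechToTextV1(iam_apikey='YOUR_APIKEY')  # auth style is an assumption
with open('speech.wav', 'rb') as audio_file:
    stt.recognize_using_websocket(audio=AudioSource(audio_file),
                                  content_type='audio/wav',
                                  recognize_callback=PrintCallback(),
                                  model='en-US_BroadbandModel')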
angr/angr
angr/analyses/cfg/cfg_base.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_base.py#L2027-L2046
def _resolve_indirect_jump_timelessly(self, addr, block, func_addr, jumpkind):
        """
        Checks if the arch is MIPS32 and, if so, calls the MIPS32 check; otherwise returns False.

        :param int addr:          irsb address
        :param pyvex.IRSB block:  irsb
        :param int func_addr:     Function address
        :return:                  If it was resolved and targets alongside it
        :rtype:                   tuple
        """
        if block.statements is None:
            block = self.project.factory.block(block.addr, size=block.size).vex

        for res in self.timeless_indirect_jump_resolvers:
            if res.filter(self, addr, func_addr, block, jumpkind):
                r, resolved_targets = res.resolve(self, addr, func_addr, block, jumpkind)
                if r:
                    return True, resolved_targets
        return False, []
[ "def", "_resolve_indirect_jump_timelessly", "(", "self", ",", "addr", ",", "block", ",", "func_addr", ",", "jumpkind", ")", ":", "if", "block", ".", "statements", "is", "None", ":", "block", "=", "self", ".", "project", ".", "factory", ".", "block", "(", "block", ".", "addr", ",", "size", "=", "block", ".", "size", ")", ".", "vex", "for", "res", "in", "self", ".", "timeless_indirect_jump_resolvers", ":", "if", "res", ".", "filter", "(", "self", ",", "addr", ",", "func_addr", ",", "block", ",", "jumpkind", ")", ":", "r", ",", "resolved_targets", "=", "res", ".", "resolve", "(", "self", ",", "addr", ",", "func_addr", ",", "block", ",", "jumpkind", ")", "if", "r", ":", "return", "True", ",", "resolved_targets", "return", "False", ",", "[", "]" ]
Checks if the arch is MIPS32 and, if so, calls the MIPS32 check; otherwise returns False.

:param int addr:          irsb address
:param pyvex.IRSB block:  irsb
:param int func_addr:     Function address
:return:                  If it was resolved and targets alongside it
:rtype:                   tuple
[ "Checks", "if", "MIPS32", "and", "calls", "MIPS32", "check", "otherwise", "false" ]
python
train
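Since `_resolve_indirect_jump_timelessly` is internal to the CFG analysis, a sketch only makes sense in that context; `self`, the address variables, and the jumpkind value are illustrative:

# Inside a CFGBase subclass, when an indirect 'Ijk_Boring' jump is hit:
block = self.project.factory.block(addr).vex
resolved, targets = self._resolve_indirect_jump_timelessly(
    addr, block, func_addr, 'Ijk_Boring')
if resolved:
    for target in targets:
        print(hex(target))  # e.g. jump-table entries recovered without symbolic execution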
assamite/creamas
creamas/core/environment.py
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/core/environment.py#L319-L335
def destroy(self, folder=None, as_coro=False):
        '''Destroy the environment.

        Does the following:

        1. calls :py:meth:`~creamas.core.Environment.save_info`
        2. for each agent: calls :py:meth:`close`
        3. Shuts down its RPC-service.
        '''
        async def _destroy(folder):
            ret = self.save_info(folder)
            for a in self.get_agents(addr=False):
                a.close(folder=folder)
            await self.shutdown(as_coro=True)
            return ret

        return run_or_coro(_destroy(folder), as_coro)
[ "def", "destroy", "(", "self", ",", "folder", "=", "None", ",", "as_coro", "=", "False", ")", ":", "async", "def", "_destroy", "(", "folder", ")", ":", "ret", "=", "self", ".", "save_info", "(", "folder", ")", "for", "a", "in", "self", ".", "get_agents", "(", "addr", "=", "False", ")", ":", "a", ".", "close", "(", "folder", "=", "folder", ")", "await", "self", ".", "shutdown", "(", "as_coro", "=", "True", ")", "return", "ret", "return", "run_or_coro", "(", "_destroy", "(", "folder", ")", ",", "as_coro", ")" ]
Destroy the environment.

Does the following:

1. calls :py:meth:`~creamas.core.Environment.save_info`
2. for each agent: calls :py:meth:`close`
3. Shuts down its RPC-service.
[ "Destroy", "the", "environment", "." ]
python
train
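A teardown sketch; `Environment.create` is the usual entry point, but its arguments here are illustrative:

from creamas import Environment

env = Environment.create(('localhost', 5555))
# ... spawn agents, run simulation steps ...
ret = env.destroy(folder='run_logs')  # save_info -> close agents -> shutdown
# From inside a coroutine the same call would be awaited:
# ret = await env.destroy(folder='run_logs', as_coro=True)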
RJT1990/pyflux
pyflux/inference/bbvi.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/inference/bbvi.py#L54-L62
def change_parameters(self, params):
        """ Utility function for changing the approximate distribution parameters """
        no_of_params = 0
        for core_param in range(len(self.q)):
            for approx_param in range(self.q[core_param].param_no):
                self.q[core_param].vi_change_param(approx_param, params[no_of_params])
                no_of_params += 1
[ "def", "change_parameters", "(", "self", ",", "params", ")", ":", "no_of_params", "=", "0", "for", "core_param", "in", "range", "(", "len", "(", "self", ".", "q", ")", ")", ":", "for", "approx_param", "in", "range", "(", "self", ".", "q", "[", "core_param", "]", ".", "param_no", ")", ":", "self", ".", "q", "[", "core_param", "]", ".", "vi_change_param", "(", "approx_param", ",", "params", "[", "no_of_params", "]", ")", "no_of_params", "+=", "1" ]
Utility function for changing the approximate distribution parameters
[ "Utility", "function", "for", "changing", "the", "approximate", "distribution", "parameters" ]
python
train
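A sketch of feeding a flat parameter vector back into the approximating distributions; `bbvi` stands for an already-constructed BBVI instance, which is an assumption:

import numpy as np

# One value per (distribution, parameter) pair, in the same nested order
# that change_parameters walks them.
flat = np.zeros(sum(dist.param_no for dist in bbvi.q))
bbvi.change_parameters(flat)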
mediawiki-utilities/python-mwoauth
mwoauth/flask.py
https://github.com/mediawiki-utilities/python-mwoauth/blob/cd6990753ec3d59b7cfd96a76459f71ef4790cd3/mwoauth/flask.py#L200-L210
def mwapi_session(self, *args, **kwargs):
        """
        Create :class:`mwapi.Session` that is authorized for the
        current user.

        `args` and `kwargs` are passed directly to :class:`mwapi.Session`
        """
        import mwapi
        auth1 = self.generate_auth()
        return mwapi.Session(*args, user_agent=self.user_agent, auth=auth1,
                             **kwargs)
[ "def", "mwapi_session", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "import", "mwapi", "auth1", "=", "self", ".", "generate_auth", "(", ")", "return", "mwapi", ".", "Session", "(", "*", "args", ",", "user_agent", "=", "self", ".", "user_agent", ",", "auth", "=", "auth1", ",", "*", "*", "kwargs", ")" ]
Create :class:`mwapi.Session` that is authorized for the current user. `args` and `kwargs` are passed directly to :class:`mwapi.Session`
[ "Create", ":", "class", ":", "mwapi", ".", "Session", "that", "is", "authorized", "for", "the", "current", "user", "." ]
python
train
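A hedged sketch inside a Flask app; the `MWOAuth` constructor arguments are assumptions, while the host string given to `mwapi_session` is forwarded straight to `mwapi.Session`:

from flask import Flask
from mwoauth.flask import MWOAuth

app = Flask(__name__)
mwoauth = MWOAuth('https://en.wikipedia.org', consumer_token)  # token defined elsewhere

@app.route('/whoami')
def whoami():
    session = mwoauth.mwapi_session('https://en.wikipedia.org')
    return session.get(action='query', meta='userinfo')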
watson-developer-cloud/python-sdk
ibm_watson/websocket/recognize_listener.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/websocket/recognize_listener.py#L139-L150
def on_open(self, ws):
        """
        Callback executed when a connection is opened to the server.
        Handles streaming of audio to the server.

        :param ws: Websocket client
        """
        self.callback.on_connected()

        # Send initialization message
        init_data = self.build_start_message(self.options)
        self.ws_client.send(json.dumps(init_data).encode('utf8'),
                            websocket.ABNF.OPCODE_TEXT)
[ "def", "on_open", "(", "self", ",", "ws", ")", ":", "self", ".", "callback", ".", "on_connected", "(", ")", "# Send initialization message", "init_data", "=", "self", ".", "build_start_message", "(", "self", ".", "options", ")", "self", ".", "ws_client", ".", "send", "(", "json", ".", "dumps", "(", "init_data", ")", ".", "encode", "(", "'utf8'", ")", ",", "websocket", ".", "ABNF", ".", "OPCODE_TEXT", ")" ]
Callback executed when a connection is opened to the server.
Handles streaming of audio to the server.

:param ws: Websocket client
[ "Callback", "executed", "when", "a", "connection", "is", "opened", "to", "the", "server", ".", "Handles", "streaming", "of", "audio", "to", "the", "server", "." ]
python
train
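`on_open` fires `callback.on_connected()` before sending the start message, so a client hooks this moment by overriding that method; a minimal sketch:

from ibm_watson.websocket import RecognizeCallback

class LoggingCallback(RecognizeCallback):
    def on_connected(self):
        # invoked from RecognizeListener.on_open once the socket is up,
        # just before the start message goes out
        print('connected to the recognize endpoint')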
CityOfZion/neo-python
neo/Wallets/Wallet.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Wallets/Wallet.py#L560-L581
def GetTokenBalance(self, token, watch_only=0):
        """
        Get the balance of the specified token.

        Args:
            token (NEP5Token): an instance of type neo.Wallets.NEP5Token to get the balance from.
            watch_only (bool): True, to limit to watch only wallets.

        Returns:
            Decimal: total balance for `token`.
        """
        total = Decimal(0)
        if watch_only > 0:
            for addr in self._watch_only:
                balance = token.GetBalance(self, addr)
                total += balance
        else:
            for contract in self._contracts.values():
                balance = token.GetBalance(self, contract.Address)
                total += balance
        return total
[ "def", "GetTokenBalance", "(", "self", ",", "token", ",", "watch_only", "=", "0", ")", ":", "total", "=", "Decimal", "(", "0", ")", "if", "watch_only", ">", "0", ":", "for", "addr", "in", "self", ".", "_watch_only", ":", "balance", "=", "token", ".", "GetBalance", "(", "self", ",", "addr", ")", "total", "+=", "balance", "else", ":", "for", "contract", "in", "self", ".", "_contracts", ".", "values", "(", ")", ":", "balance", "=", "token", ".", "GetBalance", "(", "self", ",", "contract", ".", "Address", ")", "total", "+=", "balance", "return", "total" ]
Get the balance of the specified token. Args: token (NEP5Token): an instance of type neo.Wallets.NEP5Token to get the balance from. watch_only (bool): True, to limit to watch only wallets. Returns: Decimal: total balance for `token`.
[ "Get", "the", "balance", "of", "the", "specified", "token", "." ]
python
train
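A balance-listing sketch; `GetTokens` is assumed to return the wallet's known NEP5 tokens keyed by script hash, and `wallet` is an opened wallet instance:

# 'wallet' is an opened neo-python wallet
for script_hash, token in wallet.GetTokens().items():
    print(token.symbol, wallet.GetTokenBalance(token))
    # any positive watch_only value restricts the sum to watch-only addresses
    print(token.symbol, '(watch-only)', wallet.GetTokenBalance(token, watch_only=1))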
AguaClara/aguaclara
aguaclara/core/physchem.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/core/physchem.py#L219-L230
def headloss_fric(FlowRate, Diam, Length, Nu, PipeRough):
    """Return the major head loss (due to wall shear) in a pipe.

    This equation applies to both laminar and turbulent flows.
    """
    #Checking input validity - inputs not checked here are checked by
    #functions this function calls.
    ut.check_range([Length, ">0", "Length"])
    return (fric(FlowRate, Diam, Nu, PipeRough)
            * 8 / (gravity.magnitude * np.pi**2)
            * (Length * FlowRate**2) / Diam**5
            )
[ "def", "headloss_fric", "(", "FlowRate", ",", "Diam", ",", "Length", ",", "Nu", ",", "PipeRough", ")", ":", "#Checking input validity - inputs not checked here are checked by", "#functions this function calls.", "ut", ".", "check_range", "(", "[", "Length", ",", "\">0\"", ",", "\"Length\"", "]", ")", "return", "(", "fric", "(", "FlowRate", ",", "Diam", ",", "Nu", ",", "PipeRough", ")", "*", "8", "/", "(", "gravity", ".", "magnitude", "*", "np", ".", "pi", "**", "2", ")", "*", "(", "Length", "*", "FlowRate", "**", "2", ")", "/", "Diam", "**", "5", ")" ]
Return the major head loss (due to wall shear) in a pipe. This equation applies to both laminar and turbulent flows.
[ "Return", "the", "major", "head", "loss", "(", "due", "to", "wall", "shear", ")", "in", "a", "pipe", "." ]
python
train
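A sketch with plain SI magnitudes; depending on the installed version these arguments may instead need to be pint quantities from `aguaclara.core.units`, so treat the bare floats as an assumption:

from aguaclara.core import physchem as pc

# 10 L/s through 50 m of 10 cm pipe, water at ~20 C, 0.1 mm roughness
hl = pc.headloss_fric(FlowRate=0.010, Diam=0.10, Length=50.0,
                      Nu=1.0e-6, PipeRough=1.0e-4)
print(hl)  # major head loss, in metres of water for SI inputs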
bitesofcode/projexui
projexui/widgets/xlogrecordwidget/xlogrecordwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xlogrecordwidget/xlogrecordwidget.py#L358-L376
def setActiveLevels(self, levels):
        """
        Defines the levels for this widget's visible/processed levels.

        :param      levels | [<int>, ..]
        """
        self._activeLevels = levels

        tree = self.uiRecordTREE
        tree.setUpdatesEnabled(False)
        tree.blockSignals(True)

        for i in tree.topLevelItems():
            if levels and i.record().levelno not in levels:
                i.setHidden(True)
            else:
                i.setHidden(False)

        tree.blockSignals(False)
        tree.setUpdatesEnabled(True)
[ "def", "setActiveLevels", "(", "self", ",", "levels", ")", ":", "self", ".", "_activeLevels", "=", "levels", "tree", "=", "self", ".", "uiRecordTREE", "tree", ".", "setUpdatesEnabled", "(", "False", ")", "tree", ".", "blockSignals", "(", "True", ")", "for", "i", "in", "tree", ".", "topLevelItems", "(", ")", ":", "if", "levels", "and", "i", ".", "record", "(", ")", ".", "levelno", "not", "in", "levels", ":", "i", ".", "setHidden", "(", "True", ")", "else", ":", "i", ".", "setHidden", "(", "False", ")", "tree", ".", "blockSignals", "(", "False", ")", "tree", ".", "setUpdatesEnabled", "(", "True", ")" ]
Defines the levels for this widget's visible/processed levels.

:param      levels | [<int>, ..]
[ "Defines", "the", "levels", "for", "this", "widgets", "visible", "/", "processed", "levels", ".", ":", "param", "levels", "|", "[", "<int", ">", "..", "]" ]
python
train
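Filtering a log widget down to warnings and worse; `widget` stands for an existing XLogRecordWidget, and the standard logging constants match the `levelno` comparison in the method:

import logging

widget.setActiveLevels([logging.WARNING, logging.ERROR, logging.CRITICAL])
widget.setActiveLevels([])  # an empty list un-hides every record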
materialsproject/pymatgen
pymatgen/io/abinit/tasks.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/tasks.py#L770-L817
def select_qadapter(self, pconfs):
        """
        Given a list of parallel configurations, pconfs, this method selects an `optimal` configuration
        according to some criterion as well as the :class:`QueueAdapter` to use.

        Args:
            pconfs: :class:`ParalHints` object with the list of parallel configurations

        Returns:
            :class:`ParallelConf` object with the `optimal` configuration.
        """
        # Order the list of configurations according to policy.
        policy, max_ncpus = self.policy, self.max_cores
        pconfs = pconfs.get_ordered_with_policy(policy, max_ncpus)

        if policy.precedence == "qadapter":
            # Try to run on the qadapter with the highest priority.
            for qadpos, qad in enumerate(self.qads):
                possible_pconfs = [pc for pc in pconfs if qad.can_run_pconf(pc)]

                if qad.allocation == "nodes":
                    #if qad.allocation in ["nodes", "force_nodes"]:
                    # Select the configuration divisible by nodes if possible.
                    for pconf in possible_pconfs:
                        if pconf.num_cores % qad.hw.cores_per_node == 0:
                            return self._use_qadpos_pconf(qadpos, pconf)

                # Here we select the first one.
                if possible_pconfs:
                    return self._use_qadpos_pconf(qadpos, possible_pconfs[0])

        elif policy.precedence == "autoparal_conf":
            # Try to run on the first pconf irrespectively of the priority of the qadapter.
            for pconf in pconfs:
                for qadpos, qad in enumerate(self.qads):
                    if qad.allocation == "nodes" and not pconf.num_cores % qad.hw.cores_per_node == 0:
                        continue  # Ignore it. not very clean
                    if qad.can_run_pconf(pconf):
                        return self._use_qadpos_pconf(qadpos, pconf)

        else:
            raise ValueError("Wrong value of policy.precedence = %s" % policy.precedence)

        # No qadapter could be found
        raise RuntimeError("Cannot find qadapter for this run!")
[ "def", "select_qadapter", "(", "self", ",", "pconfs", ")", ":", "# Order the list of configurations according to policy.", "policy", ",", "max_ncpus", "=", "self", ".", "policy", ",", "self", ".", "max_cores", "pconfs", "=", "pconfs", ".", "get_ordered_with_policy", "(", "policy", ",", "max_ncpus", ")", "if", "policy", ".", "precedence", "==", "\"qadapter\"", ":", "# Try to run on the qadapter with the highest priority.", "for", "qadpos", ",", "qad", "in", "enumerate", "(", "self", ".", "qads", ")", ":", "possible_pconfs", "=", "[", "pc", "for", "pc", "in", "pconfs", "if", "qad", ".", "can_run_pconf", "(", "pc", ")", "]", "if", "qad", ".", "allocation", "==", "\"nodes\"", ":", "#if qad.allocation in [\"nodes\", \"force_nodes\"]:", "# Select the configuration divisible by nodes if possible.", "for", "pconf", "in", "possible_pconfs", ":", "if", "pconf", ".", "num_cores", "%", "qad", ".", "hw", ".", "cores_per_node", "==", "0", ":", "return", "self", ".", "_use_qadpos_pconf", "(", "qadpos", ",", "pconf", ")", "# Here we select the first one.", "if", "possible_pconfs", ":", "return", "self", ".", "_use_qadpos_pconf", "(", "qadpos", ",", "possible_pconfs", "[", "0", "]", ")", "elif", "policy", ".", "precedence", "==", "\"autoparal_conf\"", ":", "# Try to run on the first pconf irrespectively of the priority of the qadapter.", "for", "pconf", "in", "pconfs", ":", "for", "qadpos", ",", "qad", "in", "enumerate", "(", "self", ".", "qads", ")", ":", "if", "qad", ".", "allocation", "==", "\"nodes\"", "and", "not", "pconf", ".", "num_cores", "%", "qad", ".", "hw", ".", "cores_per_node", "==", "0", ":", "continue", "# Ignore it. not very clean", "if", "qad", ".", "can_run_pconf", "(", "pconf", ")", ":", "return", "self", ".", "_use_qadpos_pconf", "(", "qadpos", ",", "pconf", ")", "else", ":", "raise", "ValueError", "(", "\"Wrong value of policy.precedence = %s\"", "%", "policy", ".", "precedence", ")", "# No qadapter could be found", "raise", "RuntimeError", "(", "\"Cannot find qadapter for this run!\"", ")" ]
Given a list of parallel configurations, pconfs, this method selects an `optimal` configuration
according to some criterion as well as the :class:`QueueAdapter` to use.

Args:
    pconfs: :class:`ParalHints` object with the list of parallel configurations

Returns:
    :class:`ParallelConf` object with the `optimal` configuration.
[ "Given", "a", "list", "of", "parallel", "configurations", "pconfs", "this", "method", "select", "an", "optimal", "configuration", "according", "to", "some", "criterion", "as", "well", "as", "the", ":", "class", ":", "QueueAdapter", "to", "use", "." ]
python
train
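A sketch of the selection step in a flow; `manager` is a TaskManager and `pconfs` the `ParalHints` produced by an autoparal run, both assumed to exist already:

# pconfs typically comes from parsing a task's autoparal output
optimal = manager.select_qadapter(pconfs)
print(optimal.num_cores)  # cores of the chosen ParallelConf
print(manager.qadapter)   # the queue adapter _use_qadpos_pconf selected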
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ras_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ras_ext.py#L52-L64
def show_raslog_output_show_all_raslog_number_of_entries(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        show_raslog = ET.Element("show_raslog")
        config = show_raslog
        output = ET.SubElement(show_raslog, "output")
        show_all_raslog = ET.SubElement(output, "show-all-raslog")
        number_of_entries = ET.SubElement(show_all_raslog, "number-of-entries")
        number_of_entries.text = kwargs.pop('number_of_entries')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)
[ "def", "show_raslog_output_show_all_raslog_number_of_entries", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_raslog", "=", "ET", ".", "Element", "(", "\"show_raslog\"", ")", "config", "=", "show_raslog", "output", "=", "ET", ".", "SubElement", "(", "show_raslog", ",", "\"output\"", ")", "show_all_raslog", "=", "ET", ".", "SubElement", "(", "output", ",", "\"show-all-raslog\"", ")", "number_of_entries", "=", "ET", ".", "SubElement", "(", "show_all_raslog", ",", "\"number-of-entries\"", ")", "number_of_entries", ".", "text", "=", "kwargs", ".", "pop", "(", "'number_of_entries'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
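Because the method pops an optional `callback` from kwargs, it can be exercised without a live switch by injecting one; the construction of the binding instance `ras` is elided here since its constructor is not shown in the entry:

import xml.etree.ElementTree as ET

def dump_xml(config):
    # stands in for the transport callback that would send the payload to the device
    print(ET.tostring(config))

ras.show_raslog_output_show_all_raslog_number_of_entries(
    number_of_entries='25', callback=dump_xml)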
stephen-bunn/file-config
src/file_config/utils.py
https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/utils.py#L320-L333
def is_array_type(type_):
    """ Checks if the given type is an array type.

    :param type_: The type to check
    :return: True if the type is an array type, otherwise False
    :rtype: bool
    """
    array_types = _get_types(Types.ARRAY)
    if is_typing_type(type_):
        return type_ in array_types or (
            hasattr(type_, "__origin__") and type_.__origin__ in array_types
        )
    return type_ in array_types
[ "def", "is_array_type", "(", "type_", ")", ":", "array_types", "=", "_get_types", "(", "Types", ".", "ARRAY", ")", "if", "is_typing_type", "(", "type_", ")", ":", "return", "type_", "in", "array_types", "or", "(", "hasattr", "(", "type_", ",", "\"__origin__\"", ")", "and", "type_", ".", "__origin__", "in", "array_types", ")", "return", "type_", "in", "array_types" ]
Checks if the given type is an array type.

:param type_: The type to check
:return: True if the type is an array type, otherwise False
:rtype: bool
[ "Checks", "if", "the", "given", "type", "is", "a", "array", "type", "." ]
python
train
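Quick checks mirroring the two branches of `is_array_type` (plain types versus `typing` generics whose `__origin__` is inspected); this assumes the package registers `list` under `Types.ARRAY`:

import typing

print(is_array_type(list))               # True  -- concrete array type (assumed registered)
print(is_array_type(typing.List[int]))   # True  -- matched via __origin__
print(is_array_type(dict))               # False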
tomnor/channelpack
channelpack/pack.py
https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L613-L624
def set_stopextend(self, n):
        """Extend the True elements by n when setting the conditions
        based on a 'stopcond' condition.

        n is an integer >= 0.

        .. note::
           Updates the mask if not no_auto.
        """
        self.conconf.set_condition('stopextend', n)
        if not self.no_auto:
            self.make_mask()
[ "def", "set_stopextend", "(", "self", ",", "n", ")", ":", "self", ".", "conconf", ".", "set_condition", "(", "'stopextend'", ",", "n", ")", "if", "not", "self", ".", "no_auto", ":", "self", ".", "make_mask", "(", ")" ]
Extend the True elements by n when setting the conditions based on a
'stopcond' condition.

n is an integer >= 0.

.. note::
   Updates the mask if not no_auto.
[ "Extend", "the", "True", "elements", "by", "n", "when", "setting", "the", "conditions", "based", "on", "a", "stopcond", "condition", "." ]
python
train
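A hedged sketch; only `set_stopextend` and the `no_auto` flag it consults are taken from the entry, while the pack construction and the 'stopcond' setup belong to the package's wider condition API:

# 'pack' is a ChannelPack whose 'stopcond' condition is already configured
pack.no_auto = False
pack.set_stopextend(5)  # keep 5 extra True samples past each stop hit;
                        # the mask is rebuilt immediately since no_auto is False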
bcbio/bcbio-nextgen
bcbio/structural/validate.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/validate.py#L77-L96
def _survivor_merge(call_vcf, truth_vcf, stats, work_dir, data):
    """Perform a merge of two callsets using SURVIVOR.
    """
    out_file = os.path.join(work_dir, "eval-merge.vcf")
    if not utils.file_uptodate(out_file, call_vcf):
        in_call_vcf = call_vcf.replace(".vcf.gz", ".vcf")
        if not utils.file_exists(in_call_vcf):
            with file_transaction(data, in_call_vcf) as tx_in_call_vcf:
                do.run("gunzip -c {call_vcf} > {tx_in_call_vcf}".format(**locals()))
        in_truth_vcf = truth_vcf.replace(".vcf.gz", ".vcf")
        if not utils.file_exists(in_truth_vcf):
            with file_transaction(data, in_truth_vcf) as tx_in_truth_vcf:
                do.run("gunzip -c {truth_vcf} > {tx_in_truth_vcf}".format(**locals()))
        in_list_file = os.path.join(work_dir, "eval-inputs.txt")
        with open(in_list_file, "w") as out_handle:
            out_handle.write("%s\n%s\n" % (in_call_vcf, in_truth_vcf))
        with file_transaction(data, out_file) as tx_out_file:
            cmd = ("SURVIVOR merge {in_list_file} {stats[merge_size]} 1 0 0 0 {stats[min_size]} {tx_out_file}")
            do.run(cmd.format(**locals()), "Merge SV files for validation: %s" % dd.get_sample_name(data))
    return out_file
[ "def", "_survivor_merge", "(", "call_vcf", ",", "truth_vcf", ",", "stats", ",", "work_dir", ",", "data", ")", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"eval-merge.vcf\"", ")", "if", "not", "utils", ".", "file_uptodate", "(", "out_file", ",", "call_vcf", ")", ":", "in_call_vcf", "=", "call_vcf", ".", "replace", "(", "\".vcf.gz\"", ",", "\".vcf\"", ")", "if", "not", "utils", ".", "file_exists", "(", "in_call_vcf", ")", ":", "with", "file_transaction", "(", "data", ",", "in_call_vcf", ")", "as", "tx_in_call_vcf", ":", "do", ".", "run", "(", "\"gunzip -c {call_vcf} > {tx_in_call_vcf}\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")", "in_truth_vcf", "=", "truth_vcf", ".", "replace", "(", "\".vcf.gz\"", ",", "\".vcf\"", ")", "if", "not", "utils", ".", "file_exists", "(", "in_truth_vcf", ")", ":", "with", "file_transaction", "(", "data", ",", "in_truth_vcf", ")", "as", "tx_in_truth_vcf", ":", "do", ".", "run", "(", "\"gunzip -c {truth_vcf} > {tx_in_truth_vcf}\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")", "in_list_file", "=", "os", ".", "path", ".", "join", "(", "work_dir", ",", "\"eval-inputs.txt\"", ")", "with", "open", "(", "in_list_file", ",", "\"w\"", ")", "as", "out_handle", ":", "out_handle", ".", "write", "(", "\"%s\\n%s\\n\"", "%", "(", "in_call_vcf", ",", "in_truth_vcf", ")", ")", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out_file", ":", "cmd", "=", "(", "\"SURVIVOR merge {in_list_file} {stats[merge_size]} 1 0 0 0 {stats[min_size]} {tx_out_file}\"", ")", "do", ".", "run", "(", "cmd", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "\"Merge SV files for validation: %s\"", "%", "dd", ".", "get_sample_name", "(", "data", ")", ")", "return", "out_file" ]
Perform a merge of two callsets using SURVIVOR.
[ "Perform", "a", "merge", "of", "two", "callsets", "using", "SURVIVOR" ]
python
train
hvac/hvac
hvac/api/secrets_engines/transit.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/secrets_engines/transit.py#L759-L792
def restore_key(self, backup, name=None, force=False, mount_point=DEFAULT_MOUNT_POINT):
        """Restore the backup as a named key.

        This will restore the key configurations and all the versions of the named key along with HMAC keys.
        The input to this endpoint should be the output of /backup endpoint.
        For safety, by default the backend will refuse to restore to an existing key. If you want to reuse a key name, it
        is recommended you delete the key before restoring. It is a good idea to attempt restoring to a different key name
        first to verify that the operation successfully completes.

        Supported methods:
            POST: /{mount_point}/restore(/name). Produces: 204 (empty body)

        :param backup: Backed up key data to be restored. This should be the output from the /backup endpoint.
        :type backup: str | unicode
        :param name: If set, this will be the name of the restored key.
        :type name: str | unicode
        :param force: If set, force the restore to proceed even if a key by this name already exists.
        :type force: bool
        :param mount_point: The "path" the method/backend was mounted on.
        :type mount_point: str | unicode
        :return: The response of the request.
        :rtype: requests.Response
        """
        params = {
            'backup': backup,
            'force': force,
        }
        api_path = '/v1/{mount_point}/restore'.format(mount_point=mount_point)
        if name is not None:
            api_path = self._adapter.urljoin(api_path, name)
        return self._adapter.post(
            url=api_path,
            json=params,
        )
[ "def", "restore_key", "(", "self", ",", "backup", ",", "name", "=", "None", ",", "force", "=", "False", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'backup'", ":", "backup", ",", "'force'", ":", "force", ",", "}", "api_path", "=", "'/v1/{mount_point}/restore'", ".", "format", "(", "mount_point", "=", "mount_point", ")", "if", "name", "is", "not", "None", ":", "api_path", "=", "self", ".", "_adapter", ".", "urljoin", "(", "api_path", ",", "name", ")", "return", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")" ]
Restore the backup as a named key.

        This will restore the key configurations and all the versions of the named key along with HMAC keys.
        The input to this endpoint should be the output of /backup endpoint.
        For safety, by default the backend will refuse to restore to an existing key. If you want to reuse a key name, it
        is recommended you delete the key before restoring. It is a good idea to attempt restoring to a different key name
        first to verify that the operation successfully completes.

        Supported methods:
            POST: /{mount_point}/restore(/name). Produces: 204 (empty body)

        :param backup: Backed up key data to be restored. This should be the output from the /backup endpoint.
        :type backup: str | unicode
        :param name: If set, this will be the name of the restored key.
        :type name: str | unicode
        :param force: If set, force the restore to proceed even if a key by this name already exists.
        :type force: bool
        :param mount_point: The "path" the method/backend was mounted on.
        :type mount_point: str | unicode
        :return: The response of the request.
        :rtype: requests.Response
[ "Restore", "the", "backup", "as", "a", "named", "key", "." ]
python
train
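A typical call, assuming a reachable Vault server; the URL, token, key names and the ['data']['backup'] response shape are placeholders/assumptions:

import hvac

client = hvac.Client(url='https://vault.example.com:8200', token='hvs.placeholder')
# Produce a backup with the matching /backup endpoint, then restore it
# under a different name first, as the docstring recommends.
backup = client.secrets.transit.backup_key(name='my-key')['data']['backup']
client.secrets.transit.restore_key(backup=backup, name='my-key-restored')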
getnikola/coil
coil/utils.py
https://github.com/getnikola/coil/blob/80ef1827460b0691cf2c98351a14d88e235c9899/coil/utils.py#L70-L90
def ask_yesno(query, default=None):
    """Ask a yes/no question."""
    if default is None:
        default_q = ' [y/n]'
    elif default is True:
        default_q = ' [Y/n]'
    elif default is False:
        default_q = ' [y/N]'
    if sys.version_info[0] == 3:
        inp = input("{query}{default_q} ".format(
            query=query, default_q=default_q)).strip()
    else:
        inp = raw_input("{query}{default_q} ".format(
            query=query, default_q=default_q).encode('utf-8')).strip()
    if inp:
        return inp.lower().startswith('y')
    elif default is not None:
        return default
    else:
        # Loop if no answer and no default.
        return ask_yesno(query, default)
[ "def", "ask_yesno", "(", "query", ",", "default", "=", "None", ")", ":", "if", "default", "is", "None", ":", "default_q", "=", "' [y/n]'", "elif", "default", "is", "True", ":", "default_q", "=", "' [Y/n]'", "elif", "default", "is", "False", ":", "default_q", "=", "' [y/N]'", "if", "sys", ".", "version_info", "[", "0", "]", "==", "3", ":", "inp", "=", "raw_input", "(", "\"{query}{default_q} \"", ".", "format", "(", "query", "=", "query", ",", "default_q", "=", "default_q", ")", ")", ".", "strip", "(", ")", "else", ":", "inp", "=", "raw_input", "(", "\"{query}{default_q} \"", ".", "format", "(", "query", "=", "query", ",", "default_q", "=", "default_q", ")", ".", "encode", "(", "'utf-8'", ")", ")", ".", "strip", "(", ")", "if", "inp", ":", "return", "inp", ".", "lower", "(", ")", ".", "startswith", "(", "'y'", ")", "elif", "default", "is", "not", "None", ":", "return", "default", "else", ":", "# Loop if no answer and no default.", "return", "ask_yesno", "(", "query", ",", "default", ")" ]
Ask a yes/no question.
[ "Ask", "a", "yes", "/", "no", "question", "." ]
python
train
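Usage is straightforward; with a default supplied, pressing Enter returns that default:

from coil.utils import ask_yesno

if ask_yesno('Deploy the site?', default=True):
    print('deploying...')
else:
    print('aborted')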
sentinel-hub/sentinelhub-py
sentinelhub/geometry.py
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/geometry.py#L284-L297
def get_partition(self, num_x=1, num_y=1):
        """ Partitions bounding box into smaller bounding boxes of the same size.

        :param num_x: Number of parts BBox will be horizontally divided into.
        :type num_x: int
        :param num_y: Number of parts BBox will be vertically divided into.
        :type num_y: int or None
        :return: Two-dimensional list of smaller bounding boxes. Their location is given by their list indices: the
            element at ``[i][j]`` covers the i-th column (x direction) and j-th row (y direction).
        :rtype: list(list(BBox))
        """
        size_x, size_y = (self.max_x - self.min_x) / num_x, (self.max_y - self.min_y) / num_y
        return [[BBox([self.min_x + i * size_x, self.min_y + j * size_y,
                       self.min_x + (i + 1) * size_x, self.min_y + (j + 1) * size_y],
                      crs=self.crs)
                 for j in range(num_y)] for i in range(num_x)]
[ "def", "get_partition", "(", "self", ",", "num_x", "=", "1", ",", "num_y", "=", "1", ")", ":", "size_x", ",", "size_y", "=", "(", "self", ".", "max_x", "-", "self", ".", "min_x", ")", "/", "num_x", ",", "(", "self", ".", "max_y", "-", "self", ".", "min_y", ")", "/", "num_y", "return", "[", "[", "BBox", "(", "[", "self", ".", "min_x", "+", "i", "*", "size_x", ",", "self", ".", "min_y", "+", "j", "*", "size_y", ",", "self", ".", "min_x", "+", "(", "i", "+", "1", ")", "*", "size_x", ",", "self", ".", "min_y", "+", "(", "j", "+", "1", ")", "*", "size_y", "]", ",", "crs", "=", "self", ".", "crs", ")", "for", "j", "in", "range", "(", "num_y", ")", "]", "for", "i", "in", "range", "(", "num_x", ")", "]" ]
Partitions bounding box into smaller bounding boxes of the same size.

        :param num_x: Number of parts BBox will be horizontally divided into.
        :type num_x: int
        :param num_y: Number of parts BBox will be vertically divided into.
        :type num_y: int or None
        :return: Two-dimensional list of smaller bounding boxes. Their location is given by their list indices: the
            element at ``[i][j]`` covers the i-th column (x direction) and j-th row (y direction).
        :rtype: list(list(BBox))
[ "Partitions", "bounding", "box", "into", "smaller", "bounding", "boxes", "of", "the", "same", "size", "." ]
python
train
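A quick sketch of splitting a WGS84 bounding box into a 2x2 grid (the coordinates are arbitrary sample values):

from sentinelhub import BBox, CRS

bbox = BBox([46.0, 13.0, 46.5, 13.5], crs=CRS.WGS84)
grid = bbox.get_partition(num_x=2, num_y=2)
# grid[i][j] covers the i-th column (x) and j-th row (y) of the split
for column in grid:
    for part in column:
        print(part)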
twilio/twilio-python
twilio/rest/api/v2010/account/sip/credential_list/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/sip/credential_list/__init__.py#L288-L301
def credentials(self):
        """
        Access the credentials

        :returns: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialList
        :rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialList
        """
        if self._credentials is None:
            self._credentials = CredentialList(
                self._version,
                account_sid=self._solution['account_sid'],
                credential_list_sid=self._solution['sid'],
            )
        return self._credentials
[ "def", "credentials", "(", "self", ")", ":", "if", "self", ".", "_credentials", "is", "None", ":", "self", ".", "_credentials", "=", "CredentialList", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", "credential_list_sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", "return", "self", ".", "_credentials" ]
Access the credentials

        :returns: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialList
        :rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialList
[ "Access", "the", "credentials" ]
python
train
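In client code the property is reached through the usual Twilio context chain; a sketch, assuming valid account credentials and an existing credential list SID (all placeholders):

from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'auth_token')
creds = client.sip.credential_lists('CLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').credentials
for credential in creds.list():
    print(credential.username)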
SeattleTestbed/seash
pyreadline/console/console.py
https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/pyreadline/console/console.py#L803-L820
def install_readline(hook):
    '''Set up things for the interpreter to call our function like GNU readline.'''
    global readline_hook, readline_ref
    # save the hook so the wrapper can call it
    readline_hook = hook
    # get the address of PyOS_ReadlineFunctionPointer so we can update it
    PyOS_RFP = c_void_p.from_address(Console.GetProcAddress(sys.dllhandle,
                                                            "PyOS_ReadlineFunctionPointer"))
    # save a reference to the generated C-callable so it doesn't go away
    if sys.version < '2.3':
        readline_ref = HOOKFUNC22(hook_wrapper)
    else:
        readline_ref = HOOKFUNC23(hook_wrapper_23)
    # get the address of the function
    func_start = c_void_p.from_address(addressof(readline_ref)).value
    # write the function address into PyOS_ReadlineFunctionPointer
    PyOS_RFP.value = func_start
[ "def", "install_readline", "(", "hook", ")", ":", "global", "readline_hook", ",", "readline_ref", "# save the hook so the wrapper can call it\r", "readline_hook", "=", "hook", "# get the address of PyOS_ReadlineFunctionPointer so we can update it\r", "PyOS_RFP", "=", "c_void_p", ".", "from_address", "(", "Console", ".", "GetProcAddress", "(", "sys", ".", "dllhandle", ",", "\"PyOS_ReadlineFunctionPointer\"", ")", ")", "# save a reference to the generated C-callable so it doesn't go away\r", "if", "sys", ".", "version", "<", "'2.3'", ":", "readline_ref", "=", "HOOKFUNC22", "(", "hook_wrapper", ")", "else", ":", "readline_ref", "=", "HOOKFUNC23", "(", "hook_wrapper_23", ")", "# get the address of the function\r", "func_start", "=", "c_void_p", ".", "from_address", "(", "addressof", "(", "readline_ref", ")", ")", ".", "value", "# write the function address into PyOS_ReadlineFunctionPointer\r", "PyOS_RFP", ".", "value", "=", "func_start" ]
Set up things for the interpreter to call our function like GNU readline.
[ "Set", "up", "things", "for", "the", "interpreter", "to", "call", "our", "function", "like", "GNU", "readline", "." ]
python
train
mikedh/trimesh
trimesh/path/simplify.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/simplify.py#L15-L104
def fit_circle_check(points, scale, prior=None, final=False, verbose=False):
    """
    Fit a circle, and reject the fit if:
    * the radius is larger than tol.radius_min*scale or tol.radius_max*scale
    * any segment spans more than tol.seg_angle
    * any segment is longer than tol.seg_frac*scale
    * the fit deviates by more than tol.radius_frac*radius
    * the segments on the ends deviate from tangent by more than tol.tangent

    Parameters
    ---------
    points:  (n, d) set of points which represent a path
    scale:   float, what is the overall scale of the set of points
    prior:   (center, radius) tuple for best guess, or None if unknown
    final:   boolean, if True also require at least three segments that span
             tol.seg_angle_min (used for the final fit check)
    verbose: boolean, if True output log.debug messages for the reasons
             for fit rejection. Potentially generates hundreds of thousands of
             messages so only suggested in manual debugging.

    Returns
    ---------
    if fit is acceptable:
        dict with 'center' and 'radius' keys
    else:
        None
    """
    # an arc needs at least three points
    if len(points) < 3:
        return None

    # do a least squares fit on the points
    C, R, r_deviation = fit_nsphere(points, prior=prior)

    # check to make sure radius is between min and max allowed
    if not tol.radius_min < (R / scale) < tol.radius_max:
        if verbose:
            log.debug('circle fit error: R %f', R / scale)
        return None

    # check point radius error
    r_error = r_deviation / R
    if r_error > tol.radius_frac:
        if verbose:
            log.debug('circle fit error: fit %s', str(r_error))
        return None

    vectors = np.diff(points, axis=0)
    segment = np.linalg.norm(vectors, axis=1)

    # approximate angle in radians, segments are linear length
    # not arc length but this is close and avoids a cosine
    angle = segment / R

    if (angle > tol.seg_angle).any():
        if verbose:
            log.debug('circle fit error: angle %s', str(angle))
        return None

    if final and (angle > tol.seg_angle_min).sum() < 3:
        log.debug('final: angle %s', str(angle))
        return None

    # check segment length as a fraction of drawing scale
    scaled = segment / scale

    if (scaled > tol.seg_frac).any():
        if verbose:
            log.debug('circle fit error: segment %s', str(scaled))
        return None

    # check to make sure the line segments on the ends are actually
    # tangent with the candidate circle fit
    mid_pt = points[[0, -2]] + (vectors[[0, -1]] * .5)
    radial = unitize(mid_pt - C)
    ends = unitize(vectors[[0, -1]])
    tangent = np.abs(np.arccos(diagonal_dot(radial, ends)))
    tangent = np.abs(tangent - np.pi / 2).max()
    if tangent > tol.tangent:
        if verbose:
            log.debug('circle fit error: tangent %f',
                      np.degrees(tangent))
        return None

    result = {'center': C, 'radius': R}
    return result
[ "def", "fit_circle_check", "(", "points", ",", "scale", ",", "prior", "=", "None", ",", "final", "=", "False", ",", "verbose", "=", "False", ")", ":", "# an arc needs at least three points", "if", "len", "(", "points", ")", "<", "3", ":", "return", "None", "# do a least squares fit on the points", "C", ",", "R", ",", "r_deviation", "=", "fit_nsphere", "(", "points", ",", "prior", "=", "prior", ")", "# check to make sure radius is between min and max allowed", "if", "not", "tol", ".", "radius_min", "<", "(", "R", "/", "scale", ")", "<", "tol", ".", "radius_max", ":", "if", "verbose", ":", "log", ".", "debug", "(", "'circle fit error: R %f'", ",", "R", "/", "scale", ")", "return", "None", "# check point radius error", "r_error", "=", "r_deviation", "/", "R", "if", "r_error", ">", "tol", ".", "radius_frac", ":", "if", "verbose", ":", "log", ".", "debug", "(", "'circle fit error: fit %s'", ",", "str", "(", "r_error", ")", ")", "return", "None", "vectors", "=", "np", ".", "diff", "(", "points", ",", "axis", "=", "0", ")", "segment", "=", "np", ".", "linalg", ".", "norm", "(", "vectors", ",", "axis", "=", "1", ")", "# approximate angle in radians, segments are linear length", "# not arc length but this is close and avoids a cosine", "angle", "=", "segment", "/", "R", "if", "(", "angle", ">", "tol", ".", "seg_angle", ")", ".", "any", "(", ")", ":", "if", "verbose", ":", "log", ".", "debug", "(", "'circle fit error: angle %s'", ",", "str", "(", "angle", ")", ")", "return", "None", "if", "final", "and", "(", "angle", ">", "tol", ".", "seg_angle_min", ")", ".", "sum", "(", ")", "<", "3", ":", "log", ".", "debug", "(", "'final: angle %s'", ",", "str", "(", "angle", ")", ")", "return", "None", "# check segment length as a fraction of drawing scale", "scaled", "=", "segment", "/", "scale", "if", "(", "scaled", ">", "tol", ".", "seg_frac", ")", ".", "any", "(", ")", ":", "if", "verbose", ":", "log", ".", "debug", "(", "'circle fit error: segment %s'", ",", "str", "(", "scaled", ")", ")", "return", "None", "# check to make sure the line segments on the ends are actually", "# tangent with the candidate circle fit", "mid_pt", "=", "points", "[", "[", "0", ",", "-", "2", "]", "]", "+", "(", "vectors", "[", "[", "0", ",", "-", "1", "]", "]", "*", ".5", ")", "radial", "=", "unitize", "(", "mid_pt", "-", "C", ")", "ends", "=", "unitize", "(", "vectors", "[", "[", "0", ",", "-", "1", "]", "]", ")", "tangent", "=", "np", ".", "abs", "(", "np", ".", "arccos", "(", "diagonal_dot", "(", "radial", ",", "ends", ")", ")", ")", "tangent", "=", "np", ".", "abs", "(", "tangent", "-", "np", ".", "pi", "/", "2", ")", ".", "max", "(", ")", "if", "tangent", ">", "tol", ".", "tangent", ":", "if", "verbose", ":", "log", ".", "debug", "(", "'circle fit error: tangent %f'", ",", "np", ".", "degrees", "(", "tangent", ")", ")", "return", "None", "result", "=", "{", "'center'", ":", "C", ",", "'radius'", ":", "R", "}", "return", "result" ]
Fit a circle, and reject the fit if:
    * the radius is larger than tol.radius_min*scale or tol.radius_max*scale
    * any segment spans more than tol.seg_angle
    * any segment is longer than tol.seg_frac*scale
    * the fit deviates by more than tol.radius_frac*radius
    * the segments on the ends deviate from tangent by more than tol.tangent

    Parameters
    ---------
    points:  (n, d) set of points which represent a path
    scale:   float, what is the overall scale of the set of points
    prior:   (center, radius) tuple for best guess, or None if unknown
    final:   boolean, if True also require at least three segments that span
             tol.seg_angle_min (used for the final fit check)
    verbose: boolean, if True output log.debug messages for the reasons
             for fit rejection. Potentially generates hundreds of thousands of
             messages so only suggested in manual debugging.

    Returns
    ---------
    if fit is acceptable:
        dict with 'center' and 'radius' keys
    else:
        None
[ "Fit", "a", "circle", "and", "reject", "the", "fit", "if", ":", "*", "the", "radius", "is", "larger", "than", "tol", ".", "radius_min", "*", "scale", "or", "tol", ".", "radius_max", "*", "scale", "*", "any", "segment", "spans", "more", "than", "tol", ".", "seg_angle", "*", "any", "segment", "is", "longer", "than", "tol", ".", "seg_frac", "*", "scale", "*", "the", "fit", "deviates", "by", "more", "than", "tol", ".", "radius_frac", "*", "radius", "*", "the", "segments", "on", "the", "ends", "deviate", "from", "tangent", "by", "more", "than", "tol", ".", "tangent" ]
python
train
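A minimal sketch of calling the checker on synthetic points sampled from an arc; whether the fit is accepted depends on the module's tolerance settings, so None is a possible outcome:

import numpy as np
from trimesh.path.simplify import fit_circle_check

theta = np.linspace(0.0, np.pi / 4, 10)  # a short, densely sampled arc
points = np.column_stack((np.cos(theta), np.sin(theta)))
result = fit_circle_check(points, scale=1.0)
if result is not None:
    print(result['center'], result['radius'])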
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/profile_regions/profile_regions_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/profile_regions/profile_regions_client.py#L28-L41
def get_geo_region(self, ip):
        """GetGeoRegion.
        [Preview API] Look up country/region based on provided IPv4, null if using the remote IPv4 address.
        :param str ip:
        :rtype: :class:`<GeoRegion> <azure.devops.v5_1.profile-regions.models.GeoRegion>`
        """
        query_parameters = {}
        if ip is not None:
            query_parameters['ip'] = self._serialize.query('ip', ip, 'str')
        response = self._send(http_method='GET',
                              location_id='35b3ff1d-ab4c-4d1c-98bb-f6ea21d86bd9',
                              version='5.1-preview.1',
                              query_parameters=query_parameters)
        return self._deserialize('GeoRegion', response)
[ "def", "get_geo_region", "(", "self", ",", "ip", ")", ":", "query_parameters", "=", "{", "}", "if", "ip", "is", "not", "None", ":", "query_parameters", "[", "'ip'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'ip'", ",", "ip", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'35b3ff1d-ab4c-4d1c-98bb-f6ea21d86bd9'", ",", "version", "=", "'5.1-preview.1'", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'GeoRegion'", ",", "response", ")" ]
GetGeoRegion.
        [Preview API] Look up country/region based on provided IPv4, null if using the remote IPv4 address.
        :param str ip:
        :rtype: :class:`<GeoRegion> <azure.devops.v5_1.profile-regions.models.GeoRegion>`
[ "GetGeoRegion", ".", "[", "Preview", "API", "]", "Lookup", "up", "country", "/", "region", "based", "on", "provided", "IPv4", "null", "if", "using", "the", "remote", "IPv4", "address", ".", ":", "param", "str", "ip", ":", ":", "rtype", ":", ":", "class", ":", "<GeoRegion", ">", "<azure", ".", "devops", ".", "v5_1", ".", "profile", "-", "regions", ".", "models", ".", "GeoRegion", ">" ]
python
train
lipoja/URLExtract
urlextract/cachefile.py
https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/cachefile.py#L77-L94
def _get_default_cache_file_path(self):
        """
        Returns default cache file path

        :return: default cache file path (to data directory)
        :rtype: str
        """
        default_list_path = os.path.join(
            self._get_default_cache_dir(), self._CACHE_FILE_NAME)

        if not os.access(default_list_path, os.F_OK):
            raise CacheFileError(
                "Default cache file does not exist "
                "'{}'!".format(default_list_path)
            )

        return default_list_path
[ "def", "_get_default_cache_file_path", "(", "self", ")", ":", "default_list_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_get_default_cache_dir", "(", ")", ",", "self", ".", "_CACHE_FILE_NAME", ")", "if", "not", "os", ".", "access", "(", "default_list_path", ",", "os", ".", "F_OK", ")", ":", "raise", "CacheFileError", "(", "\"Default cache file does not exist \"", "\"'{}'!\"", ".", "format", "(", "default_list_path", ")", ")", "return", "default_list_path" ]
Returns default cache file path

        :return: default cache file path (to data directory)
        :rtype: str
[ "Returns", "default", "cache", "file", "path" ]
python
train
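The cache lookup above runs behind the scenes when an extractor is constructed; typical public usage never touches it directly:

from urlextract import URLExtract

extractor = URLExtract()  # loads the TLD cache, raising CacheFileError if missing
print(extractor.find_urls('Docs live at https://example.com and http://test.org'))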
evolbioinfo/pastml
pastml/parsimony.py
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/parsimony.py#L114-L143
def acctran(tree, character, feature=PARS_STATES):
    """
    ACCTRAN (accelerated transformation) (Farris, 1970) aims at reducing the number of ambiguities
    in the parsimonious result. ACCTRAN forces the state changes to be performed as close to the root as possible,
    and therefore prioritises the reverse mutations.

    if N is not a tip:
        L, R <- left and right children of N
        if intersection(S(N), S(L)) is not empty:
            S(L) <- intersection(S(N), S(L))
        if intersection(S(N), S(R)) is not empty:
            S(R) <- intersection(S(N), S(R))
        ACCTRAN(L)
        ACCTRAN(R)

    :param tree: ete3.Tree, the tree of interest
    :param character: str, character for which the parsimonious states are reconstructed
    :return: void, adds get_personalized_feature_name(feature, PARS_STATES) feature to the tree nodes
    """
    ps_feature_down = get_personalized_feature_name(character, BU_PARS_STATES)

    for node in tree.traverse('preorder'):
        if node.is_root():
            node.add_feature(feature, getattr(node, ps_feature_down))
        node_states = getattr(node, feature)
        for child in node.children:
            child_states = getattr(child, ps_feature_down)
            state_intersection = node_states & child_states
            child.add_feature(feature, state_intersection if state_intersection else child_states)
[ "def", "acctran", "(", "tree", ",", "character", ",", "feature", "=", "PARS_STATES", ")", ":", "ps_feature_down", "=", "get_personalized_feature_name", "(", "character", ",", "BU_PARS_STATES", ")", "for", "node", "in", "tree", ".", "traverse", "(", "'preorder'", ")", ":", "if", "node", ".", "is_root", "(", ")", ":", "node", ".", "add_feature", "(", "feature", ",", "getattr", "(", "node", ",", "ps_feature_down", ")", ")", "node_states", "=", "getattr", "(", "node", ",", "feature", ")", "for", "child", "in", "node", ".", "children", ":", "child_states", "=", "getattr", "(", "child", ",", "ps_feature_down", ")", "state_intersection", "=", "node_states", "&", "child_states", "child", ".", "add_feature", "(", "feature", ",", "state_intersection", "if", "state_intersection", "else", "child_states", ")" ]
ACCTRAN (accelerated transformation) (Farris, 1970) aims at reducing the number of ambiguities
    in the parsimonious result. ACCTRAN forces the state changes to be performed as close to the root as possible,
    and therefore prioritises the reverse mutations.

    if N is not a tip:
        L, R <- left and right children of N
        if intersection(S(N), S(L)) is not empty:
            S(L) <- intersection(S(N), S(L))
        if intersection(S(N), S(R)) is not empty:
            S(R) <- intersection(S(N), S(R))
        ACCTRAN(L)
        ACCTRAN(R)

    :param tree: ete3.Tree, the tree of interest
    :param character: str, character for which the parsimonious states are reconstructed
    :return: void, adds get_personalized_feature_name(feature, PARS_STATES) feature to the tree nodes
[ "ACCTRAN", "(", "accelerated", "transformation", ")", "(", "Farris", "1970", ")", "aims", "at", "reducing", "the", "number", "of", "ambiguities", "in", "the", "parsimonious", "result", ".", "ACCTRAN", "forces", "the", "state", "changes", "to", "be", "performed", "as", "close", "to", "the", "root", "as", "possible", "and", "therefore", "prioritises", "the", "reverse", "mutations", "." ]
python
train
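The pseudocode in the docstring is compact enough to act out on a toy tree. Here is a plain-Python sketch of the same intersection rule, independent of pastml and ete3 (the node layout and state sets are invented for illustration):

def acctran_sketch(node):
    # node: dict with 'states' (a set) and 'children' (a list of nodes)
    for child in node['children']:
        common = node['states'] & child['states']
        if common:
            child['states'] = common
        acctran_sketch(child)

left = {'states': {'A', 'B'}, 'children': []}
right = {'states': {'B'}, 'children': []}
root = {'states': {'A'}, 'children': [left, right]}
acctran_sketch(root)
print(left['states'], right['states'])  # {'A'} {'B'}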
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_help.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_help.py#L90-L94
def idle_task(self):
        '''called on idle'''
        if self.module('console') is not None and not self.menu_added_console:
            self.menu_added_console = True
            self.module('console').add_menu(self.menu)
[ "def", "idle_task", "(", "self", ")", ":", "if", "self", ".", "module", "(", "'console'", ")", "is", "not", "None", "and", "not", "self", ".", "menu_added_console", ":", "self", ".", "menu_added_console", "=", "True", "self", ".", "module", "(", "'console'", ")", ".", "add_menu", "(", "self", ".", "menu", ")" ]
called on idle
[ "called", "on", "idle" ]
python
train
fake-name/ChromeController
ChromeController/Generator/Generated.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L2889-L2908
def CacheStorage_deleteEntry(self, cacheId, request):
        """
        Function path: CacheStorage.deleteEntry
        Domain: CacheStorage
        Method name: deleteEntry

        Parameters:
            Required arguments:
                'cacheId' (type: CacheId) -> Id of cache where the entry will be deleted.
                'request' (type: string) -> URL spec of the request.
        No return value.

        Description: Deletes a cache entry.
        """
        assert isinstance(request, (str,)), \
            "Argument 'request' must be of type '['str']'. Received type: '%s'" % type(request)
        subdom_funcs = self.synchronous_command('CacheStorage.deleteEntry',
            cacheId=cacheId, request=request)
        return subdom_funcs
[ "def", "CacheStorage_deleteEntry", "(", "self", ",", "cacheId", ",", "request", ")", ":", "assert", "isinstance", "(", "request", ",", "(", "str", ",", ")", ")", ",", "\"Argument 'request' must be of type '['str']'. Received type: '%s'\"", "%", "type", "(", "request", ")", "subdom_funcs", "=", "self", ".", "synchronous_command", "(", "'CacheStorage.deleteEntry'", ",", "cacheId", "=", "cacheId", ",", "request", "=", "request", ")", "return", "subdom_funcs" ]
Function path: CacheStorage.deleteEntry
        Domain: CacheStorage
        Method name: deleteEntry

        Parameters:
            Required arguments:
                'cacheId' (type: CacheId) -> Id of cache where the entry will be deleted.
                'request' (type: string) -> URL spec of the request.
        No return value.

        Description: Deletes a cache entry.
[ "Function", "path", ":", "CacheStorage", ".", "deleteEntry", "Domain", ":", "CacheStorage", "Method", "name", ":", "deleteEntry", "Parameters", ":", "Required", "arguments", ":", "cacheId", "(", "type", ":", "CacheId", ")", "-", ">", "Id", "of", "cache", "where", "the", "entry", "will", "be", "deleted", ".", "request", "(", "type", ":", "string", ")", "-", ">", "URL", "spec", "of", "the", "request", ".", "No", "return", "value", ".", "Description", ":", "Deletes", "a", "cache", "entry", "." ]
python
train
BYU-PCCL/holodeck
holodeck/environments.py
https://github.com/BYU-PCCL/holodeck/blob/01acd4013f5acbd9f61fbc9caaafe19975e8b121/holodeck/environments.py#L357-L367
def set_control_scheme(self, agent_name, control_scheme):
        """Set the control scheme for a specific agent.

        Args:
            agent_name (str): The name of the agent to set the control scheme for.
            control_scheme (int): A control scheme value (see :obj:`holodeck.agents.ControlSchemes`)
        """
        if agent_name not in self.agents:
            print("No such agent %s" % agent_name)
        else:
            self.agents[agent_name].set_control_scheme(control_scheme)
[ "def", "set_control_scheme", "(", "self", ",", "agent_name", ",", "control_scheme", ")", ":", "if", "agent_name", "not", "in", "self", ".", "agents", ":", "print", "(", "\"No such agent %s\"", "%", "agent_name", ")", "else", ":", "self", ".", "agents", "[", "agent_name", "]", ".", "set_control_scheme", "(", "control_scheme", ")" ]
Set the control scheme for a specific agent.

        Args:
            agent_name (str): The name of the agent to set the control scheme for.
            control_scheme (int): A control scheme value (see :obj:`holodeck.agents.ControlSchemes`)
[ "Set", "the", "control", "scheme", "for", "a", "specific", "agent", "." ]
python
train
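In practice the scheme is set on a named agent after the environment is created; a sketch, assuming a world whose UAV agent is named 'uav0':

import holodeck
from holodeck.agents import ControlSchemes

env = holodeck.make('UrbanCity')
env.set_control_scheme('uav0', ControlSchemes.UAV_ROLL_PITCH_YAW_RATE_ALT)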
markovmodel/PyEMMA
pyemma/coordinates/data/featurization/featurizer.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/data/featurization/featurizer.py#L236-L253
def _check_indices(self, pair_inds, pair_n=2):
        """ensure pairs are valid (shapes, all atom indices available?, etc.)
        """
        pair_inds = np.array(pair_inds).astype(dtype=np.int, casting='safe')

        if pair_inds.ndim != 2:
            raise ValueError("pair indices has to be a matrix.")

        if pair_inds.shape[1] != pair_n:
            raise ValueError("pair indices shape has to be (x, %i)." % pair_n)

        if pair_inds.max() >= self.topology.n_atoms:
            raise ValueError("index out of bounds: %i."
                             " Maximum atom index available: %i"
                             % (pair_inds.max(), self.topology.n_atoms - 1))

        return pair_inds
[ "def", "_check_indices", "(", "self", ",", "pair_inds", ",", "pair_n", "=", "2", ")", ":", "pair_inds", "=", "np", ".", "array", "(", "pair_inds", ")", ".", "astype", "(", "dtype", "=", "np", ".", "int", ",", "casting", "=", "'safe'", ")", "if", "pair_inds", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "\"pair indices has to be a matrix.\"", ")", "if", "pair_inds", ".", "shape", "[", "1", "]", "!=", "pair_n", ":", "raise", "ValueError", "(", "\"pair indices shape has to be (x, %i).\"", "%", "pair_n", ")", "if", "pair_inds", ".", "max", "(", ")", ">", "self", ".", "topology", ".", "n_atoms", ":", "raise", "ValueError", "(", "\"index out of bounds: %i.\"", "\" Maximum atom index available: %i\"", "%", "(", "pair_inds", ".", "max", "(", ")", ",", "self", ".", "topology", ".", "n_atoms", ")", ")", "return", "pair_inds" ]
ensure pairs are valid (shapes, all atom indices available?, etc.)
[ "ensure", "pairs", "are", "valid", "(", "shapes", "all", "atom", "indices", "available?", "etc", ".", ")" ]
python
train
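The check guards the public pair-based features; adding distances, for instance, runs its indices through it (the topology file name is a placeholder):

import pyemma.coordinates

feat = pyemma.coordinates.featurizer('protein.pdb')
feat.add_distances([[0, 1], [2, 3]])   # validated by _check_indices
# feat.add_distances([[0, 1, 2]])      # would raise: shape has to be (x, 2)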
learningequality/morango
morango/api/permissions.py
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/api/permissions.py#L15-L43
def authenticate_credentials(self, userargs, password, request=None):
        """
        Authenticate the userargs and password against Django auth backends.
        The "userargs" string may be just the username, or a querystring-encoded set of params.
        """

        credentials = {
            'password': password
        }

        if "=" not in userargs:
            # if it doesn't seem to be in querystring format, just use it as the username
            credentials[get_user_model().USERNAME_FIELD] = userargs
        else:
            # parse out the user args from querystring format into the credentials dict
            for arg in userargs.split("&"):
                key, val = arg.split("=")
                credentials[key] = val

        # authenticate the user via Django's auth backends
        user = authenticate(**credentials)

        if user is None:
            raise exceptions.AuthenticationFailed('Invalid credentials.')

        if not user.is_active:
            raise exceptions.AuthenticationFailed('User inactive or deleted.')

        return (user, None)
[ "def", "authenticate_credentials", "(", "self", ",", "userargs", ",", "password", ",", "request", "=", "None", ")", ":", "credentials", "=", "{", "'password'", ":", "password", "}", "if", "\"=\"", "not", "in", "userargs", ":", "# if it doesn't seem to be in querystring format, just use it as the username", "credentials", "[", "get_user_model", "(", ")", ".", "USERNAME_FIELD", "]", "=", "userargs", "else", ":", "# parse out the user args from querystring format into the credentials dict", "for", "arg", "in", "userargs", ".", "split", "(", "\"&\"", ")", ":", "key", ",", "val", "=", "arg", ".", "split", "(", "\"=\"", ")", "credentials", "[", "key", "]", "=", "val", "# authenticate the user via Django's auth backends", "user", "=", "authenticate", "(", "*", "*", "credentials", ")", "if", "user", "is", "None", ":", "raise", "exceptions", ".", "AuthenticationFailed", "(", "'Invalid credentials.'", ")", "if", "not", "user", ".", "is_active", ":", "raise", "exceptions", ".", "AuthenticationFailed", "(", "'User inactive or deleted.'", ")", "return", "(", "user", ",", "None", ")" ]
Authenticate the userargs and password against Django auth backends. The "userargs" string may be just the username, or a querystring-encoded set of params.
[ "Authenticate", "the", "userargs", "and", "password", "against", "Django", "auth", "backends", ".", "The", "userargs", "string", "may", "be", "just", "the", "username", "or", "a", "querystring", "-", "encoded", "set", "of", "params", "." ]
python
valid
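The querystring branch means the username part of an HTTP basic-auth pair can smuggle extra credential fields. A sketch of how such a value decomposes, mirroring the logic above (the field names are illustrative):

userargs = 'username=alice&facility=abc123'
credentials = {'password': 's3cret'}
if '=' not in userargs:
    credentials['username'] = userargs
else:
    for arg in userargs.split('&'):
        key, val = arg.split('=')
        credentials[key] = val
print(credentials)  # {'password': 's3cret', 'username': 'alice', 'facility': 'abc123'}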
saltstack/salt
salt/transport/tcp.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/transport/tcp.py#L650-L676
def post_fork(self, payload_handler, io_loop):
        '''
        After forking we need to create all of the local sockets to listen to the
        router

        payload_handler: function to call with your payloads
        '''
        self.payload_handler = payload_handler
        self.io_loop = io_loop
        self.serial = salt.payload.Serial(self.opts)
        with salt.utils.asynchronous.current_ioloop(self.io_loop):
            if USE_LOAD_BALANCER:
                self.req_server = LoadBalancerWorker(self.socket_queue,
                                                     self.handle_message,
                                                     ssl_options=self.opts.get('ssl'))
            else:
                if salt.utils.platform.is_windows():
                    self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                    _set_tcp_keepalive(self._socket, self.opts)
                    self._socket.setblocking(0)
                    self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
                self.req_server = SaltMessageServer(self.handle_message,
                                                    ssl_options=self.opts.get('ssl'))
                self.req_server.add_socket(self._socket)
                self._socket.listen(self.backlog)
        salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
[ "def", "post_fork", "(", "self", ",", "payload_handler", ",", "io_loop", ")", ":", "self", ".", "payload_handler", "=", "payload_handler", "self", ".", "io_loop", "=", "io_loop", "self", ".", "serial", "=", "salt", ".", "payload", ".", "Serial", "(", "self", ".", "opts", ")", "with", "salt", ".", "utils", ".", "asynchronous", ".", "current_ioloop", "(", "self", ".", "io_loop", ")", ":", "if", "USE_LOAD_BALANCER", ":", "self", ".", "req_server", "=", "LoadBalancerWorker", "(", "self", ".", "socket_queue", ",", "self", ".", "handle_message", ",", "ssl_options", "=", "self", ".", "opts", ".", "get", "(", "'ssl'", ")", ")", "else", ":", "if", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "self", ".", "_socket", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "self", ".", "_socket", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_REUSEADDR", ",", "1", ")", "_set_tcp_keepalive", "(", "self", ".", "_socket", ",", "self", ".", "opts", ")", "self", ".", "_socket", ".", "setblocking", "(", "0", ")", "self", ".", "_socket", ".", "bind", "(", "(", "self", ".", "opts", "[", "'interface'", "]", ",", "int", "(", "self", ".", "opts", "[", "'ret_port'", "]", ")", ")", ")", "self", ".", "req_server", "=", "SaltMessageServer", "(", "self", ".", "handle_message", ",", "ssl_options", "=", "self", ".", "opts", ".", "get", "(", "'ssl'", ")", ")", "self", ".", "req_server", ".", "add_socket", "(", "self", ".", "_socket", ")", "self", ".", "_socket", ".", "listen", "(", "self", ".", "backlog", ")", "salt", ".", "transport", ".", "mixins", ".", "auth", ".", "AESReqServerMixin", ".", "post_fork", "(", "self", ",", "payload_handler", ",", "io_loop", ")" ]
After forking we need to create all of the local sockets to listen to the
        router

        payload_handler: function to call with your payloads
[ "After", "forking", "we", "need", "to", "create", "all", "of", "the", "local", "sockets", "to", "listen", "to", "the", "router" ]
python
train
camsci/meteor-pi
src/pythonModules/meteorpi_client/meteorpi_client/__init__.py
https://github.com/camsci/meteor-pi/blob/7b01527650bd1b2b76d6f364e8122e25b8812c8d/src/pythonModules/meteorpi_client/meteorpi_client/__init__.py#L133-L158
def search_files(self, search=None):
        """
        Search for files, returning a FileRecord for each result. FileRecords have two additional methods
        patched into them, get_url() and download_to(file_name), which will retrieve the URL for the file
        content and download that content to a named file on disk, respectively.

        :param FileRecordSearch search:
            an instance of :class:`meteorpi_model.FileRecordSearch` - see the model docs for details on how
            to construct this
        :return:
            an object containing 'count' and 'files'. 'files' is a sequence of FileRecord objects containing
            the results of the search, and 'count' is the total number of results which would be returned if
            no result limit was in place (i.e. if the number of FileRecords in the 'files' part is less than
            'count' you have more records which weren't returned because of a query limit. Note that the
            default query limit is 100).
        """
        if search is None:
            search = model.FileRecordSearch()
        search_string = _to_encoded_string(search)
        url = self.base_url + '/files/{0}'.format(search_string)
        # print url
        response = requests.get(url)
        response_object = safe_load(response.text)
        file_dicts = response_object['files']
        file_count = response_object['count']
        return {'count': file_count,
                'files': list((self._augment_file(f) for f in (model.FileRecord.from_dict(d) for d in file_dicts)))}
[ "def", "search_files", "(", "self", ",", "search", "=", "None", ")", ":", "if", "search", "is", "None", ":", "search", "=", "model", ".", "FileRecordSearch", "(", ")", "search_string", "=", "_to_encoded_string", "(", "search", ")", "url", "=", "self", ".", "base_url", "+", "'/files/{0}'", ".", "format", "(", "search_string", ")", "# print url", "response", "=", "requests", ".", "get", "(", "url", ")", "response_object", "=", "safe_load", "(", "response", ".", "text", ")", "file_dicts", "=", "response_object", "[", "'files'", "]", "file_count", "=", "response_object", "[", "'count'", "]", "return", "{", "'count'", ":", "file_count", ",", "'files'", ":", "list", "(", "(", "self", ".", "_augment_file", "(", "f", ")", "for", "f", "in", "(", "model", ".", "FileRecord", ".", "from_dict", "(", "d", ")", "for", "d", "in", "file_dicts", ")", ")", ")", "}" ]
Search for files, returning a FileRecord for each result. FileRecords have two additional methods
        patched into them, get_url() and download_to(file_name), which will retrieve the URL for the file
        content and download that content to a named file on disk, respectively.

        :param FileRecordSearch search:
            an instance of :class:`meteorpi_model.FileRecordSearch` - see the model docs for details on how
            to construct this
        :return:
            an object containing 'count' and 'files'. 'files' is a sequence of FileRecord objects containing
            the results of the search, and 'count' is the total number of results which would be returned if
            no result limit was in place (i.e. if the number of FileRecords in the 'files' part is less than
            'count' you have more records which weren't returned because of a query limit. Note that the
            default query limit is 100).
[ "Search", "for", "files", "returning", "a", "FileRecord", "for", "each", "result", ".", "FileRecords", "have", "two", "additional", "methods", "patched", "into", "them", "get_url", "()", "and", "download_to", "(", "file_name", ")", "which", "will", "retrieve", "the", "URL", "for", "the", "file", "content", "and", "download", "that", "content", "to", "a", "named", "file", "on", "disk", "respectively", "." ]
python
train
wummel/linkchecker
third_party/dnspython/dns/rdatatype.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/rdatatype.py#L214-L222
def is_metatype(rdtype):
    """True if the type is a metatype.

    @param rdtype: the type
    @type rdtype: int
    @rtype: bool"""

    if rdtype >= TKEY and rdtype <= ANY or rdtype in _metatypes:
        return True
    return False
[ "def", "is_metatype", "(", "rdtype", ")", ":", "if", "rdtype", ">=", "TKEY", "and", "rdtype", "<=", "ANY", "or", "rdtype", "in", "_metatypes", ":", "return", "True", "return", "False" ]
True if the type is a metatype.

    @param rdtype: the type
    @type rdtype: int
    @rtype: bool
[ "True", "if", "the", "type", "is", "a", "metatype", "." ]
python
train
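For example, query metatypes such as ANY are flagged while ordinary record types are not:

import dns.rdatatype

print(dns.rdatatype.is_metatype(dns.rdatatype.ANY))  # True
print(dns.rdatatype.is_metatype(dns.rdatatype.A))    # False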
joopert/nad_receiver
nad_receiver/__init__.py
https://github.com/joopert/nad_receiver/blob/416de0173a330c75cc73f9c90b0c5df32e5e0ba3/nad_receiver/__init__.py#L320-L365
def exec_command(self, domain, function, operator, value=None):
        """
        Write a command to the receiver and read the value it returns.
        """
        if operator in CMDS[domain][function]['supported_operators']:
            if operator == '=' and value is None:
                raise ValueError('No value provided')

            if value is None:
                cmd = ''.join([CMDS[domain][function]['cmd'], operator])
            else:
                cmd = ''.join(
                    [CMDS[domain][function]['cmd'], operator, str(value)])
        else:
            raise ValueError('Invalid operator provided %s' % operator)

        if self._open_connection():
            # For telnet the first \r / \n is recommended only
            self.telnet.write((''.join(['\r', cmd, '\n']).encode()))
            # Could raise eg. socket.error, UnicodeError, let the client handle it

            # Test 3 x buffer is completely empty
            # With the default timeout that means a delay at
            # about 3+ seconds
            loop = 3
            while loop:
                msg = self.telnet.read_until('\n'.encode(), self.timeout)
                # Could raise eg. EOFError, UnicodeError, let the client handle it

                if msg == b"":
                    # Nothing in buffer
                    loop -= 1
                    continue

                msg = msg.decode().strip('\r\n')
                # Could raise eg. UnicodeError, let the client handle it
                #print("NAD responded with '%s'" % msg)

                # Wait for the response that equals the requested domain.function
                if msg.strip().split('=')[0].lower() == '.'.join([domain, function]).lower():
                    # b'Main.Volume=-12\r will return -12
                    return msg.strip().split('=')[1]

            raise RuntimeError('Failed to read response')

        raise RuntimeError('Failed to open connection')
[ "def", "exec_command", "(", "self", ",", "domain", ",", "function", ",", "operator", ",", "value", "=", "None", ")", ":", "if", "operator", "in", "CMDS", "[", "domain", "]", "[", "function", "]", "[", "'supported_operators'", "]", ":", "if", "operator", "is", "'='", "and", "value", "is", "None", ":", "raise", "ValueError", "(", "'No value provided'", ")", "if", "value", "is", "None", ":", "cmd", "=", "''", ".", "join", "(", "[", "CMDS", "[", "domain", "]", "[", "function", "]", "[", "'cmd'", "]", ",", "operator", "]", ")", "else", ":", "cmd", "=", "''", ".", "join", "(", "[", "CMDS", "[", "domain", "]", "[", "function", "]", "[", "'cmd'", "]", ",", "operator", ",", "str", "(", "value", ")", "]", ")", "else", ":", "raise", "ValueError", "(", "'Invalid operator provided %s'", "%", "operator", ")", "if", "self", ".", "_open_connection", "(", ")", ":", "# For telnet the first \\r / \\n is recommended only", "self", ".", "telnet", ".", "write", "(", "(", "''", ".", "join", "(", "[", "'\\r'", ",", "cmd", ",", "'\\n'", "]", ")", ".", "encode", "(", ")", ")", ")", "# Could raise eg. socket.error, UnicodeError, let the client handle it", "# Test 3 x buffer is completely empty", "# With the default timeout that means a delay at", "# about 3+ seconds", "loop", "=", "3", "while", "loop", ":", "msg", "=", "self", ".", "telnet", ".", "read_until", "(", "'\\n'", ".", "encode", "(", ")", ",", "self", ".", "timeout", ")", "# Could raise eg. EOFError, UnicodeError, let the client handle it", "if", "msg", "==", "\"\"", ":", "# Nothing in buffer", "loop", "-=", "1", "continue", "msg", "=", "msg", ".", "decode", "(", ")", ".", "strip", "(", "'\\r\\n'", ")", "# Could raise eg. UnicodeError, let the client handle it", "#print(\"NAD reponded with '%s'\" % msg)", "# Wait for the response that equals the requested domain.function", "if", "msg", ".", "strip", "(", ")", ".", "split", "(", "'='", ")", "[", "0", "]", ".", "lower", "(", ")", "==", "'.'", ".", "join", "(", "[", "domain", ",", "function", "]", ")", ".", "lower", "(", ")", ":", "# b'Main.Volume=-12\\r will return -12", "return", "msg", ".", "strip", "(", ")", ".", "split", "(", "'='", ")", "[", "1", "]", "raise", "RuntimeError", "(", "'Failed to read response'", ")", "raise", "RuntimeError", "(", "'Failed to open connection'", ")" ]
Write a command to the receiver and read the value it returns.
[ "Write", "a", "command", "to", "the", "receiver", "and", "read", "the", "value", "it", "returns", "." ]
python
test
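A sketch of driving the command table over telnet; the host address is a placeholder, and the exact domain/function keys and the NADReceiverTelnet entry point are assumptions based on this package's CMDS table:

from nad_receiver import NADReceiverTelnet

receiver = NADReceiverTelnet('192.168.1.50')
receiver.exec_command('main', 'power', '=', 'On')
print(receiver.exec_command('main', 'volume', '?'))  # query the current volume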
SatelliteQE/nailgun
nailgun/config.py
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/config.py#L194-L235
def save(self, label='default', path=None):
        """Save the current connection configuration to a file.

        This method is thread safe.

        :param label: A string. An identifier for the current configuration.
            This allows multiple configurations with unique labels to be saved
            in a single file. If a configuration identified by ``label``
            already exists in the destination configuration file, it is
            replaced.
        :param path: A string. The configuration file to be manipulated. By
            default, an XDG-compliant configuration file is used. A
            configuration file is created if one does not exist already.
        :returns: ``None``
        """
        # What will we write out?
        cfg = vars(self)
        if 'version' in cfg:  # pragma: no cover
            cfg['version'] = str(cfg['version'])

        # Where is the file we're writing to?
        if path is None:
            path = join(
                BaseDirectory.save_config_path(self._xdg_config_dir),
                self._xdg_config_file
            )

        self._file_lock.acquire()
        try:
            # Either read an existing config or make an empty one. Then update
            # the config and write it out.
            try:
                with open(path) as config_file:
                    config = json.load(config_file)
            except IOError:  # pragma: no cover
                config = {}
            config[label] = cfg
            with open(path, 'w') as config_file:
                json.dump(config, config_file)
        finally:
            self._file_lock.release()
[ "def", "save", "(", "self", ",", "label", "=", "'default'", ",", "path", "=", "None", ")", ":", "# What will we write out?", "cfg", "=", "vars", "(", "self", ")", "if", "'version'", "in", "cfg", ":", "# pragma: no cover", "cfg", "[", "'version'", "]", "=", "str", "(", "cfg", "[", "'version'", "]", ")", "# Where is the file we're writing to?", "if", "path", "is", "None", ":", "path", "=", "join", "(", "BaseDirectory", ".", "save_config_path", "(", "self", ".", "_xdg_config_dir", ")", ",", "self", ".", "_xdg_config_file", ")", "self", ".", "_file_lock", ".", "acquire", "(", ")", "try", ":", "# Either read an existing config or make an empty one. Then update", "# the config and write it out.", "try", ":", "with", "open", "(", "path", ")", "as", "config_file", ":", "config", "=", "json", ".", "load", "(", "config_file", ")", "except", "IOError", ":", "# pragma: no cover", "config", "=", "{", "}", "config", "[", "label", "]", "=", "cfg", "with", "open", "(", "path", ",", "'w'", ")", "as", "config_file", ":", "json", ".", "dump", "(", "config", ",", "config_file", ")", "finally", ":", "self", ".", "_file_lock", ".", "release", "(", ")" ]
Save the current connection configuration to a file.

        This method is thread safe.

        :param label: A string. An identifier for the current configuration.
            This allows multiple configurations with unique labels to be saved
            in a single file. If a configuration identified by ``label``
            already exists in the destination configuration file, it is
            replaced.
        :param path: A string. The configuration file to be manipulated. By
            default, an XDG-compliant configuration file is used. A
            configuration file is created if one does not exist already.
        :returns: ``None``
[ "Save", "the", "current", "connection", "configuration", "to", "a", "file", "." ]
python
train
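A typical round trip saves a labelled configuration and reads it back (server URL and credentials are placeholders):

from nailgun.config import ServerConfig

cfg = ServerConfig(url='https://sat.example.com', auth=('admin', 'changeme'), verify=False)
cfg.save(label='staging')
restored = ServerConfig.get(label='staging')
print(restored.url)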
aws/aws-xray-sdk-python
aws_xray_sdk/core/sampling/sampling_rule.py
https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/sampling/sampling_rule.py#L55-L69
def snapshot_statistics(self):
        """
        Take a snapshot of request/borrow/sampled count for reporting
        back to X-Ray back-end by ``TargetPoller`` and reset those counters.
        """
        with self._lock:
            stats = {
                'request_count': self.request_count,
                'borrow_count': self.borrow_count,
                'sampled_count': self.sampled_count,
            }
            self._reset_statistics()
            return stats
[ "def", "snapshot_statistics", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "stats", "=", "{", "'request_count'", ":", "self", ".", "request_count", ",", "'borrow_count'", ":", "self", ".", "borrow_count", ",", "'sampled_count'", ":", "self", ".", "sampled_count", ",", "}", "self", ".", "_reset_statistics", "(", ")", "return", "stats" ]
Take a snapshot of request/borrow/sampled count for reporting back to X-Ray back-end by ``TargetPoller`` and reset those counters.
[ "Take", "a", "snapshot", "of", "request", "/", "borrow", "/", "sampled", "count", "for", "reporting", "back", "to", "X", "-", "Ray", "back", "-", "end", "by", "TargetPoller", "and", "reset", "those", "counters", "." ]
python
train
Parsely/birding
src/birding/config.py
https://github.com/Parsely/birding/blob/c7f6eee56424234e361b1a455595de202e744dac/src/birding/config.py#L189-L224
def overlay(upper, lower):
    """Return the overlay of `upper` dict onto `lower` dict.

    This operation is similar to `dict.update`, but recurses when it
    encounters a dict/mapping, as to allow nested leaf values in the lower
    collection which are not in the upper collection.

    Whenever the upper collection has a value, its value is used.

    >>> overlay({'a': 0}, {})
    {'a': 0}
    >>> abc = {'a': 0, 'b': 1, 'c': 2}
    >>> abc == overlay({'a': 0, 'c': 2}, {'a': None, 'b': 1})
    True
    >>> result = {' ': None, '_': abc}
    >>> result == overlay(
    ...     {'_': {'a': 0, 'c': 2}, ' ': None},
    ...     {'_': {'a': None, 'b': 1}})
    True
    >>>
    """
    result = {}
    for key in upper:
        if is_mapping(upper[key]):
            lower_value = lower.get(key, {})
            if not is_mapping(lower_value):
                msg = 'Attempting to overlay a mapping on a non-mapping: {}'
                raise ValueError(msg.format(key))
            result[key] = overlay(upper[key], lower_value)
        else:
            result[key] = upper[key]
    for key in lower:
        if key in result:
            continue
        result[key] = lower[key]
    return result
[ "def", "overlay", "(", "upper", ",", "lower", ")", ":", "result", "=", "{", "}", "for", "key", "in", "upper", ":", "if", "is_mapping", "(", "upper", "[", "key", "]", ")", ":", "lower_value", "=", "lower", ".", "get", "(", "key", ",", "{", "}", ")", "if", "not", "is_mapping", "(", "lower_value", ")", ":", "msg", "=", "'Attempting to overlay a mapping on a non-mapping: {}'", "raise", "ValueError", "(", "msg", ".", "format", "(", "key", ")", ")", "result", "[", "key", "]", "=", "overlay", "(", "upper", "[", "key", "]", ",", "lower_value", ")", "else", ":", "result", "[", "key", "]", "=", "upper", "[", "key", "]", "for", "key", "in", "lower", ":", "if", "key", "in", "result", ":", "continue", "result", "[", "key", "]", "=", "lower", "[", "key", "]", "return", "result" ]
Return the overlay of `upper` dict onto `lower` dict.

    This operation is similar to `dict.update`, but recurses when it
    encounters a dict/mapping, as to allow nested leaf values in the lower
    collection which are not in the upper collection.

    Whenever the upper collection has a value, its value is used.

    >>> overlay({'a': 0}, {})
    {'a': 0}
    >>> abc = {'a': 0, 'b': 1, 'c': 2}
    >>> abc == overlay({'a': 0, 'c': 2}, {'a': None, 'b': 1})
    True
    >>> result = {' ': None, '_': abc}
    >>> result == overlay(
    ...     {'_': {'a': 0, 'c': 2}, ' ': None},
    ...     {'_': {'a': None, 'b': 1}})
    True
    >>>
[ "Return", "the", "overlay", "of", "upper", "dict", "onto", "lower", "dict", "." ]
python
train
phn/lineid_plot
lineid_plot/lineid_plot.py
https://github.com/phn/lineid_plot/blob/7c7a1af53fe439b3a7c5a57f01680575837fb978/lineid_plot/lineid_plot.py#L233-L239
def initial_annotate_kwargs():
    """Return default parameters passed to Axes.annotate to create labels."""
    return dict(
        xycoords="data", textcoords="data",
        rotation=90, horizontalalignment="center", verticalalignment="center",
        arrowprops=dict(arrowstyle="-", relpos=(0.5, 0.0))
    )
[ "def", "initial_annotate_kwargs", "(", ")", ":", "return", "dict", "(", "xycoords", "=", "\"data\"", ",", "textcoords", "=", "\"data\"", ",", "rotation", "=", "90", ",", "horizontalalignment", "=", "\"center\"", ",", "verticalalignment", "=", "\"center\"", ",", "arrowprops", "=", "dict", "(", "arrowstyle", "=", "\"-\"", ",", "relpos", "=", "(", "0.5", ",", "0.0", ")", ")", ")" ]
Return default parameters passed to Axes.annotate to create labels.
[ "Return", "default", "parameters", "passed", "to", "Axes", ".", "annotate", "to", "create", "labels", "." ]
python
train
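The returned dict is meant to be tweaked and handed back to the plotting call; a sketch, assuming the usual plot_line_ids entry point accepts an annotate_kwargs argument, and using a synthetic spectrum:

import numpy as np
import lineid_plot

wave = np.linspace(4000, 4200, 200)
flux = np.random.normal(1.0, 0.01, wave.size)
ak = lineid_plot.initial_annotate_kwargs()
ak['rotation'] = 45  # slant the labels instead of the default 90 degrees
lineid_plot.plot_line_ids(wave, flux, [4101.74], ['H delta'], annotate_kwargs=ak)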
Rapptz/discord.py
discord/channel.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/channel.py#L740-L745
async def create_voice_channel(self, name, *, overwrites=None, reason=None, **options):
        """|coro|

        A shortcut method to :meth:`Guild.create_voice_channel` to create a :class:`VoiceChannel` in the category.
        """
        return await self.guild.create_voice_channel(name, overwrites=overwrites, category=self, reason=reason, **options)
[ "async", "def", "create_voice_channel", "(", "self", ",", "name", ",", "*", ",", "overwrites", "=", "None", ",", "reason", "=", "None", ",", "*", "*", "options", ")", ":", "return", "await", "self", ".", "guild", ".", "create_voice_channel", "(", "name", ",", "overwrites", "=", "overwrites", ",", "category", "=", "self", ",", "reason", "=", "reason", ",", "*", "*", "options", ")" ]
|coro|

        A shortcut method to :meth:`Guild.create_voice_channel` to create a :class:`VoiceChannel` in the category.
[ "|coro|" ]
python
train
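Inside a command the shortcut reads naturally; a sketch using discord.ext.commands, with the bot wiring reduced to a minimum and the category name assumed to exist:

import discord
from discord.ext import commands

bot = commands.Bot(command_prefix='!')

@bot.command()
async def voicechan(ctx, name: str):
    category = discord.utils.get(ctx.guild.categories, name='Voice')
    channel = await category.create_voice_channel(name, user_limit=10)
    await ctx.send('created {0.mention}'.format(channel))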
DistrictDataLabs/yellowbrick
yellowbrick/contrib/missing/bar.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/contrib/missing/bar.py#L142-L163
def draw_stacked_bar(self, nan_col_counts):
        """Draws a horizontal stacked bar chart with different colors
        for each count of nan values per label.
        """
        for index, nan_values in enumerate(nan_col_counts):
            label, nan_col_counts = nan_values

            if index == 0:
                # first draw should be at zero
                bottom_chart = np.zeros(nan_col_counts.shape)

            # if features passed in then, label as such
            if self.classes_ is not None:
                label = self.classes_[index]

            color = self.colors[index]
            self.ax.barh(self.ind - self.width / 2, nan_col_counts, self.width,
                         color=color, label=label, left=bottom_chart)

            # keep track of counts to build on stacked
            bottom_chart = nan_col_counts
[ "def", "draw_stacked_bar", "(", "self", ",", "nan_col_counts", ")", ":", "for", "index", ",", "nan_values", "in", "enumerate", "(", "nan_col_counts", ")", ":", "label", ",", "nan_col_counts", "=", "nan_values", "if", "index", "==", "0", ":", "# first draw should be at zero", "bottom_chart", "=", "np", ".", "zeros", "(", "nan_col_counts", ".", "shape", ")", "# if features passed in then, label as such", "if", "self", ".", "classes_", "is", "not", "None", ":", "label", "=", "self", ".", "classes_", "[", "index", "]", "color", "=", "self", ".", "colors", "[", "index", "]", "self", ".", "ax", ".", "barh", "(", "self", ".", "ind", "-", "self", ".", "width", "/", "2", ",", "nan_col_counts", ",", "self", ".", "width", ",", "color", "=", "color", ",", "label", "=", "label", ",", "left", "=", "bottom_chart", ")", "# keep track of counts to build on stacked", "bottom_chart", "=", "nan_col_counts" ]
Draws a horizontal stacked bar chart with different colors for each count of nan values per label.
[ "Draws", "a", "horizontal", "stacked", "bar", "chart", "with", "different", "colors", "for", "each", "count", "of", "nan", "values", "per", "label", "." ]
python
train
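This drawing routine sits behind the MissingValuesBar visualizer; typical use goes through the public estimator interface (the import path follows this record's module, and the data is synthetic):

import numpy as np
from yellowbrick.contrib.missing import MissingValuesBar

X = np.random.uniform(size=(100, 4))
X[X < 0.1] = np.nan  # inject some missing values
viz = MissingValuesBar(features=['a', 'b', 'c', 'd'])
viz.fit(X)   # passing y as well enables the per-label stacked colouring
viz.poof()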
globocom/GloboNetworkAPI-client-python
networkapiclient/GrupoL3.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/GrupoL3.py#L82-L108
def alterar(self, id_groupl3, name):
        """Change Group L3 by the identifier.

        :param id_groupl3: Identifier of the Group L3. Integer value and greater than zero.
        :param name: Group L3 name. String with a minimum 2 and maximum of 80 characters

        :return: None

        :raise InvalidParameterError: The identifier of Group L3 or name is null or invalid.
        :raise NomeGrupoL3DuplicadoError: There is already a registered Group L3 with the value of name.
        :raise GrupoL3NaoExisteError: Group L3 not registered.
        :raise DataBaseError: Networkapi failed to access the database.
        :raise XMLError: Networkapi failed to generate the XML response.
        """
        if not is_valid_int_param(id_groupl3):
            raise InvalidParameterError(
                u'The identifier of Group L3 is invalid or was not informed.')

        url = 'groupl3/' + str(id_groupl3) + '/'

        group_l3_map = dict()
        group_l3_map['name'] = name

        code, xml = self.submit({'groupl3': group_l3_map}, 'PUT', url)

        return self.response(code, xml)
[ "def", "alterar", "(", "self", ",", "id_groupl3", ",", "name", ")", ":", "if", "not", "is_valid_int_param", "(", "id_groupl3", ")", ":", "raise", "InvalidParameterError", "(", "u'The identifier of Group L3 is invalid or was not informed.'", ")", "url", "=", "'groupl3/'", "+", "str", "(", "id_groupl3", ")", "+", "'/'", "group_l3_map", "=", "dict", "(", ")", "group_l3_map", "[", "'name'", "]", "=", "name", "code", ",", "xml", "=", "self", ".", "submit", "(", "{", "'groupl3'", ":", "group_l3_map", "}", ",", "'PUT'", ",", "url", ")", "return", "self", ".", "response", "(", "code", ",", "xml", ")" ]
Change Group L3 by the identifier. :param id_groupl3: Identifier of the Group L3. Integer value and greater than zero. :param name: Group L3 name. String with a minimum of 2 and maximum of 80 characters :return: None :raise InvalidParameterError: The identifier of Group L3 or name is null or invalid. :raise NomeGrupoL3DuplicadoError: There is already a registered Group L3 with the value of name. :raise GrupoL3NaoExisteError: Group L3 not registered. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
[ "Change", "Group", "L3", "from", "by", "the", "identifier", "." ]
python
train
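A hedged usage sketch for the endpoint above. The constructor arguments follow the usual GloboNetworkAPI client pattern of (url, user, password), which is an assumption here, and the URL, credentials, and group id are all placeholders:

from networkapiclient.GrupoL3 import GrupoL3

# Placeholders throughout; the client is assumed to take (url, user, password).
client = GrupoL3('http://networkapi.example.com/', 'user', 'password')
client.alterar(42, 'backend-l3-group')  # issues PUT groupl3/42/ with {'name': ...}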
kolypto/py-good
good/schema/compiler.py
https://github.com/kolypto/py-good/blob/192ef19e79f6fd95c1cbd7c378a3074c7ad7a6d4/good/schema/compiler.py#L266-L295
def _compile_literal(self, schema): """ Compile literal schema: type and value matching """ # Prepare self self.compiled_type = const.COMPILED_TYPE.LITERAL self.name = get_literal_name(schema) # Error partials schema_type = type(schema) err_type = self.Invalid(_(u'Wrong value type'), get_type_name(schema_type)) err_value = self.Invalid(_(u'Invalid value'), self.name) # Matcher if self.matcher: def match_literal(v): return type(v) == schema_type and v == schema, v return match_literal # Validator def validate_literal(v): # Type check if type(v) != schema_type: # expected=<type>, provided=<type> raise err_type(get_type_name(type(v))) # Equality check if v != schema: # expected=<value>, provided=<value> raise err_value(get_literal_name(v)) # Fine return v return validate_literal
[ "def", "_compile_literal", "(", "self", ",", "schema", ")", ":", "# Prepare self", "self", ".", "compiled_type", "=", "const", ".", "COMPILED_TYPE", ".", "LITERAL", "self", ".", "name", "=", "get_literal_name", "(", "schema", ")", "# Error partials", "schema_type", "=", "type", "(", "schema", ")", "err_type", "=", "self", ".", "Invalid", "(", "_", "(", "u'Wrong value type'", ")", ",", "get_type_name", "(", "schema_type", ")", ")", "err_value", "=", "self", ".", "Invalid", "(", "_", "(", "u'Invalid value'", ")", ",", "self", ".", "name", ")", "# Matcher", "if", "self", ".", "matcher", ":", "def", "match_literal", "(", "v", ")", ":", "return", "type", "(", "v", ")", "==", "schema_type", "and", "v", "==", "schema", ",", "v", "return", "match_literal", "# Validator", "def", "validate_literal", "(", "v", ")", ":", "# Type check", "if", "type", "(", "v", ")", "!=", "schema_type", ":", "# expected=<type>, provided=<type>", "raise", "err_type", "(", "get_type_name", "(", "type", "(", "v", ")", ")", ")", "# Equality check", "if", "v", "!=", "schema", ":", "# expected=<value>, provided=<value>", "raise", "err_value", "(", "get_literal_name", "(", "v", ")", ")", "# Fine", "return", "v", "return", "validate_literal" ]
Compile literal schema: type and value matching
[ "Compile", "literal", "schema", ":", "type", "and", "value", "matching" ]
python
train
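The matcher above requires the exact type and the value to agree, which is stricter than a plain `==` comparison. A standalone sketch of that rule (not the library's code path):

def match_literal(schema, value):
    # Exact-type check first: 1 == True in Python, but int is not bool.
    return type(value) == type(schema) and value == schema

assert match_literal(1, 1)
assert not match_literal(1, True)  # equal by ==, rejected by type
assert not match_literal(1, 1.0)   # equal by ==, rejected by type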
volafiled/python-volapi
volapi/volapi.py
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L348-L355
def run_queues(self): """Run all queues that have data queued""" if self.exception: # pylint: disable=raising-bad-type raise self.exception listeners = self.__listeners_for_thread return sum(l.process() for l in listeners) > 0
[ "def", "run_queues", "(", "self", ")", ":", "if", "self", ".", "exception", ":", "# pylint: disable=raising-bad-type", "raise", "self", ".", "exception", "listeners", "=", "self", ".", "__listeners_for_thread", "return", "sum", "(", "l", ".", "process", "(", ")", "for", "l", "in", "listeners", ")", ">", "0" ]
Run all queues that have data queued
[ "Run", "all", "queues", "that", "have", "data", "queued" ]
python
train
watson-developer-cloud/python-sdk
ibm_watson/assistant_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/assistant_v1.py#L5987-L5997
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text if hasattr(self, '_additionalProperties'): for _key in self._additionalProperties: _value = getattr(self, _key, None) if _value is not None: _dict[_key] = _value return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'text'", ")", "and", "self", ".", "text", "is", "not", "None", ":", "_dict", "[", "'text'", "]", "=", "self", ".", "text", "if", "hasattr", "(", "self", ",", "'_additionalProperties'", ")", ":", "for", "_key", "in", "self", ".", "_additionalProperties", ":", "_value", "=", "getattr", "(", "self", ",", "_key", ",", "None", ")", "if", "_value", "is", "not", "None", ":", "_dict", "[", "_key", "]", "=", "_value", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
pazz/urwidtrees
urwidtrees/nested.py
https://github.com/pazz/urwidtrees/blob/d1fa38ce4f37db00bdfc574b856023b5db4c7ead/urwidtrees/nested.py#L94-L100
def _get_subtree_for(self, pos): """returns Tree that manages pos[-1]""" res = self._tree candidate = self._lookup_entry(self._tree, pos[:-1]) if isinstance(candidate, Tree): res = candidate return res
[ "def", "_get_subtree_for", "(", "self", ",", "pos", ")", ":", "res", "=", "self", ".", "_tree", "candidate", "=", "self", ".", "_lookup_entry", "(", "self", ".", "_tree", ",", "pos", "[", ":", "-", "1", "]", ")", "if", "isinstance", "(", "candidate", ",", "Tree", ")", ":", "res", "=", "candidate", "return", "res" ]
returns Tree that manages pos[-1]
[ "returns", "Tree", "that", "manages", "pos", "[", "-", "1", "]" ]
python
train
KrishnaswamyLab/graphtools
graphtools/graphs.py
https://github.com/KrishnaswamyLab/graphtools/blob/44685352be7df2005d44722903092207967457f2/graphtools/graphs.py#L595-L635
def build_landmark_op(self): """Build the landmark operator Calculates spectral clusters on the kernel, and calculates transition probabilities between cluster centers by using transition probabilities between samples assigned to each cluster. """ tasklogger.log_start("landmark operator") is_sparse = sparse.issparse(self.kernel) # spectral clustering tasklogger.log_start("SVD") _, _, VT = randomized_svd(self.diff_aff, n_components=self.n_svd, random_state=self.random_state) tasklogger.log_complete("SVD") tasklogger.log_start("KMeans") kmeans = MiniBatchKMeans( self.n_landmark, init_size=3 * self.n_landmark, batch_size=10000, random_state=self.random_state) self._clusters = kmeans.fit_predict( self.diff_op.dot(VT.T)) # some clusters are not assigned tasklogger.log_complete("KMeans") # transition matrices pmn = self._landmarks_to_data() # row normalize pnm = pmn.transpose() pmn = normalize(pmn, norm='l1', axis=1) pnm = normalize(pnm, norm='l1', axis=1) landmark_op = pmn.dot(pnm) # sparsity agnostic matrix multiplication if is_sparse: # no need to have a sparse landmark operator landmark_op = landmark_op.toarray() # store output self._landmark_op = landmark_op self._transitions = pnm tasklogger.log_complete("landmark operator")
[ "def", "build_landmark_op", "(", "self", ")", ":", "tasklogger", ".", "log_start", "(", "\"landmark operator\"", ")", "is_sparse", "=", "sparse", ".", "issparse", "(", "self", ".", "kernel", ")", "# spectral clustering", "tasklogger", ".", "log_start", "(", "\"SVD\"", ")", "_", ",", "_", ",", "VT", "=", "randomized_svd", "(", "self", ".", "diff_aff", ",", "n_components", "=", "self", ".", "n_svd", ",", "random_state", "=", "self", ".", "random_state", ")", "tasklogger", ".", "log_complete", "(", "\"SVD\"", ")", "tasklogger", ".", "log_start", "(", "\"KMeans\"", ")", "kmeans", "=", "MiniBatchKMeans", "(", "self", ".", "n_landmark", ",", "init_size", "=", "3", "*", "self", ".", "n_landmark", ",", "batch_size", "=", "10000", ",", "random_state", "=", "self", ".", "random_state", ")", "self", ".", "_clusters", "=", "kmeans", ".", "fit_predict", "(", "self", ".", "diff_op", ".", "dot", "(", "VT", ".", "T", ")", ")", "# some clusters are not assigned", "tasklogger", ".", "log_complete", "(", "\"KMeans\"", ")", "# transition matrices", "pmn", "=", "self", ".", "_landmarks_to_data", "(", ")", "# row normalize", "pnm", "=", "pmn", ".", "transpose", "(", ")", "pmn", "=", "normalize", "(", "pmn", ",", "norm", "=", "'l1'", ",", "axis", "=", "1", ")", "pnm", "=", "normalize", "(", "pnm", ",", "norm", "=", "'l1'", ",", "axis", "=", "1", ")", "landmark_op", "=", "pmn", ".", "dot", "(", "pnm", ")", "# sparsity agnostic matrix multiplication", "if", "is_sparse", ":", "# no need to have a sparse landmark operator", "landmark_op", "=", "landmark_op", ".", "toarray", "(", ")", "# store output", "self", ".", "_landmark_op", "=", "landmark_op", "self", ".", "_transitions", "=", "pnm", "tasklogger", ".", "log_complete", "(", "\"landmark operator\"", ")" ]
Build the landmark operator Calculates spectral clusters on the kernel, and calculates transition probabilities between cluster centers by using transition probabilities between samples assigned to each cluster.
[ "Build", "the", "landmark", "operator" ]
python
train
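A toy numpy sketch of the transition-matrix step above: aggregate a kernel by cluster, row-normalize both directions, and multiply. The cluster-sum aggregation is a plausible stand-in for `_landmarks_to_data`, not the library's exact implementation, and all sizes are illustrative:

import numpy as np
from sklearn.preprocessing import normalize

rng = np.random.default_rng(0)
K = rng.random((8, 8)); K = K + K.T            # toy symmetric kernel
clusters = np.array([0, 0, 1, 1, 1, 2, 2, 2])  # 8 points assigned to 3 landmarks

# landmark -> data affinities: sum the kernel rows within each cluster
pmn = np.vstack([K[clusters == c].sum(axis=0) for c in range(3)])
pnm = normalize(pmn.T, norm='l1', axis=1)      # data -> landmark transitions
pmn = normalize(pmn, norm='l1', axis=1)        # landmark -> data transitions
landmark_op = pmn @ pnm                        # 3 x 3 landmark operator
assert np.allclose(landmark_op.sum(axis=1), 1.0)  # rows stay stochastic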
rtfd/recommonmark
recommonmark/transform.py
https://github.com/rtfd/recommonmark/blob/815d75ea503f30af26ab67c6820078f84875b1fc/recommonmark/transform.py#L262-L283
def find_replace(self, node):
        """Try to find a replacement node for the current node.

        Parameters
        ----------
        node : docutil node
            Node to find replacement for.

        Returns
        -------
        nodes : node or list of node
            The replacement nodes of current node.
            Returns None if no replacement can be found.
        """
        newnode = None
        if isinstance(node, nodes.Sequential):
            newnode = self.auto_toc_tree(node)
        elif isinstance(node, nodes.literal_block):
            newnode = self.auto_code_block(node)
        elif isinstance(node, nodes.literal):
            newnode = self.auto_inline_code(node)
        return newnode
[ "def", "find_replace", "(", "self", ",", "node", ")", ":", "newnode", "=", "None", "if", "isinstance", "(", "node", ",", "nodes", ".", "Sequential", ")", ":", "newnode", "=", "self", ".", "auto_toc_tree", "(", "node", ")", "elif", "isinstance", "(", "node", ",", "nodes", ".", "literal_block", ")", ":", "newnode", "=", "self", ".", "auto_code_block", "(", "node", ")", "elif", "isinstance", "(", "node", ",", "nodes", ".", "literal", ")", ":", "newnode", "=", "self", ".", "auto_inline_code", "(", "node", ")", "return", "newnode" ]
Try to find a replacement node for the current node. Parameters ---------- node : docutil node Node to find replacement for. Returns ------- nodes : node or list of node The replacement nodes of current node. Returns None if no replacement can be found.
[ "Try", "to", "find", "replace", "node", "for", "current", "node", "." ]
python
train
LogicalDash/LiSE
ELiDE/ELiDE/game.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/game.py#L189-L199
def wait_command(self, start_func, turns=1, end_func=None): """Call ``start_func``, and wait to call ``end_func`` after simulating ``turns`` (default 1) :param start_func: function to call before waiting :param turns: number of turns to wait :param end_func: function to call after waiting :return: ``None`` """ start_func() self.wait_turns(turns, cb=end_func)
[ "def", "wait_command", "(", "self", ",", "start_func", ",", "turns", "=", "1", ",", "end_func", "=", "None", ")", ":", "start_func", "(", ")", "self", ".", "wait_turns", "(", "turns", ",", "cb", "=", "end_func", ")" ]
Call ``start_func``, and wait to call ``end_func`` after simulating ``turns`` (default 1) :param start_func: function to call before waiting :param turns: number of turns to wait :param end_func: function to call after waiting :return: ``None``
[ "Call", "start_func", "and", "wait", "to", "call", "end_func", "after", "simulating", "turns", "(", "default", "1", ")" ]
python
train
mar10/wsgidav
wsgidav/server/server_cli.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/server/server_cli.py#L520-L603
def _run__cherrypy(app, config, mode): """Run WsgiDAV using cherrypy.wsgiserver if CherryPy is installed.""" assert mode == "cherrypy-wsgiserver" try: from cherrypy import wsgiserver from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter _logger.warning("WARNING: cherrypy.wsgiserver is deprecated.") _logger.warning( " Starting with CherryPy 9.0 the functionality from cherrypy.wsgiserver" ) _logger.warning(" was moved to the cheroot project.") _logger.warning(" Consider using --server=cheroot.") except ImportError: _logger.error("*" * 78) _logger.error("ERROR: Could not import cherrypy.wsgiserver.") _logger.error( "Try `pip install cherrypy` or specify another server using the --server option." ) _logger.error("Note that starting with CherryPy 9.0, the server was moved to") _logger.error( "the cheroot project, so it is recommended to use `-server=cheroot`" ) _logger.error("and run `pip install cheroot` instead.") _logger.error("*" * 78) raise server_name = "WsgiDAV/{} {} Python/{}".format( __version__, wsgiserver.CherryPyWSGIServer.version, util.PYTHON_VERSION ) wsgiserver.CherryPyWSGIServer.version = server_name # Support SSL ssl_certificate = _get_checked_path(config.get("ssl_certificate"), config) ssl_private_key = _get_checked_path(config.get("ssl_private_key"), config) ssl_certificate_chain = _get_checked_path( config.get("ssl_certificate_chain"), config ) protocol = "http" if ssl_certificate: assert ssl_private_key wsgiserver.CherryPyWSGIServer.ssl_adapter = BuiltinSSLAdapter( ssl_certificate, ssl_private_key, ssl_certificate_chain ) protocol = "https" _logger.info("SSL / HTTPS enabled.") _logger.info("Running {}".format(server_name)) _logger.info( "Serving on {}://{}:{} ...".format(protocol, config["host"], config["port"]) ) server_args = { "bind_addr": (config["host"], config["port"]), "wsgi_app": app, "server_name": server_name, } # Override or add custom args server_args.update(config.get("server_args", {})) server = wsgiserver.CherryPyWSGIServer(**server_args) # If the caller passed a startup event, monkey patch the server to set it # when the request handler loop is entered startup_event = config.get("startup_event") if startup_event: def _patched_tick(): server.tick = org_tick # undo the monkey patch org_tick() _logger.info("CherryPyWSGIServer is ready") startup_event.set() org_tick = server.tick server.tick = _patched_tick try: server.start() except KeyboardInterrupt: _logger.warning("Caught Ctrl-C, shutting down...") finally: server.stop() return
[ "def", "_run__cherrypy", "(", "app", ",", "config", ",", "mode", ")", ":", "assert", "mode", "==", "\"cherrypy-wsgiserver\"", "try", ":", "from", "cherrypy", "import", "wsgiserver", "from", "cherrypy", ".", "wsgiserver", ".", "ssl_builtin", "import", "BuiltinSSLAdapter", "_logger", ".", "warning", "(", "\"WARNING: cherrypy.wsgiserver is deprecated.\"", ")", "_logger", ".", "warning", "(", "\" Starting with CherryPy 9.0 the functionality from cherrypy.wsgiserver\"", ")", "_logger", ".", "warning", "(", "\" was moved to the cheroot project.\"", ")", "_logger", ".", "warning", "(", "\" Consider using --server=cheroot.\"", ")", "except", "ImportError", ":", "_logger", ".", "error", "(", "\"*\"", "*", "78", ")", "_logger", ".", "error", "(", "\"ERROR: Could not import cherrypy.wsgiserver.\"", ")", "_logger", ".", "error", "(", "\"Try `pip install cherrypy` or specify another server using the --server option.\"", ")", "_logger", ".", "error", "(", "\"Note that starting with CherryPy 9.0, the server was moved to\"", ")", "_logger", ".", "error", "(", "\"the cheroot project, so it is recommended to use `-server=cheroot`\"", ")", "_logger", ".", "error", "(", "\"and run `pip install cheroot` instead.\"", ")", "_logger", ".", "error", "(", "\"*\"", "*", "78", ")", "raise", "server_name", "=", "\"WsgiDAV/{} {} Python/{}\"", ".", "format", "(", "__version__", ",", "wsgiserver", ".", "CherryPyWSGIServer", ".", "version", ",", "util", ".", "PYTHON_VERSION", ")", "wsgiserver", ".", "CherryPyWSGIServer", ".", "version", "=", "server_name", "# Support SSL", "ssl_certificate", "=", "_get_checked_path", "(", "config", ".", "get", "(", "\"ssl_certificate\"", ")", ",", "config", ")", "ssl_private_key", "=", "_get_checked_path", "(", "config", ".", "get", "(", "\"ssl_private_key\"", ")", ",", "config", ")", "ssl_certificate_chain", "=", "_get_checked_path", "(", "config", ".", "get", "(", "\"ssl_certificate_chain\"", ")", ",", "config", ")", "protocol", "=", "\"http\"", "if", "ssl_certificate", ":", "assert", "ssl_private_key", "wsgiserver", ".", "CherryPyWSGIServer", ".", "ssl_adapter", "=", "BuiltinSSLAdapter", "(", "ssl_certificate", ",", "ssl_private_key", ",", "ssl_certificate_chain", ")", "protocol", "=", "\"https\"", "_logger", ".", "info", "(", "\"SSL / HTTPS enabled.\"", ")", "_logger", ".", "info", "(", "\"Running {}\"", ".", "format", "(", "server_name", ")", ")", "_logger", ".", "info", "(", "\"Serving on {}://{}:{} ...\"", ".", "format", "(", "protocol", ",", "config", "[", "\"host\"", "]", ",", "config", "[", "\"port\"", "]", ")", ")", "server_args", "=", "{", "\"bind_addr\"", ":", "(", "config", "[", "\"host\"", "]", ",", "config", "[", "\"port\"", "]", ")", ",", "\"wsgi_app\"", ":", "app", ",", "\"server_name\"", ":", "server_name", ",", "}", "# Override or add custom args", "server_args", ".", "update", "(", "config", ".", "get", "(", "\"server_args\"", ",", "{", "}", ")", ")", "server", "=", "wsgiserver", ".", "CherryPyWSGIServer", "(", "*", "*", "server_args", ")", "# If the caller passed a startup event, monkey patch the server to set it", "# when the request handler loop is entered", "startup_event", "=", "config", ".", "get", "(", "\"startup_event\"", ")", "if", "startup_event", ":", "def", "_patched_tick", "(", ")", ":", "server", ".", "tick", "=", "org_tick", "# undo the monkey patch", "org_tick", "(", ")", "_logger", ".", "info", "(", "\"CherryPyWSGIServer is ready\"", ")", "startup_event", ".", "set", "(", ")", "org_tick", "=", "server", ".", "tick", "server", ".", "tick", "=", 
"_patched_tick", "try", ":", "server", ".", "start", "(", ")", "except", "KeyboardInterrupt", ":", "_logger", ".", "warning", "(", "\"Caught Ctrl-C, shutting down...\"", ")", "finally", ":", "server", ".", "stop", "(", ")", "return" ]
Run WsgiDAV using cherrypy.wsgiserver if CherryPy is installed.
[ "Run", "WsgiDAV", "using", "cherrypy", ".", "wsgiserver", "if", "CherryPy", "is", "installed", "." ]
python
valid
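The `_patched_tick` trick above is a small self-restoring monkey patch: the first loop iteration fires the event and puts the original method back. Reduced to its core with stand-in names:

import threading

class FakeServer:
    def tick(self):
        pass  # stands in for one iteration of the serving loop

server = FakeServer()
startup_event = threading.Event()
org_tick = server.tick               # bound original method

def _patched_tick():
    server.tick = org_tick           # undo the patch on the first call
    org_tick()
    startup_event.set()              # "the serving loop is running"

server.tick = _patched_tick
server.tick()                        # first iteration fires the event once
assert startup_event.is_set() and server.tick is org_tick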
davidchua/pymessenger
pymessenger/bot.py
https://github.com/davidchua/pymessenger/blob/c3aedb65b7a50e0ec82c0df39a566fceec734c85/pymessenger/bot.py#L177-L187
def send_image(self, recipient_id, image_path, notification_type=NotificationType.regular): """Send an image to the specified recipient. Image must be PNG or JPEG or GIF (more might be supported). https://developers.facebook.com/docs/messenger-platform/send-api-reference/image-attachment Input: recipient_id: recipient id to send to image_path: path to image to be sent Output: Response from API as <dict> """ return self.send_attachment(recipient_id, "image", image_path, notification_type)
[ "def", "send_image", "(", "self", ",", "recipient_id", ",", "image_path", ",", "notification_type", "=", "NotificationType", ".", "regular", ")", ":", "return", "self", ".", "send_attachment", "(", "recipient_id", ",", "\"image\"", ",", "image_path", ",", "notification_type", ")" ]
Send an image to the specified recipient. Image must be PNG or JPEG or GIF (more might be supported). https://developers.facebook.com/docs/messenger-platform/send-api-reference/image-attachment Input: recipient_id: recipient id to send to image_path: path to image to be sent Output: Response from API as <dict>
[ "Send", "an", "image", "to", "the", "specified", "recipient", ".", "Image", "must", "be", "PNG", "or", "JPEG", "or", "GIF", "(", "more", "might", "be", "supported", ")", ".", "https", ":", "//", "developers", ".", "facebook", ".", "com", "/", "docs", "/", "messenger", "-", "platform", "/", "send", "-", "api", "-", "reference", "/", "image", "-", "attachment", "Input", ":", "recipient_id", ":", "recipient", "id", "to", "send", "to", "image_path", ":", "path", "to", "image", "to", "be", "sent", "Output", ":", "Response", "from", "API", "as", "<dict", ">" ]
python
train
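Hypothetical usage of the helper above; the page token, recipient id, and file path are placeholders:

from pymessenger.bot import Bot

bot = Bot('<PAGE_ACCESS_TOKEN>')
# Delegates to send_attachment(recipient_id, "image", image_path, ...)
response = bot.send_image('<RECIPIENT_ID>', 'cat.png')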
a1ezzz/wasp-general
wasp_general/signals/signals.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/signals/signals.py#L299-L304
def stop_proxying(self, signal_source, *signal_names, weak_ref=False): """ :meth:`.WSignalProxyProto.stop_proxying` implementation """ callback = self.__callback if weak_ref is False else self.__weak_ref_callback for signal_name in signal_names: signal_source.remove_callback(signal_name, callback)
[ "def", "stop_proxying", "(", "self", ",", "signal_source", ",", "*", "signal_names", ",", "weak_ref", "=", "False", ")", ":", "callback", "=", "self", ".", "__callback", "if", "weak_ref", "is", "False", "else", "self", ".", "__weak_ref_callback", "for", "signal_name", "in", "signal_names", ":", "signal_source", ".", "remove_callback", "(", "signal_name", ",", "callback", ")" ]
:meth:`.WSignalProxyProto.stop_proxying` implementation
[ ":", "meth", ":", ".", "WSignalProxyProto", ".", "stop_proxying", "implementation" ]
python
train
has2k1/plotnine
plotnine/utils.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/utils.py#L398-L424
def defaults(d1, d2): """ Update a copy of d1 with the contents of d2 that are not in d1. d1 and d2 are dictionary like objects. Parameters ---------- d1 : dict | dataframe dict with the preferred values d2 : dict | dataframe dict with the default values Returns ------- out : dict | dataframe Result of adding default values type of d1 """ d1 = d1.copy() tolist = isinstance(d2, pd.DataFrame) keys = (k for k in d2 if k not in d1) for k in keys: if tolist: d1[k] = d2[k].tolist() else: d1[k] = d2[k] return d1
[ "def", "defaults", "(", "d1", ",", "d2", ")", ":", "d1", "=", "d1", ".", "copy", "(", ")", "tolist", "=", "isinstance", "(", "d2", ",", "pd", ".", "DataFrame", ")", "keys", "=", "(", "k", "for", "k", "in", "d2", "if", "k", "not", "in", "d1", ")", "for", "k", "in", "keys", ":", "if", "tolist", ":", "d1", "[", "k", "]", "=", "d2", "[", "k", "]", ".", "tolist", "(", ")", "else", ":", "d1", "[", "k", "]", "=", "d2", "[", "k", "]", "return", "d1" ]
Update a copy of d1 with the contents of d2 that are not in d1. d1 and d2 are dictionary like objects. Parameters ---------- d1 : dict | dataframe dict with the preferred values d2 : dict | dataframe dict with the default values Returns ------- out : dict | dataframe Result of adding default values type of d1
[ "Update", "a", "copy", "of", "d1", "with", "the", "contents", "of", "d2", "that", "are", "not", "in", "d1", ".", "d1", "and", "d2", "are", "dictionary", "like", "objects", "." ]
python
train
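A quick dict-only check of the merge rule above (keys already in `d1` win, missing keys are filled from `d2`); the function is restated so the snippet runs on its own:

import pandas as pd  # only exercised when d2 is a DataFrame

def defaults(d1, d2):
    d1 = d1.copy()
    tolist = isinstance(d2, pd.DataFrame)
    for k in (k for k in d2 if k not in d1):
        d1[k] = d2[k].tolist() if tolist else d2[k]
    return d1

preferred = {'color': 'red', 'size': 10}
fallback = {'color': 'blue', 'alpha': 0.5}
assert defaults(preferred, fallback) == {'color': 'red', 'size': 10, 'alpha': 0.5}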
brocade/pynos
pynos/versions/base/yang/tailf_netconf_transactions.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/tailf_netconf_transactions.py#L42-L55
def start_transaction_input_target_target_candidate_candidate(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") start_transaction = ET.Element("start_transaction") config = start_transaction input = ET.SubElement(start_transaction, "input") target = ET.SubElement(input, "target") target = ET.SubElement(target, "target") candidate = ET.SubElement(target, "candidate") candidate = ET.SubElement(candidate, "candidate") callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "start_transaction_input_target_target_candidate_candidate", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "start_transaction", "=", "ET", ".", "Element", "(", "\"start_transaction\"", ")", "config", "=", "start_transaction", "input", "=", "ET", ".", "SubElement", "(", "start_transaction", ",", "\"input\"", ")", "target", "=", "ET", ".", "SubElement", "(", "input", ",", "\"target\"", ")", "target", "=", "ET", ".", "SubElement", "(", "target", ",", "\"target\"", ")", "candidate", "=", "ET", ".", "SubElement", "(", "target", ",", "\"candidate\"", ")", "candidate", "=", "ET", ".", "SubElement", "(", "candidate", ",", "\"candidate\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
google/transitfeed
transitfeed/shapelib.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/shapelib.py#L413-L423
def FindMatchingPolys(self, start_point, end_point, max_radius=150): """ Returns a list of polylines in the collection that have endpoints within max_radius of the given start and end points. """ matches = [] for shape in self._name_to_shape.itervalues(): if start_point.GetDistanceMeters(shape.GetPoint(0)) < max_radius and \ end_point.GetDistanceMeters(shape.GetPoint(-1)) < max_radius: matches.append(shape) return matches
[ "def", "FindMatchingPolys", "(", "self", ",", "start_point", ",", "end_point", ",", "max_radius", "=", "150", ")", ":", "matches", "=", "[", "]", "for", "shape", "in", "self", ".", "_name_to_shape", ".", "itervalues", "(", ")", ":", "if", "start_point", ".", "GetDistanceMeters", "(", "shape", ".", "GetPoint", "(", "0", ")", ")", "<", "max_radius", "and", "end_point", ".", "GetDistanceMeters", "(", "shape", ".", "GetPoint", "(", "-", "1", ")", ")", "<", "max_radius", ":", "matches", ".", "append", "(", "shape", ")", "return", "matches" ]
Returns a list of polylines in the collection that have endpoints within max_radius of the given start and end points.
[ "Returns", "a", "list", "of", "polylines", "in", "the", "collection", "that", "have", "endpoints", "within", "max_radius", "of", "the", "given", "start", "and", "end", "points", "." ]
python
train
mdickinson/bigfloat
bigfloat/core.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L1805-L1815
def cosh(x, context=None): """ Return the hyperbolic cosine of x. """ return _apply_function_in_current_context( BigFloat, mpfr.mpfr_cosh, (BigFloat._implicit_convert(x),), context, )
[ "def", "cosh", "(", "x", ",", "context", "=", "None", ")", ":", "return", "_apply_function_in_current_context", "(", "BigFloat", ",", "mpfr", ".", "mpfr_cosh", ",", "(", "BigFloat", ".", "_implicit_convert", "(", "x", ")", ",", ")", ",", "context", ",", ")" ]
Return the hyperbolic cosine of x.
[ "Return", "the", "hyperbolic", "cosine", "of", "x", "." ]
python
train
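Hypothetical usage of the wrapper above; `precision` is bigfloat's context manager for the working precision and is assumed to be importable alongside `cosh`:

from bigfloat import cosh, precision

with precision(100):
    print(cosh(1))  # hyperbolic cosine of 1, computed at 100-bit precision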
geophysics-ubonn/reda
lib/reda/utils/filter_config_types.py
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/utils/filter_config_types.py#L100-L155
def _filter_dipole_dipole(configs): """Filter dipole-dipole configurations A dipole-dipole configuration is defined using the following criteria: * equal distance between the two current electrodes and between the two voltage electrodes * no overlap of dipoles Parameters ---------- configs: numpy.ndarray Nx4 array with N measurement configurations Returns ------- configs: numpy.ndarray Remaining configurations, all dipole-dipole configurations are set to numpy.nan dd_indices: numpy.ndarray indices of dipole-dipole configurations """ # check that dipoles have equal size dist_ab = np.abs(configs[:, 0] - configs[:, 1]) dist_mn = np.abs(configs[:, 2] - configs[:, 3]) distances_equal = (dist_ab == dist_mn) # check that they are not overlapping not_overlapping = ( # either a,b < m,n ( (configs[:, 0] < configs[:, 2]) & (configs[:, 1] < configs[:, 2]) & (configs[:, 0] < configs[:, 3]) & (configs[:, 1] < configs[:, 3]) ) | # or m,n < a,b ( (configs[:, 2] < configs[:, 0]) & (configs[:, 3] < configs[:, 0]) & (configs[:, 2] < configs[:, 1]) & (configs[:, 3] < configs[:, 1]) ) ) is_dipole_dipole = (distances_equal & not_overlapping) dd_indices = np.where(is_dipole_dipole)[0] dd_indices_sorted = _sort_dd_skips(configs[dd_indices, :], dd_indices) # set all dd configs to nan configs[dd_indices, :] = np.nan return configs, dd_indices_sorted
[ "def", "_filter_dipole_dipole", "(", "configs", ")", ":", "# check that dipoles have equal size", "dist_ab", "=", "np", ".", "abs", "(", "configs", "[", ":", ",", "0", "]", "-", "configs", "[", ":", ",", "1", "]", ")", "dist_mn", "=", "np", ".", "abs", "(", "configs", "[", ":", ",", "2", "]", "-", "configs", "[", ":", ",", "3", "]", ")", "distances_equal", "=", "(", "dist_ab", "==", "dist_mn", ")", "# check that they are not overlapping", "not_overlapping", "=", "(", "# either a,b < m,n", "(", "(", "configs", "[", ":", ",", "0", "]", "<", "configs", "[", ":", ",", "2", "]", ")", "&", "(", "configs", "[", ":", ",", "1", "]", "<", "configs", "[", ":", ",", "2", "]", ")", "&", "(", "configs", "[", ":", ",", "0", "]", "<", "configs", "[", ":", ",", "3", "]", ")", "&", "(", "configs", "[", ":", ",", "1", "]", "<", "configs", "[", ":", ",", "3", "]", ")", ")", "|", "# or m,n < a,b", "(", "(", "configs", "[", ":", ",", "2", "]", "<", "configs", "[", ":", ",", "0", "]", ")", "&", "(", "configs", "[", ":", ",", "3", "]", "<", "configs", "[", ":", ",", "0", "]", ")", "&", "(", "configs", "[", ":", ",", "2", "]", "<", "configs", "[", ":", ",", "1", "]", ")", "&", "(", "configs", "[", ":", ",", "3", "]", "<", "configs", "[", ":", ",", "1", "]", ")", ")", ")", "is_dipole_dipole", "=", "(", "distances_equal", "&", "not_overlapping", ")", "dd_indices", "=", "np", ".", "where", "(", "is_dipole_dipole", ")", "[", "0", "]", "dd_indices_sorted", "=", "_sort_dd_skips", "(", "configs", "[", "dd_indices", ",", ":", "]", ",", "dd_indices", ")", "# set all dd configs to nan", "configs", "[", "dd_indices", ",", ":", "]", "=", "np", ".", "nan", "return", "configs", ",", "dd_indices_sorted" ]
Filter dipole-dipole configurations A dipole-dipole configuration is defined using the following criteria: * equal distance between the two current electrodes and between the two voltage electrodes * no overlap of dipoles Parameters ---------- configs: numpy.ndarray Nx4 array with N measurement configurations Returns ------- configs: numpy.ndarray Remaining configurations, all dipole-dipole configurations are set to numpy.nan dd_indices: numpy.ndarray indices of dipole-dipole configurations
[ "Filter", "dipole", "-", "dipole", "configurations" ]
python
train
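A tiny vectorized check of the two criteria above. The overlap test here is written in a compact max/min form that is equivalent to the four-clause conjunction in the record:

import numpy as np

configs = np.array([
    [1, 2, 3, 4],   # equal dipole lengths, disjoint pairs -> kept
    [1, 3, 2, 4],   # voltage dipole overlaps the current dipole -> rejected
    [1, 2, 3, 5],   # unequal dipole lengths -> rejected
], dtype=float)

dist_ab = np.abs(configs[:, 0] - configs[:, 1])
dist_mn = np.abs(configs[:, 2] - configs[:, 3])
ab_lo, ab_hi = configs[:, :2].min(axis=1), configs[:, :2].max(axis=1)
mn_lo, mn_hi = configs[:, 2:].min(axis=1), configs[:, 2:].max(axis=1)
not_overlapping = (ab_hi < mn_lo) | (mn_hi < ab_lo)
print(np.where((dist_ab == dist_mn) & not_overlapping)[0])  # -> [0]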
OCR-D/core
ocrd/ocrd/workspace.py
https://github.com/OCR-D/core/blob/57e68c578526cb955fd2e368207f5386c459d91d/ocrd/ocrd/workspace.py#L58-L71
def download_url(self, url, **kwargs): """ Download a URL to the workspace. Args: url (string): URL to download to directory **kwargs : See :py:mod:`ocrd.resolver.Resolver` Returns: The local filename of the downloaded file """ if self.baseurl and '://' not in url: url = join(self.baseurl, url) return self.resolver.download_to_directory(self.directory, url, **kwargs)
[ "def", "download_url", "(", "self", ",", "url", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "baseurl", "and", "'://'", "not", "in", "url", ":", "url", "=", "join", "(", "self", ".", "baseurl", ",", "url", ")", "return", "self", ".", "resolver", ".", "download_to_directory", "(", "self", ".", "directory", ",", "url", ",", "*", "*", "kwargs", ")" ]
Download a URL to the workspace. Args: url (string): URL to download to directory **kwargs : See :py:mod:`ocrd.resolver.Resolver` Returns: The local filename of the downloaded file
[ "Download", "a", "URL", "to", "the", "workspace", "." ]
python
train
xmikos/soapy_power
soapypower/power.py
https://github.com/xmikos/soapy_power/blob/46e12659b8d08af764dc09a1f31b0e85a68f808f/soapypower/power.py#L274-L330
def sweep(self, min_freq, max_freq, bins, repeats, runs=0, time_limit=0, overlap=0, fft_window='hann', fft_overlap=0.5, crop=False, log_scale=True, remove_dc=False, detrend=None, lnb_lo=0, tune_delay=0, reset_stream=False, base_buffer_size=0, max_buffer_size=0, max_threads=0, max_queue_size=0): """Sweep spectrum using frequency hopping""" self.setup( bins, repeats, base_buffer_size, max_buffer_size, fft_window=fft_window, fft_overlap=fft_overlap, crop_factor=overlap if crop else 0, log_scale=log_scale, remove_dc=remove_dc, detrend=detrend, lnb_lo=lnb_lo, tune_delay=tune_delay, reset_stream=reset_stream, max_threads=max_threads, max_queue_size=max_queue_size ) try: freq_list = self.freq_plan(min_freq - lnb_lo, max_freq - lnb_lo, bins, overlap) t_start = time.time() run = 0 while not _shutdown and (runs == 0 or run < runs): run += 1 t_run_start = time.time() logger.debug('Run: {}'.format(run)) for freq in freq_list: # Tune to new frequency, acquire samples and compute Power Spectral Density psd_future, acq_time_start, acq_time_stop = self.psd(freq) # Write PSD to stdout (in another thread) self._writer.write_async(psd_future, acq_time_start, acq_time_stop, len(self._buffer) * self._buffer_repeats) if _shutdown: break # Write end of measurement marker (in another thread) write_next_future = self._writer.write_next_async() t_run = time.time() logger.debug(' Total run time: {:.3f} s'.format(t_run - t_run_start)) # End measurement if time limit is exceeded if time_limit and (time.time() - t_start) >= time_limit: logger.info('Time limit of {} s exceeded, completed {} runs'.format(time_limit, run)) break # Wait for last write to be finished write_next_future.result() # Debug thread pool queues logging.debug('Number of USB buffer overflow errors: {}'.format(self.device.buffer_overflow_count)) logging.debug('PSD worker threads: {}'.format(self._psd._executor._max_workers)) logging.debug('Max. PSD queue size: {} / {}'.format(self._psd._executor.max_queue_size_reached, self._psd._executor.max_queue_size)) logging.debug('Writer worker threads: {}'.format(self._writer._executor._max_workers)) logging.debug('Max. Writer queue size: {} / {}'.format(self._writer._executor.max_queue_size_reached, self._writer._executor.max_queue_size)) finally: # Shutdown SDR self.stop() t_stop = time.time() logger.info('Total time: {:.3f} s'.format(t_stop - t_start))
[ "def", "sweep", "(", "self", ",", "min_freq", ",", "max_freq", ",", "bins", ",", "repeats", ",", "runs", "=", "0", ",", "time_limit", "=", "0", ",", "overlap", "=", "0", ",", "fft_window", "=", "'hann'", ",", "fft_overlap", "=", "0.5", ",", "crop", "=", "False", ",", "log_scale", "=", "True", ",", "remove_dc", "=", "False", ",", "detrend", "=", "None", ",", "lnb_lo", "=", "0", ",", "tune_delay", "=", "0", ",", "reset_stream", "=", "False", ",", "base_buffer_size", "=", "0", ",", "max_buffer_size", "=", "0", ",", "max_threads", "=", "0", ",", "max_queue_size", "=", "0", ")", ":", "self", ".", "setup", "(", "bins", ",", "repeats", ",", "base_buffer_size", ",", "max_buffer_size", ",", "fft_window", "=", "fft_window", ",", "fft_overlap", "=", "fft_overlap", ",", "crop_factor", "=", "overlap", "if", "crop", "else", "0", ",", "log_scale", "=", "log_scale", ",", "remove_dc", "=", "remove_dc", ",", "detrend", "=", "detrend", ",", "lnb_lo", "=", "lnb_lo", ",", "tune_delay", "=", "tune_delay", ",", "reset_stream", "=", "reset_stream", ",", "max_threads", "=", "max_threads", ",", "max_queue_size", "=", "max_queue_size", ")", "try", ":", "freq_list", "=", "self", ".", "freq_plan", "(", "min_freq", "-", "lnb_lo", ",", "max_freq", "-", "lnb_lo", ",", "bins", ",", "overlap", ")", "t_start", "=", "time", ".", "time", "(", ")", "run", "=", "0", "while", "not", "_shutdown", "and", "(", "runs", "==", "0", "or", "run", "<", "runs", ")", ":", "run", "+=", "1", "t_run_start", "=", "time", ".", "time", "(", ")", "logger", ".", "debug", "(", "'Run: {}'", ".", "format", "(", "run", ")", ")", "for", "freq", "in", "freq_list", ":", "# Tune to new frequency, acquire samples and compute Power Spectral Density", "psd_future", ",", "acq_time_start", ",", "acq_time_stop", "=", "self", ".", "psd", "(", "freq", ")", "# Write PSD to stdout (in another thread)", "self", ".", "_writer", ".", "write_async", "(", "psd_future", ",", "acq_time_start", ",", "acq_time_stop", ",", "len", "(", "self", ".", "_buffer", ")", "*", "self", ".", "_buffer_repeats", ")", "if", "_shutdown", ":", "break", "# Write end of measurement marker (in another thread)", "write_next_future", "=", "self", ".", "_writer", ".", "write_next_async", "(", ")", "t_run", "=", "time", ".", "time", "(", ")", "logger", ".", "debug", "(", "' Total run time: {:.3f} s'", ".", "format", "(", "t_run", "-", "t_run_start", ")", ")", "# End measurement if time limit is exceeded", "if", "time_limit", "and", "(", "time", ".", "time", "(", ")", "-", "t_start", ")", ">=", "time_limit", ":", "logger", ".", "info", "(", "'Time limit of {} s exceeded, completed {} runs'", ".", "format", "(", "time_limit", ",", "run", ")", ")", "break", "# Wait for last write to be finished", "write_next_future", ".", "result", "(", ")", "# Debug thread pool queues", "logging", ".", "debug", "(", "'Number of USB buffer overflow errors: {}'", ".", "format", "(", "self", ".", "device", ".", "buffer_overflow_count", ")", ")", "logging", ".", "debug", "(", "'PSD worker threads: {}'", ".", "format", "(", "self", ".", "_psd", ".", "_executor", ".", "_max_workers", ")", ")", "logging", ".", "debug", "(", "'Max. PSD queue size: {} / {}'", ".", "format", "(", "self", ".", "_psd", ".", "_executor", ".", "max_queue_size_reached", ",", "self", ".", "_psd", ".", "_executor", ".", "max_queue_size", ")", ")", "logging", ".", "debug", "(", "'Writer worker threads: {}'", ".", "format", "(", "self", ".", "_writer", ".", "_executor", ".", "_max_workers", ")", ")", "logging", ".", "debug", "(", "'Max. 
Writer queue size: {} / {}'", ".", "format", "(", "self", ".", "_writer", ".", "_executor", ".", "max_queue_size_reached", ",", "self", ".", "_writer", ".", "_executor", ".", "max_queue_size", ")", ")", "finally", ":", "# Shutdown SDR", "self", ".", "stop", "(", ")", "t_stop", "=", "time", ".", "time", "(", ")", "logger", ".", "info", "(", "'Total time: {:.3f} s'", ".", "format", "(", "t_stop", "-", "t_start", ")", ")" ]
Sweep spectrum using frequency hopping
[ "Sweep", "spectrum", "using", "frequency", "hopping" ]
python
test
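A simplified stand-in for the `freq_plan` step above (the real method also accounts for bins and cropping): hop center frequencies spaced so the sweep covers [min_freq, max_freq] with a chosen overlap fraction. The names and the exact spacing rule are illustrative assumptions:

def freq_plan(min_freq, max_freq, sample_rate, overlap=0.0):
    """Centers of successive hops; the step shrinks as overlap grows."""
    step = sample_rate * (1 - overlap)
    freqs, f = [], min_freq + step / 2
    while f - step / 2 < max_freq:
        freqs.append(f)
        f += step
    return freqs

print(freq_plan(100e6, 110e6, 2.56e6))  # hop centers covering 100-110 MHz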
ucbvislab/radiotool
radiotool/composer/composition.py
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/composition.py#L175-L203
def extended_fade_in(self, segment, duration):
        """Add a fade-in to a segment that extends the beginning of the
        segment.

        :param segment: Segment to fade in
        :type segment: :py:class:`radiotool.composer.Segment`
        :param duration: Duration of fade-in (in seconds)
        :returns: The fade that has been added to the composition
        :rtype: :py:class:`Fade`

        """
        dur = int(duration * segment.track.samplerate)
        if segment.start - dur >= 0:
            segment.start -= dur
        else:
            raise Exception(
                "Cannot create fade-in that extends "
                "past the track's beginning")
        if segment.comp_location - dur >= 0:
            segment.comp_location -= dur
        else:
            raise Exception(
                "Cannot create fade-in that extends past the score's beginning")

        segment.duration += dur

        f = Fade(segment.track, segment.comp_location_in_seconds,
                 duration, 0.0, 1.0)
        self.add_dynamic(f)
        return f
[ "def", "extended_fade_in", "(", "self", ",", "segment", ",", "duration", ")", ":", "dur", "=", "int", "(", "duration", "*", "segment", ".", "track", ".", "samplerate", ")", "if", "segment", ".", "start", "-", "dur", ">=", "0", ":", "segment", ".", "start", "-=", "dur", "else", ":", "raise", "Exception", "(", "\"Cannot create fade-in that extends \"", "\"past the track's beginning\"", ")", "if", "segment", ".", "comp_location", "-", "dur", ">=", "0", ":", "segment", ".", "comp_location", "-=", "dur", "else", ":", "raise", "Exception", "(", "\"Cannot create fade-in the extends past the score's beginning\"", ")", "segment", ".", "duration", "+=", "dur", "f", "=", "Fade", "(", "segment", ".", "track", ",", "segment", ".", "comp_location_in_seconds", ",", "duration", ",", "0.0", ",", "1.0", ")", "self", ".", "add_dynamic", "(", "f", ")", "return", "f" ]
Add a fade-in to a segment that extends the beginning of the segment. :param segment: Segment to fade in :type segment: :py:class:`radiotool.composer.Segment` :param duration: Duration of fade-in (in seconds) :returns: The fade that has been added to the composition :rtype: :py:class:`Fade`
[ "Add", "a", "fade", "-", "in", "to", "a", "segment", "that", "extends", "the", "beginning", "of", "the", "segment", "." ]
python
train
SuperCowPowers/workbench
workbench_apps/workbench_cli/workbench_shell.py
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench_apps/workbench_cli/workbench_shell.py#L276-L296
def _connect(self, server_info): """Connect to the workbench server""" # First we do a temp connect with a short heartbeat _tmp_connect = zerorpc.Client(timeout=300, heartbeat=2) _tmp_connect.connect('tcp://'+server_info['server']+':'+server_info['port']) try: _tmp_connect._zerorpc_name() _tmp_connect.close() del _tmp_connect except zerorpc.exceptions.LostRemote: print '%sError: Could not connect to Workbench Server at %s:%s%s' % \ (color.Red, server_info['server'], server_info['port'], color.Normal) sys.exit(1) # Okay do the real connection if self.workbench: self.workbench.close() self.workbench = zerorpc.Client(timeout=300, heartbeat=60) self.workbench.connect('tcp://'+server_info['server']+':'+server_info['port']) print '\n%s<<< Connected: %s:%s >>>%s' % (color.Green, server_info['server'], server_info['port'], color.Normal)
[ "def", "_connect", "(", "self", ",", "server_info", ")", ":", "# First we do a temp connect with a short heartbeat", "_tmp_connect", "=", "zerorpc", ".", "Client", "(", "timeout", "=", "300", ",", "heartbeat", "=", "2", ")", "_tmp_connect", ".", "connect", "(", "'tcp://'", "+", "server_info", "[", "'server'", "]", "+", "':'", "+", "server_info", "[", "'port'", "]", ")", "try", ":", "_tmp_connect", ".", "_zerorpc_name", "(", ")", "_tmp_connect", ".", "close", "(", ")", "del", "_tmp_connect", "except", "zerorpc", ".", "exceptions", ".", "LostRemote", ":", "print", "'%sError: Could not connect to Workbench Server at %s:%s%s'", "%", "(", "color", ".", "Red", ",", "server_info", "[", "'server'", "]", ",", "server_info", "[", "'port'", "]", ",", "color", ".", "Normal", ")", "sys", ".", "exit", "(", "1", ")", "# Okay do the real connection", "if", "self", ".", "workbench", ":", "self", ".", "workbench", ".", "close", "(", ")", "self", ".", "workbench", "=", "zerorpc", ".", "Client", "(", "timeout", "=", "300", ",", "heartbeat", "=", "60", ")", "self", ".", "workbench", ".", "connect", "(", "'tcp://'", "+", "server_info", "[", "'server'", "]", "+", "':'", "+", "server_info", "[", "'port'", "]", ")", "print", "'\\n%s<<< Connected: %s:%s >>>%s'", "%", "(", "color", ".", "Green", ",", "server_info", "[", "'server'", "]", ",", "server_info", "[", "'port'", "]", ",", "color", ".", "Normal", ")" ]
Connect to the workbench server
[ "Connect", "to", "the", "workbench", "server" ]
python
train
SmartTeleMax/iktomi
iktomi/auth.py
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/auth.py#L104-L126
def login(self, template='login'):
        '''
        This property will return a component which will handle login requests.

            auth.login(template='login.html')
        '''
        def _login(env, data):
            form = self._login_form(env)
            next = env.request.GET.get('next', '/')
            login_failed = False
            if env.request.method == 'POST':
                if form.accept(env.request.POST):
                    user_identity = self.get_user_identity(
                        env, **form.python_data)
                    if user_identity is not None:
                        response = HTTPSeeOther(location=next)
                        return self.login_identity(user_identity, response)
                login_failed = True
            data.form = form
            data.login_failed = login_failed
            data.login_url = env.root.login.as_url.qs_set(next=next)
            return env.template.render_to_response(template, data.as_dict())
        return web.match('/login', 'login') | _login
[ "def", "login", "(", "self", ",", "template", "=", "'login'", ")", ":", "def", "_login", "(", "env", ",", "data", ")", ":", "form", "=", "self", ".", "_login_form", "(", "env", ")", "next", "=", "env", ".", "request", ".", "GET", ".", "get", "(", "'next'", ",", "'/'", ")", "login_failed", "=", "False", "if", "env", ".", "request", ".", "method", "==", "'POST'", ":", "if", "form", ".", "accept", "(", "env", ".", "request", ".", "POST", ")", ":", "user_identity", "=", "self", ".", "get_user_identity", "(", "env", ",", "*", "*", "form", ".", "python_data", ")", "if", "user_identity", "is", "not", "None", ":", "response", "=", "HTTPSeeOther", "(", "location", "=", "next", ")", "return", "self", ".", "login_identity", "(", "user_identity", ",", "response", ")", "login_failed", "=", "True", "data", ".", "form", "=", "form", "data", ".", "login_failed", "=", "login_failed", "data", ".", "login_url", "=", "env", ".", "root", ".", "login", ".", "as_url", ".", "qs_set", "(", "next", "=", "next", ")", "return", "env", ".", "template", ".", "render_to_response", "(", "template", ",", "data", ".", "as_dict", "(", ")", ")", "return", "web", ".", "match", "(", "'/login'", ",", "'login'", ")", "|", "_login" ]
This property will return a component which will handle login requests. auth.login(template='login.html')
[ "This", "property", "will", "return", "component", "which", "will", "handle", "login", "requests", "." ]
python
train
google/grr
grr/server/grr_response_server/flows/general/filesystem.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flows/general/filesystem.py#L818-L833
def FindNode(self, component_path): """Find the node in the component_tree from component_path. Args: component_path: A list of components which reference a node in the component tree. This allows us to resume processing in the tree. Returns: A node in the component_tree. """ # Find the node that the component path is referring to. node = self.state.component_tree for component in component_path: node = node[component] return node
[ "def", "FindNode", "(", "self", ",", "component_path", ")", ":", "# Find the node that the component path is referring to.", "node", "=", "self", ".", "state", ".", "component_tree", "for", "component", "in", "component_path", ":", "node", "=", "node", "[", "component", "]", "return", "node" ]
Find the node in the component_tree from component_path. Args: component_path: A list of components which reference a node in the component tree. This allows us to resume processing in the tree. Returns: A node in the component_tree.
[ "Find", "the", "node", "in", "the", "component_tree", "from", "component_path", "." ]
python
train
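The traversal above is plain nested-mapping indexing; a standalone equivalent with a toy tree:

def find_node(tree, component_path):
    node = tree
    for component in component_path:
        node = node[component]  # KeyError if the path does not exist
    return node

tree = {'home': {'user': {'docs': 'leaf'}}}
assert find_node(tree, ['home', 'user']) == {'docs': 'leaf'}
assert find_node(tree, ['home', 'user', 'docs']) == 'leaf'
assert find_node(tree, []) is tree  # empty path returns the root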
heikomuller/sco-datastore
scodata/image.py
https://github.com/heikomuller/sco-datastore/blob/7180a6b51150667e47629da566aedaa742e39342/scodata/image.py#L410-L430
def get_directory(self, identifier):
        """Implements the policy for naming directories for image objects.
        Image object directories are named by their identifier. In addition,
        these directories are grouped in parent directories named by the
        first two characters of the identifier. The aim is to avoid having
        too many sub-folders in a single directory.

        Parameters
        ----------
        identifier : string
            Unique object identifier

        Returns
        -------
        string
            Path to image objects data directory
        """
        return os.path.join(
            os.path.join(self.directory, identifier[:2]),
            identifier
        )
[ "def", "get_directory", "(", "self", ",", "identifier", ")", ":", "return", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "join", "(", "self", ".", "directory", ",", "identifier", "[", ":", "2", "]", ")", ",", "identifier", ")" ]
Implements the policy for naming directories for image objects. Image object directories are named by their identifier. In addition, these directories are grouped in parent directories named by the first two characters of the identifier. The aim is to avoid having too many sub-folders in a single directory. Parameters ---------- identifier : string Unique object identifier Returns ------- string Path to image objects data directory
[ "Implements", "the", "policy", "for", "naming", "directories", "for", "image", "objects", ".", "Image", "object", "directories", "are", "name", "by", "their", "identifier", ".", "In", "addition", "these", "directories", "are", "grouped", "in", "parent", "directories", "named", "by", "the", "first", "two", "characters", "of", "the", "identifier", ".", "The", "aim", "is", "to", "avoid", "having", "too", "many", "sub", "-", "folders", "in", "a", "single", "directory", "." ]
python
train
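The two-character fan-out above, restated standalone (posixpath keeps the separator deterministic on any OS): identifier 'a1b2c3' under '/data/images' lands in '/data/images/a1/a1b2c3'.

import posixpath

def get_directory(base, identifier):
    # group by the first two characters, then the full identifier
    return posixpath.join(base, identifier[:2], identifier)

assert get_directory('/data/images', 'a1b2c3') == '/data/images/a1/a1b2c3'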
quantumlib/Cirq
dev_tools/auto_merge.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/dev_tools/auto_merge.py#L203-L220
def edit_comment(repo: GithubRepository, text: str, comment_id: int) -> None: """ References: https://developer.github.com/v3/issues/comments/#edit-a-comment """ url = ("https://api.github.com/repos/{}/{}/issues/comments/{}" "?access_token={}".format(repo.organization, repo.name, comment_id, repo.access_token)) data = { 'body': text } response = requests.patch(url, json=data) if response.status_code != 200: raise RuntimeError('Edit comment failed. Code: {}. Content: {}.'.format( response.status_code, response.content))
[ "def", "edit_comment", "(", "repo", ":", "GithubRepository", ",", "text", ":", "str", ",", "comment_id", ":", "int", ")", "->", "None", ":", "url", "=", "(", "\"https://api.github.com/repos/{}/{}/issues/comments/{}\"", "\"?access_token={}\"", ".", "format", "(", "repo", ".", "organization", ",", "repo", ".", "name", ",", "comment_id", ",", "repo", ".", "access_token", ")", ")", "data", "=", "{", "'body'", ":", "text", "}", "response", "=", "requests", ".", "patch", "(", "url", ",", "json", "=", "data", ")", "if", "response", ".", "status_code", "!=", "200", ":", "raise", "RuntimeError", "(", "'Edit comment failed. Code: {}. Content: {}.'", ".", "format", "(", "response", ".", "status_code", ",", "response", ".", "content", ")", ")" ]
References: https://developer.github.com/v3/issues/comments/#edit-a-comment
[ "References", ":", "https", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "issues", "/", "comments", "/", "#edit", "-", "a", "-", "comment" ]
python
train
Nic30/hwt
hwt/hdl/statements.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/hdl/statements.py#L564-L578
def isSameStatementList(stmListA: List[HdlStatement],
                        stmListB: List[HdlStatement]) -> bool:
    """
    :return: True if two lists of HdlStatement instances are the same
    """
    if stmListA is stmListB:
        return True
    if stmListA is None or stmListB is None:
        return False

    for a, b in zip(stmListA, stmListB):
        if not a.isSame(b):
            return False

    return True
[ "def", "isSameStatementList", "(", "stmListA", ":", "List", "[", "HdlStatement", "]", ",", "stmListB", ":", "List", "[", "HdlStatement", "]", ")", "->", "bool", ":", "if", "stmListA", "is", "stmListB", ":", "return", "True", "if", "stmListA", "is", "None", "or", "stmListB", "is", "None", ":", "return", "False", "for", "a", ",", "b", "in", "zip", "(", "stmListA", ",", "stmListB", ")", ":", "if", "not", "a", ".", "isSame", "(", "b", ")", ":", "return", "False", "return", "True" ]
:return: True if two lists of HdlStatement instances are the same
[ ":", "return", ":", "True", "if", "two", "lists", "of", "HdlStatement", "instances", "are", "same" ]
python
test
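One caveat worth knowing about the comparison above: `zip` stops at the shorter list, so a list would compare as "same" to any longer list that extends it. A variant with an explicit length guard (a sketch, not the library's code):

def is_same_statement_list(a, b):
    if a is b:
        return True
    if a is None or b is None:
        return False
    # guard the length explicitly; zip alone would accept a strict prefix
    return len(a) == len(b) and all(x.isSame(y) for x, y in zip(a, b))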
rdussurget/py-altimetry
altimetry/tools/nctools.py
https://github.com/rdussurget/py-altimetry/blob/57ce7f2d63c6bbc4993821af0bbe46929e3a2d98/altimetry/tools/nctools.py#L73-L83
def add(self,dimlist,dimvalues): ''' add dimensions :parameter dimlist: list of dimensions :parameter dimvalues: list of values for dimlist ''' for i,d in enumerate(dimlist): self[d] = dimvalues[i] self.set_ndims()
[ "def", "add", "(", "self", ",", "dimlist", ",", "dimvalues", ")", ":", "for", "i", ",", "d", "in", "enumerate", "(", "dimlist", ")", ":", "self", "[", "d", "]", "=", "dimvalues", "[", "i", "]", "self", ".", "set_ndims", "(", ")" ]
add dimensions :parameter dimlist: list of dimensions :parameter dimvalues: list of values for dimlist
[ "add", "dimensions", ":", "parameter", "dimlist", ":", "list", "of", "dimensions", ":", "parameter", "dimvalues", ":", "list", "of", "values", "for", "dimlist" ]
python
train
flypenguin/python-cattleprod
cattleprod/__init__.py
https://github.com/flypenguin/python-cattleprod/blob/05043c91de78d211968db65413d3db4fd44c89e4/cattleprod/__init__.py#L55-L76
def poke(url, accesskey=None, secretkey=None, __method__='GET', **req_args):
    """
    Poke the Rancher API. Returns a Rod object instance. Central starting
    point for the cattleprod package.
    :param url: The full Rancher URL to the API endpoint.
    :param accesskey: The rancher access key, optional.
    :param secretkey: The rancher secret key, optional.
    :param __method__: Internal method, don't use!
    :param req_args: Arguments which are passed directly to the requests
    API. The accesskey / secretkey values take precedence over simple auth
    objects defined here.
    :return: A Rod instance, or anything that the URL returns on a GET
    request
    """
    if accesskey and secretkey:
        req_args['auth'] = (accesskey, secretkey)
    tmp = requests.request(__method__.lower(), url, **req_args)
    tmp.raise_for_status()
    if tmp.headers.get('Content-Type').find("json") != -1:
        rv = _convert_to_rod(tmp.json(), **req_args)
    else:
        rv = tmp.content
    return rv
[ "def", "poke", "(", "url", ",", "accesskey", "=", "None", ",", "secretkey", "=", "None", ",", "__method__", "=", "'GET'", ",", "*", "*", "req_args", ")", ":", "if", "accesskey", "and", "secretkey", ":", "req_args", "[", "'auth'", "]", "=", "(", "accesskey", ",", "secretkey", ")", "tmp", "=", "requests", ".", "request", "(", "__method__", ".", "lower", "(", ")", ",", "url", ",", "*", "*", "req_args", ")", "tmp", ".", "raise_for_status", "(", ")", "if", "tmp", ".", "headers", ".", "get", "(", "'Content-Type'", ")", ".", "find", "(", "\"json\"", ")", "!=", "-", "1", ":", "rv", "=", "_convert_to_rod", "(", "tmp", ".", "json", "(", ")", ",", "*", "*", "req_args", ")", "else", ":", "rv", "=", "tmp", ".", "content", "return", "rv" ]
Poke the Rancher API. Returns a Rod object instance. Central starting point for the cattleprod package. :param url: The full Rancher URL to the API endpoint. :param accesskey: The rancher access key, optional. :param secretkey: The rancher secret key, optional. :param __method__: Internal method, don't use! :param req_args: Arguments which are passed directly to the requests API. The accesskey / secretkey values take precedence over simple auth objects defined here. :return: A Rod instance, or anything that the URL returns on a GET request
[ "Poke", "the", "Rancher", "API", ".", "Returns", "a", "Rod", "object", "instance", ".", "Central", "starting", "point", "for", "the", "cattleprod", "package", ".", ":", "param", "url", ":", "The", "full", "Rancher", "URL", "to", "the", "API", "endpoint", ".", ":", "param", "accesskey", ":", "The", "rancher", "access", "key", "optional", ".", ":", "param", "secretkey", ":", "The", "rancher", "secret", "key", "optional", ".", ":", "param", "__method__", ":", "Internal", "method", "don", "t", "use!", ":", "param", "req_args", ":", "Arguments", "which", "are", "passed", "directly", "to", "the", "requests", "API", ".", "The", "accesskey", "/", "secretkey", "values", "have", "precedence", "before", "simple", "auth", "objects", "defined", "in", "here", ".", ":", "return", ":", "A", "Rod", "instance", "or", "anything", "that", "the", "URL", "returns", "on", "a", "GET", "request" ]
python
train
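A hypothetical call of the entry point above; the URL and keys are placeholders. JSON responses come back converted to a Rod, so further endpoints can be poked from the result:

from cattleprod import poke

rod = poke('https://rancher.example.com/v1', accesskey='KEY', secretkey='SECRET')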
balloob/pychromecast
pychromecast/__init__.py
https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/__init__.py#L315-L331
def wait(self, timeout=None):
        """
        Waits until the cast device is ready for communication. The device
        is ready as soon as a status message has been received.

        If the worker thread is not already running, it will be started.

        If the status has already been received then the method returns
        immediately.

        :param timeout: a floating point number specifying a timeout for
                        the operation in seconds (or fractions thereof).
                        Or None to block forever.
        """
        if not self.socket_client.isAlive():
            self.socket_client.start()
        self.status_event.wait(timeout=timeout)
[ "def", "wait", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "not", "self", ".", "socket_client", ".", "isAlive", "(", ")", ":", "self", ".", "socket_client", ".", "start", "(", ")", "self", ".", "status_event", ".", "wait", "(", "timeout", "=", "timeout", ")" ]
Waits until the cast device is ready for communication. The device is ready as soon as a status message has been received. If the worker thread is not already running, it will be started. If the status has already been received then the method returns immediately. :param timeout: a floating point number specifying a timeout for the operation in seconds (or fractions thereof). Or None to block forever.
[ "Waits", "until", "the", "cast", "device", "is", "ready", "for", "communication", ".", "The", "device", "is", "ready", "as", "soon", "a", "status", "message", "has", "been", "received", "." ]
python
train
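Usage sketch for wait(). Device discovery is out of scope here, so `cast` is assumed to be an already-constructed pychromecast.Chromecast instance:

# Block until the device reports its first status message, or give up after 10 s.
cast.wait(timeout=10.0)  # starts the worker thread if it is not running yet
# Subsequent calls return immediately once a status has been received.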
attm2x/m2x-python
m2x/v2/devices.py
https://github.com/attm2x/m2x-python/blob/df83f590114692b1f96577148b7ba260065905bb/m2x/v2/devices.py#L156-L167
def post_update(self, **values): """ Method for `Post Device Update (Single Values to Multiple Streams) <https://m2x.att.com/developer/documentation/v2/device#Post-Device-Update--Single-Values-to-Multiple-Streams->` endpoint. :param values: The values being posted, formatted according to the API docs :type values: dict :return: The API response, see M2X API docs for details :rtype: dict :raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request """ return self.api.post(self.subpath('/update'), data=values)
[ "def", "post_update", "(", "self", ",", "*", "*", "values", ")", ":", "return", "self", ".", "api", ".", "post", "(", "self", ".", "subpath", "(", "'/update'", ")", ",", "data", "=", "values", ")" ]
Method for `Post Device Update (Single Values to Multiple Streams) <https://m2x.att.com/developer/documentation/v2/device#Post-Device-Update--Single-Values-to-Multiple-Streams->` endpoint. :param values: The values being posted, formatted according to the API docs :type values: dict :return: The API response, see M2X API docs for details :rtype: dict :raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
[ "Method", "for", "Post", "Device", "Update", "(", "Single", "Values", "to", "Multiple", "Streams", ")", "<https", ":", "//", "m2x", ".", "att", ".", "com", "/", "developer", "/", "documentation", "/", "v2", "/", "device#Post", "-", "Device", "-", "Update", "--", "Single", "-", "Values", "-", "to", "-", "Multiple", "-", "Streams", "-", ">", "endpoint", "." ]
python
test
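Usage sketch for post_update(). `device` is assumed to be a Device instance obtained from the m2x client; stream names and the timestamp are illustrative, with the payload shape following the API docs linked in the docstring:

# Post single values to multiple streams in one request (values are placeholders).
device.post_update(timestamp='2019-01-01T00:00:00Z',
                   values={'temperature': 21.5, 'humidity': 40})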
ARMmbed/icetea
icetea_lib/Plugin/PluginManager.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/Plugin/PluginManager.py#L161-L173
def load_default_run_plugins(self): """ Load default run level plugins from icetea_lib.Plugin.plugins.default_plugins. :return: Nothing """ for plugin_name, plugin_class in default_plugins.items(): if issubclass(plugin_class, RunPluginBase): try: self.register_run_plugins(plugin_name, plugin_class()) except PluginException as error: self.logger.debug(error) continue
[ "def", "load_default_run_plugins", "(", "self", ")", ":", "for", "plugin_name", ",", "plugin_class", "in", "default_plugins", ".", "items", "(", ")", ":", "if", "issubclass", "(", "plugin_class", ",", "RunPluginBase", ")", ":", "try", ":", "self", ".", "register_run_plugins", "(", "plugin_name", ",", "plugin_class", "(", ")", ")", "except", "PluginException", "as", "error", ":", "self", ".", "logger", ".", "debug", "(", "error", ")", "continue" ]
Load default run level plugins from icetea_lib.Plugin.plugins.default_plugins. :return: Nothing
[ "Load", "default", "run", "level", "plugins", "from", "icetea_lib", ".", "Plugin", ".", "plugins", ".", "default_plugins", "." ]
python
train
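Usage sketch for load_default_run_plugins(). Constructing a PluginManager requires arguments omitted here, so `manager` is assumed to be a configured instance:

# Register every default plugin that subclasses RunPluginBase; registration
# errors are logged at debug level and skipped rather than raised.
manager.load_default_run_plugins()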
foremast/foremast
src/foremast/pipeline/clean_pipelines.py
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/pipeline/clean_pipelines.py#L28-L47
def delete_pipeline(app='', pipeline_name=''): """Delete _pipeline_name_ from _app_.""" safe_pipeline_name = normalize_pipeline_name(name=pipeline_name) LOG.warning('Deleting Pipeline: %s', safe_pipeline_name) url = '{host}/pipelines/{app}/{pipeline}'.format(host=API_URL, app=app, pipeline=safe_pipeline_name) response = requests.delete(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT) if not response.ok: LOG.debug('Delete response code: %d', response.status_code) if response.status_code == requests.status_codes.codes['method_not_allowed']: raise SpinnakerPipelineDeletionFailed('Failed to delete "{0}" from "{1}", ' 'possibly invalid Pipeline name.'.format(safe_pipeline_name, app)) else: LOG.debug('Pipeline missing, no delete required.') LOG.debug('Deleted "%s" Pipeline response:\n%s', safe_pipeline_name, response.text) return response.text
[ "def", "delete_pipeline", "(", "app", "=", "''", ",", "pipeline_name", "=", "''", ")", ":", "safe_pipeline_name", "=", "normalize_pipeline_name", "(", "name", "=", "pipeline_name", ")", "LOG", ".", "warning", "(", "'Deleting Pipeline: %s'", ",", "safe_pipeline_name", ")", "url", "=", "'{host}/pipelines/{app}/{pipeline}'", ".", "format", "(", "host", "=", "API_URL", ",", "app", "=", "app", ",", "pipeline", "=", "safe_pipeline_name", ")", "response", "=", "requests", ".", "delete", "(", "url", ",", "verify", "=", "GATE_CA_BUNDLE", ",", "cert", "=", "GATE_CLIENT_CERT", ")", "if", "not", "response", ".", "ok", ":", "LOG", ".", "debug", "(", "'Delete response code: %d'", ",", "response", ".", "status_code", ")", "if", "response", ".", "status_code", "==", "requests", ".", "status_codes", ".", "codes", "[", "'method_not_allowed'", "]", ":", "raise", "SpinnakerPipelineDeletionFailed", "(", "'Failed to delete \"{0}\" from \"{1}\", '", "'possibly invalid Pipeline name.'", ".", "format", "(", "safe_pipeline_name", ",", "app", ")", ")", "else", ":", "LOG", ".", "debug", "(", "'Pipeline missing, no delete required.'", ")", "LOG", ".", "debug", "(", "'Deleted \"%s\" Pipeline response:\\n%s'", ",", "safe_pipeline_name", ",", "response", ".", "text", ")", "return", "response", ".", "text" ]
Delete _pipeline_name_ from _app_.
[ "Delete", "_pipeline_name_", "from", "_app_", "." ]
python
train
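Usage sketch for delete_pipeline(). The module-level constants (API_URL, GATE_CA_BUNDLE, GATE_CLIENT_CERT) are assumed to be configured; the app and pipeline names are placeholders:

# Delete a pipeline from a Spinnaker application (names are placeholders).
response_text = delete_pipeline(app='exampleapp', pipeline_name='example pipeline')
# Raises SpinnakerPipelineDeletionFailed on a 405 (method not allowed) response.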
TheHive-Project/Cortex-Analyzers
analyzers/GreyNoise/greynoise.py
https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/GreyNoise/greynoise.py#L16-L44
def _get_level(current_level, new_intention): """ Map GreyNoise intentions to Cortex maliciousness levels. Accept a Cortex level and a GreyNoise intention, then return the more malicious of the two. :param current_level: A Cortex maliciousness level https://github.com/TheHive-Project/CortexDocs/blob/master/api/how-to-create-an-analyzer.md#output :param new_intention: An intention field value from a GreyNoise record https://github.com/GreyNoise-Intelligence/api.greynoise.io#v1queryip :return: The more malicious of the 2 submitted values as a Cortex maliciousness level """ intention_level_map = OrderedDict([ ('info', 'info'), ('benign', 'safe'), ('suspicious', 'suspicious'), ('malicious', 'malicious') ]) levels = list(intention_level_map.values()) # list() so .index() works on Python 3 new_level = intention_level_map.get(new_intention, 'info') new_index = levels.index(new_level) try: current_index = levels.index(current_level) except ValueError: # There is no existing level current_index = -1 return new_level if new_index > current_index else current_level
[ "def", "_get_level", "(", "current_level", ",", "new_intention", ")", ":", "intention_level_map", "=", "OrderedDict", "(", "[", "(", "'info'", ",", "'info'", ")", ",", "(", "'benign'", ",", "'safe'", ")", ",", "(", "'suspicious'", ",", "'suspicious'", ")", ",", "(", "'malicious'", ",", "'malicious'", ")", "]", ")", "levels", "=", "intention_level_map", ".", "values", "(", ")", "new_level", "=", "intention_level_map", ".", "get", "(", "new_intention", ",", "'info'", ")", "new_index", "=", "levels", ".", "index", "(", "new_level", ")", "try", ":", "current_index", "=", "levels", ".", "index", "(", "current_level", ")", "except", "ValueError", ":", "# There is no existing level", "current_index", "=", "-", "1", "return", "new_level", "if", "new_index", ">", "current_index", "else", "current_level" ]
Map GreyNoise intentions to Cortex maliciousness levels. Accept a Cortex level and a GreyNoise intention, then return the more malicious of the two. :param current_level: A Cortex maliciousness level https://github.com/TheHive-Project/CortexDocs/blob/master/api/how-to-create-an-analyzer.md#output :param new_intention: An intention field value from a GreyNoise record https://github.com/GreyNoise-Intelligence/api.greynoise.io#v1queryip :return: The more malicious of the 2 submitted values as a Cortex maliciousness level
[ "Map", "GreyNoise", "intentions", "to", "Cortex", "maliciousness", "levels", ".", "Accept", "a", "Cortex", "level", "and", "a", "GreyNoise", "intention", "the", "return", "the", "more", "malicious", "of", "the", "two", "." ]
python
train
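Usage sketch for _get_level(), derived directly from the mapping in the code above:

# Escalate a Cortex level with a new GreyNoise intention.
assert _get_level('safe', 'malicious') == 'malicious'     # new level is worse
assert _get_level('suspicious', 'benign') == 'suspicious' # existing level kept
assert _get_level('bogus-level', 'info') == 'info'        # unknown current level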
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/handlers.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/handlers.py#L1582-L1602
def _check_mr_state(cls, state, mr_id): """Check MapreduceState. Args: state: a MapreduceState instance. mr_id: mapreduce id. Returns: True if state is valid. False if not and this task should be dropped. """ if state is None: logging.warning( "Mapreduce State for job %s is missing. Dropping Task.", mr_id) return False if not state.active: logging.warning( "Mapreduce %s is not active. Looks like spurious task " "execution. Dropping Task.", mr_id) return False return True
[ "def", "_check_mr_state", "(", "cls", ",", "state", ",", "mr_id", ")", ":", "if", "state", "is", "None", ":", "logging", ".", "warning", "(", "\"Mapreduce State for job %s is missing. Dropping Task.\"", ",", "mr_id", ")", "return", "False", "if", "not", "state", ".", "active", ":", "logging", ".", "warning", "(", "\"Mapreduce %s is not active. Looks like spurious task \"", "\"execution. Dropping Task.\"", ",", "mr_id", ")", "return", "False", "return", "True" ]
Check MapreduceState. Args: state: a MapreduceState instance. mr_id: mapreduce id. Returns: True if state is valid. False if not and this task should be dropped.
[ "Check", "MapreduceState", "." ]
python
train
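Usage sketch for _check_mr_state(). It is a classmethod on a handler class in handlers.py; the handler class name and the state lookup used here are placeholders:

# Drop the current task if the MapreduceState is missing or no longer active.
state = model.MapreduceState.get_by_job_id(mr_id)  # assumed lookup helper
if not SomeHandler._check_mr_state(state, mr_id):  # placeholder class name
    return  # spurious task; nothing to do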
bcbio/bcbio-nextgen
bcbio/qc/qsignature.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qsignature.py#L186-L194
def _slice_vcf_chr21(vcf_file, out_dir): """ Slice chr21 of qsignature SNPs to reduce computation time """ tmp_file = os.path.join(out_dir, "chr21_qsignature.vcf") if not utils.file_exists(tmp_file): cmd = ("grep chr21 {vcf_file} > {tmp_file}").format(**locals()) out = subprocess.check_output(cmd, shell=True) return tmp_file
[ "def", "_slice_vcf_chr21", "(", "vcf_file", ",", "out_dir", ")", ":", "tmp_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "\"chr21_qsignature.vcf\"", ")", "if", "not", "utils", ".", "file_exists", "(", "tmp_file", ")", ":", "cmd", "=", "(", "\"grep chr21 {vcf_file} > {tmp_file}\"", ")", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "out", "=", "subprocess", ".", "check_output", "(", "cmd", ",", "shell", "=", "True", ")", "return", "tmp_file" ]
Slice chr21 of qsignature SNPs to reduce computation time
[ "Slice", "chr21", "of", "qsignature", "SNPs", "to", "reduce", "computation", "time" ]
python
train
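Usage sketch for _slice_vcf_chr21(); file paths are placeholders and the output directory is assumed to exist:

# Write (or reuse) a chr21-only slice of the qsignature SNP VCF.
tmp_vcf = _slice_vcf_chr21('/data/qsignature.vcf', '/tmp/qsig_work')
# -> '/tmp/qsig_work/chr21_qsignature.vcf'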
buriburisuri/sugartensor
sugartensor/sg_layer.py
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_layer.py#L33-L58
def sg_dense(tensor, opt): r"""Applies a full connection. Args: tensor: A 2-D tensor (automatically passed by decorator). opt: in_dim: An `integer`. The size of input dimension. dim: An `integer`. The size of output dimension. bias: Boolean. If True, biases are added. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization summary: If True, summaries are added. The default is True. Returns: A `Tensor` with the same type as `tensor`. """ # parameter initialize w = tf.sg_initializer.he_uniform('W', (opt.in_dim, opt.dim), regularizer=opt.regularizer, summary=opt.summary) b = tf.sg_initializer.constant('b', opt.dim, summary=opt.summary) if opt.bias else 0 # apply transform out = tf.matmul(tensor, w) + b return out
[ "def", "sg_dense", "(", "tensor", ",", "opt", ")", ":", "# parameter initialize", "w", "=", "tf", ".", "sg_initializer", ".", "he_uniform", "(", "'W'", ",", "(", "opt", ".", "in_dim", ",", "opt", ".", "dim", ")", ",", "regularizer", "=", "opt", ".", "regularizer", ",", "summary", "=", "opt", ".", "summary", ")", "b", "=", "tf", ".", "sg_initializer", ".", "constant", "(", "'b'", ",", "opt", ".", "dim", ",", "summary", "=", "opt", ".", "summary", ")", "if", "opt", ".", "bias", "else", "0", "# apply transform", "out", "=", "tf", ".", "matmul", "(", "tensor", ",", "w", ")", "+", "b", "return", "out" ]
r"""Applies a full connection. Args: tensor: A 2-D tensor (automatically passed by decorator). opt: in_dim: An `integer`. The size of input dimension. dim: An `integer`. The size of output dimension. bias: Boolean. If True, biases are added. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization summary: If True, summaries are added. The default is True. Returns: A `Tensor` with the same type as `tensor`.
[ "r", "Applies", "a", "full", "connection", ".", "Args", ":", "tensor", ":", "A", "2", "-", "D", "tensor", "(", "automatically", "passed", "by", "decorator", ")", ".", "opt", ":", "in_dim", ":", "An", "integer", ".", "The", "size", "of", "input", "dimension", ".", "dim", ":", "An", "integer", ".", "The", "size", "of", "output", "dimension", ".", "bias", ":", "Boolean", ".", "If", "True", "biases", "are", "added", ".", "regularizer", ":", "A", "(", "Tensor", "-", ">", "Tensor", "or", "None", ")", "function", ";", "the", "result", "of", "applying", "it", "on", "a", "newly", "created", "variable", "will", "be", "added", "to", "the", "collection", "tf", ".", "GraphKeys", ".", "REGULARIZATION_LOSSES", "and", "can", "be", "used", "for", "regularization", "summary", ":", "If", "True", "summaries", "are", "added", ".", "The", "default", "is", "True", "." ]
python
train
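Usage sketch for sg_dense(). In sugartensor, layer functions decorated this way are also exposed as tensor methods, which is the calling style assumed below; shapes and dimensions are illustrative:

# x is assumed to be a 2-D float tensor of shape (batch, 784).
out = x.sg_dense(in_dim=784, dim=10, bias=True)  # -> shape (batch, 10)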
senaite/senaite.core
bika/lims/content/contact.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/contact.py#L309-L316
def _addLocalOwnerRole(self, username): """Add local owner role on the parent object """ parent = self.getParent() if parent.portal_type == "Client": parent.manage_setLocalRoles(username, ["Owner", ]) # reindex object security self._recursive_reindex_object_security(parent)
[ "def", "_addLocalOwnerRole", "(", "self", ",", "username", ")", ":", "parent", "=", "self", ".", "getParent", "(", ")", "if", "parent", ".", "portal_type", "==", "\"Client\"", ":", "parent", ".", "manage_setLocalRoles", "(", "username", ",", "[", "\"Owner\"", ",", "]", ")", "# reindex object security", "self", ".", "_recursive_reindex_object_security", "(", "parent", ")" ]
Add local owner role on the parent object
[ "Add", "local", "owner", "role", "from", "parent", "object" ]
python
train
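Usage sketch for _addLocalOwnerRole(); `contact` is assumed to be a Contact instance whose parent may or may not be a Client:

# Grant the user the local Owner role on the parent; Client parents only.
contact._addLocalOwnerRole('jdoe')  # also reindexes object security recursively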