Dataset schema (column, dtype, observed range):

    repo              string    length 7 to 55
    path              string    length 4 to 223
    url               string    length 87 to 315
    code              string    length 75 to 104k
    code_tokens       list
    docstring         string    length 1 to 46.9k
    docstring_tokens  list
    language          string    1 distinct value
    partition         string    3 distinct values
    avg_line_len      float64   7.91 to 980
diging/tethne
tethne/classes/graphcollection.py
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/graphcollection.py#L213-L235
def edges(self, data=False, native=True):
    """
    Returns a list of all edges in the :class:`.GraphCollection`\.

    Parameters
    ----------
    data : bool (default: False)
        If True, returns a list of 3-tuples containing source and target
        node labels, and attributes.
    native : bool (default: True)
        If True, node labels are looked up in the node index; otherwise the
        master graph's internal node identifiers are returned.

    Returns
    -------
    edges : list
    """
    edges = self.master_graph.edges(data=data)
    if native:
        if data:
            edges = [(self.node_index[s], self.node_index[t], attrs)
                     for s, t, attrs in edges]
        else:
            edges = [(self.node_index[s], self.node_index[t])
                     for s, t in edges]
    return edges
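A brief usage sketch (hypothetical; `collection` stands in for an already-built GraphCollection, which this record does not show):

# Hypothetical usage; `collection` is an existing, populated GraphCollection.
pairs = collection.edges()              # [(source_label, target_label), ...]
triples = collection.edges(data=True)   # [(source_label, target_label, attrs), ...]
raw = collection.edges(native=False)    # internal master_graph node ids instead of labels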
[ "def", "edges", "(", "self", ",", "data", "=", "False", ",", "native", "=", "True", ")", ":", "edges", "=", "self", ".", "master_graph", ".", "edges", "(", "data", "=", "data", ")", "if", "native", ":", "if", "data", ":", "edges", "=", "[", "(", "self", ".", "node_index", "[", "s", "]", ",", "self", ".", "node_index", "[", "t", "]", ",", "attrs", ")", "for", "s", ",", "t", ",", "attrs", "in", "edges", "]", "else", ":", "edges", "=", "[", "(", "self", ".", "node_index", "[", "s", "]", ",", "self", ".", "node_index", "[", "t", "]", ")", "for", "s", ",", "t", "in", "edges", "]", "return", "edges" ]
Returns a list of all edges in the :class:`.GraphCollection`\. Parameters ---------- data : bool (default: False) If True, returns a list of 3-tuples containing source and target node labels, and attributes. Returns ------- edges : list
[ "Returns", "a", "list", "of", "all", "edges", "in", "the", ":", "class", ":", ".", "GraphCollection", "\\", "." ]
python
train
31.565217
beerfactory/hbmqtt
hbmqtt/plugins/manager.py
https://github.com/beerfactory/hbmqtt/blob/4aa6fe982141abc3c54e9f4d7b981ab3eba0a13c/hbmqtt/plugins/manager.py#L115-L151
def fire_event(self, event_name, wait=False, *args, **kwargs):
    """
    Fire an event to plugins.

    PluginManager schedules asyncio coroutine calls for each plugin on the
    method named "on_" + event_name.
    For example, on_connect will be called on event 'connect'.
    Method calls are scheduled in the asyncio loop. The wait parameter must
    be set to True to wait until all methods are completed.

    :param event_name:
    :param args:
    :param kwargs:
    :param wait: indicates if fire_event should wait for plugin calls
        completion (True), or not
    :return:
    """
    tasks = []
    event_method_name = "on_" + event_name
    for plugin in self._plugins:
        event_method = getattr(plugin.object, event_method_name, None)
        if event_method:
            try:
                task = self._schedule_coro(event_method(*args, **kwargs))
                tasks.append(task)

                def clean_fired_events(future):
                    try:
                        self._fired_events.remove(task)
                    except (KeyError, ValueError):
                        pass

                task.add_done_callback(clean_fired_events)
            except AssertionError:
                self.logger.error("Method '%s' on plugin '%s' is not a coroutine"
                                  % (event_method_name, plugin.name))

    self._fired_events.extend(tasks)
    if wait:
        if tasks:
            yield from asyncio.wait(tasks, loop=self._loop)
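A hedged usage sketch (plugin and manager names are illustrative; fire_event is a generator-based coroutine in the legacy `yield from` style, so it must be driven from inside the event loop):

import asyncio

class MyPlugin:
    @asyncio.coroutine          # legacy coroutine style matching this codebase
    def on_connect(self, session):
        print("connected:", session)

# Inside another coroutine, fire 'connect' and block until all plugin
# handlers have completed:
#     yield from plugin_manager.fire_event('connect', wait=True, session=session)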
[ "def", "fire_event", "(", "self", ",", "event_name", ",", "wait", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "tasks", "=", "[", "]", "event_method_name", "=", "\"on_\"", "+", "event_name", "for", "plugin", "in", "self", ".", "_plugins", ":", "event_method", "=", "getattr", "(", "plugin", ".", "object", ",", "event_method_name", ",", "None", ")", "if", "event_method", ":", "try", ":", "task", "=", "self", ".", "_schedule_coro", "(", "event_method", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "tasks", ".", "append", "(", "task", ")", "def", "clean_fired_events", "(", "future", ")", ":", "try", ":", "self", ".", "_fired_events", ".", "remove", "(", "task", ")", "except", "(", "KeyError", ",", "ValueError", ")", ":", "pass", "task", ".", "add_done_callback", "(", "clean_fired_events", ")", "except", "AssertionError", ":", "self", ".", "logger", ".", "error", "(", "\"Method '%s' on plugin '%s' is not a coroutine\"", "%", "(", "event_method_name", ",", "plugin", ".", "name", ")", ")", "self", ".", "_fired_events", ".", "extend", "(", "tasks", ")", "if", "wait", ":", "if", "tasks", ":", "yield", "from", "asyncio", ".", "wait", "(", "tasks", ",", "loop", "=", "self", ".", "_loop", ")" ]
Fire an event to plugins. PluginManager schedule @asyncio.coroutinecalls for each plugin on method called "on_" + event_name For example, on_connect will be called on event 'connect' Method calls are schedule in the asyn loop. wait parameter must be set to true to wait until all mehtods are completed. :param event_name: :param args: :param kwargs: :param wait: indicates if fire_event should wait for plugin calls completion (True), or not :return:
[ "Fire", "an", "event", "to", "plugins", ".", "PluginManager", "schedule" ]
python
train
42.27027
moonso/vcf_parser
vcf_parser/utils/build_vep.py
https://github.com/moonso/vcf_parser/blob/8e2b6724e31995e0d43af501f25974310c6b843b/vcf_parser/utils/build_vep.py#L3-L31
def build_vep_string(vep_info, vep_columns):
    """
    Build a vep formatted string.

    Take a list with vep annotations and build a new vep string

    Args:
        vep_info (list): A list with vep annotation dictionaries
        vep_columns (list): A list with the vep column names found in the
            header of the vcf

    Returns:
        string: A string with the proper vep annotations
    """
    logger = getLogger(__name__)
    logger.debug("Building vep string from {0}".format(vep_info))
    logger.debug("Found vep headers {0}".format(vep_columns))
    vep_strings = []
    for vep_annotation in vep_info:
        try:
            vep_info_list = [vep_annotation[vep_key] for vep_key in vep_columns]
        except KeyError:
            raise SyntaxError("Vep entry does not correspond to vep headers")
        vep_strings.append('|'.join(vep_info_list))
    return ','.join(vep_strings)
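The function is nearly self-contained (it only needs `from logging import getLogger`), so a round-trip example is easy to verify; the column names below are illustrative:

vep_columns = ['Allele', 'Consequence', 'SYMBOL']
vep_info = [
    {'Allele': 'A', 'Consequence': 'missense_variant', 'SYMBOL': 'BRCA1'},
    {'Allele': 'C', 'Consequence': 'synonymous_variant', 'SYMBOL': 'BRCA1'},
]
print(build_vep_string(vep_info, vep_columns))
# -> A|missense_variant|BRCA1,C|synonymous_variant|BRCA1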
[ "def", "build_vep_string", "(", "vep_info", ",", "vep_columns", ")", ":", "logger", "=", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "\"Building vep string from {0}\"", ".", "format", "(", "vep_info", ")", ")", "logger", ".", "debug", "(", "\"Found vep headers {0}\"", ".", "format", "(", "vep_columns", ")", ")", "vep_strings", "=", "[", "]", "for", "vep_annotation", "in", "vep_info", ":", "try", ":", "vep_info_list", "=", "[", "vep_annotation", "[", "vep_key", "]", "for", "vep_key", "in", "vep_columns", "]", "except", "KeyError", ":", "raise", "SyntaxError", "(", "\"Vep entry does not correspond to vep headers\"", ")", "vep_strings", ".", "append", "(", "'|'", ".", "join", "(", "vep_info_list", ")", ")", "return", "','", ".", "join", "(", "vep_strings", ")" ]
Build a vep string formatted string. Take a list with vep annotations and build a new vep string Args: vep_info (list): A list with vep annotation dictionaries vep_columns (list): A list with the vep column names found in the header of the vcf Returns: string: A string with the proper vep annotations
[ "Build", "a", "vep", "string", "formatted", "string", ".", "Take", "a", "list", "with", "vep", "annotations", "and", "build", "a", "new", "vep", "string", "Args", ":", "vep_info", "(", "list", ")", ":", "A", "list", "with", "vep", "annotation", "dictionaries", "vep_columns", "(", "list", ")", ":", "A", "list", "with", "the", "vep", "column", "names", "found", "in", "the", "header", "of", "the", "vcf", "Returns", ":", "string", ":", "A", "string", "with", "the", "proper", "vep", "annotations" ]
python
train
32.448276
numenta/htmresearch
projects/sequence_prediction/continuous_sequence/run_knn.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sequence_prediction/continuous_sequence/run_knn.py#L181-L198
def normalizeSequence(sequence, considerDimensions=None):
    """
    Normalize sequence by subtracting the mean and dividing by the standard
    deviation of each considered dimension.
    :param sequence: a list of data samples
    :param considerDimensions: a list of dimensions to consider
    :return: normalized sequence
    """
    seq = np.array(sequence).astype('float64')
    nSampleDim = seq.shape[1]
    if considerDimensions is None:
        considerDimensions = range(nSampleDim)

    for dim in considerDimensions:
        seq[:, dim] = (seq[:, dim] - np.mean(seq[:, dim])) / np.std(seq[:, dim])

    sequence = seq.tolist()
    return sequence
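A quick numeric check of the z-score behaviour (requires numpy; rows are samples, columns are dimensions):

import numpy as np

data = [[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]]
normalized = np.array(normalizeSequence(data))

# Each column should now have mean ~0 and (population) std ~1:
assert np.allclose(normalized.mean(axis=0), 0.0)
assert np.allclose(normalized.std(axis=0), 1.0)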
[ "def", "normalizeSequence", "(", "sequence", ",", "considerDimensions", "=", "None", ")", ":", "seq", "=", "np", ".", "array", "(", "sequence", ")", ".", "astype", "(", "'float64'", ")", "nSampleDim", "=", "seq", ".", "shape", "[", "1", "]", "if", "considerDimensions", "is", "None", ":", "considerDimensions", "=", "range", "(", "nSampleDim", ")", "for", "dim", "in", "considerDimensions", ":", "seq", "[", ":", ",", "dim", "]", "=", "(", "seq", "[", ":", ",", "dim", "]", "-", "np", ".", "mean", "(", "seq", "[", ":", ",", "dim", "]", ")", ")", "/", "np", ".", "std", "(", "seq", "[", ":", ",", "dim", "]", ")", "sequence", "=", "seq", ".", "tolist", "(", ")", "return", "sequence" ]
normalize sequence by subtracting the mean and :param sequence: a list of data samples :param considerDimensions: a list of dimensions to consider :return: normalized sequence
[ "normalize", "sequence", "by", "subtracting", "the", "mean", "and", ":", "param", "sequence", ":", "a", "list", "of", "data", "samples", ":", "param", "considerDimensions", ":", "a", "list", "of", "dimensions", "to", "consider", ":", "return", ":", "normalized", "sequence" ]
python
train
30.111111
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py#L1641-L1662
def get_account_api_key(self, account_id, api_key, **kwargs):  # noqa: E501
    """Get API key details. # noqa: E501

    An endpoint for retrieving API key details. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys/{apiKey} -H 'Authorization: Bearer API_KEY'`  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_account_api_key(account_id, api_key, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str account_id: Account ID. (required)
    :param str api_key: The ID of the API key to be retrieved. (required)
    :return: ApiKeyInfoResp
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.get_account_api_key_with_http_info(account_id, api_key, **kwargs)  # noqa: E501
    else:
        (data) = self.get_account_api_key_with_http_info(account_id, api_key, **kwargs)  # noqa: E501
        return data
[ "def", "get_account_api_key", "(", "self", ",", "account_id", ",", "api_key", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "return", "self", ".", "get_account_api_key_with_http_info", "(", "account_id", ",", "api_key", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "get_account_api_key_with_http_info", "(", "account_id", ",", "api_key", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Get API key details. # noqa: E501 An endpoint for retrieving API key details. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys/{apiKey} -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_account_api_key(account_id, api_key, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str account_id: Account ID. (required) :param str api_key: The ID of the API key to be retrieved. (required) :return: ApiKeyInfoResp If the method is called asynchronously, returns the request thread.
[ "Get", "API", "key", "details", ".", "#", "noqa", ":", "E501" ]
python
train
55.227273
bitesofcode/projexui
projexui/widgets/xlineedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xlineedit.py#L152-L182
def adjustButtons( self ):
    """
    Adjusts the placement of the buttons for this line edit.
    """
    y = 1
    for btn in self.buttons():
        btn.setIconSize(self.iconSize())
        btn.setFixedSize(QSize(self.height() - 2, self.height() - 2))

    # adjust the location for the left buttons
    left_buttons = self._buttons.get(Qt.AlignLeft, [])
    x = (self.cornerRadius() / 2.0) + 2
    for btn in left_buttons:
        btn.move(x, y)
        x += btn.width()

    # adjust the location for the right buttons
    right_buttons = self._buttons.get(Qt.AlignRight, [])
    w = self.width()
    bwidth = sum([btn.width() for btn in right_buttons])
    bwidth += (self.cornerRadius() / 2.0) + 1
    for btn in right_buttons:
        btn.move(w - bwidth, y)
        bwidth -= btn.width()

    self._buttonWidth = sum([btn.width() for btn in self.buttons()])
    self.adjustTextMargins()
[ "def", "adjustButtons", "(", "self", ")", ":", "y", "=", "1", "for", "btn", "in", "self", ".", "buttons", "(", ")", ":", "btn", ".", "setIconSize", "(", "self", ".", "iconSize", "(", ")", ")", "btn", ".", "setFixedSize", "(", "QSize", "(", "self", ".", "height", "(", ")", "-", "2", ",", "self", ".", "height", "(", ")", "-", "2", ")", ")", "# adjust the location for the left buttons", "left_buttons", "=", "self", ".", "_buttons", ".", "get", "(", "Qt", ".", "AlignLeft", ",", "[", "]", ")", "x", "=", "(", "self", ".", "cornerRadius", "(", ")", "/", "2.0", ")", "+", "2", "for", "btn", "in", "left_buttons", ":", "btn", ".", "move", "(", "x", ",", "y", ")", "x", "+=", "btn", ".", "width", "(", ")", "# adjust the location for the right buttons", "right_buttons", "=", "self", ".", "_buttons", ".", "get", "(", "Qt", ".", "AlignRight", ",", "[", "]", ")", "w", "=", "self", ".", "width", "(", ")", "bwidth", "=", "sum", "(", "[", "btn", ".", "width", "(", ")", "for", "btn", "in", "right_buttons", "]", ")", "bwidth", "+=", "(", "self", ".", "cornerRadius", "(", ")", "/", "2.0", ")", "+", "1", "for", "btn", "in", "right_buttons", ":", "btn", ".", "move", "(", "w", "-", "bwidth", ",", "y", ")", "bwidth", "-=", "btn", ".", "width", "(", ")", "self", ".", "_buttonWidth", "=", "sum", "(", "[", "btn", ".", "width", "(", ")", "for", "btn", "in", "self", ".", "buttons", "(", ")", "]", ")", "self", ".", "adjustTextMargins", "(", ")" ]
Adjusts the placement of the buttons for this line edit.
[ "Adjusts", "the", "placement", "of", "the", "buttons", "for", "this", "line", "edit", "." ]
python
train
33.258065
fr33jc/bang
bang/providers/hpcloud/__init__.py
https://github.com/fr33jc/bang/blob/8f000713f88d2a9a8c1193b63ca10a6578560c16/bang/providers/hpcloud/__init__.py#L29-L104
def create_db(self, instance_name, instance_type, admin_username,
              admin_password, security_groups=None, db_name=None,
              storage_size_gb=DEFAULT_STORAGE_SIZE_GB,
              timeout_s=DEFAULT_TIMEOUT_S):
    """
    Creates a database instance.

    This method blocks until the db instance is active, or until
    :attr:`timeout_s` has elapsed.

    By default, hpcloud *assigns* an automatically-generated set of
    credentials for an admin user. In addition to launching the db instance,
    this method uses the autogenerated credentials to login to the server
    and create the intended admin user based on the credentials supplied as
    method arguments.

    :param str instance_name: A name to assign to the db instance.
    :param str instance_type: The server instance type (e.g. ``medium``).
    :param str admin_username: The admin username.
    :param str admin_password: The admin password.
    :param security_groups: *Not used in hpcloud*.
    :param str db_name: The database name. If this is not specified, the
        database will be named the same as the :attr:`instance_name`.
    :param int storage_size_gb: The size of the storage volume in GB.
    :param float timeout_s: The number of seconds to poll for an active
        database server before failing. This value is also used when
        attempting to connect to the running mysql server.

    :rtype: :class:`dict`
    """
    db = self._create_db(instance_name, instance_type, storage_size_gb)

    # hang on to these... hpcloud only provides a way to generate a new
    # set of username/password - there is no way to retrieve the originals.
    default_creds = db.credential
    log.debug('Credentials for %s: %s' % (instance_name, default_creds))

    instance = self._poll_instance_status(db, timeout_s)

    # we're taking advantage of a security bug in hpcloud's dbaas security
    # group rules. the default *security* is to allow connections from
    # everywhere in the world.
    def connect():
        try:
            return pymysql.connect(
                host=instance.hostname,
                port=instance.port,
                # db=self.database,
                user=default_creds['username'],
                passwd=default_creds['password'],
                connect_timeout=timeout_s,
            )
        except:
            log.warn("Could not connect to db, %s" % instance_name)
            # log.debug("Connection exception", exc_info=True)

    log.info("Connecting to %s..." % instance_name)
    db = poll_with_timeout(timeout_s, connect, 10)
    cur = db.cursor()
    cur.execute(
        "grant all privileges on *.* "
        "to '%s'@'%%' identified by '%s' "
        "with grant option" % (admin_username, admin_password)
    )
    cur.execute("flush privileges")
    return db_to_dict(instance)
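A hedged call sketch (the provider object and all argument values below are illustrative, not from the source):

# `hpcloud` stands in for a configured provider instance.
db_info = hpcloud.create_db(
    instance_name='my-app-db',
    instance_type='medium',
    admin_username='appadmin',
    admin_password='s3cret',   # avoid hard-coding credentials in real use
    storage_size_gb=10,
    timeout_s=600,
)
print(db_info)  # dict built by db_to_dict(instance)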
[ "def", "create_db", "(", "self", ",", "instance_name", ",", "instance_type", ",", "admin_username", ",", "admin_password", ",", "security_groups", "=", "None", ",", "db_name", "=", "None", ",", "storage_size_gb", "=", "DEFAULT_STORAGE_SIZE_GB", ",", "timeout_s", "=", "DEFAULT_TIMEOUT_S", ")", ":", "db", "=", "self", ".", "_create_db", "(", "instance_name", ",", "instance_type", ",", "storage_size_gb", ")", "# hang on to these... hpcloud only provides a way to generate a new", "# set of username/password - there is no way to retrieve the originals.", "default_creds", "=", "db", ".", "credential", "log", ".", "debug", "(", "'Credentials for %s: %s'", "%", "(", "instance_name", ",", "default_creds", ")", ")", "instance", "=", "self", ".", "_poll_instance_status", "(", "db", ",", "timeout_s", ")", "# we're taking advantage of a security bug in hpcloud's dbaas security", "# group rules. the default *security* is to allow connections from", "# everywhere in the world.", "def", "connect", "(", ")", ":", "try", ":", "return", "pymysql", ".", "connect", "(", "host", "=", "instance", ".", "hostname", ",", "port", "=", "instance", ".", "port", ",", "# db=self.database,", "user", "=", "default_creds", "[", "'username'", "]", ",", "passwd", "=", "default_creds", "[", "'password'", "]", ",", "connect_timeout", "=", "timeout_s", ",", ")", "except", ":", "log", ".", "warn", "(", "\"Could not connect to db, %s\"", "%", "instance_name", ")", "# log.debug(\"Connection exception\", exc_info=True)", "log", ".", "info", "(", "\"Connecting to %s...\"", "%", "instance_name", ")", "db", "=", "poll_with_timeout", "(", "timeout_s", ",", "connect", ",", "10", ")", "cur", "=", "db", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "\"grant all privileges on *.* \"", "\"to '%s'@'%%' identified by '%s' \"", "\"with grant option\"", "%", "(", "admin_username", ",", "admin_password", ")", ")", "cur", ".", "execute", "(", "\"flush privileges\"", ")", "return", "db_to_dict", "(", "instance", ")" ]
Creates a database instance. This method blocks until the db instance is active, or until :attr:`timeout_s` has elapsed. By default, hpcloud *assigns* an automatically-generated set of credentials for an admin user. In addition to launching the db instance, this method uses the autogenerated credentials to login to the server and create the intended admin user based on the credentials supplied as method arguments. :param str instance_name: A name to assign to the db instance. :param str instance_type: The server instance type (e.g. ``medium``). :param str admin_username: The admin username. :param str admin_password: The admin password. :param security_groups: *Not used in hpcloud*. :param str db_name: The database name. If this is not specified, the database will be named the same as the :attr:`instance_name`. :param int storage_size_gb: The size of the storage volume in GB. :param float timeout_s: The number of seconds to poll for an active database server before failing. This value is also used when attempting to connect to the running mysql server. :rtype: :class:`dict`
[ "Creates", "a", "database", "instance", "." ]
python
train
40.539474
theislab/scanpy
scanpy/tools/_dpt.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/tools/_dpt.py#L859-L873
def _kendall_tau_subtract(self, len_old, diff_neg, tau_old):
    """Compute Kendall tau delta.

    The new sequence has length len_old - 1.

    Parameters
    ----------
    len_old : int
        The length of the old sequence, used to compute tau_old.
    diff_neg : int
        Difference between concordant and non-concordant pairs.
    tau_old : float
        Kendall rank correlation of the old sequence.
    """
    return 2. / (len_old - 2) * (-float(diff_neg) / (len_old - 1) + tau_old)
[ "def", "_kendall_tau_subtract", "(", "self", ",", "len_old", ",", "diff_neg", ",", "tau_old", ")", ":", "return", "2.", "/", "(", "len_old", "-", "2", ")", "*", "(", "-", "float", "(", "diff_neg", ")", "/", "(", "len_old", "-", "1", ")", "+", "tau_old", ")" ]
Compute Kendall tau delta. The new sequence has length len_old - 1. Parameters ---------- len_old : int The length of the old sequence, used to compute tau_old. diff_neg : int Difference between concordant and non-concordant pairs. tau_old : float Kendall rank correlation of the old sequence.
[ "Compute", "Kendall", "tau", "delta", "." ]
python
train
34.533333
Telefonica/toolium
toolium/config_driver.py
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/config_driver.py#L373-L381
def _setup_opera(self, capabilities):
    """Setup Opera webdriver

    :param capabilities: capabilities object
    :returns: a new local Opera driver
    """
    opera_driver = self.config.get('Driver', 'opera_driver_path')
    self.logger.debug("Opera driver path given in properties: %s", opera_driver)
    return webdriver.Opera(executable_path=opera_driver,
                           desired_capabilities=capabilities)
[ "def", "_setup_opera", "(", "self", ",", "capabilities", ")", ":", "opera_driver", "=", "self", ".", "config", ".", "get", "(", "'Driver'", ",", "'opera_driver_path'", ")", "self", ".", "logger", ".", "debug", "(", "\"Opera driver path given in properties: %s\"", ",", "opera_driver", ")", "return", "webdriver", ".", "Opera", "(", "executable_path", "=", "opera_driver", ",", "desired_capabilities", "=", "capabilities", ")" ]
Setup Opera webdriver :param capabilities: capabilities object :returns: a new local Opera driver
[ "Setup", "Opera", "webdriver" ]
python
train
46.444444
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py#L626-L638
def fcoe_get_login_output_fcoe_login_list_fcoe_login_session_mac(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    fcoe_get_login = ET.Element("fcoe_get_login")
    config = fcoe_get_login
    output = ET.SubElement(fcoe_get_login, "output")
    fcoe_login_list = ET.SubElement(output, "fcoe-login-list")
    fcoe_login_session_mac = ET.SubElement(fcoe_login_list, "fcoe-login-session-mac")
    fcoe_login_session_mac.text = kwargs.pop('fcoe_login_session_mac')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "fcoe_get_login_output_fcoe_login_list_fcoe_login_session_mac", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "fcoe_get_login", "=", "ET", ".", "Element", "(", "\"fcoe_get_login\"", ")", "config", "=", "fcoe_get_login", "output", "=", "ET", ".", "SubElement", "(", "fcoe_get_login", ",", "\"output\"", ")", "fcoe_login_list", "=", "ET", ".", "SubElement", "(", "output", ",", "\"fcoe-login-list\"", ")", "fcoe_login_session_mac", "=", "ET", ".", "SubElement", "(", "fcoe_login_list", ",", "\"fcoe-login-session-mac\"", ")", "fcoe_login_session_mac", ".", "text", "=", "kwargs", ".", "pop", "(", "'fcoe_login_session_mac'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
47.384615
tensorflow/cleverhans
cleverhans/attacks/spsa.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/spsa.py#L246-L270
def _compute_gradients(self, loss_fn, x, unused_optim_state):
    """Compute a new value of `x` to minimize `loss_fn`.

    Args:
        loss_fn: a callable that takes `x`, a batch of images, and returns a
            batch of loss values. `x` will be optimized to minimize
            `loss_fn(x)`.
        x: A list of Tensors, the values to be updated. This is analogous
            to the `var_list` argument in standard TF Optimizer.
        unused_optim_state: A (possibly nested) dict, containing any state
            info needed for the optimizer.

    Returns:
        new_x: A list of Tensors, the same length as `x`, which are updated
        new_optim_state: A dict, with the same structure as `optim_state`,
            which have been updated.
    """
    # Assumes `x` is a list,
    # and contains a tensor representing a batch of images
    assert len(x) == 1 and isinstance(x, list), \
        'x should be a list and contain only one image tensor'
    x = x[0]
    loss = reduce_mean(loss_fn(x), axis=0)
    return tf.gradients(loss, x)
[ "def", "_compute_gradients", "(", "self", ",", "loss_fn", ",", "x", ",", "unused_optim_state", ")", ":", "# Assumes `x` is a list,", "# and contains a tensor representing a batch of images", "assert", "len", "(", "x", ")", "==", "1", "and", "isinstance", "(", "x", ",", "list", ")", ",", "'x should be a list and contain only one image tensor'", "x", "=", "x", "[", "0", "]", "loss", "=", "reduce_mean", "(", "loss_fn", "(", "x", ")", ",", "axis", "=", "0", ")", "return", "tf", ".", "gradients", "(", "loss", ",", "x", ")" ]
Compute a new value of `x` to minimize `loss_fn`. Args: loss_fn: a callable that takes `x`, a batch of images, and returns a batch of loss values. `x` will be optimized to minimize `loss_fn(x)`. x: A list of Tensors, the values to be updated. This is analogous to the `var_list` argument in standard TF Optimizer. unused_optim_state: A (possibly nested) dict, containing any state info needed for the optimizer. Returns: new_x: A list of Tensors, the same length as `x`, which are updated new_optim_state: A dict, with the same structure as `optim_state`, which have been updated.
[ "Compute", "a", "new", "value", "of", "x", "to", "minimize", "loss_fn", "." ]
python
train
41.36
biolink/biolink-model
metamodel/generators/yumlgen.py
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/yumlgen.py#L89-L109
def class_box(self, cn: ClassDefinitionName) -> str:
    """ Generate a box for the class. Populate its interior only if (a) it
    hasn't previously been generated and (b) it appears in the gen_classes list

    @param cn:
    @param inherited:
    @return:
    """
    slot_defs: List[str] = []
    if cn not in self.box_generated and (not self.focus_classes or cn in self.focus_classes):
        cls = self.schema.classes[cn]
        for slotname in self.filtered_cls_slots(cn, all_slots=True):
            slot = self.schema.slots[slotname]
            if not slot.range or slot.range in builtin_names or slot.range in self.schema.types:
                mod = self.prop_modifier(cls, slot)
                slot_defs.append(underscore(self.aliased_slot_name(slot)) +
                                 mod + ':' +
                                 underscore(slot.range) + self.cardinality(slot))
        self.box_generated.add(cn)
    self.referenced.add(cn)
    return '[' + camelcase(cn) + ('|' + ';'.join(slot_defs) if slot_defs else '') + ']'
[ "def", "class_box", "(", "self", ",", "cn", ":", "ClassDefinitionName", ")", "->", "str", ":", "slot_defs", ":", "List", "[", "str", "]", "=", "[", "]", "if", "cn", "not", "in", "self", ".", "box_generated", "and", "(", "not", "self", ".", "focus_classes", "or", "cn", "in", "self", ".", "focus_classes", ")", ":", "cls", "=", "self", ".", "schema", ".", "classes", "[", "cn", "]", "for", "slotname", "in", "self", ".", "filtered_cls_slots", "(", "cn", ",", "all_slots", "=", "True", ")", ":", "slot", "=", "self", ".", "schema", ".", "slots", "[", "slotname", "]", "if", "not", "slot", ".", "range", "or", "slot", ".", "range", "in", "builtin_names", "or", "slot", ".", "range", "in", "self", ".", "schema", ".", "types", ":", "mod", "=", "self", ".", "prop_modifier", "(", "cls", ",", "slot", ")", "slot_defs", ".", "append", "(", "underscore", "(", "self", ".", "aliased_slot_name", "(", "slot", ")", ")", "+", "mod", "+", "':'", "+", "underscore", "(", "slot", ".", "range", ")", "+", "self", ".", "cardinality", "(", "slot", ")", ")", "self", ".", "box_generated", ".", "add", "(", "cn", ")", "self", ".", "referenced", ".", "add", "(", "cn", ")", "return", "'['", "+", "camelcase", "(", "cn", ")", "+", "(", "'|'", "+", "';'", ".", "join", "(", "slot_defs", ")", "if", "slot_defs", "else", "''", ")", "+", "']'" ]
Generate a box for the class. Populate its interior only if (a) it hasn't previously been generated and (b) it appears in the gen_classes list @param cn: @param inherited: @return:
[ "Generate", "a", "box", "for", "the", "class", ".", "Populate", "its", "interior", "only", "if", "(", "a", ")", "it", "hasn", "t", "previously", "been", "generated", "and", "(", "b", ")", "it", "appears", "in", "the", "gen_classes", "list" ]
python
train
52.571429
ellmetha/django-machina
machina/apps/forum_conversation/views.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_conversation/views.py#L73-L79
def get_topic(self):
    """ Returns the topic to consider. """
    if not hasattr(self, 'topic'):
        self.topic = get_object_or_404(
            Topic.objects.select_related('forum').all(),
            pk=self.kwargs['pk'],
        )
    return self.topic
[ "def", "get_topic", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'topic'", ")", ":", "self", ".", "topic", "=", "get_object_or_404", "(", "Topic", ".", "objects", ".", "select_related", "(", "'forum'", ")", ".", "all", "(", ")", ",", "pk", "=", "self", ".", "kwargs", "[", "'pk'", "]", ",", ")", "return", "self", ".", "topic" ]
Returns the topic to consider.
[ "Returns", "the", "topic", "to", "consider", "." ]
python
train
38.142857
mozilla-iot/webthing-python
webthing/server.py
https://github.com/mozilla-iot/webthing-python/blob/65d467c89ed79d0bbc42b8b3c8f9e5a320edd237/webthing/server.py#L363-L397
def put(self, thing_id='0', property_name=None):
    """
    Handle a PUT request.

    thing_id -- ID of the thing this request is for
    property_name -- the name of the property from the URL path
    """
    thing = self.get_thing(thing_id)
    if thing is None:
        self.set_status(404)
        return

    try:
        args = json.loads(self.request.body.decode())
    except ValueError:
        self.set_status(400)
        return

    if property_name not in args:
        self.set_status(400)
        return

    if thing.has_property(property_name):
        try:
            thing.set_property(property_name, args[property_name])
        except PropertyError:
            self.set_status(400)
            return

        self.set_header('Content-Type', 'application/json')
        self.write(json.dumps({
            property_name: thing.get_property(property_name),
        }))
    else:
        self.set_status(404)
[ "def", "put", "(", "self", ",", "thing_id", "=", "'0'", ",", "property_name", "=", "None", ")", ":", "thing", "=", "self", ".", "get_thing", "(", "thing_id", ")", "if", "thing", "is", "None", ":", "self", ".", "set_status", "(", "404", ")", "return", "try", ":", "args", "=", "json", ".", "loads", "(", "self", ".", "request", ".", "body", ".", "decode", "(", ")", ")", "except", "ValueError", ":", "self", ".", "set_status", "(", "400", ")", "return", "if", "property_name", "not", "in", "args", ":", "self", ".", "set_status", "(", "400", ")", "return", "if", "thing", ".", "has_property", "(", "property_name", ")", ":", "try", ":", "thing", ".", "set_property", "(", "property_name", ",", "args", "[", "property_name", "]", ")", "except", "PropertyError", ":", "self", ".", "set_status", "(", "400", ")", "return", "self", ".", "set_header", "(", "'Content-Type'", ",", "'application/json'", ")", "self", ".", "write", "(", "json", ".", "dumps", "(", "{", "property_name", ":", "thing", ".", "get_property", "(", "property_name", ")", ",", "}", ")", ")", "else", ":", "self", ".", "set_status", "(", "404", ")" ]
Handle a PUT request. thing_id -- ID of the thing this request is for property_name -- the name of the property from the URL path
[ "Handle", "a", "PUT", "request", "." ]
python
test
28.942857
tkem/uritools
uritools/split.py
https://github.com/tkem/uritools/blob/e77ba4acd937b68da9850138563debd4c925ef9f/uritools/split.py#L81-L99
def geturi(self):
    """Return the re-combined version of the original URI reference as a
    string.
    """
    scheme, authority, path, query, fragment = self

    # RFC 3986 5.3. Component Recomposition
    result = []
    if scheme is not None:
        result.extend([scheme, self.COLON])
    if authority is not None:
        result.extend([self.SLASH, self.SLASH, authority])
    result.append(path)
    if query is not None:
        result.extend([self.QUEST, query])
    if fragment is not None:
        result.extend([self.HASH, fragment])
    return self.EMPTY.join(result)
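The recomposition order is RFC 3986 section 5.3; a standalone sketch of the same algorithm over plain strings (illustrative only, not the uritools API):

def recompose(scheme, authority, path, query, fragment):
    # Standalone illustration of RFC 3986 5.3 component recomposition.
    result = []
    if scheme is not None:
        result.extend([scheme, ':'])
    if authority is not None:
        result.extend(['//', authority])
    result.append(path)
    if query is not None:
        result.extend(['?', query])
    if fragment is not None:
        result.extend(['#', fragment])
    return ''.join(result)

print(recompose('https', 'example.com', '/a/b', 'x=1', 'top'))
# -> https://example.com/a/b?x=1#top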
[ "def", "geturi", "(", "self", ")", ":", "scheme", ",", "authority", ",", "path", ",", "query", ",", "fragment", "=", "self", "# RFC 3986 5.3. Component Recomposition", "result", "=", "[", "]", "if", "scheme", "is", "not", "None", ":", "result", ".", "extend", "(", "[", "scheme", ",", "self", ".", "COLON", "]", ")", "if", "authority", "is", "not", "None", ":", "result", ".", "extend", "(", "[", "self", ".", "SLASH", ",", "self", ".", "SLASH", ",", "authority", "]", ")", "result", ".", "append", "(", "path", ")", "if", "query", "is", "not", "None", ":", "result", ".", "extend", "(", "[", "self", ".", "QUEST", ",", "query", "]", ")", "if", "fragment", "is", "not", "None", ":", "result", ".", "extend", "(", "[", "self", ".", "HASH", ",", "fragment", "]", ")", "return", "self", ".", "EMPTY", ".", "join", "(", "result", ")" ]
Return the re-combined version of the original URI reference as a string.
[ "Return", "the", "re", "-", "combined", "version", "of", "the", "original", "URI", "reference", "as", "a", "string", "." ]
python
train
33.263158
saltstack/salt
salt/modules/dpkg_lowpkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dpkg_lowpkg.py#L404-L432
def _get_pkg_ds_avail():
    '''
    Get the package information of the available packages, maintained by
    dselect. Note: this will not be very useful if dselect isn't installed.

    :return:
    '''
    avail = "/var/lib/dpkg/available"
    if not salt.utils.path.which('dselect') or not os.path.exists(avail):
        return dict()

    # Do not update with dselect, just read what is.
    ret = dict()
    pkg_mrk = "Package:"
    pkg_name = "package"
    with salt.utils.files.fopen(avail) as fp_:
        for pkg_info in salt.utils.stringutils.to_unicode(fp_.read()).split(pkg_mrk):
            nfo = dict()
            for line in (pkg_mrk + pkg_info).split(os.linesep):
                line = line.split(": ", 1)
                if len(line) != 2:
                    continue
                key, value = line
                if value.strip():
                    nfo[key.lower()] = value
            if nfo.get(pkg_name):
                ret[nfo[pkg_name]] = nfo

    return ret
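The stanza-splitting logic can be exercised on an in-memory string; a minimal sketch (sample data invented; note the original's use of os.linesep assumes the file uses the platform's line endings):

import os

SAMPLE = os.linesep.join([
    "Package: vim",
    "Version: 2:8.0-1",
    "",
    "Package: git",
    "Version: 1:2.20-1",
])

ret = {}
for pkg_info in SAMPLE.split("Package:"):
    nfo = {}
    for line in ("Package:" + pkg_info).split(os.linesep):
        parts = line.split(": ", 1)
        if len(parts) != 2:
            continue
        key, value = parts
        if value.strip():
            nfo[key.lower()] = value
    if nfo.get("package"):
        ret[nfo["package"]] = nfo

print(sorted(ret))  # -> ['git', 'vim']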
[ "def", "_get_pkg_ds_avail", "(", ")", ":", "avail", "=", "\"/var/lib/dpkg/available\"", "if", "not", "salt", ".", "utils", ".", "path", ".", "which", "(", "'dselect'", ")", "or", "not", "os", ".", "path", ".", "exists", "(", "avail", ")", ":", "return", "dict", "(", ")", "# Do not update with dselect, just read what is.", "ret", "=", "dict", "(", ")", "pkg_mrk", "=", "\"Package:\"", "pkg_name", "=", "\"package\"", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "avail", ")", "as", "fp_", ":", "for", "pkg_info", "in", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "fp_", ".", "read", "(", ")", ")", ".", "split", "(", "pkg_mrk", ")", ":", "nfo", "=", "dict", "(", ")", "for", "line", "in", "(", "pkg_mrk", "+", "pkg_info", ")", ".", "split", "(", "os", ".", "linesep", ")", ":", "line", "=", "line", ".", "split", "(", "\": \"", ",", "1", ")", "if", "len", "(", "line", ")", "!=", "2", ":", "continue", "key", ",", "value", "=", "line", "if", "value", ".", "strip", "(", ")", ":", "nfo", "[", "key", ".", "lower", "(", ")", "]", "=", "value", "if", "nfo", ".", "get", "(", "pkg_name", ")", ":", "ret", "[", "nfo", "[", "pkg_name", "]", "]", "=", "nfo", "return", "ret" ]
Get the package information of the available packages, maintained by dselect. Note, this will be not very useful, if dselect isn't installed. :return:
[ "Get", "the", "package", "information", "of", "the", "available", "packages", "maintained", "by", "dselect", ".", "Note", "this", "will", "be", "not", "very", "useful", "if", "dselect", "isn", "t", "installed", "." ]
python
train
33.241379
sdss/sdss_access
python/sdss_access/path/path.py
https://github.com/sdss/sdss_access/blob/76375bbf37d39d2e4ccbed90bdfa9a4298784470/python/sdss_access/path/path.py#L453-L485
def refine(self, filelist, regex, filterdir='out', **kwargs):
    ''' Returns a list of files filtered by a regular expression

    Parameters
    ----------
    filelist : list
        A list of files to filter on.
    regex : str
        The regular expression string to filter your list
    filterdir : {'in', 'out'}
        Indicates the filter to be inclusive or exclusive
        'out' removes the items satisfying the regular expression
        'in' keeps the items satisfying the regular expression

    Returns
    -------
    refine : list
        A file list refined by an input regular expression.
    '''
    assert filelist, 'Must provide a list of filenames to refine on'
    assert regex, 'Must provide a regular expression to refine the file list'
    r = re.compile(regex)

    # check filter direction; default is out
    assert filterdir in ['in', 'out'], 'Filter direction must be either "in" or "out"'
    if filterdir == 'out':
        subset = list(filter(lambda i: r.search(i), filelist))
    elif filterdir == 'in':
        subset = list(filter(lambda i: not r.search(i), filelist))

    return subset
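A standalone sketch of the two filter directions (file names invented):

import re

files = ['spec-1.fits', 'spec-2.fits', 'README.txt']
r = re.compile(r'\.fits$')

matching = list(filter(lambda i: r.search(i), files))         # ['spec-1.fits', 'spec-2.fits']
non_matching = list(filter(lambda i: not r.search(i), files)) # ['README.txt']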
[ "def", "refine", "(", "self", ",", "filelist", ",", "regex", ",", "filterdir", "=", "'out'", ",", "*", "*", "kwargs", ")", ":", "assert", "filelist", ",", "'Must provide a list of filenames to refine on'", "assert", "regex", ",", "'Must provide a regular expression to refine the file list'", "r", "=", "re", ".", "compile", "(", "regex", ")", "# icheck filter direction; default is out", "assert", "filterdir", "in", "[", "'in'", ",", "'out'", "]", ",", "'Filter direction must be either \"in\" or \"out\"'", "if", "filterdir", "==", "'out'", ":", "subset", "=", "list", "(", "filter", "(", "lambda", "i", ":", "r", ".", "search", "(", "i", ")", ",", "filelist", ")", ")", "elif", "filterdir", "==", "'in'", ":", "subset", "=", "list", "(", "filter", "(", "lambda", "i", ":", "not", "r", ".", "search", "(", "i", ")", ",", "filelist", ")", ")", "return", "subset" ]
Returns a list of files filterd by a regular expression Parameters ---------- filelist : list A list of files to filter on. regex : str The regular expression string to filter your list filterdir: {'in', 'out'} Indicates the filter to be inclusive or exclusive 'out' removes the items satisfying the regular expression 'in' keeps the items satisfying the regular expression Returns ------- refine : list A file list refined by an input regular expression.
[ "Returns", "a", "list", "of", "files", "filterd", "by", "a", "regular", "expression" ]
python
train
36.363636
pydanny/dj-webhooks
djwebhooks/senders/redisq.py
https://github.com/pydanny/dj-webhooks/blob/88e245bfe2020e96279af261d88bf8469ba469e5/djwebhooks/senders/redisq.py#L29-L82
def worker(wrapped, dkwargs, hash_value=None, *args, **kwargs):
    """
    This is an asynchronous sender callable that uses the Django ORM to store
    webhooks. Redis is used to handle the message queue.

    dkwargs argument requires the following key/values:

        :event: A string representing an event.

    kwargs argument requires the following key/values:

        :owner: The user who created/owns the event
    """
    if "event" not in dkwargs:
        msg = "djwebhooks.decorators.redis_hook requires an 'event' argument in the decorator."
        raise TypeError(msg)
    event = dkwargs['event']

    if "owner" not in kwargs:
        msg = "djwebhooks.senders.redis_callable requires an 'owner' argument in the decorated function."
        raise TypeError(msg)
    owner = kwargs['owner']

    if "identifier" not in kwargs:
        msg = "djwebhooks.senders.orm_callable requires an 'identifier' argument in the decorated function."
        raise TypeError(msg)
    identifier = kwargs['identifier']

    senderobj = DjangoRQSenderable(
        wrapped, dkwargs, hash_value, WEBHOOK_ATTEMPTS, *args, **kwargs
    )

    # Add the webhook object just so it's around
    # TODO - error handling if this can't be found
    senderobj.webhook_target = WebhookTarget.objects.get(
        event=event,
        owner=owner,
        identifier=identifier
    )

    # Get the target url and add it
    senderobj.url = senderobj.webhook_target.target_url

    # Get the payload. This overrides the senderobj.payload property.
    senderobj.payload = senderobj.get_payload()

    # Get the creator and add it to the payload.
    senderobj.payload['owner'] = getattr(kwargs['owner'], WEBHOOK_OWNER_FIELD)

    # get the event and add it to the payload
    senderobj.payload['event'] = dkwargs['event']

    return senderobj.send()
[ "def", "worker", "(", "wrapped", ",", "dkwargs", ",", "hash_value", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "\"event\"", "not", "in", "dkwargs", ":", "msg", "=", "\"djwebhooks.decorators.redis_hook requires an 'event' argument in the decorator.\"", "raise", "TypeError", "(", "msg", ")", "event", "=", "dkwargs", "[", "'event'", "]", "if", "\"owner\"", "not", "in", "kwargs", ":", "msg", "=", "\"djwebhooks.senders.redis_callable requires an 'owner' argument in the decorated function.\"", "raise", "TypeError", "(", "msg", ")", "owner", "=", "kwargs", "[", "'owner'", "]", "if", "\"identifier\"", "not", "in", "kwargs", ":", "msg", "=", "\"djwebhooks.senders.orm_callable requires an 'identifier' argument in the decorated function.\"", "raise", "TypeError", "(", "msg", ")", "identifier", "=", "kwargs", "[", "'identifier'", "]", "senderobj", "=", "DjangoRQSenderable", "(", "wrapped", ",", "dkwargs", ",", "hash_value", ",", "WEBHOOK_ATTEMPTS", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# Add the webhook object just so it's around", "# TODO - error handling if this can't be found", "senderobj", ".", "webhook_target", "=", "WebhookTarget", ".", "objects", ".", "get", "(", "event", "=", "event", ",", "owner", "=", "owner", ",", "identifier", "=", "identifier", ")", "# Get the target url and add it", "senderobj", ".", "url", "=", "senderobj", ".", "webhook_target", ".", "target_url", "# Get the payload. This overides the senderobj.payload property.", "senderobj", ".", "payload", "=", "senderobj", ".", "get_payload", "(", ")", "# Get the creator and add it to the payload.", "senderobj", ".", "payload", "[", "'owner'", "]", "=", "getattr", "(", "kwargs", "[", "'owner'", "]", ",", "WEBHOOK_OWNER_FIELD", ")", "# get the event and add it to the payload", "senderobj", ".", "payload", "[", "'event'", "]", "=", "dkwargs", "[", "'event'", "]", "return", "senderobj", ".", "send", "(", ")" ]
This is an asynchronous sender callable that uses the Django ORM to store webhooks. Redis is used to handle the message queue. dkwargs argument requires the following key/values: :event: A string representing an event. kwargs argument requires the following key/values :owner: The user who created/owns the event
[ "This", "is", "an", "asynchronous", "sender", "callable", "that", "uses", "the", "Django", "ORM", "to", "store", "webhooks", ".", "Redis", "is", "used", "to", "handle", "the", "message", "queue", "." ]
python
valid
33.648148
dsoprea/PySchedules
pyschedules/xml_callbacks.py
https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/xml_callbacks.py#L357-L361
def error(self, msg):
    """Callback run when a recoverable parsing error occurs"""
    self._error = True
    self._progress.printMsg('XML parse error: %s' % msg, error=True)
[ "def", "error", "(", "self", ",", "msg", ")", ":", "self", ".", "_error", "=", "True", "self", ".", "_progress", ".", "printMsg", "(", "'XML parse error: %s'", "%", "msg", ",", "error", "=", "True", ")" ]
Callback run when a recoverable parsing error occurs
[ "Callback", "run", "when", "a", "recoverable", "parsing", "error", "occurs" ]
python
train
37
zhanglab/psamm
psamm/util.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/util.py#L196-L213
def git_try_describe(repo_path):
    """Try to describe the current commit of a Git repository.

    Return a string with the commit ID and/or a base tag, if successful.
    Otherwise, return None.
    """
    try:
        p = subprocess.Popen(['git', 'describe', '--always', '--dirty'],
                             cwd=repo_path, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        output, _ = p.communicate()
    except:
        return None
    else:
        if p.returncode == 0:
            return output.strip()

    return None
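Usage is a single call; a small sketch (the repository path is illustrative, and on Python 3 the pipe output would be bytes):

description = git_try_describe('/path/to/repo')
if description is not None:
    print('Current commit:', description)   # e.g. b'v1.2-3-gabc1234' on Python 3
else:
    print('Not a git repository, or git describe failed')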
[ "def", "git_try_describe", "(", "repo_path", ")", ":", "try", ":", "p", "=", "subprocess", ".", "Popen", "(", "[", "'git'", ",", "'describe'", ",", "'--always'", ",", "'--dirty'", "]", ",", "cwd", "=", "repo_path", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "output", ",", "_", "=", "p", ".", "communicate", "(", ")", "except", ":", "return", "None", "else", ":", "if", "p", ".", "returncode", "==", "0", ":", "return", "output", ".", "strip", "(", ")", "return", "None" ]
Try to describe the current commit of a Git repository. Return a string containing a string with the commit ID and/or a base tag, if successful. Otherwise, return None.
[ "Try", "to", "describe", "the", "current", "commit", "of", "a", "Git", "repository", "." ]
python
train
31.666667
PythonCharmers/python-future
src/future/backports/email/_header_value_parser.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/_header_value_parser.py#L2318-L2370
def get_address_list(value):
    """ address_list = (address *("," address)) / obs-addr-list
        obs-addr-list = *([CFWS] ",") address *("," [address / CFWS])

    We depart from the formal grammar here by continuing to parse until the
    end of the input, assuming the input to be entirely composed of an
    address-list. This is always true in email parsing, and allows us to skip
    invalid addresses to parse additional valid ones.
    """
    address_list = AddressList()
    while value:
        try:
            token, value = get_address(value)
            address_list.append(token)
        except errors.HeaderParseError as err:
            leader = None
            if value[0] in CFWS_LEADER:
                leader, value = get_cfws(value)
                if not value or value[0] == ',':
                    address_list.append(leader)
                    address_list.defects.append(errors.ObsoleteHeaderDefect(
                        "address-list entry with no content"))
                else:
                    token, value = get_invalid_mailbox(value, ',')
                    if leader is not None:
                        token[:0] = [leader]
                    address_list.append(Address([token]))
                    address_list.defects.append(errors.InvalidHeaderDefect(
                        "invalid address in address-list"))
            elif value[0] == ',':
                address_list.defects.append(errors.ObsoleteHeaderDefect(
                    "empty element in address-list"))
            else:
                token, value = get_invalid_mailbox(value, ',')
                if leader is not None:
                    token[:0] = [leader]
                address_list.append(Address([token]))
                address_list.defects.append(errors.InvalidHeaderDefect(
                    "invalid address in address-list"))
        if value and value[0] != ',':
            # Crap after address; treat it as an invalid mailbox.
            # The mailbox info will still be available.
            mailbox = address_list[-1][0]
            mailbox.token_type = 'invalid-mailbox'
            token, value = get_invalid_mailbox(value, ',')
            mailbox.extend(token)
            address_list.defects.append(errors.InvalidHeaderDefect(
                "invalid address in address-list"))
        if value:  # Must be a , at this point.
            address_list.append(ValueTerminal(',', 'list-separator'))
            value = value[1:]
    return address_list, value
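A hedged sketch of direct use (this is an internal parser, so calling it directly is illustrative rather than documented API):

# Parse a simple two-address list; the parser consumes the whole input.
address_list, remainder = get_address_list('foo@example.com, bar@example.com')
print(address_list.token_type)   # 'address-list'
print(remainder)                 # '' (nothing left to parse)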
[ "def", "get_address_list", "(", "value", ")", ":", "address_list", "=", "AddressList", "(", ")", "while", "value", ":", "try", ":", "token", ",", "value", "=", "get_address", "(", "value", ")", "address_list", ".", "append", "(", "token", ")", "except", "errors", ".", "HeaderParseError", "as", "err", ":", "leader", "=", "None", "if", "value", "[", "0", "]", "in", "CFWS_LEADER", ":", "leader", ",", "value", "=", "get_cfws", "(", "value", ")", "if", "not", "value", "or", "value", "[", "0", "]", "==", "','", ":", "address_list", ".", "append", "(", "leader", ")", "address_list", ".", "defects", ".", "append", "(", "errors", ".", "ObsoleteHeaderDefect", "(", "\"address-list entry with no content\"", ")", ")", "else", ":", "token", ",", "value", "=", "get_invalid_mailbox", "(", "value", ",", "','", ")", "if", "leader", "is", "not", "None", ":", "token", "[", ":", "0", "]", "=", "[", "leader", "]", "address_list", ".", "append", "(", "Address", "(", "[", "token", "]", ")", ")", "address_list", ".", "defects", ".", "append", "(", "errors", ".", "InvalidHeaderDefect", "(", "\"invalid address in address-list\"", ")", ")", "elif", "value", "[", "0", "]", "==", "','", ":", "address_list", ".", "defects", ".", "append", "(", "errors", ".", "ObsoleteHeaderDefect", "(", "\"empty element in address-list\"", ")", ")", "else", ":", "token", ",", "value", "=", "get_invalid_mailbox", "(", "value", ",", "','", ")", "if", "leader", "is", "not", "None", ":", "token", "[", ":", "0", "]", "=", "[", "leader", "]", "address_list", ".", "append", "(", "Address", "(", "[", "token", "]", ")", ")", "address_list", ".", "defects", ".", "append", "(", "errors", ".", "InvalidHeaderDefect", "(", "\"invalid address in address-list\"", ")", ")", "if", "value", "and", "value", "[", "0", "]", "!=", "','", ":", "# Crap after address; treat it as an invalid mailbox.", "# The mailbox info will still be available.", "mailbox", "=", "address_list", "[", "-", "1", "]", "[", "0", "]", "mailbox", ".", "token_type", "=", "'invalid-mailbox'", "token", ",", "value", "=", "get_invalid_mailbox", "(", "value", ",", "','", ")", "mailbox", ".", "extend", "(", "token", ")", "address_list", ".", "defects", ".", "append", "(", "errors", ".", "InvalidHeaderDefect", "(", "\"invalid address in address-list\"", ")", ")", "if", "value", ":", "# Must be a , at this point.", "address_list", ".", "append", "(", "ValueTerminal", "(", "','", ",", "'list-separator'", ")", ")", "value", "=", "value", "[", "1", ":", "]", "return", "address_list", ",", "value" ]
address_list = (address *("," address)) / obs-addr-list obs-addr-list = *([CFWS] ",") address *("," [address / CFWS]) We depart from the formal grammar here by continuing to parse until the end of the input, assuming the input to be entirely composed of an address-list. This is always true in email parsing, and allows us to skip invalid addresses to parse additional valid ones.
[ "address_list", "=", "(", "address", "*", "(", "address", "))", "/", "obs", "-", "addr", "-", "list", "obs", "-", "addr", "-", "list", "=", "*", "(", "[", "CFWS", "]", ")", "address", "*", "(", "[", "address", "/", "CFWS", "]", ")" ]
python
train
46.528302
Kozea/cairocffi
cairocffi/context.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/context.py#L999-L1023
def rectangle(self, x, y, width, height):
    """Adds a closed sub-path rectangle of the given size to the current path
    at position ``(x, y)`` in user-space coordinates.

    This method is logically equivalent to::

        context.move_to(x, y)
        context.rel_line_to(width, 0)
        context.rel_line_to(0, height)
        context.rel_line_to(-width, 0)
        context.close_path()

    :param x: The X coordinate of the top left corner of the rectangle.
    :param y: The Y coordinate of the top left corner of the rectangle.
    :param width: Width of the rectangle.
    :param height: Height of the rectangle.
    :type x: float
    :type y: float
    :type width: float
    :type height: float
    """
    cairo.cairo_rectangle(self._pointer, x, y, width, height)
    self._check_status()
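A small end-to-end sketch with cairocffi (surface size, colors, and output filename are illustrative):

import cairocffi as cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 200, 120)
context = cairo.Context(surface)
context.rectangle(10, 10, 100, 50)     # x, y, width, height in user space
context.set_source_rgb(0.2, 0.4, 0.8)
context.fill()
surface.write_to_png('rectangle.png')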
[ "def", "rectangle", "(", "self", ",", "x", ",", "y", ",", "width", ",", "height", ")", ":", "cairo", ".", "cairo_rectangle", "(", "self", ".", "_pointer", ",", "x", ",", "y", ",", "width", ",", "height", ")", "self", ".", "_check_status", "(", ")" ]
Adds a closed sub-path rectangle of the given size to the current path at position ``(x, y)`` in user-space coordinates. This method is logically equivalent to:: context.move_to(x, y) context.rel_line_to(width, 0) context.rel_line_to(0, height) context.rel_line_to(-width, 0) context.close_path() :param x: The X coordinate of the top left corner of the rectangle. :param y: The Y coordinate of the top left corner of the rectangle. :param width: Width of the rectangle. :param height: Height of the rectangle. :type float: x :type float: y :type float: width :type float: heigth
[ "Adds", "a", "closed", "sub", "-", "path", "rectangle", "of", "the", "given", "size", "to", "the", "current", "path", "at", "position", "(", "x", "y", ")", "in", "user", "-", "space", "coordinates", "." ]
python
train
34.68
EricCrosson/stump
stump/stump.py
https://github.com/EricCrosson/stump/blob/eb4d9f0dbe2642f86d47ca1b5f51fb7801bb09ab/stump/stump.py#L58-L80
def pre(f, *args, **kwargs):
    """Automatically log progress on function entry. Default logging value: info.

    *Logging with values contained in the parameters of the decorated function*
    Message (args[0]) may be a string to be formatted with parameters passed
    to the decorated function. Each '{varname}' will be replaced by the value
    of the parameter of the same name.

    *Keyword parameters*
    - log :: integer
        - Specifies a custom level of logging to pass to the active logger.
        - Default: INFO

    *Exceptions:*
    - IndexError and ValueError
        - will be returned if *args contains a string that does not
          correspond to a parameter name of the decorated function, or if
          there are more '{}'s than there are *args.
    """
    kwargs.update({'prefix_only': True})
    return _stump(f, *args, **kwargs)
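A hedged usage sketch (the exact decorator invocation is an assumption based on the docstring's '{varname}' formatting rule, not shown in this record):

import stump  # assumed import; this module is stump/stump.py

@stump.pre("opening {filename}")
def load(filename):
    return open(filename).read()

# load('data.txt') would first log a message like "opening data.txt"
# at the INFO level before the function body runs.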
[ "def", "pre", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "{", "'prefix_only'", ":", "True", "}", ")", "return", "_stump", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Automatically log progress on function entry. Default logging value: info. *Logging with values contained in the parameters of the decorated function* Message (args[0]) may be a string to be formatted with parameters passed to the decorated function. Each '{varname}' will be replaced by the value of the parameter of the same name. *Keyword parameters* - log :: integer - Specifies a custom level of logging to pass to the active logger. - Default: INFO *Exceptions:* - IndexError and ValueError - will be returned if *args contains a string that does not correspond to a parameter name of the decorated function, or if there are more '{}'s than there are *args.
[ "Automatically", "log", "progress", "on", "function", "entry", ".", "Default", "logging", "value", ":", "info", "." ]
python
train
36.434783
tBuLi/symfit
symfit/core/fit.py
https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L1031-L1040
def dependent_data(self):
    """
    Read-only Property

    :return: Data belonging to each dependent variable as a dict with
             variable names as key, data as value.
    :rtype: collections.OrderedDict
    """
    return OrderedDict((var, self.data[var])
                       for var in self.model.dependent_vars)
[ "def", "dependent_data", "(", "self", ")", ":", "return", "OrderedDict", "(", "(", "var", ",", "self", ".", "data", "[", "var", "]", ")", "for", "var", "in", "self", ".", "model", ".", "dependent_vars", ")" ]
Read-only Property :return: Data belonging to each dependent variable as a dict with variable names as key, data as value. :rtype: collections.OrderedDict
[ "Read", "-", "only", "Property" ]
python
train
35.1
CivicSpleen/ambry
ambry/orm/partition.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/partition.py#L420-L434
def detail_dict(self):
    """A more detailed dict that includes the descriptions, sub descriptions,
    table and columns."""

    d = self.dict

    def aug_col(c):
        d = c.dict
        d['stats'] = [s.dict for s in c.stats]
        return d

    d['table'] = self.table.dict
    d['table']['columns'] = [aug_col(c) for c in self.table.columns]

    return d
[ "def", "detail_dict", "(", "self", ")", ":", "d", "=", "self", ".", "dict", "def", "aug_col", "(", "c", ")", ":", "d", "=", "c", ".", "dict", "d", "[", "'stats'", "]", "=", "[", "s", ".", "dict", "for", "s", "in", "c", ".", "stats", "]", "return", "d", "d", "[", "'table'", "]", "=", "self", ".", "table", ".", "dict", "d", "[", "'table'", "]", "[", "'columns'", "]", "=", "[", "aug_col", "(", "c", ")", "for", "c", "in", "self", ".", "table", ".", "columns", "]", "return", "d" ]
A more detailed dict that includes the descriptions, sub descriptions, table and columns.
[ "A", "more", "detailed", "dict", "that", "includes", "the", "descriptions", "sub", "descriptions", "table", "and", "columns", "." ]
python
train
26.133333
modin-project/modin
modin/engines/base/frame/axis_partition.py
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/engines/base/frame/axis_partition.py#L213-L236
def deploy_func_between_two_axis_partitions( cls, axis, func, num_splits, len_of_left, kwargs, *partitions ): """Deploy a function along a full axis between two data sets in Ray. Args: axis: The axis to perform the function along. func: The function to perform. num_splits: The number of splits to return (see `split_result_of_axis_func_pandas`). len_of_left: The number of values in `partitions` that belong to the left data set. kwargs: A dictionary of keyword arguments. partitions: All partitions that make up the full axis (row or column) for both data sets. Returns: A list of Pandas DataFrames. """ lt_frame = pandas.concat(list(partitions[:len_of_left]), axis=axis, copy=False) rt_frame = pandas.concat(list(partitions[len_of_left:]), axis=axis, copy=False) result = func(lt_frame, rt_frame, **kwargs) return split_result_of_axis_func_pandas(axis, num_splits, result)
[ "def", "deploy_func_between_two_axis_partitions", "(", "cls", ",", "axis", ",", "func", ",", "num_splits", ",", "len_of_left", ",", "kwargs", ",", "*", "partitions", ")", ":", "lt_frame", "=", "pandas", ".", "concat", "(", "list", "(", "partitions", "[", ":", "len_of_left", "]", ")", ",", "axis", "=", "axis", ",", "copy", "=", "False", ")", "rt_frame", "=", "pandas", ".", "concat", "(", "list", "(", "partitions", "[", "len_of_left", ":", "]", ")", ",", "axis", "=", "axis", ",", "copy", "=", "False", ")", "result", "=", "func", "(", "lt_frame", ",", "rt_frame", ",", "*", "*", "kwargs", ")", "return", "split_result_of_axis_func_pandas", "(", "axis", ",", "num_splits", ",", "result", ")" ]
Deploy a function along a full axis between two data sets in Ray. Args: axis: The axis to perform the function along. func: The function to perform. num_splits: The number of splits to return (see `split_result_of_axis_func_pandas`). len_of_left: The number of values in `partitions` that belong to the left data set. kwargs: A dictionary of keyword arguments. partitions: All partitions that make up the full axis (row or column) for both data sets. Returns: A list of Pandas DataFrames.
[ "Deploy", "a", "function", "along", "a", "full", "axis", "between", "two", "data", "sets", "in", "Ray", "." ]
python
train
44.291667
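The concat-then-apply pattern in `deploy_func_between_two_axis_partitions` can be exercised standalone; a small sketch with illustrative partitions and a plain addition standing in for `func` (this is not Modin's actual deployment path):

import pandas

left = [pandas.DataFrame({"a": [1, 2]}), pandas.DataFrame({"a": [3]})]
right = [pandas.DataFrame({"a": [10, 20]}), pandas.DataFrame({"a": [30]})]
# Rebuild each full axis from its partitions, as the method does.
lt_frame = pandas.concat(left, axis=0, copy=False).reset_index(drop=True)
rt_frame = pandas.concat(right, axis=0, copy=False).reset_index(drop=True)
result = lt_frame + rt_frame  # stand-in for func(lt_frame, rt_frame, **kwargs)
print(result)  # column a -> 11, 22, 33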
JensRantil/rewind
rewind/server/eventstores.py
https://github.com/JensRantil/rewind/blob/7f645d20186c1db55cfe53a0310c9fd6292f91ea/rewind/server/eventstores.py#L656-L670
def key_exists(self, key): """Check if key has previously been added to this store. This function makes a linear search through the log file and is very slow. Returns True if the event has previously been added, False otherwise. """ assert isinstance(key, str) self._close() try: return self._unsafe_key_exists(key) finally: self._open()
[ "def", "key_exists", "(", "self", ",", "key", ")", ":", "assert", "isinstance", "(", "key", ",", "str", ")", "self", ".", "_close", "(", ")", "try", ":", "return", "self", ".", "_unsafe_key_exists", "(", "key", ")", "finally", ":", "self", ".", "_open", "(", ")" ]
Check if key has previously been added to this store. This function makes a linear search through the log file and is very slow. Returns True if the event has previously been added, False otherwise.
[ "Check", "if", "key", "has", "previously", "been", "added", "to", "this", "store", "." ]
python
train
28.133333
StanfordVL/robosuite
robosuite/devices/spacemouse.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/devices/spacemouse.py#L138-L153
def get_controller_state(self): """Returns the current state of the 3d mouse, a dictionary of pos, orn, grasp, and reset.""" dpos = self.control[:3] * 0.005 roll, pitch, yaw = self.control[3:] * 0.005 self.grasp = self.control_gripper # convert RPY to an absolute orientation drot1 = rotation_matrix(angle=-pitch, direction=[1., 0, 0], point=None)[:3, :3] drot2 = rotation_matrix(angle=roll, direction=[0, 1., 0], point=None)[:3, :3] drot3 = rotation_matrix(angle=yaw, direction=[0, 0, 1.], point=None)[:3, :3] self.rotation = self.rotation.dot(drot1.dot(drot2.dot(drot3))) return dict( dpos=dpos, rotation=self.rotation, grasp=self.grasp, reset=self._reset_state )
[ "def", "get_controller_state", "(", "self", ")", ":", "dpos", "=", "self", ".", "control", "[", ":", "3", "]", "*", "0.005", "roll", ",", "pitch", ",", "yaw", "=", "self", ".", "control", "[", "3", ":", "]", "*", "0.005", "self", ".", "grasp", "=", "self", ".", "control_gripper", "# convert RPY to an absolute orientation", "drot1", "=", "rotation_matrix", "(", "angle", "=", "-", "pitch", ",", "direction", "=", "[", "1.", ",", "0", ",", "0", "]", ",", "point", "=", "None", ")", "[", ":", "3", ",", ":", "3", "]", "drot2", "=", "rotation_matrix", "(", "angle", "=", "roll", ",", "direction", "=", "[", "0", ",", "1.", ",", "0", "]", ",", "point", "=", "None", ")", "[", ":", "3", ",", ":", "3", "]", "drot3", "=", "rotation_matrix", "(", "angle", "=", "yaw", ",", "direction", "=", "[", "0", ",", "0", ",", "1.", "]", ",", "point", "=", "None", ")", "[", ":", "3", ",", ":", "3", "]", "self", ".", "rotation", "=", "self", ".", "rotation", ".", "dot", "(", "drot1", ".", "dot", "(", "drot2", ".", "dot", "(", "drot3", ")", ")", ")", "return", "dict", "(", "dpos", "=", "dpos", ",", "rotation", "=", "self", ".", "rotation", ",", "grasp", "=", "self", ".", "grasp", ",", "reset", "=", "self", ".", "_reset_state", ")" ]
Returns the current state of the 3d mouse, a dictionary of pos, orn, grasp, and reset.
[ "Returns", "the", "current", "state", "of", "the", "3d", "mouse", "a", "dictionary", "of", "pos", "orn", "grasp", "and", "reset", "." ]
python
train
47.0625
wummel/linkchecker
linkcheck/htmlutil/linkparse.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/htmlutil/linkparse.py#L164-L174
def is_meta_url (attr, attrs): """Check if the meta attributes contain a URL.""" res = False if attr == "content": equiv = attrs.get_true('http-equiv', u'').lower() scheme = attrs.get_true('scheme', u'').lower() res = equiv in (u'refresh',) or scheme in (u'dcterms.uri',) if attr == "href": rel = attrs.get_true('rel', u'').lower() res = rel in (u'shortcut icon', u'icon') return res
[ "def", "is_meta_url", "(", "attr", ",", "attrs", ")", ":", "res", "=", "False", "if", "attr", "==", "\"content\"", ":", "equiv", "=", "attrs", ".", "get_true", "(", "'http-equiv'", ",", "u''", ")", ".", "lower", "(", ")", "scheme", "=", "attrs", ".", "get_true", "(", "'scheme'", ",", "u''", ")", ".", "lower", "(", ")", "res", "=", "equiv", "in", "(", "u'refresh'", ",", ")", "or", "scheme", "in", "(", "u'dcterms.uri'", ",", ")", "if", "attr", "==", "\"href\"", ":", "rel", "=", "attrs", ".", "get_true", "(", "'rel'", ",", "u''", ")", ".", "lower", "(", ")", "res", "=", "rel", "in", "(", "u'shortcut icon'", ",", "u'icon'", ")", "return", "res" ]
Check if the meta attributes contain a URL.
[ "Check", "if", "the", "meta", "attributes", "contain", "a", "URL", "." ]
python
train
39.363636
ska-sa/katcp-python
katcp/sensortree.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/sensortree.py#L520-L534
def recalculate(self, parent, updates):
        """Re-calculate the value of parent sensor.

        Parent's value is calculated by calling the associated
        aggregation rule.

        Parameters
        ----------
        parent : :class:`katcp.Sensor` object
            The sensor that needs to be updated.
        updates : sequence of :class:`katcp.Sensor` objects
            The child sensors which triggered the update.

        """
        rule_function, children = self._aggregates[parent]
        rule_function(parent, children)
[ "def", "recalculate", "(", "self", ",", "parent", ",", "updates", ")", ":", "rule_function", ",", "children", "=", "self", ".", "_aggregates", "[", "parent", "]", "rule_function", "(", "parent", ",", "children", ")" ]
Re-calculate the value of parent sensor.

Parent's value is calculated by calling the associated
aggregation rule.

Parameters
----------
parent : :class:`katcp.Sensor` object
    The sensor that needs to be updated.
updates : sequence of :class:`katcp.Sensor` objects
    The child sensors which triggered the update.
[ "Re", "-", "calculate", "the", "value", "of", "parent", "sensor", "." ]
python
train
34.8
sibirrer/lenstronomy
lenstronomy/LensModel/Profiles/spp.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/spp.py#L188-L199
def density(self, r, rho0, gamma):
        """
        computes the density

        :param r: radius
        :param rho0: density normalization
        :param gamma: power-law slope of the profile
        :return: density at radius r
        """
        rho = rho0 / r**gamma
        return rho
[ "def", "density", "(", "self", ",", "r", ",", "rho0", ",", "gamma", ")", ":", "rho", "=", "rho0", "/", "r", "**", "gamma", "return", "rho" ]
computes the density

:param r: radius
:param rho0: density normalization
:param gamma: power-law slope of the profile
:return: density at radius r
[ "computes", "the", "density", ":", "param", "x", ":", ":", "param", "y", ":", ":", "param", "rho0", ":", ":", "param", "a", ":", ":", "param", "s", ":", ":", "return", ":" ]
python
train
19.583333
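Since rho(r) = rho0 / r**gamma is a plain power law, the profile is easy to check numerically (values illustrative):

rho0, gamma = 1.0, 2.0
for r in (0.5, 1.0, 2.0):
    print(r, rho0 / r**gamma)
# -> (0.5, 4.0), (1.0, 1.0), (2.0, 0.25)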
googleapis/google-cloud-python
trace/google/cloud/trace/_gapic.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/trace/google/cloud/trace/_gapic.py#L51-L90
def batch_write_spans(
        self, name, spans, retry=method.DEFAULT, timeout=method.DEFAULT
    ):
        """
        Sends new spans to Stackdriver Trace or updates existing traces. If the
        name of a trace that you send matches that of an existing trace, new
        spans are added to the existing trace. Attempting to update existing
        spans results in undefined behavior. If the name does not match, a new
        trace is created with given set of spans.

        Args:
            name (str): Required. Name of the project where the spans belong.
                The format is ``projects/PROJECT_ID``.
            spans (list[Union[dict, ~google.cloud.trace_v2.types.Span]]): A
                collection of spans. If a dict is provided, it must be of the
                same form as the protobuf message
                :class:`~google.cloud.trace_v2.types.Span`
            retry (Optional[google.api_core.retry.Retry]): A retry object used
                to retry requests. If ``None`` is specified, requests will not
                be retried.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        spans_pb_list = []
        for span_mapping in spans["spans"]:
            span_pb = _dict_mapping_to_pb(span_mapping, "Span")
            spans_pb_list.append(span_pb)

        self._gapic_api.batch_write_spans(
            name=name, spans=spans_pb_list, retry=retry, timeout=timeout
        )
[ "def", "batch_write_spans", "(", "self", ",", "name", ",", "spans", ",", "retry", "=", "method", ".", "DEFAULT", ",", "timeout", "=", "method", ".", "DEFAULT", ")", ":", "spans_pb_list", "=", "[", "]", "for", "span_mapping", "in", "spans", "[", "\"spans\"", "]", ":", "span_pb", "=", "_dict_mapping_to_pb", "(", "span_mapping", ",", "\"Span\"", ")", "spans_pb_list", ".", "append", "(", "span_pb", ")", "self", ".", "_gapic_api", ".", "batch_write_spans", "(", "name", "=", "name", ",", "spans", "=", "spans_pb_list", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ")" ]
Sends new spans to Stackdriver Trace or updates existing traces. If the
name of a trace that you send matches that of an existing trace, new
spans are added to the existing trace. Attempting to update existing
spans results in undefined behavior. If the name does not match, a new
trace is created with given set of spans.

Args:
    name (str): Required. Name of the project where the spans belong.
        The format is ``projects/PROJECT_ID``.
    spans (list[Union[dict, ~google.cloud.trace_v2.types.Span]]): A
        collection of spans. If a dict is provided, it must be of the
        same form as the protobuf message
        :class:`~google.cloud.trace_v2.types.Span`
    retry (Optional[google.api_core.retry.Retry]): A retry object used
        to retry requests. If ``None`` is specified, requests will not
        be retried.
    timeout (Optional[float]): The amount of time, in seconds, to wait
        for the request to complete. Note that if ``retry`` is
        specified, the timeout applies to each individual attempt.

Raises:
    google.api_core.exceptions.GoogleAPICallError: If the request
        failed for any reason.
    google.api_core.exceptions.RetryError: If the request failed due
        to a retryable error and retry attempts failed.
    ValueError: If the parameters are invalid.
[ "Sends", "new", "spans", "to", "Stackdriver", "Trace", "or", "updates", "existing", "traces", ".", "If", "the", "name", "of", "a", "trace", "that", "you", "send", "matches", "that", "of", "an", "existing", "trace", "new", "spans", "are", "added", "to", "the", "existing", "trace", ".", "Attempt", "to", "update", "existing", "spans", "results", "undefined", "behavior", ".", "If", "the", "name", "does", "not", "match", "a", "new", "trace", "is", "created", "with", "given", "set", "of", "spans", "." ]
python
train
47.625
globocom/GloboNetworkAPI-client-python
networkapiclient/Vlan.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Vlan.py#L364-L414
def edit_vlan( self, environment_id, name, number, description, acl_file, acl_file_v6, id_vlan): """Edit a VLAN :param id_vlan: ID for Vlan :param environment_id: ID for Environment. :param name: The name of VLAN. :param description: Some description to VLAN. :param number: Number of Vlan :param acl_file: Acl IPv4 File name to VLAN. :param acl_file_v6: Acl IPv6 File name to VLAN. :return: None :raise VlanError: VLAN name already exists, DC division of the environment invalid or there is no VLAN number available. :raise VlanNaoExisteError: VLAN not found. :raise AmbienteNaoExisteError: Environment not registered. :raise InvalidParameterError: Name of Vlan and/or the identifier of the Environment is null or invalid. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response. """ if not is_valid_int_param(id_vlan): raise InvalidParameterError( u'Vlan id is invalid or was not informed.') if not is_valid_int_param(environment_id): raise InvalidParameterError(u'Environment id is none or invalid.') if not is_valid_int_param(number): raise InvalidParameterError(u'Vlan number is none or invalid') vlan_map = dict() vlan_map['vlan_id'] = id_vlan vlan_map['environment_id'] = environment_id vlan_map['name'] = name vlan_map['description'] = description vlan_map['acl_file'] = acl_file vlan_map['acl_file_v6'] = acl_file_v6 vlan_map['number'] = number code, xml = self.submit({'vlan': vlan_map}, 'POST', 'vlan/edit/') return self.response(code, xml)
[ "def", "edit_vlan", "(", "self", ",", "environment_id", ",", "name", ",", "number", ",", "description", ",", "acl_file", ",", "acl_file_v6", ",", "id_vlan", ")", ":", "if", "not", "is_valid_int_param", "(", "id_vlan", ")", ":", "raise", "InvalidParameterError", "(", "u'Vlan id is invalid or was not informed.'", ")", "if", "not", "is_valid_int_param", "(", "environment_id", ")", ":", "raise", "InvalidParameterError", "(", "u'Environment id is none or invalid.'", ")", "if", "not", "is_valid_int_param", "(", "number", ")", ":", "raise", "InvalidParameterError", "(", "u'Vlan number is none or invalid'", ")", "vlan_map", "=", "dict", "(", ")", "vlan_map", "[", "'vlan_id'", "]", "=", "id_vlan", "vlan_map", "[", "'environment_id'", "]", "=", "environment_id", "vlan_map", "[", "'name'", "]", "=", "name", "vlan_map", "[", "'description'", "]", "=", "description", "vlan_map", "[", "'acl_file'", "]", "=", "acl_file", "vlan_map", "[", "'acl_file_v6'", "]", "=", "acl_file_v6", "vlan_map", "[", "'number'", "]", "=", "number", "code", ",", "xml", "=", "self", ".", "submit", "(", "{", "'vlan'", ":", "vlan_map", "}", ",", "'POST'", ",", "'vlan/edit/'", ")", "return", "self", ".", "response", "(", "code", ",", "xml", ")" ]
Edit a VLAN :param id_vlan: ID for Vlan :param environment_id: ID for Environment. :param name: The name of VLAN. :param description: Some description to VLAN. :param number: Number of Vlan :param acl_file: Acl IPv4 File name to VLAN. :param acl_file_v6: Acl IPv6 File name to VLAN. :return: None :raise VlanError: VLAN name already exists, DC division of the environment invalid or there is no VLAN number available. :raise VlanNaoExisteError: VLAN not found. :raise AmbienteNaoExisteError: Environment not registered. :raise InvalidParameterError: Name of Vlan and/or the identifier of the Environment is null or invalid. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
[ "Edit", "a", "VLAN" ]
python
train
36.509804
nefarioustim/parker
parker/client.py
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/client.py#L73-L90
def get(self, uri, disable_proxy=False, stream=False): """Return Requests response to GET request.""" response = requests.get( uri, headers=self.headers, allow_redirects=True, cookies={}, stream=stream, proxies=self.proxy if not disable_proxy else False ) if response.status_code in _PERMITTED_STATUS_CODES: self.response_headers = response.headers return response.content if not stream else response.iter_content() else: raise requests.exceptions.HTTPError( "HTTP response did not have a permitted status code." )
[ "def", "get", "(", "self", ",", "uri", ",", "disable_proxy", "=", "False", ",", "stream", "=", "False", ")", ":", "response", "=", "requests", ".", "get", "(", "uri", ",", "headers", "=", "self", ".", "headers", ",", "allow_redirects", "=", "True", ",", "cookies", "=", "{", "}", ",", "stream", "=", "stream", ",", "proxies", "=", "self", ".", "proxy", "if", "not", "disable_proxy", "else", "False", ")", "if", "response", ".", "status_code", "in", "_PERMITTED_STATUS_CODES", ":", "self", ".", "response_headers", "=", "response", ".", "headers", "return", "response", ".", "content", "if", "not", "stream", "else", "response", ".", "iter_content", "(", ")", "else", ":", "raise", "requests", ".", "exceptions", ".", "HTTPError", "(", "\"HTTP response did not have a permitted status code.\"", ")" ]
Return Requests response to GET request.
[ "Return", "Requests", "response", "to", "GET", "request", "." ]
python
train
37.444444
ktbyers/netmiko
netmiko/huawei/huawei.py
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/huawei/huawei.py#L46-L85
def set_base_prompt( self, pri_prompt_terminator=">", alt_prompt_terminator="]", delay_factor=1 ): """ Sets self.base_prompt Used as delimiter for stripping of trailing prompt in output. Should be set to something that is general and applies in multiple contexts. For Comware this will be the router prompt with < > or [ ] stripped off. This will be set on logging in, but not when entering system-view """ log.debug("In set_base_prompt") delay_factor = self.select_delay_factor(delay_factor) self.clear_buffer() self.write_channel(self.RETURN) time.sleep(0.5 * delay_factor) prompt = self.read_channel() prompt = self.normalize_linefeeds(prompt) # If multiple lines in the output take the last line prompt = prompt.split(self.RESPONSE_RETURN)[-1] prompt = prompt.strip() # Check that ends with a valid terminator character if not prompt[-1] in (pri_prompt_terminator, alt_prompt_terminator): raise ValueError("Router prompt not found: {0}".format(prompt)) # Strip off any leading HRP_. characters for USGv5 HA prompt = re.sub(r"^HRP_.", "", prompt, flags=re.M) # Strip off leading and trailing terminator prompt = prompt[1:-1] prompt = prompt.strip() self.base_prompt = prompt log.debug("prompt: {0}".format(self.base_prompt)) return self.base_prompt
[ "def", "set_base_prompt", "(", "self", ",", "pri_prompt_terminator", "=", "\">\"", ",", "alt_prompt_terminator", "=", "\"]\"", ",", "delay_factor", "=", "1", ")", ":", "log", ".", "debug", "(", "\"In set_base_prompt\"", ")", "delay_factor", "=", "self", ".", "select_delay_factor", "(", "delay_factor", ")", "self", ".", "clear_buffer", "(", ")", "self", ".", "write_channel", "(", "self", ".", "RETURN", ")", "time", ".", "sleep", "(", "0.5", "*", "delay_factor", ")", "prompt", "=", "self", ".", "read_channel", "(", ")", "prompt", "=", "self", ".", "normalize_linefeeds", "(", "prompt", ")", "# If multiple lines in the output take the last line", "prompt", "=", "prompt", ".", "split", "(", "self", ".", "RESPONSE_RETURN", ")", "[", "-", "1", "]", "prompt", "=", "prompt", ".", "strip", "(", ")", "# Check that ends with a valid terminator character", "if", "not", "prompt", "[", "-", "1", "]", "in", "(", "pri_prompt_terminator", ",", "alt_prompt_terminator", ")", ":", "raise", "ValueError", "(", "\"Router prompt not found: {0}\"", ".", "format", "(", "prompt", ")", ")", "# Strip off any leading HRP_. characters for USGv5 HA", "prompt", "=", "re", ".", "sub", "(", "r\"^HRP_.\"", ",", "\"\"", ",", "prompt", ",", "flags", "=", "re", ".", "M", ")", "# Strip off leading and trailing terminator", "prompt", "=", "prompt", "[", "1", ":", "-", "1", "]", "prompt", "=", "prompt", ".", "strip", "(", ")", "self", ".", "base_prompt", "=", "prompt", "log", ".", "debug", "(", "\"prompt: {0}\"", ".", "format", "(", "self", ".", "base_prompt", ")", ")", "return", "self", ".", "base_prompt" ]
Sets self.base_prompt Used as delimiter for stripping of trailing prompt in output. Should be set to something that is general and applies in multiple contexts. For Comware this will be the router prompt with < > or [ ] stripped off. This will be set on logging in, but not when entering system-view
[ "Sets", "self", ".", "base_prompt" ]
python
train
36.525
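The HRP-prefix and terminator stripping in `set_base_prompt` can be reproduced in isolation; a quick sketch with an illustrative Huawei-style prompt:

import re

prompt = "HRP_M<HUAWEI-FW>"
prompt = re.sub(r"^HRP_.", "", prompt, flags=re.M)  # drop the USGv5 HA prefix -> "<HUAWEI-FW>"
prompt = prompt[1:-1].strip()                       # drop the terminators    -> "HUAWEI-FW"
print(prompt)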
aws/sagemaker-containers
src/sagemaker_containers/_encoders.py
https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_encoders.py#L125-L140
def decode(obj, content_type):
    # type: (np.array or Iterable or int or float, str) -> np.array
    """Decode an object from one of the default content types to a numpy array.

    Args:
        obj (object): to be decoded.
        content_type (str): content type to be used.

    Returns:
        np.array: decoded object.
    """
    try:
        decoder = _decoders_map[content_type]
        return decoder(obj)
    except KeyError:
        raise _errors.UnsupportedFormatError(content_type)
[ "def", "decode", "(", "obj", ",", "content_type", ")", ":", "# type: (np.array or Iterable or int or float, str) -> np.array", "try", ":", "decoder", "=", "_decoders_map", "[", "content_type", "]", "return", "decoder", "(", "obj", ")", "except", "KeyError", ":", "raise", "_errors", ".", "UnsupportedFormatError", "(", "content_type", ")" ]
Decode an object from one of the default content types to a numpy array.

Args:
    obj (object): to be decoded.
    content_type (str): content type to be used.

Returns:
    np.array: decoded object.
[ "Decode", "an", "object", "ton", "a", "one", "of", "the", "default", "content", "types", "to", "a", "numpy", "array", "." ]
python
train
30.25
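A self-contained sketch of the same dispatch-map idea used by `decode` (the two decoders below are illustrative, not sagemaker-containers' actual `_decoders_map`):

import json
import numpy as np

_decoders_map = {
    "application/json": lambda obj: np.array(json.loads(obj)),
    "text/csv": lambda obj: np.array([float(x) for x in obj.split(",")]),
}

def decode(obj, content_type):
    try:
        return _decoders_map[content_type](obj)
    except KeyError:
        raise ValueError("unsupported content type: %s" % content_type)

print(decode("[1, 2, 3]", "application/json"))  # [1 2 3]
print(decode("4.0,5.0", "text/csv"))            # [4. 5.]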
joshleeb/creditcard
creditcard/formatter.py
https://github.com/joshleeb/creditcard/blob/8cff49ba80029026c7e221764eb2387eb2e04a4c/creditcard/formatter.py#L23-L30
def is_mastercard(n):
    """Checks if a credit card number fits the Mastercard format."""
    n, length = str(n), len(str(n))
    if length >= 16 and length <= 19:
        if ''.join(n[:2]) in strings_between(51, 56):
            return True
    return False
[ "def", "is_mastercard", "(", "n", ")", ":", "n", ",", "length", "=", "str", "(", "n", ")", ",", "len", "(", "str", "(", "n", ")", ")", "if", "length", ">=", "16", "and", "length", "<=", "19", ":", "if", "''", ".", "join", "(", "n", "[", ":", "2", "]", ")", "in", "strings_between", "(", "51", ",", "56", ")", ":", "return", "True", "return", "False" ]
Checks if a credit card number fits the Mastercard format.
[ "Checks", "if", "credit", "card", "number", "fits", "the", "mastercard", "format", "." ]
python
train
31.375
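A runnable sketch using `is_mastercard` as defined above, assuming `strings_between(a, b)` yields the decimal strings for the half-open range [a, b) (the helper body below is a guess at that contract):

def strings_between(a, b):
    # Assumed contract: decimal strings for a .. b-1.
    return [str(n) for n in range(a, b)]

print(is_mastercard(5123456789012345))  # True: 16 digits, prefix 51
print(is_mastercard(4123456789012345))  # False: prefix 41 is outside 51-55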
Numigi/gitoo
src/core.py
https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/core.py#L82-L93
def install(self, destination):
        """ Install a third party odoo add-on

        :param string destination: the folder where the add-on should end up.
        """
        logger.info(
            "Installing %s@%s to %s",
            self.repo, self.commit if self.commit else self.branch, destination
        )
        with temp_repo(self.repo, self.branch, self.commit) as tmp:
            self._apply_patches(tmp)
            self._move_modules(tmp, destination)
[ "def", "install", "(", "self", ",", "destination", ")", ":", "logger", ".", "info", "(", "\"Installing %s@%s to %s\"", ",", "self", ".", "repo", ",", "self", ".", "commit", "if", "self", ".", "commit", "else", "self", ".", "branch", ",", "destination", ")", "with", "temp_repo", "(", "self", ".", "repo", ",", "self", ".", "branch", ",", "self", ".", "commit", ")", "as", "tmp", ":", "self", ".", "_apply_patches", "(", "tmp", ")", "self", ".", "_move_modules", "(", "tmp", ",", "destination", ")" ]
Install a third party odoo add-on

:param string destination: the folder where the add-on should end up.
[ "Install", "a", "third", "party", "odoo", "add", "-", "on" ]
python
train
38.583333
JasonKessler/scattertext
scattertext/termscoring/ScaledFScore.py
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/termscoring/ScaledFScore.py#L166-L180
def get_p_vals(self, X): ''' Parameters ---------- X : np.array Array of word counts, shape (N, 2) where N is the vocab size. X[:,0] is the positive class, while X[:,1] is the negative class. None by default Returns ------- np.array of p-values ''' z_scores = self.get_scores(X[:, 0], X[:, 1]) return norm.cdf(z_scores)
[ "def", "get_p_vals", "(", "self", ",", "X", ")", ":", "z_scores", "=", "self", ".", "get_scores", "(", "X", "[", ":", ",", "0", "]", ",", "X", "[", ":", ",", "1", "]", ")", "return", "norm", ".", "cdf", "(", "z_scores", ")" ]
Parameters ---------- X : np.array Array of word counts, shape (N, 2) where N is the vocab size. X[:,0] is the positive class, while X[:,1] is the negative class. None by default Returns ------- np.array of p-values
[ "Parameters", "----------", "X", ":", "np", ".", "array", "Array", "of", "word", "counts", "shape", "(", "N", "2", ")", "where", "N", "is", "the", "vocab", "size", ".", "X", "[", ":", "0", "]", "is", "the", "positive", "class", "while", "X", "[", ":", "1", "]", "is", "the", "negative", "class", ".", "None", "by", "default" ]
python
train
22.266667
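The z-score to p-value mapping in `get_p_vals` is just the standard normal CDF; a standalone numeric check (scores illustrative):

import numpy as np
from scipy.stats import norm

z_scores = np.array([-1.96, 0.0, 1.96])
print(norm.cdf(z_scores))  # ~[0.025, 0.5, 0.975]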
twisted/mantissa
xmantissa/interstore.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/interstore.py#L465-L474
def routeAnswer(self, originalSender, originalTarget, value, messageID): """ Implement L{IMessageRouter.routeMessage} by synchronously locating an account via L{axiom.userbase.LoginSystem.accountByAddress}, and delivering a response to it by calling a method on it and returning a deferred containing its answer. """ router = self._routerForAccount(originalSender) return router.routeAnswer(originalSender, originalTarget, value, messageID)
[ "def", "routeAnswer", "(", "self", ",", "originalSender", ",", "originalTarget", ",", "value", ",", "messageID", ")", ":", "router", "=", "self", ".", "_routerForAccount", "(", "originalSender", ")", "return", "router", ".", "routeAnswer", "(", "originalSender", ",", "originalTarget", ",", "value", ",", "messageID", ")" ]
Implement L{IMessageRouter.routeMessage} by synchronously locating an account via L{axiom.userbase.LoginSystem.accountByAddress}, and delivering a response to it by calling a method on it and returning a deferred containing its answer.
[ "Implement", "L", "{", "IMessageRouter", ".", "routeMessage", "}", "by", "synchronously", "locating", "an", "account", "via", "L", "{", "axiom", ".", "userbase", ".", "LoginSystem", ".", "accountByAddress", "}", "and", "delivering", "a", "response", "to", "it", "by", "calling", "a", "method", "on", "it", "and", "returning", "a", "deferred", "containing", "its", "answer", "." ]
python
train
52.9
pyfca/pyfca
pyfca/implications.py
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L135-L151
def B012(t,i): """ Constructs ternary implication coding (0=not there, 2=U, 1=V) t is B column position i = |M|-1 to 0 """ if not i: return "1" nA = Awidth(i) nB = Bwidth(i) nBB = nB + nA if t < nB: return "0"+B012(t,i-1) elif t < nBB: return "1"+A012(t-nB,i-1) else: return "2"+B012(t-nBB,i-1)
[ "def", "B012", "(", "t", ",", "i", ")", ":", "if", "not", "i", ":", "return", "\"1\"", "nA", "=", "Awidth", "(", "i", ")", "nB", "=", "Bwidth", "(", "i", ")", "nBB", "=", "nB", "+", "nA", "if", "t", "<", "nB", ":", "return", "\"0\"", "+", "B012", "(", "t", ",", "i", "-", "1", ")", "elif", "t", "<", "nBB", ":", "return", "\"1\"", "+", "A012", "(", "t", "-", "nB", ",", "i", "-", "1", ")", "else", ":", "return", "\"2\"", "+", "B012", "(", "t", "-", "nBB", ",", "i", "-", "1", ")" ]
Constructs ternary implication coding (0=not there, 2=U, 1=V) t is B column position i = |M|-1 to 0
[ "Constructs", "ternary", "implication", "coding", "(", "0", "=", "not", "there", "2", "=", "U", "1", "=", "V", ")", "t", "is", "B", "column", "position", "i", "=", "|M|", "-", "1", "to", "0" ]
python
train
21.058824
emory-libraries/eulxml
eulxml/xmlmap/core.py
https://github.com/emory-libraries/eulxml/blob/17d71c7d98c0cebda9932b7f13e72093805e1fe2/eulxml/xmlmap/core.py#L55-L58
def parseUri(stream, uri=None): """Read an XML document from a URI, and return a :mod:`lxml.etree` document.""" return etree.parse(stream, parser=_get_xmlparser(), base_url=uri)
[ "def", "parseUri", "(", "stream", ",", "uri", "=", "None", ")", ":", "return", "etree", ".", "parse", "(", "stream", ",", "parser", "=", "_get_xmlparser", "(", ")", ",", "base_url", "=", "uri", ")" ]
Read an XML document from a URI, and return a :mod:`lxml.etree` document.
[ "Read", "an", "XML", "document", "from", "a", "URI", "and", "return", "a", ":", "mod", ":", "lxml", ".", "etree", "document", "." ]
python
train
46.5
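A usage sketch mirroring `parseUri` with a stock lxml parser, since `_get_xmlparser()` is module-internal; the document bytes and base URI are illustrative:

from io import BytesIO
from lxml import etree

doc = etree.parse(BytesIO(b"<root><child/></root>"), base_url="http://example.com/doc.xml")
print(doc.getroot().tag)  # root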
angr/angr
angr/concretization_strategies/__init__.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/concretization_strategies/__init__.py#L27-L31
def _max(self, memory, addr, **kwargs): """ Gets the maximum solution of an address. """ return memory.state.solver.max(addr, exact=kwargs.pop('exact', self._exact), **kwargs)
[ "def", "_max", "(", "self", ",", "memory", ",", "addr", ",", "*", "*", "kwargs", ")", ":", "return", "memory", ".", "state", ".", "solver", ".", "max", "(", "addr", ",", "exact", "=", "kwargs", ".", "pop", "(", "'exact'", ",", "self", ".", "_exact", ")", ",", "*", "*", "kwargs", ")" ]
Gets the maximum solution of an address.
[ "Gets", "the", "maximum", "solution", "of", "an", "address", "." ]
python
train
40.6
kkroening/ffmpeg-python
ffmpeg/_utils.py
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/ffmpeg/_utils.py#L83-L91
def convert_kwargs_to_cmd_line_args(kwargs): """Helper function to build command line arguments out of dict.""" args = [] for k in sorted(kwargs.keys()): v = kwargs[k] args.append('-{}'.format(k)) if v is not None: args.append('{}'.format(v)) return args
[ "def", "convert_kwargs_to_cmd_line_args", "(", "kwargs", ")", ":", "args", "=", "[", "]", "for", "k", "in", "sorted", "(", "kwargs", ".", "keys", "(", ")", ")", ":", "v", "=", "kwargs", "[", "k", "]", "args", ".", "append", "(", "'-{}'", ".", "format", "(", "k", ")", ")", "if", "v", "is", "not", "None", ":", "args", ".", "append", "(", "'{}'", ".", "format", "(", "v", ")", ")", "return", "args" ]
Helper function to build command line arguments out of dict.
[ "Helper", "function", "to", "build", "command", "line", "arguments", "out", "of", "dict", "." ]
python
train
33.111111
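Using `convert_kwargs_to_cmd_line_args` as defined above; keys are emitted in sorted order and a None value yields a bare flag:

print(convert_kwargs_to_cmd_line_args({"vf": "scale=320:-1", "y": None, "ss": 30}))
# -> ['-ss', '30', '-vf', 'scale=320:-1', '-y']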
delph-in/pydelphin
delphin/itsdb.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/itsdb.py#L1362-L1391
def decode_row(line, fields=None): """ Decode a raw line from a profile into a list of column values. Decoding involves splitting the line by the field delimiter (`"@"` by default) and unescaping special characters. If *fields* is given, cast the values into the datatype given by their respective Field object. Args: line: a raw line from a [incr tsdb()] profile. fields: a list or Relation object of Fields for the row Returns: A list of column values. """ cols = line.rstrip('\n').split(_field_delimiter) cols = list(map(unescape, cols)) if fields is not None: if len(cols) != len(fields): raise ItsdbError( 'Wrong number of fields: {} != {}' .format(len(cols), len(fields)) ) for i in range(len(cols)): col = cols[i] if col: field = fields[i] col = _cast_to_datatype(col, field) cols[i] = col return cols
[ "def", "decode_row", "(", "line", ",", "fields", "=", "None", ")", ":", "cols", "=", "line", ".", "rstrip", "(", "'\\n'", ")", ".", "split", "(", "_field_delimiter", ")", "cols", "=", "list", "(", "map", "(", "unescape", ",", "cols", ")", ")", "if", "fields", "is", "not", "None", ":", "if", "len", "(", "cols", ")", "!=", "len", "(", "fields", ")", ":", "raise", "ItsdbError", "(", "'Wrong number of fields: {} != {}'", ".", "format", "(", "len", "(", "cols", ")", ",", "len", "(", "fields", ")", ")", ")", "for", "i", "in", "range", "(", "len", "(", "cols", ")", ")", ":", "col", "=", "cols", "[", "i", "]", "if", "col", ":", "field", "=", "fields", "[", "i", "]", "col", "=", "_cast_to_datatype", "(", "col", ",", "field", ")", "cols", "[", "i", "]", "=", "col", "return", "cols" ]
Decode a raw line from a profile into a list of column values. Decoding involves splitting the line by the field delimiter (`"@"` by default) and unescaping special characters. If *fields* is given, cast the values into the datatype given by their respective Field object. Args: line: a raw line from a [incr tsdb()] profile. fields: a list or Relation object of Fields for the row Returns: A list of column values.
[ "Decode", "a", "raw", "line", "from", "a", "profile", "into", "a", "list", "of", "column", "values", "." ]
python
train
33.233333
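A usage sketch for `decode_row` with the module's default '@' field delimiter, no Field objects, and no escape sequences, so values stay strings (line content illustrative):

print(decode_row("10@the dog barks@1\n"))
# -> ['10', 'the dog barks', '1']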
Aluriak/bubble-tools
bubbletools/validator.py
https://github.com/Aluriak/bubble-tools/blob/f014f4a1986abefc80dc418feaa05ed258c2221a/bubbletools/validator.py#L120-L139
def mergeability_validation(tree:BubbleTree) -> iter:
    """Yield messages about mergeable powernodes"""
    def gen_warnings(one, two, inc_message:str) -> [str]:
        "Yield the warning for given (power)nodes if necessary"
        nodetype = ''
        if tree.inclusions[one] and tree.inclusions[two]:
            nodetype = 'power'
        elif tree.inclusions[one] or tree.inclusions[two]:
            nodetype = '(power)'
        if one > two:
            one, two = two, one
        shared = set(tree.edges.get(one, ())) & set(tree.edges.get(two, ()))
        if shared:
            yield (f"WARNING mergeable {nodetype}nodes: {one} and {two}"
                   f" are {inc_message}, and share"
                   f" {len(shared)} neighbor{'s' if len(shared) > 1 else ''}")
    for one, two in it.combinations(tree.roots, 2):
        yield from gen_warnings(one, two, inc_message='both roots')
    for parent, childs in tree.inclusions.items():
        for one, two in it.combinations(childs, 2):
            yield from gen_warnings(one, two, inc_message=f'in the same level (under {parent})')
[ "def", "mergeability_validation", "(", "tree", ":", "BubbleTree", ")", "->", "iter", ":", "def", "gen_warnings", "(", "one", ",", "two", ",", "inc_message", ":", "str", ")", "->", "[", "str", "]", ":", "\"Yield the warning for given (power)nodes if necessary\"", "nodetype", "=", "''", "if", "tree", ".", "inclusions", "[", "one", "]", "and", "tree", ".", "inclusions", "[", "two", "]", ":", "nodetype", "=", "'power'", "elif", "tree", ".", "inclusions", "[", "one", "]", "or", "tree", ".", "inclusions", "[", "two", "]", ":", "nodetype", "=", "'(power)'", "if", "one", ">", "two", ":", "one", ",", "two", "=", "two", ",", "one", "shared", "=", "set", "(", "tree", ".", "edges", ".", "get", "(", "one", ",", "(", ")", ")", ")", "&", "set", "(", "tree", ".", "edges", ".", "get", "(", "two", ",", "(", ")", ")", ")", "if", "shared", ":", "yield", "(", "f\"WARNING mergeable {nodetype}nodes: {one} and {two}\"", "f\" are {inc_message}, and share\"", "f\" {len(shared)} neigbor{'s' if len(shared) > 1 else ''}\"", ")", "for", "one", ",", "two", "in", "it", ".", "combinations", "(", "tree", ".", "roots", ",", "2", ")", ":", "yield", "from", "gen_warnings", "(", "one", ",", "two", ",", "inc_message", "=", "'both roots'", ")", "for", "parent", ",", "childs", "in", "tree", ".", "inclusions", ".", "items", "(", ")", ":", "for", "one", ",", "two", "in", "it", ".", "combinations", "(", "childs", ",", "2", ")", ":", "yield", "from", "gen_warnings", "(", "one", ",", "two", ",", "inc_message", "=", "f'in the same level (under {parent})'", ")" ]
Yield messages about mergeable powernodes
[ "Yield", "message", "about", "mergables", "powernodes" ]
python
train
53.6
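The pairing logic in `mergeability_validation` rests on itertools.combinations, which yields each unordered pair exactly once; a quick standalone check:

import itertools as it

print(list(it.combinations(["n1", "n2", "n3"], 2)))
# -> [('n1', 'n2'), ('n1', 'n3'), ('n2', 'n3')]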
bwohlberg/sporco
sporco/admm/ccmod.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/ccmod.py#L408-L417
def reconstruct(self, D=None): """Reconstruct representation.""" if D is None: Df = self.Xf else: Df = sl.rfftn(D, None, self.cri.axisN) Sf = np.sum(self.Zf * Df, axis=self.cri.axisM) return sl.irfftn(Sf, self.cri.Nv, self.cri.axisN)
[ "def", "reconstruct", "(", "self", ",", "D", "=", "None", ")", ":", "if", "D", "is", "None", ":", "Df", "=", "self", ".", "Xf", "else", ":", "Df", "=", "sl", ".", "rfftn", "(", "D", ",", "None", ",", "self", ".", "cri", ".", "axisN", ")", "Sf", "=", "np", ".", "sum", "(", "self", ".", "Zf", "*", "Df", ",", "axis", "=", "self", ".", "cri", ".", "axisM", ")", "return", "sl", ".", "irfftn", "(", "Sf", ",", "self", ".", "cri", ".", "Nv", ",", "self", ".", "cri", ".", "axisN", ")" ]
Reconstruct representation.
[ "Reconstruct", "representation", "." ]
python
train
29
marshmallow-code/marshmallow
src/marshmallow/fields.py
https://github.com/marshmallow-code/marshmallow/blob/a6b6c4151f1fbf16f3774d4052ca2bddf6903750/src/marshmallow/fields.py#L435-L467
def schema(self): """The nested Schema object. .. versionchanged:: 1.0.0 Renamed from `serializer` to `schema` """ if not self.__schema: # Inherit context from parent. context = getattr(self.parent, 'context', {}) if isinstance(self.nested, SchemaABC): self.__schema = self.nested self.__schema.context.update(context) else: if isinstance(self.nested, type) and issubclass(self.nested, SchemaABC): schema_class = self.nested elif not isinstance(self.nested, basestring): raise ValueError( 'Nested fields must be passed a ' 'Schema, not {}.'.format(self.nested.__class__), ) elif self.nested == 'self': schema_class = self.parent.__class__ else: schema_class = class_registry.get_class(self.nested) self.__schema = schema_class( many=self.many, only=self.only, exclude=self.exclude, context=context, load_only=self._nested_normalized_option('load_only'), dump_only=self._nested_normalized_option('dump_only'), ) return self.__schema
[ "def", "schema", "(", "self", ")", ":", "if", "not", "self", ".", "__schema", ":", "# Inherit context from parent.", "context", "=", "getattr", "(", "self", ".", "parent", ",", "'context'", ",", "{", "}", ")", "if", "isinstance", "(", "self", ".", "nested", ",", "SchemaABC", ")", ":", "self", ".", "__schema", "=", "self", ".", "nested", "self", ".", "__schema", ".", "context", ".", "update", "(", "context", ")", "else", ":", "if", "isinstance", "(", "self", ".", "nested", ",", "type", ")", "and", "issubclass", "(", "self", ".", "nested", ",", "SchemaABC", ")", ":", "schema_class", "=", "self", ".", "nested", "elif", "not", "isinstance", "(", "self", ".", "nested", ",", "basestring", ")", ":", "raise", "ValueError", "(", "'Nested fields must be passed a '", "'Schema, not {}.'", ".", "format", "(", "self", ".", "nested", ".", "__class__", ")", ",", ")", "elif", "self", ".", "nested", "==", "'self'", ":", "schema_class", "=", "self", ".", "parent", ".", "__class__", "else", ":", "schema_class", "=", "class_registry", ".", "get_class", "(", "self", ".", "nested", ")", "self", ".", "__schema", "=", "schema_class", "(", "many", "=", "self", ".", "many", ",", "only", "=", "self", ".", "only", ",", "exclude", "=", "self", ".", "exclude", ",", "context", "=", "context", ",", "load_only", "=", "self", ".", "_nested_normalized_option", "(", "'load_only'", ")", ",", "dump_only", "=", "self", ".", "_nested_normalized_option", "(", "'dump_only'", ")", ",", ")", "return", "self", ".", "__schema" ]
The nested Schema object. .. versionchanged:: 1.0.0 Renamed from `serializer` to `schema`
[ "The", "nested", "Schema", "object", "." ]
python
train
42.30303
jmgilman/Neolib
neolib/pyamf/amf3.py
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/amf3.py#L1232-L1238
def writeString(self, s): """ Writes a string to the stream. It will be B{UTF-8} encoded. """ s = self.context.getBytesForString(s) self.writeBytes(s)
[ "def", "writeString", "(", "self", ",", "s", ")", ":", "s", "=", "self", ".", "context", ".", "getBytesForString", "(", "s", ")", "self", ".", "writeBytes", "(", "s", ")" ]
Writes a string to the stream. It will be B{UTF-8} encoded.
[ "Writes", "a", "string", "to", "the", "stream", ".", "It", "will", "be", "B", "{", "UTF", "-", "8", "}", "encoded", "." ]
python
train
26.428571
a1ezzz/wasp-general
wasp_general/signals/signals.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/signals/signals.py#L158-L167
def watch(self, signal_name):
        """ :meth:`.WSignalSourceProto.watch` implementation

        :rtype: WSignalSource.Watcher
        """
        watcher = WSignalSource.Watcher(
            self.__queues[signal_name], lambda x: self.__watchers_callbacks[signal_name].remove(x)
        )
        self.__watchers_callbacks[signal_name].add(watcher)
        return watcher
[ "def", "watch", "(", "self", ",", "signal_name", ")", ":", "watcher", "=", "WSignalSource", ".", "Watcher", "(", "self", ".", "__queues", "[", "signal_name", "]", ",", "lambda", "x", ":", "self", ".", "__watchers_callbacks", "[", "signal_name", "]", ".", "remove", "(", "x", ")", ")", "self", ".", "__watchers_callbacks", "[", "signal_name", "]", ".", "add", "(", "watcher", ")", "return", "watcher" ]
:meth:`.WSignalSourceProto.watch` implementation

:rtype: WSignalSource.Watcher
[ ":", "meth", ":", ".", "WSignalSourceProto", ".", "watch", "implementation" ]
python
train
32.3
edx/edx-enterprise
enterprise/decorators.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/decorators.py#L53-L71
def ignore_warning(warning): """ Ignore any emitted warnings from a function. :param warning: The category of warning to ignore. """ def decorator(func): """ Return a decorated function whose emitted warnings are ignored. """ @wraps(func) def wrapper(*args, **kwargs): """ Wrap the function. """ warnings.simplefilter('ignore', warning) return func(*args, **kwargs) return wrapper return decorator
[ "def", "ignore_warning", "(", "warning", ")", ":", "def", "decorator", "(", "func", ")", ":", "\"\"\"\n Return a decorated function whose emitted warnings are ignored.\n \"\"\"", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n Wrap the function.\n \"\"\"", "warnings", ".", "simplefilter", "(", "'ignore'", ",", "warning", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "decorator" ]
Ignore any emitted warnings from a function. :param warning: The category of warning to ignore.
[ "Ignore", "any", "emitted", "warnings", "from", "a", "function", "." ]
python
valid
26.947368
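A runnable sketch using `ignore_warning` as defined above (the module's own imports, functools.wraps and warnings, are assumed to be in scope):

import warnings

@ignore_warning(DeprecationWarning)
def noisy():
    warnings.warn("old API", DeprecationWarning)
    return 42

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")  # baseline filter; the decorator overrides it
    assert noisy() == 42
    assert not caught  # the DeprecationWarning was suppressed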
treycucco/bidon
bidon/json_patch.py
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/json_patch.py#L77-L84
def remove(parent, idx): """Remove a value from a dict.""" if isinstance(parent, dict): del parent[idx] elif isinstance(parent, list): del parent[int(idx)] else: raise JSONPathError("Invalid path for operation")
[ "def", "remove", "(", "parent", ",", "idx", ")", ":", "if", "isinstance", "(", "parent", ",", "dict", ")", ":", "del", "parent", "[", "idx", "]", "elif", "isinstance", "(", "parent", ",", "list", ")", ":", "del", "parent", "[", "int", "(", "idx", ")", "]", "else", ":", "raise", "JSONPathError", "(", "\"Invalid path for operation\"", ")" ]
Remove a value from a dict.
[ "Remove", "a", "value", "from", "a", "dict", "." ]
python
train
28
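Using `remove` as defined above; the same call works on dict keys and on list indices passed as strings:

doc = {"items": [1, 2, 3]}
remove(doc["items"], "1")  # list: the index is cast with int(), deleting 2
remove(doc, "items")       # dict: the key is deleted directly
print(doc)                 # -> {}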
draios/python-sdc-client
sdcclient/_scanning.py
https://github.com/draios/python-sdc-client/blob/47f83415842048778939b90944f64386a3bcb205/sdcclient/_scanning.py#L317-L333
def delete_registry(self, registry): '''**Description** Delete an existing image registry **Arguments** - registry: Full hostname/port of registry. Eg. myrepo.example.com:5000 ''' # do some input string checking if re.match(".*\\/.*", registry): return [False, "input registry name cannot contain '/' characters - valid registry names are of the form <host>:<port> where :<port> is optional"] url = self.url + "/api/scanning/v1/anchore/registries/" + registry res = requests.delete(url, headers=self.hdrs, verify=self.ssl_verify) if not self._checkResponse(res): return [False, self.lasterr] return [True, res.json()]
[ "def", "delete_registry", "(", "self", ",", "registry", ")", ":", "# do some input string checking", "if", "re", ".", "match", "(", "\".*\\\\/.*\"", ",", "registry", ")", ":", "return", "[", "False", ",", "\"input registry name cannot contain '/' characters - valid registry names are of the form <host>:<port> where :<port> is optional\"", "]", "url", "=", "self", ".", "url", "+", "\"/api/scanning/v1/anchore/registries/\"", "+", "registry", "res", "=", "requests", ".", "delete", "(", "url", ",", "headers", "=", "self", ".", "hdrs", ",", "verify", "=", "self", ".", "ssl_verify", ")", "if", "not", "self", ".", "_checkResponse", "(", "res", ")", ":", "return", "[", "False", ",", "self", ".", "lasterr", "]", "return", "[", "True", ",", "res", ".", "json", "(", ")", "]" ]
**Description** Delete an existing image registry **Arguments** - registry: Full hostname/port of registry. Eg. myrepo.example.com:5000
[ "**", "Description", "**", "Delete", "an", "existing", "image", "registry" ]
python
test
42.588235
KrzyHonk/bpmn-python
bpmn_python/bpmn_diagram_export.py
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_diagram_export.py#L244-L254
def export_child_lane_set(parent_xml_element, child_lane_set, plane_element): """ Creates 'childLaneSet' element for exported BPMN XML file. :param parent_xml_element: an XML element, parent of exported 'childLaneSet' element, :param child_lane_set: dictionary with exported 'childLaneSet' element attributes and child elements, :param plane_element: XML object, representing 'plane' element of exported BPMN 2.0 XML. """ lane_set_xml = eTree.SubElement(parent_xml_element, consts.Consts.lane_set) for key, value in child_lane_set[consts.Consts.lanes].items(): BpmnDiagramGraphExport.export_lane(lane_set_xml, key, value, plane_element)
[ "def", "export_child_lane_set", "(", "parent_xml_element", ",", "child_lane_set", ",", "plane_element", ")", ":", "lane_set_xml", "=", "eTree", ".", "SubElement", "(", "parent_xml_element", ",", "consts", ".", "Consts", ".", "lane_set", ")", "for", "key", ",", "value", "in", "child_lane_set", "[", "consts", ".", "Consts", ".", "lanes", "]", ".", "items", "(", ")", ":", "BpmnDiagramGraphExport", ".", "export_lane", "(", "lane_set_xml", ",", "key", ",", "value", ",", "plane_element", ")" ]
Creates 'childLaneSet' element for exported BPMN XML file. :param parent_xml_element: an XML element, parent of exported 'childLaneSet' element, :param child_lane_set: dictionary with exported 'childLaneSet' element attributes and child elements, :param plane_element: XML object, representing 'plane' element of exported BPMN 2.0 XML.
[ "Creates", "childLaneSet", "element", "for", "exported", "BPMN", "XML", "file", "." ]
python
train
63.909091
apache/incubator-heron
heron/executor/src/python/heron_executor.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/executor/src/python/heron_executor.py#L322-L383
def parse_args(args): """Uses python argparse to collect positional args""" Log.info("Input args: %r" % args) parser = argparse.ArgumentParser() parser.add_argument("--shard", type=int, required=True) parser.add_argument("--topology-name", required=True) parser.add_argument("--topology-id", required=True) parser.add_argument("--topology-defn-file", required=True) parser.add_argument("--state-manager-connection", required=True) parser.add_argument("--state-manager-root", required=True) parser.add_argument("--state-manager-config-file", required=True) parser.add_argument("--tmaster-binary", required=True) parser.add_argument("--stmgr-binary", required=True) parser.add_argument("--metrics-manager-classpath", required=True) parser.add_argument("--instance-jvm-opts", required=True) parser.add_argument("--classpath", required=True) parser.add_argument("--master-port", required=True) parser.add_argument("--tmaster-controller-port", required=True) parser.add_argument("--tmaster-stats-port", required=True) parser.add_argument("--heron-internals-config-file", required=True) parser.add_argument("--override-config-file", required=True) parser.add_argument("--component-ram-map", required=True) parser.add_argument("--component-jvm-opts", required=True) parser.add_argument("--pkg-type", required=True) parser.add_argument("--topology-binary-file", required=True) parser.add_argument("--heron-java-home", required=True) parser.add_argument("--shell-port", required=True) parser.add_argument("--heron-shell-binary", required=True) parser.add_argument("--metrics-manager-port", required=True) parser.add_argument("--cluster", required=True) parser.add_argument("--role", required=True) parser.add_argument("--environment", required=True) parser.add_argument("--instance-classpath", required=True) parser.add_argument("--metrics-sinks-config-file", required=True) parser.add_argument("--scheduler-classpath", required=True) parser.add_argument("--scheduler-port", required=True) parser.add_argument("--python-instance-binary", required=True) parser.add_argument("--cpp-instance-binary", required=True) parser.add_argument("--metricscache-manager-classpath", required=True) parser.add_argument("--metricscache-manager-master-port", required=True) parser.add_argument("--metricscache-manager-stats-port", required=True) parser.add_argument("--metricscache-manager-mode", required=False) parser.add_argument("--is-stateful", required=True) parser.add_argument("--checkpoint-manager-classpath", required=True) parser.add_argument("--checkpoint-manager-port", required=True) parser.add_argument("--checkpoint-manager-ram", type=long, required=True) parser.add_argument("--stateful-config-file", required=True) parser.add_argument("--health-manager-mode", required=True) parser.add_argument("--health-manager-classpath", required=True) parser.add_argument("--jvm-remote-debugger-ports", required=False, help="ports to be used by a remote debugger for JVM instances") parsed_args, unknown_args = parser.parse_known_args(args[1:]) if unknown_args: Log.error('Unknown argument: %s' % unknown_args[0]) parser.print_help() sys.exit(1) return parsed_args
[ "def", "parse_args", "(", "args", ")", ":", "Log", ".", "info", "(", "\"Input args: %r\"", "%", "args", ")", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "\"--shard\"", ",", "type", "=", "int", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--topology-name\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--topology-id\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--topology-defn-file\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--state-manager-connection\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--state-manager-root\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--state-manager-config-file\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--tmaster-binary\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--stmgr-binary\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--metrics-manager-classpath\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--instance-jvm-opts\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--classpath\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--master-port\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--tmaster-controller-port\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--tmaster-stats-port\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--heron-internals-config-file\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--override-config-file\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--component-ram-map\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--component-jvm-opts\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--pkg-type\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--topology-binary-file\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--heron-java-home\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--shell-port\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--heron-shell-binary\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--metrics-manager-port\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--cluster\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--role\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--environment\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--instance-classpath\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--metrics-sinks-config-file\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--scheduler-classpath\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--scheduler-port\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--python-instance-binary\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--cpp-instance-binary\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", 
"\"--metricscache-manager-classpath\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--metricscache-manager-master-port\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--metricscache-manager-stats-port\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--metricscache-manager-mode\"", ",", "required", "=", "False", ")", "parser", ".", "add_argument", "(", "\"--is-stateful\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--checkpoint-manager-classpath\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--checkpoint-manager-port\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--checkpoint-manager-ram\"", ",", "type", "=", "long", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--stateful-config-file\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--health-manager-mode\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--health-manager-classpath\"", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "\"--jvm-remote-debugger-ports\"", ",", "required", "=", "False", ",", "help", "=", "\"ports to be used by a remote debugger for JVM instances\"", ")", "parsed_args", ",", "unknown_args", "=", "parser", ".", "parse_known_args", "(", "args", "[", "1", ":", "]", ")", "if", "unknown_args", ":", "Log", ".", "error", "(", "'Unknown argument: %s'", "%", "unknown_args", "[", "0", "]", ")", "parser", ".", "print_help", "(", ")", "sys", ".", "exit", "(", "1", ")", "return", "parsed_args" ]
Uses python argparse to collect positional args
[ "Uses", "python", "argparse", "to", "collect", "positional", "args" ]
python
valid
53.83871
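A minimal sketch of the `parse_known_args` rejection pattern the record above uses. The function name and the two flags kept here are illustrative; the real executor registers many more required arguments.

```python
import argparse
import sys

def parse_executor_args(args):
    # Accept known flags, collect everything else in unknown_args.
    parser = argparse.ArgumentParser()
    parser.add_argument("--shard", type=int, required=True)
    parser.add_argument("--topology-name", required=True)
    parsed_args, unknown_args = parser.parse_known_args(args[1:])
    if unknown_args:
        # Mirror the record: reject the first unrecognised flag and exit.
        print("Unknown argument: %s" % unknown_args[0], file=sys.stderr)
        parser.print_help()
        sys.exit(1)
    return parsed_args

print(parse_executor_args(["executor", "--shard", "0", "--topology-name", "demo"]))
```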
JukeboxPipeline/jukebox-core
src/jukeboxcore/launcher.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/launcher.py#L185-L198
def compile_ui(self, namespace, unknown): """Compile qt designer files :param namespace: namespace containing arguments from the launch parser :type namespace: Namespace :param unknown: list of unknown arguments :type unknown: list :returns: None :rtype: None :raises: None """ uifiles = namespace.uifile for f in uifiles: qtcompile.compile_ui(f.name)
[ "def", "compile_ui", "(", "self", ",", "namespace", ",", "unknown", ")", ":", "uifiles", "=", "namespace", ".", "uifile", "for", "f", "in", "uifiles", ":", "qtcompile", ".", "compile_ui", "(", "f", ".", "name", ")" ]
Compile qt designer files :param namespace: namespace containing arguments from the launch parser :type namespace: Namespace :param unknown: list of unknown arguments :type unknown: list :returns: None :rtype: None :raises: None
[ "Compile", "qt", "designer", "files" ]
python
train
31.357143
spacetelescope/stsci.tools
lib/stsci/tools/editpar.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/editpar.py#L1473-L1530
def setAllEntriesFromParList(self, aParList, updateModel=False): """ Set all the parameter entry values in the GUI to the values in the given par list. If 'updateModel' is True, the internal param list will be updated to the new values as well as the GUI entries (slower and not always necessary). Note the corresponding TparDisplay method. """ # Get model data, the list of pars theParamList = self._taskParsObj.getParList() # we may modify members if len(aParList) != len(theParamList): showwarning(message="Attempting to set parameter values from a "+ \ "list of different length ("+str(len(aParList))+ \ ") than the number shown here ("+ \ str(len(theParamList))+"). Be aware.", title="Parameter List Length Mismatch") # LOOP THRU GUI PAR LIST for i in range(self.numParams): par = theParamList[i] if par.type == "pset": continue # skip PSET's for now gui_entry = self.entryNo[i] # Set the value in the paramList before setting it in the GUI # This may be in the form of a list, or an IrafParList (getValue) if isinstance(aParList, list): # Since "aParList" can have them in different order and number # than we do, we'll have to first find the matching param. found = False for newpar in aParList: if newpar.name==par.name and newpar.scope==par.scope: par.set(newpar.value) # same as .get(native=1,prompt=0) found = True break # Now see if newpar was found in our list if not found: pnm = par.name if len(par.scope): pnm = par.scope+'.'+par.name raise UnfoundParamError('Error - Unfound Parameter! \n\n'+\ 'Expected parameter "'+pnm+'" for task "'+ \ self.taskName+'". \nThere may be others...') else: # assume has getValue() par.set(aParList.getValue(par.name, native=1, prompt=0)) # gui holds a str, but par.value is native; conversion occurs gui_entry.forceValue(par.value, noteEdited=False) # no triggers yet if updateModel: # Update the model values via checkSetSaveEntries self.badEntriesList = self.checkSetSaveEntries(doSave=False) # If there were invalid entries, prepare the message dialog if self.badEntriesList: self.processBadEntries(self.badEntriesList, self.taskName, canCancel=False)
[ "def", "setAllEntriesFromParList", "(", "self", ",", "aParList", ",", "updateModel", "=", "False", ")", ":", "# Get model data, the list of pars", "theParamList", "=", "self", ".", "_taskParsObj", ".", "getParList", "(", ")", "# we may modify members", "if", "len", "(", "aParList", ")", "!=", "len", "(", "theParamList", ")", ":", "showwarning", "(", "message", "=", "\"Attempting to set parameter values from a \"", "+", "\"list of different length (\"", "+", "str", "(", "len", "(", "aParList", ")", ")", "+", "\") than the number shown here (\"", "+", "str", "(", "len", "(", "theParamList", ")", ")", "+", "\"). Be aware.\"", ",", "title", "=", "\"Parameter List Length Mismatch\"", ")", "# LOOP THRU GUI PAR LIST", "for", "i", "in", "range", "(", "self", ".", "numParams", ")", ":", "par", "=", "theParamList", "[", "i", "]", "if", "par", ".", "type", "==", "\"pset\"", ":", "continue", "# skip PSET's for now", "gui_entry", "=", "self", ".", "entryNo", "[", "i", "]", "# Set the value in the paramList before setting it in the GUI", "# This may be in the form of a list, or an IrafParList (getValue)", "if", "isinstance", "(", "aParList", ",", "list", ")", ":", "# Since \"aParList\" can have them in different order and number", "# than we do, we'll have to first find the matching param.", "found", "=", "False", "for", "newpar", "in", "aParList", ":", "if", "newpar", ".", "name", "==", "par", ".", "name", "and", "newpar", ".", "scope", "==", "par", ".", "scope", ":", "par", ".", "set", "(", "newpar", ".", "value", ")", "# same as .get(native=1,prompt=0)", "found", "=", "True", "break", "# Now see if newpar was found in our list", "if", "not", "found", ":", "pnm", "=", "par", ".", "name", "if", "len", "(", "par", ".", "scope", ")", ":", "pnm", "=", "par", ".", "scope", "+", "'.'", "+", "par", ".", "name", "raise", "UnfoundParamError", "(", "'Error - Unfound Parameter! \\n\\n'", "+", "'Expected parameter \"'", "+", "pnm", "+", "'\" for task \"'", "+", "self", ".", "taskName", "+", "'\". \\nThere may be others...'", ")", "else", ":", "# assume has getValue()", "par", ".", "set", "(", "aParList", ".", "getValue", "(", "par", ".", "name", ",", "native", "=", "1", ",", "prompt", "=", "0", ")", ")", "# gui holds a str, but par.value is native; conversion occurs", "gui_entry", ".", "forceValue", "(", "par", ".", "value", ",", "noteEdited", "=", "False", ")", "# no triggers yet", "if", "updateModel", ":", "# Update the model values via checkSetSaveEntries", "self", ".", "badEntriesList", "=", "self", ".", "checkSetSaveEntries", "(", "doSave", "=", "False", ")", "# If there were invalid entries, prepare the message dialog", "if", "self", ".", "badEntriesList", ":", "self", ".", "processBadEntries", "(", "self", ".", "badEntriesList", ",", "self", ".", "taskName", ",", "canCancel", "=", "False", ")" ]
Set all the parameter entry values in the GUI to the values in the given par list. If 'updateModel' is True, the internal param list will be updated to the new values as well as the GUI entries (slower and not always necessary). Note the corresponding TparDisplay method.
[ "Set", "all", "the", "parameter", "entry", "values", "in", "the", "GUI", "to", "the", "values", "in", "the", "given", "par", "list", ".", "If", "updateModel", "is", "True", "the", "internal", "param", "list", "will", "be", "updated", "to", "the", "new", "values", "as", "well", "as", "the", "GUI", "entries", "(", "slower", "and", "not", "always", "necessary", ")", ".", "Note", "the", "corresponding", "TparDisplay", "method", "." ]
python
train
48.344828
mwouts/jupytext
jupytext/languages.py
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/languages.py#L87-L91
def comment_lines(lines, prefix): """Return commented lines""" if not prefix: return lines return [prefix + ' ' + line if line else prefix for line in lines]
[ "def", "comment_lines", "(", "lines", ",", "prefix", ")", ":", "if", "not", "prefix", ":", "return", "lines", "return", "[", "prefix", "+", "' '", "+", "line", "if", "line", "else", "prefix", "for", "line", "in", "lines", "]" ]
Return commented lines
[ "Return", "commented", "lines" ]
python
train
34.6
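Because `comment_lines` is a pure function, its contract is easy to pin down with a self-contained check; the copy below is taken verbatim from the record, and the asserts are illustrative:

```python
def comment_lines(lines, prefix):
    """Return commented lines"""
    if not prefix:
        return lines
    return [prefix + ' ' + line if line else prefix for line in lines]

# Empty lines get the bare prefix (no trailing space); an empty prefix is a no-op.
assert comment_lines(['a = 1', '', 'b = 2'], '#') == ['# a = 1', '#', '# b = 2']
assert comment_lines(['a = 1'], '') == ['a = 1']
```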
narfman0/helga-markovify
helga_markovify/plugin.py
https://github.com/narfman0/helga-markovify/blob/b5a82de070102e6da1fd3f5f81cad12d0a9185d8/helga_markovify/plugin.py#L70-L85
def _handle_match(client, channel, nick, message, matches): """ Match stores all channel info. If helga is asked something to stimulate a markov response about channel data, then we shall graciously provide it. """ generate_interrogative = _CHANNEL_GENERATE_REGEX.match(message) if generate_interrogative: return generate(_DEFAULT_TOPIC, _ADD_PUNCTUATION) current_topic = db.markovify.find_one({'topic': _DEFAULT_TOPIC}) if current_topic: message = punctuate(current_topic['text'], message, _ADD_PUNCTUATION) try: ingest(_DEFAULT_TOPIC, message) except ValueError as e: # not good, but this is done every message so just move along print(str(e))
[ "def", "_handle_match", "(", "client", ",", "channel", ",", "nick", ",", "message", ",", "matches", ")", ":", "generate_interrogative", "=", "_CHANNEL_GENERATE_REGEX", ".", "match", "(", "message", ")", "if", "generate_interrogative", ":", "return", "generate", "(", "_DEFAULT_TOPIC", ",", "_ADD_PUNCTUATION", ")", "current_topic", "=", "db", ".", "markovify", ".", "find_one", "(", "{", "'topic'", ":", "_DEFAULT_TOPIC", "}", ")", "if", "current_topic", ":", "message", "=", "punctuate", "(", "current_topic", "[", "'text'", "]", ",", "message", ",", "_ADD_PUNCTUATION", ")", "try", ":", "ingest", "(", "_DEFAULT_TOPIC", ",", "message", ")", "except", "ValueError", "as", "e", ":", "# not good, but this is done every message so just move along", "print", "(", "str", "(", "e", ")", ")" ]
Match stores all channel info. If helga is asked something to stimulate a markov response about channel data, then we shall graciously provide it.
[ "Match", "stores", "all", "channel", "info", ".", "If", "helga", "is", "asked", "something", "to", "stimulate", "a", "markov", "response", "about", "channel", "data", "then", "we", "shall", "graciously", "provide", "it", "." ]
python
train
44.3125
LionelAuroux/pyrser
pyrser/parsing/base.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/parsing/base.py#L314-L327
def read_text(self, text: str) -> bool: """ Consume a strlen(text) text at current position in the stream else return False. Same as "" in BNF ex : read_text("ls");. """ if self.read_eof(): return False self._stream.save_context() if self.peek_text(text): self._stream.incpos(len(text)) return self._stream.validate_context() return self._stream.restore_context()
[ "def", "read_text", "(", "self", ",", "text", ":", "str", ")", "->", "bool", ":", "if", "self", ".", "read_eof", "(", ")", ":", "return", "False", "self", ".", "_stream", ".", "save_context", "(", ")", "if", "self", ".", "peek_text", "(", "text", ")", ":", "self", ".", "_stream", ".", "incpos", "(", "len", "(", "text", ")", ")", "return", "self", ".", "_stream", ".", "validate_context", "(", ")", "return", "self", ".", "_stream", ".", "restore_context", "(", ")" ]
Consume a strlen(text) text at current position in the stream else return False. Same as "" in BNF ex : read_text("ls");.
[ "Consume", "a", "strlen", "(", "text", ")", "text", "at", "current", "position", "in", "the", "stream", "else", "return", "False", ".", "Same", "as", "in", "BNF", "ex", ":", "read_text", "(", "ls", ")", ";", "." ]
python
test
33.285714
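The method above relies on a save/validate/restore checkpoint protocol on the stream. A toy sketch of that idiom follows, with a hypothetical `Stream` class that is not pyrser's actual implementation:

```python
class Stream:
    def __init__(self, text):
        self.text, self.pos, self._saved = text, 0, []

    def save_context(self):
        self._saved.append(self.pos)          # push a checkpoint

    def validate_context(self):
        self._saved.pop()                     # commit: drop the checkpoint
        return True

    def restore_context(self):
        self.pos = self._saved.pop()          # roll back to the checkpoint
        return False

def read_text(stream, text):
    """Consume text at the current position, else return False."""
    stream.save_context()
    if stream.text.startswith(text, stream.pos):
        stream.pos += len(text)
        return stream.validate_context()
    return stream.restore_context()

s = Stream("ls -l")
assert read_text(s, "ls") and s.pos == 2
assert not read_text(s, "cd") and s.pos == 2  # a failed match leaves pos unchanged
```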
praekelt/django-preferences
preferences/models.py
https://github.com/praekelt/django-preferences/blob/724f23da45449e96feb5179cb34e3d380cf151a1/preferences/models.py#L44-L59
def site_cleanup(sender, action, instance, **kwargs): """ Make sure there is only a single preferences object per site. So remove sites from pre-existing preferences objects. """ if action == 'post_add': if isinstance(instance, Preferences) \ and hasattr(instance.__class__, 'objects'): site_conflicts = instance.__class__.objects.filter( sites__in=instance.sites.all() ).only('id').distinct() for conflict in site_conflicts: if conflict.id != instance.id: for site in instance.sites.all(): conflict.sites.remove(site)
[ "def", "site_cleanup", "(", "sender", ",", "action", ",", "instance", ",", "*", "*", "kwargs", ")", ":", "if", "action", "==", "'post_add'", ":", "if", "isinstance", "(", "instance", ",", "Preferences", ")", "and", "hasattr", "(", "instance", ".", "__class__", ",", "'objects'", ")", ":", "site_conflicts", "=", "instance", ".", "__class__", ".", "objects", ".", "filter", "(", "sites__in", "=", "instance", ".", "sites", ".", "all", "(", ")", ")", ".", "only", "(", "'id'", ")", ".", "distinct", "(", ")", "for", "conflict", "in", "site_conflicts", ":", "if", "conflict", ".", "id", "!=", "instance", ".", "id", ":", "for", "site", "in", "instance", ".", "sites", ".", "all", "(", ")", ":", "conflict", ".", "sites", ".", "remove", "(", "site", ")" ]
Make sure there is only a single preferences object per site. So remove sites from pre-existing preferences objects.
[ "Make", "sure", "there", "is", "only", "a", "single", "preferences", "object", "per", "site", ".", "So", "remove", "sites", "from", "pre", "-", "existing", "preferences", "objects", "." ]
python
train
41
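A handler with this `(sender, action, instance, **kwargs)` signature is wired to Django's `m2m_changed` signal. The package presumably performs this wiring itself; the sketch below only shows the mechanism:

```python
from django.db.models.signals import m2m_changed

from preferences.models import Preferences, site_cleanup

# Fire site_cleanup whenever the Preferences<->Site m2m relation changes.
m2m_changed.connect(site_cleanup, sender=Preferences.sites.through)
```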
RonenNess/Fileter
fileter/filters/pattern_filter.py
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/filters/pattern_filter.py#L24-L32
def match(self, filepath): """ The function to check file. Should return True if match, False otherwise. """ for pattern in self.__pattern: if len(fnmatch.filter([filepath], pattern)) > 0: return True return False
[ "def", "match", "(", "self", ",", "filepath", ")", ":", "for", "pattern", "in", "self", ".", "__pattern", ":", "if", "len", "(", "fnmatch", ".", "filter", "(", "[", "filepath", "]", ",", "pattern", ")", ")", ">", "0", ":", "return", "True", "return", "False" ]
The function to check file. Should return True if match, False otherwise.
[ "The", "function", "to", "check", "file", ".", "Should", "return", "True", "if", "match", "False", "otherwise", "." ]
python
train
31.222222
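The matcher above is `fnmatch` applied pattern-by-pattern; note that unlike `glob`, `fnmatch`'s `*` also crosses `/`. A self-contained illustration:

```python
import fnmatch

patterns = ['*.py', '*.txt']

def match(filepath):
    # True if any pattern matches the whole path.
    return any(len(fnmatch.filter([filepath], p)) > 0 for p in patterns)

assert match('src/main.py')      # '*' matches across directory separators
assert not match('image.png')
```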
rigetti/grove
grove/tomography/state_tomography.py
https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/tomography/state_tomography.py#L105-L168
def estimate_from_ssr(histograms, readout_povm, channel_ops, settings): """ Estimate a density matrix from single shot histograms obtained by measuring bitstrings in the Z-eigenbasis after application of given channel operators. :param numpy.ndarray histograms: The single shot histograms, `shape=(n_channels, dim)`. :param DiagognalPOVM readout_povm: The POVM corresponding to the readout plus classifier. :param list channel_ops: The tomography measurement channels as `qutip.Qobj`'s. :param TomographySettings settings: The solver and estimation settings. :return: The generated StateTomography object. :rtype: StateTomography """ nqc = len(channel_ops[0].dims[0]) pauli_basis = grove.tomography.operator_utils.PAULI_BASIS ** nqc pi_basis = readout_povm.pi_basis if not histograms.shape[1] == pi_basis.dim: # pragma no coverage raise ValueError("Currently tomography is only implemented for two-level systems.") # prepare the log-likelihood function parameters, see documentation n_kj = np.asarray(histograms) c_jk_m = _prepare_c_jk_m(readout_povm, pauli_basis, channel_ops) rho_m = cvxpy.Variable(pauli_basis.dim) p_jk = c_jk_m * rho_m obj = -n_kj.ravel() * cvxpy.log(p_jk) p_jk_mat = cvxpy.reshape(p_jk, pi_basis.dim, len(channel_ops)) # cvxpy has col-major order # Default constraints: # MLE must describe valid probability distribution # i.e., for each k, p_jk must sum to one and be element-wise non-negative: # 1. \sum_j p_jk == 1 for all k # 2. p_jk >= 0 for all j, k # where p_jk = \sum_m c_jk_m rho_m constraints = [ p_jk >= 0, np.matrix(np.ones((1, pi_basis.dim))) * p_jk_mat == 1, ] rho_m_real_imag = sum((rm * o_ut.to_realimag(Pm) for (rm, Pm) in ut.izip(rho_m, pauli_basis.ops)), 0) if POSITIVE in settings.constraints: if tomography._SDP_SOLVER.is_functional(): constraints.append(rho_m_real_imag >> 0) else: # pragma no coverage _log.warning("No convex solver capable of semi-definite problems installed.\n" "Dropping the positivity constraint on the density matrix.") if UNIT_TRACE in settings.constraints: # this assumes that the first element of the Pauli basis is always proportional to # the identity constraints.append(rho_m[0, 0] == 1. / pauli_basis.ops[0].tr().real) prob = cvxpy.Problem(cvxpy.Minimize(obj), constraints) _log.info("Starting convex solver") prob.solve(solver=tomography.SOLVER, **settings.solver_kwargs) if prob.status != cvxpy.OPTIMAL: # pragma no coverage _log.warning("Problem did not converge to optimal solution. " "Solver settings: {}".format(settings.solver_kwargs)) return StateTomography(np.array(rho_m.value).ravel(), pauli_basis, settings)
[ "def", "estimate_from_ssr", "(", "histograms", ",", "readout_povm", ",", "channel_ops", ",", "settings", ")", ":", "nqc", "=", "len", "(", "channel_ops", "[", "0", "]", ".", "dims", "[", "0", "]", ")", "pauli_basis", "=", "grove", ".", "tomography", ".", "operator_utils", ".", "PAULI_BASIS", "**", "nqc", "pi_basis", "=", "readout_povm", ".", "pi_basis", "if", "not", "histograms", ".", "shape", "[", "1", "]", "==", "pi_basis", ".", "dim", ":", "# pragma no coverage", "raise", "ValueError", "(", "\"Currently tomography is only implemented for two-level systems.\"", ")", "# prepare the log-likelihood function parameters, see documentation", "n_kj", "=", "np", ".", "asarray", "(", "histograms", ")", "c_jk_m", "=", "_prepare_c_jk_m", "(", "readout_povm", ",", "pauli_basis", ",", "channel_ops", ")", "rho_m", "=", "cvxpy", ".", "Variable", "(", "pauli_basis", ".", "dim", ")", "p_jk", "=", "c_jk_m", "*", "rho_m", "obj", "=", "-", "n_kj", ".", "ravel", "(", ")", "*", "cvxpy", ".", "log", "(", "p_jk", ")", "p_jk_mat", "=", "cvxpy", ".", "reshape", "(", "p_jk", ",", "pi_basis", ".", "dim", ",", "len", "(", "channel_ops", ")", ")", "# cvxpy has col-major order", "# Default constraints:", "# MLE must describe valid probability distribution", "# i.e., for each k, p_jk must sum to one and be element-wise non-negative:", "# 1. \\sum_j p_jk == 1 for all k", "# 2. p_jk >= 0 for all j, k", "# where p_jk = \\sum_m c_jk_m rho_m", "constraints", "=", "[", "p_jk", ">=", "0", ",", "np", ".", "matrix", "(", "np", ".", "ones", "(", "(", "1", ",", "pi_basis", ".", "dim", ")", ")", ")", "*", "p_jk_mat", "==", "1", ",", "]", "rho_m_real_imag", "=", "sum", "(", "(", "rm", "*", "o_ut", ".", "to_realimag", "(", "Pm", ")", "for", "(", "rm", ",", "Pm", ")", "in", "ut", ".", "izip", "(", "rho_m", ",", "pauli_basis", ".", "ops", ")", ")", ",", "0", ")", "if", "POSITIVE", "in", "settings", ".", "constraints", ":", "if", "tomography", ".", "_SDP_SOLVER", ".", "is_functional", "(", ")", ":", "constraints", ".", "append", "(", "rho_m_real_imag", ">>", "0", ")", "else", ":", "# pragma no coverage", "_log", ".", "warning", "(", "\"No convex solver capable of semi-definite problems installed.\\n\"", "\"Dropping the positivity constraint on the density matrix.\"", ")", "if", "UNIT_TRACE", "in", "settings", ".", "constraints", ":", "# this assumes that the first element of the Pauli basis is always proportional to", "# the identity", "constraints", ".", "append", "(", "rho_m", "[", "0", ",", "0", "]", "==", "1.", "/", "pauli_basis", ".", "ops", "[", "0", "]", ".", "tr", "(", ")", ".", "real", ")", "prob", "=", "cvxpy", ".", "Problem", "(", "cvxpy", ".", "Minimize", "(", "obj", ")", ",", "constraints", ")", "_log", ".", "info", "(", "\"Starting convex solver\"", ")", "prob", ".", "solve", "(", "solver", "=", "tomography", ".", "SOLVER", ",", "*", "*", "settings", ".", "solver_kwargs", ")", "if", "prob", ".", "status", "!=", "cvxpy", ".", "OPTIMAL", ":", "# pragma no coverage", "_log", ".", "warning", "(", "\"Problem did not converge to optimal solution. \"", "\"Solver settings: {}\"", ".", "format", "(", "settings", ".", "solver_kwargs", ")", ")", "return", "StateTomography", "(", "np", ".", "array", "(", "rho_m", ".", "value", ")", ".", "ravel", "(", ")", ",", "pauli_basis", ",", "settings", ")" ]
Estimate a density matrix from single shot histograms obtained by measuring bitstrings in the Z-eigenbasis after application of given channel operators. :param numpy.ndarray histograms: The single shot histograms, `shape=(n_channels, dim)`. :param DiagognalPOVM readout_povm: The POVM corresponding to the readout plus classifier. :param list channel_ops: The tomography measurement channels as `qutip.Qobj`'s. :param TomographySettings settings: The solver and estimation settings. :return: The generated StateTomography object. :rtype: StateTomography
[ "Estimate", "a", "density", "matrix", "from", "single", "shot", "histograms", "obtained", "by", "measuring", "bitstrings", "in", "the", "Z", "-", "eigenbasis", "after", "application", "of", "given", "channel", "operators", "." ]
python
train
48.0625
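The core of the estimator is a convex maximum-likelihood fit under simplex constraints. A stripped-down cvxpy sketch of that step, with made-up counts standing in for real tomography histograms:

```python
import cvxpy as cp
import numpy as np

n = np.array([48., 27., 15., 10.])                           # observed counts (illustrative)
p = cp.Variable(len(n))                                      # probabilities to recover
objective = cp.Minimize(-cp.sum(cp.multiply(n, cp.log(p))))  # negative log-likelihood
constraints = [p >= 0, cp.sum(p) == 1]                       # valid probability distribution
cp.Problem(objective, constraints).solve()
print(p.value)                                               # approx. n / n.sum()
```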
apache/incubator-superset
superset/dataframe.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/dataframe.py#L39-L60
def dedup(l, suffix='__', case_sensitive=True): """De-duplicates a list of strings by suffixing a counter Always returns the same number of entries as provided, and always returns unique values. Case sensitive comparison by default. >>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar']))) foo,bar,bar__1,bar__2,Bar >>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'], case_sensitive=False))) foo,bar,bar__1,bar__2,Bar__3 """ new_l = [] seen = {} for s in l: s_fixed_case = s if case_sensitive else s.lower() if s_fixed_case in seen: seen[s_fixed_case] += 1 s += suffix + str(seen[s_fixed_case]) else: seen[s_fixed_case] = 0 new_l.append(s) return new_l
[ "def", "dedup", "(", "l", ",", "suffix", "=", "'__'", ",", "case_sensitive", "=", "True", ")", ":", "new_l", "=", "[", "]", "seen", "=", "{", "}", "for", "s", "in", "l", ":", "s_fixed_case", "=", "s", "if", "case_sensitive", "else", "s", ".", "lower", "(", ")", "if", "s_fixed_case", "in", "seen", ":", "seen", "[", "s_fixed_case", "]", "+=", "1", "s", "+=", "suffix", "+", "str", "(", "seen", "[", "s_fixed_case", "]", ")", "else", ":", "seen", "[", "s_fixed_case", "]", "=", "0", "new_l", ".", "append", "(", "s", ")", "return", "new_l" ]
De-duplicates a list of strings by suffixing a counter Always returns the same number of entries as provided, and always returns unique values. Case sensitive comparison by default. >>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar']))) foo,bar,bar__1,bar__2,Bar >>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'], case_sensitive=False))) foo,bar,bar__1,bar__2,Bar__3
[ "De", "-", "duplicates", "a", "list", "of", "string", "by", "suffixing", "a", "counter" ]
python
train
34.772727
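The doctests above already pin down the behaviour; this inline copy of the function just shows it in the module's actual role, making dataframe column labels unique:

```python
def dedup(l, suffix='__', case_sensitive=True):
    new_l, seen = [], {}
    for s in l:
        key = s if case_sensitive else s.lower()
        if key in seen:
            seen[key] += 1
            s += suffix + str(seen[key])
        else:
            seen[key] = 0
        new_l.append(s)
    return new_l

assert dedup(['id', 'name', 'name']) == ['id', 'name', 'name__1']
```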
tjcsl/cslbot
cslbot/helpers/handler.py
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/helpers/handler.py#L345-L374
def do_kick(self, send, target, nick, msg, slogan=True): """Kick users. - If kick is disabled, don't do anything. - If the bot is not an op, rage at an op. - Kick the user. """ if not self.kick_enabled: return if target not in self.channels: send("%s: you're lucky, private message kicking hasn't been implemented yet." % nick) return with self.data_lock: ops = [k for k, v in self.opers[target].items() if v] botnick = self.config['core']['nick'] if botnick not in ops: ops = ['someone'] if not ops else ops send(textutils.gen_creffett("%s: /op the bot" % random.choice(ops)), target=target) elif random.random() < 0.01 and msg == "shutting caps lock off": if nick in ops: send("%s: HUEHUEHUE GIBE CAPSLOCK PLS I REPORT U" % nick, target=target) else: self.connection.kick(target, nick, "HUEHUEHUE GIBE CAPSLOCK PLS I REPORT U") else: msg = textutils.gen_slogan(msg).upper() if slogan else msg if nick in ops: send("%s: %s" % (nick, msg), target=target) else: self.connection.kick(target, nick, msg)
[ "def", "do_kick", "(", "self", ",", "send", ",", "target", ",", "nick", ",", "msg", ",", "slogan", "=", "True", ")", ":", "if", "not", "self", ".", "kick_enabled", ":", "return", "if", "target", "not", "in", "self", ".", "channels", ":", "send", "(", "\"%s: you're lucky, private message kicking hasn't been implemented yet.\"", "%", "nick", ")", "return", "with", "self", ".", "data_lock", ":", "ops", "=", "[", "k", "for", "k", ",", "v", "in", "self", ".", "opers", "[", "target", "]", ".", "items", "(", ")", "if", "v", "]", "botnick", "=", "self", ".", "config", "[", "'core'", "]", "[", "'nick'", "]", "if", "botnick", "not", "in", "ops", ":", "ops", "=", "[", "'someone'", "]", "if", "not", "ops", "else", "ops", "send", "(", "textutils", ".", "gen_creffett", "(", "\"%s: /op the bot\"", "%", "random", ".", "choice", "(", "ops", ")", ")", ",", "target", "=", "target", ")", "elif", "random", ".", "random", "(", ")", "<", "0.01", "and", "msg", "==", "\"shutting caps lock off\"", ":", "if", "nick", "in", "ops", ":", "send", "(", "\"%s: HUEHUEHUE GIBE CAPSLOCK PLS I REPORT U\"", "%", "nick", ",", "target", "=", "target", ")", "else", ":", "self", ".", "connection", ".", "kick", "(", "target", ",", "nick", ",", "\"HUEHUEHUE GIBE CAPSLOCK PLS I REPORT U\"", ")", "else", ":", "msg", "=", "textutils", ".", "gen_slogan", "(", "msg", ")", ".", "upper", "(", ")", "if", "slogan", "else", "msg", "if", "nick", "in", "ops", ":", "send", "(", "\"%s: %s\"", "%", "(", "nick", ",", "msg", ")", ",", "target", "=", "target", ")", "else", ":", "self", ".", "connection", ".", "kick", "(", "target", ",", "nick", ",", "msg", ")" ]
Kick users. - If kick is disabled, don't do anything. - If the bot is not an op, rage at an op. - Kick the user.
[ "Kick", "users", "." ]
python
train
42.1
Stranger6667/postmarker
postmarker/models/stats.py
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/stats.py#L87-L92
def location(self, tag=None, fromdate=None, todate=None): """ Gets an overview of which part of the email links were clicked from (HTML or Text). This is only recorded when Link Tracking is enabled for that email. """ return self.call("GET", "/stats/outbound/clicks/location", tag=tag, fromdate=fromdate, todate=todate)
[ "def", "location", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound/clicks/location\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ")" ]
Gets an overview of which part of the email links were clicked from (HTML or Text). This is only recorded when Link Tracking is enabled for that email.
[ "Gets", "an", "overview", "of", "which", "part", "of", "the", "email", "links", "were", "clicked", "from", "(", "HTML", "or", "Text", ")", ".", "This", "is", "only", "recorded", "when", "Link", "Tracking", "is", "enabled", "for", "that", "email", "." ]
python
train
59
openergy/oplus
oplus/epm/epm.py
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/epm/epm.py#L100-L143
def _dev_populate_from_json_data(self, json_data): """ !! Must only be called once, when empty !! """ # workflow # -------- # (methods belonging to create/update/delete framework: # epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete) # 1. add inert # * data is checked # * old links are unregistered # * record is stored in table (=> pk uniqueness is checked) # 2. activate: hooks, links, external files # manage comment if any comment = json_data.pop("_comment", None) if comment is not None: self._comment = comment # populate external files external_files_data = json_data.pop("_external_files", dict()) self._dev_external_files_manager.populate_from_json_data(external_files_data) # manage records added_records = [] for table_ref, json_data_records in json_data.items(): # find table table = getattr(self, table_ref) # create record (inert) records = table._dev_add_inert(json_data_records) # add records (inert) added_records.extend(records) # activate hooks for r in added_records: r._dev_activate_hooks() # activate links and external files for r in added_records: r._dev_activate_links() r._dev_activate_external_files()
[ "def", "_dev_populate_from_json_data", "(", "self", ",", "json_data", ")", ":", "# workflow", "# --------", "# (methods belonging to create/update/delete framework:", "# epm._dev_populate_from_json_data, table.batch_add, record.update, queryset.delete, record.delete)", "# 1. add inert", "# * data is checked", "# * old links are unregistered", "# * record is stored in table (=> pk uniqueness is checked)", "# 2. activate: hooks, links, external files", "# manage comment if any", "comment", "=", "json_data", ".", "pop", "(", "\"_comment\"", ",", "None", ")", "if", "comment", "is", "not", "None", ":", "self", ".", "_comment", "=", "comment", "# populate external files", "external_files_data", "=", "json_data", ".", "pop", "(", "\"_external_files\"", ",", "dict", "(", ")", ")", "self", ".", "_dev_external_files_manager", ".", "populate_from_json_data", "(", "external_files_data", ")", "# manage records", "added_records", "=", "[", "]", "for", "table_ref", ",", "json_data_records", "in", "json_data", ".", "items", "(", ")", ":", "# find table", "table", "=", "getattr", "(", "self", ",", "table_ref", ")", "# create record (inert)", "records", "=", "table", ".", "_dev_add_inert", "(", "json_data_records", ")", "# add records (inert)", "added_records", ".", "extend", "(", "records", ")", "# activate hooks", "for", "r", "in", "added_records", ":", "r", ".", "_dev_activate_hooks", "(", ")", "# activate links and external files", "for", "r", "in", "added_records", ":", "r", ".", "_dev_activate_links", "(", ")", "r", ".", "_dev_activate_external_files", "(", ")" ]
!! Must only be called once, when empty !!
[ "!!", "Must", "only", "be", "called", "once", "when", "empty", "!!" ]
python
test
33.545455
sorgerlab/indra
indra/sources/bel/rdf_processor.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/rdf_processor.py#L829-L871
def print_statement_coverage(self): """Display how many of the direct statements have been converted. Also prints how many are considered 'degenerate' and not converted.""" if not self.all_direct_stmts: self.get_all_direct_statements() if not self.degenerate_stmts: self.get_degenerate_statements() if not self.all_indirect_stmts: self.get_all_indirect_statements() logger.info('') logger.info("Total indirect statements: %d" % len(self.all_indirect_stmts)) logger.info("Converted indirect statements: %d" % len(self.converted_indirect_stmts)) logger.info(">> Unhandled indirect statements: %d" % (len(self.all_indirect_stmts) - len(self.converted_indirect_stmts))) logger.info('') logger.info("Total direct statements: %d" % len(self.all_direct_stmts)) logger.info("Converted direct statements: %d" % len(self.converted_direct_stmts)) logger.info("Degenerate direct statements: %d" % len(self.degenerate_stmts)) logger.info(">> Unhandled direct statements: %d" % (len(self.all_direct_stmts) - len(self.converted_direct_stmts) - len(self.degenerate_stmts))) logger.info('') logger.info("--- Unhandled direct statements ---------") for stmt in self.all_direct_stmts: if not (stmt in self.converted_direct_stmts or stmt in self.degenerate_stmts): logger.info(stmt) logger.info('') logger.info("--- Unhandled indirect statements ---------") for stmt in self.all_indirect_stmts: if not (stmt in self.converted_indirect_stmts or stmt in self.degenerate_stmts): logger.info(stmt)
[ "def", "print_statement_coverage", "(", "self", ")", ":", "if", "not", "self", ".", "all_direct_stmts", ":", "self", ".", "get_all_direct_statements", "(", ")", "if", "not", "self", ".", "degenerate_stmts", ":", "self", ".", "get_degenerate_statements", "(", ")", "if", "not", "self", ".", "all_indirect_stmts", ":", "self", ".", "get_all_indirect_statements", "(", ")", "logger", ".", "info", "(", "''", ")", "logger", ".", "info", "(", "\"Total indirect statements: %d\"", "%", "len", "(", "self", ".", "all_indirect_stmts", ")", ")", "logger", ".", "info", "(", "\"Converted indirect statements: %d\"", "%", "len", "(", "self", ".", "converted_indirect_stmts", ")", ")", "logger", ".", "info", "(", "\">> Unhandled indirect statements: %d\"", "%", "(", "len", "(", "self", ".", "all_indirect_stmts", ")", "-", "len", "(", "self", ".", "converted_indirect_stmts", ")", ")", ")", "logger", ".", "info", "(", "''", ")", "logger", ".", "info", "(", "\"Total direct statements: %d\"", "%", "len", "(", "self", ".", "all_direct_stmts", ")", ")", "logger", ".", "info", "(", "\"Converted direct statements: %d\"", "%", "len", "(", "self", ".", "converted_direct_stmts", ")", ")", "logger", ".", "info", "(", "\"Degenerate direct statements: %d\"", "%", "len", "(", "self", ".", "degenerate_stmts", ")", ")", "logger", ".", "info", "(", "\">> Unhandled direct statements: %d\"", "%", "(", "len", "(", "self", ".", "all_direct_stmts", ")", "-", "len", "(", "self", ".", "converted_direct_stmts", ")", "-", "len", "(", "self", ".", "degenerate_stmts", ")", ")", ")", "logger", ".", "info", "(", "''", ")", "logger", ".", "info", "(", "\"--- Unhandled direct statements ---------\"", ")", "for", "stmt", "in", "self", ".", "all_direct_stmts", ":", "if", "not", "(", "stmt", "in", "self", ".", "converted_direct_stmts", "or", "stmt", "in", "self", ".", "degenerate_stmts", ")", ":", "logger", ".", "info", "(", "stmt", ")", "logger", ".", "info", "(", "''", ")", "logger", ".", "info", "(", "\"--- Unhandled indirect statements ---------\"", ")", "for", "stmt", "in", "self", ".", "all_indirect_stmts", ":", "if", "not", "(", "stmt", "in", "self", ".", "converted_indirect_stmts", "or", "stmt", "in", "self", ".", "degenerate_stmts", ")", ":", "logger", ".", "info", "(", "stmt", ")" ]
Display how many of the direct statements have been converted. Also prints how many are considered 'degenerate' and not converted.
[ "Display", "how", "many", "of", "the", "direct", "statements", "have", "been", "converted", "." ]
python
train
44.581395
Fire-Proof/cuepy
cuepy/cuepy.py
https://github.com/Fire-Proof/cuepy/blob/5add7d62a31589bcdc7d2103c9c482bf718556ec/cuepy/cuepy.py#L48-L58
def device_count(self): """ Find amount of CUE devices :returns: amount of CUE devices :rtype: int """ device_count = get_device_count(self.corsair_sdk) if device_count == -1: self._raise_corsair_error() return device_count
[ "def", "device_count", "(", "self", ")", ":", "device_count", "=", "get_device_count", "(", "self", ".", "corsair_sdk", ")", "if", "device_count", "==", "-", "1", ":", "self", ".", "_raise_corsair_error", "(", ")", "return", "device_count" ]
Find amount of CUE devices :returns: amount of CUE devices :rtype: int
[ "Find", "amount", "of", "CUE", "devices" ]
python
train
26.363636
cirruscluster/cirruscluster
cirruscluster/cluster/ec2cluster.py
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/cluster/ec2cluster.py#L213-L233
def CreateBlockDeviceMap(self, image_id, instance_type): """ If you launch without specifying a manual device block mapping, you may not get all the ephemeral devices available to the given instance type. This will build one that ensures all available ephemeral devices are mapped. """ # get the block device mapping stored with the image image = self.ec2.get_image(image_id) block_device_map = image.block_device_mapping assert(block_device_map) # update it to include the ephemeral devices # max is 4... is it an error for instances with fewer than 4 ? # see: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ # InstanceStorage.html#StorageOnInstanceTypes ephemeral_device_names = ['/dev/sdb', '/dev/sdc', '/dev/sdd', '/dev/sde'] for i, device_name in enumerate(ephemeral_device_names): name = 'ephemeral%d' % (i) bdt = blockdevicemapping.BlockDeviceType(ephemeral_name = name) block_device_map[device_name] = bdt return block_device_map
[ "def", "CreateBlockDeviceMap", "(", "self", ",", "image_id", ",", "instance_type", ")", ":", "# get the block device mapping stored with the image", "image", "=", "self", ".", "ec2", ".", "get_image", "(", "image_id", ")", "block_device_map", "=", "image", ".", "block_device_mapping", "assert", "(", "block_device_map", ")", "# update it to include the ephemeral devices ", "# max is 4... is it an error for instances with fewer than 4 ? ", "# see: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/", "# InstanceStorage.html#StorageOnInstanceTypes", "ephemeral_device_names", "=", "[", "'/dev/sdb'", ",", "'/dev/sdc'", ",", "'/dev/sdd'", ",", "'/dev/sde'", "]", "for", "i", ",", "device_name", "in", "enumerate", "(", "ephemeral_device_names", ")", ":", "name", "=", "'ephemeral%d'", "%", "(", "i", ")", "bdt", "=", "blockdevicemapping", ".", "BlockDeviceType", "(", "ephemeral_name", "=", "name", ")", "block_device_map", "[", "device_name", "]", "=", "bdt", "return", "block_device_map" ]
If you launch without specifying a manual device block mapping, you may not get all the ephemeral devices available to the given instance type. This will build one that ensures all available ephemeral devices are mapped.
[ "If", "you", "launch", "without", "specifying", "a", "manual", "device", "block", "mapping", "you", "may", "not", "get", "all", "the", "ephemeral", "devices", "available", "to", "the", "given", "instance", "type", ".", "This", "will", "build", "one", "that", "ensures", "all", "available", "ephemeral", "devices", "are", "mapped", "." ]
python
train
50.571429
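For contrast with the AMI-derived mapping above, a hedged boto 2 sketch that builds an ephemeral-only mapping from scratch; the device names are the same four the method hardcodes:

```python
from boto.ec2 import blockdevicemapping

bdm = blockdevicemapping.BlockDeviceMapping()
for i, device_name in enumerate(['/dev/sdb', '/dev/sdc', '/dev/sdd', '/dev/sde']):
    # Map each slot to an instance store volume ephemeral0..ephemeral3.
    bdm[device_name] = blockdevicemapping.BlockDeviceType(
        ephemeral_name='ephemeral%d' % i)
# bdm can then be passed as the block_device_map argument to run_instances().
```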
smarie/python-parsyfiles
parsyfiles/parsing_combining_parsers.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_combining_parsers.py#L82-L95
def print_error_to_io_stream(err: Exception, io: TextIOBase, print_big_traceback : bool = True): """ Utility method to print an exception's content to a stream :param err: :param io: :param print_big_traceback: :return: """ if print_big_traceback: traceback.print_tb(err.__traceback__, file=io, limit=-GLOBAL_CONFIG.multiple_errors_tb_limit) else: traceback.print_tb(err.__traceback__, file=io, limit=-1) io.writelines(' ' + str(err.__class__.__name__) + ' : ' + str(err))
[ "def", "print_error_to_io_stream", "(", "err", ":", "Exception", ",", "io", ":", "TextIOBase", ",", "print_big_traceback", ":", "bool", "=", "True", ")", ":", "if", "print_big_traceback", ":", "traceback", ".", "print_tb", "(", "err", ".", "__traceback__", ",", "file", "=", "io", ",", "limit", "=", "-", "GLOBAL_CONFIG", ".", "multiple_errors_tb_limit", ")", "else", ":", "traceback", ".", "print_tb", "(", "err", ".", "__traceback__", ",", "file", "=", "io", ",", "limit", "=", "-", "1", ")", "io", ".", "writelines", "(", "' '", "+", "str", "(", "err", ".", "__class__", ".", "__name__", ")", "+", "' : '", "+", "str", "(", "err", ")", ")" ]
Utility method to print an exception's content to a stream :param err: :param io: :param print_big_traceback: :return:
[ "Utility", "method", "to", "print", "an", "exception", "s", "content", "to", "a", "stream" ]
python
train
36.928571
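A usage sketch, assuming the record's `print_error_to_io_stream` is importable: capture a traceback into an in-memory stream instead of stderr.

```python
from io import StringIO

try:
    1 / 0
except ZeroDivisionError as err:
    buf = StringIO()
    print_error_to_io_stream(err, buf, print_big_traceback=False)
    print(buf.getvalue())   # traceback tail plus " ZeroDivisionError : division by zero"
```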
GeorgeArgyros/symautomata
symautomata/pythondfa.py
https://github.com/GeorgeArgyros/symautomata/blob/f5d66533573b27e155bec3f36b8c00b8e3937cb3/symautomata/pythondfa.py#L336-L356
def load(self, txt_fst_file_name): """ Load the transducer from the text file format of OpenFST. The format is specified as follows: arc format: src dest ilabel olabel [weight] final state format: state [weight] lines may occur in any order except initial state must be first line Args: txt_fst_file_name (str): The input file Returns: None """ with open(txt_fst_file_name, 'r') as input_filename: for line in input_filename: line = line.strip() split_line = line.split() if len(split_line) == 1: self[int(split_line[0])].final = True else: self.add_arc(int(split_line[0]), int(split_line[1]), split_line[2].decode('hex'))
[ "def", "load", "(", "self", ",", "txt_fst_file_name", ")", ":", "with", "open", "(", "txt_fst_file_name", ",", "'r'", ")", "as", "input_filename", ":", "for", "line", "in", "input_filename", ":", "line", "=", "line", ".", "strip", "(", ")", "split_line", "=", "line", ".", "split", "(", ")", "if", "len", "(", "split_line", ")", "==", "1", ":", "self", "[", "int", "(", "split_line", "[", "0", "]", ")", "]", ".", "final", "=", "True", "else", ":", "self", ".", "add_arc", "(", "int", "(", "split_line", "[", "0", "]", ")", ",", "int", "(", "split_line", "[", "1", "]", ")", ",", "split_line", "[", "2", "]", ".", "decode", "(", "'hex'", ")", ")" ]
Load the transducer from the text file format of OpenFST. The format is specified as follows: arc format: src dest ilabel olabel [weight] final state format: state [weight] lines may occur in any order except initial state must be first line Args: txt_fst_file_name (str): The input file Returns: None
[ "Save", "the", "transducer", "in", "the", "text", "file", "format", "of", "OpenFST", ".", "The", "format", "is", "specified", "as", "follows", ":", "arc", "format", ":", "src", "dest", "ilabel", "olabel", "[", "weight", "]", "final", "state", "format", ":", "state", "[", "weight", "]", "lines", "may", "occur", "in", "any", "order", "except", "initial", "state", "must", "be", "first", "line", "Args", ":", "txt_fst_file_name", "(", "str", ")", ":", "The", "input", "file", "Returns", ":", "None" ]
python
train
41
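To make the accepted format concrete: arcs are `src dest label` lines with hex-encoded labels, and a bare state number marks a final state. The sample data below is illustrative, and `bytes.fromhex` stands in for the Python 2 `str.decode('hex')` the record uses:

```python
sample = """\
0 1 61
1 2 62
2
"""
for line in sample.splitlines():
    parts = line.split()
    if len(parts) == 1:
        print("final state:", parts[0])          # e.g. state 2 is accepting
    else:
        src, dst, label = parts
        print("arc %s -> %s on %r" % (src, dst, bytes.fromhex(label).decode()))
```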
marcolagi/quantulum
quantulum/classifier.py
https://github.com/marcolagi/quantulum/blob/28b697dfa997116c1aa3ef63a3ceb8725bffd24f/quantulum/classifier.py#L139-L167
def disambiguate_unit(unit, text): """ Resolve ambiguity. Distinguish between units that have same names, symbols or abbreviations. """ new_unit = l.UNITS[unit] if not new_unit: new_unit = l.LOWER_UNITS[unit.lower()] if not new_unit: raise KeyError('Could not find unit "%s"' % unit) if len(new_unit) > 1: transformed = TFIDF_MODEL.transform([clean_text(text)]) scores = CLF.predict_proba(transformed).tolist()[0] scores = sorted(zip(scores, TARGET_NAMES), key=lambda x: x[0], reverse=True) names = [i.name for i in new_unit] scores = [i for i in scores if i[1] in names] try: final = l.UNITS[scores[0][1]][0] logging.debug('\tAmbiguity resolved for "%s" (%s)', unit, scores) except IndexError: logging.debug('\tAmbiguity not resolved for "%s"', unit) final = new_unit[0] else: final = new_unit[0] return final
[ "def", "disambiguate_unit", "(", "unit", ",", "text", ")", ":", "new_unit", "=", "l", ".", "UNITS", "[", "unit", "]", "if", "not", "new_unit", ":", "new_unit", "=", "l", ".", "LOWER_UNITS", "[", "unit", ".", "lower", "(", ")", "]", "if", "not", "new_unit", ":", "raise", "KeyError", "(", "'Could not find unit \"%s\"'", "%", "unit", ")", "if", "len", "(", "new_unit", ")", ">", "1", ":", "transformed", "=", "TFIDF_MODEL", ".", "transform", "(", "[", "clean_text", "(", "text", ")", "]", ")", "scores", "=", "CLF", ".", "predict_proba", "(", "transformed", ")", ".", "tolist", "(", ")", "[", "0", "]", "scores", "=", "sorted", "(", "zip", "(", "scores", ",", "TARGET_NAMES", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ",", "reverse", "=", "True", ")", "names", "=", "[", "i", ".", "name", "for", "i", "in", "new_unit", "]", "scores", "=", "[", "i", "for", "i", "in", "scores", "if", "i", "[", "1", "]", "in", "names", "]", "try", ":", "final", "=", "l", ".", "UNITS", "[", "scores", "[", "0", "]", "[", "1", "]", "]", "[", "0", "]", "logging", ".", "debug", "(", "'\\tAmbiguity resolved for \"%s\" (%s)'", ",", "unit", ",", "scores", ")", "except", "IndexError", ":", "logging", ".", "debug", "(", "'\\tAmbiguity not resolved for \"%s\"'", ",", "unit", ")", "final", "=", "new_unit", "[", "0", "]", "else", ":", "final", "=", "new_unit", "[", "0", "]", "return", "final" ]
Resolve ambiguity. Distinguish between units that have same names, symbols or abbreviations.
[ "Resolve", "ambiguity", "." ]
python
train
33.965517
scanny/python-pptx
pptx/shapes/shapetree.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/shapes/shapetree.py#L171-L178
def _iter_member_elms(self): """ Generate each child of the ``<p:spTree>`` element that corresponds to a shape, in the sequence they appear in the XML. """ for shape_elm in self._spTree.iter_shape_elms(): if self._is_member_elm(shape_elm): yield shape_elm
[ "def", "_iter_member_elms", "(", "self", ")", ":", "for", "shape_elm", "in", "self", ".", "_spTree", ".", "iter_shape_elms", "(", ")", ":", "if", "self", ".", "_is_member_elm", "(", "shape_elm", ")", ":", "yield", "shape_elm" ]
Generate each child of the ``<p:spTree>`` element that corresponds to a shape, in the sequence they appear in the XML.
[ "Generate", "each", "child", "of", "the", "<p", ":", "spTree", ">", "element", "that", "corresponds", "to", "a", "shape", "in", "the", "sequence", "they", "appear", "in", "the", "XML", "." ]
python
train
39.5
erocarrera/pefile
pefile.py
https://github.com/erocarrera/pefile/blob/8a78a2e251a3f2336c232bf411133927b479edf2/pefile.py#L2217-L2237
def full_load(self): """Process the data directories. This method will load the data directories which might not have been loaded if the "fast_load" option was used. """ self.parse_data_directories() class RichHeader(object): pass rich_header = self.parse_rich_header() if rich_header: self.RICH_HEADER = RichHeader() self.RICH_HEADER.checksum = rich_header.get('checksum', None) self.RICH_HEADER.values = rich_header.get('values', None) self.RICH_HEADER.key = rich_header.get('key', None) self.RICH_HEADER.raw_data = rich_header.get('raw_data', None) self.RICH_HEADER.clear_data = rich_header.get('clear_data', None) else: self.RICH_HEADER = None
[ "def", "full_load", "(", "self", ")", ":", "self", ".", "parse_data_directories", "(", ")", "class", "RichHeader", "(", "object", ")", ":", "pass", "rich_header", "=", "self", ".", "parse_rich_header", "(", ")", "if", "rich_header", ":", "self", ".", "RICH_HEADER", "=", "RichHeader", "(", ")", "self", ".", "RICH_HEADER", ".", "checksum", "=", "rich_header", ".", "get", "(", "'checksum'", ",", "None", ")", "self", ".", "RICH_HEADER", ".", "values", "=", "rich_header", ".", "get", "(", "'values'", ",", "None", ")", "self", ".", "RICH_HEADER", ".", "key", "=", "rich_header", ".", "get", "(", "'key'", ",", "None", ")", "self", ".", "RICH_HEADER", ".", "raw_data", "=", "rich_header", ".", "get", "(", "'raw_data'", ",", "None", ")", "self", ".", "RICH_HEADER", ".", "clear_data", "=", "rich_header", ".", "get", "(", "'clear_data'", ",", "None", ")", "else", ":", "self", ".", "RICH_HEADER", "=", "None" ]
Process the data directories. This method will load the data directories which might not have been loaded if the "fast_load" option was used.
[ "Process", "the", "data", "directories", "." ]
python
train
38
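The typical flow, using pefile's public API: construct with `fast_load=True` to skip the data directories, then call `full_load` when they are needed. The file path is a placeholder.

```python
import pefile

pe = pefile.PE('sample.exe', fast_load=True)  # defer data-directory parsing
pe.full_load()                                # parse directories and the Rich header
print(pe.RICH_HEADER.checksum if pe.RICH_HEADER else 'no Rich header')
```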
dmlc/gluon-nlp
scripts/parsing/common/data.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/parsing/common/data.py#L162-L171
def log_info(self, logger): """Print statistical information via the provided logger Parameters ---------- logger : logging.Logger logger created using logging.getLogger() """ logger.info('#words in training set: %d' % self._words_in_train_data) logger.info("Vocab info: #words %d, #tags %d #rels %d" % (self.vocab_size, self.tag_size, self.rel_size))
[ "def", "log_info", "(", "self", ",", "logger", ")", ":", "logger", ".", "info", "(", "'#words in training set: %d'", "%", "self", ".", "_words_in_train_data", ")", "logger", ".", "info", "(", "\"Vocab info: #words %d, #tags %d #rels %d\"", "%", "(", "self", ".", "vocab_size", ",", "self", ".", "tag_size", ",", "self", ".", "rel_size", ")", ")" ]
Print statistical information via the provided logger Parameters ---------- logger : logging.Logger logger created using logging.getLogger()
[ "Print", "statistical", "information", "via", "the", "provided", "logger" ]
python
train
41.1
pyviz/holoviews
holoviews/plotting/bokeh/element.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/bokeh/element.py#L2106-L2180
def update_frame(self, key, ranges=None, element=None): """ Update the internal state of the Plot to represent the given key tuple (where integers represent frames). Returns this state. """ reused = isinstance(self.hmap, DynamicMap) and self.overlaid if not reused and element is None: element = self._get_frame(key) elif element is not None: self.current_frame = element self.current_key = key items = [] if element is None else list(element.data.items()) if isinstance(self.hmap, DynamicMap): range_obj = element else: range_obj = self.hmap if element is not None: ranges = self.compute_ranges(range_obj, key, ranges) # Update plot options plot_opts = self.lookup_options(element, 'plot').options inherited = self._traverse_options(element, 'plot', self._propagate_options, defaults=False) plot_opts.update(**{k: v[0] for k, v in inherited.items() if k not in plot_opts}) self.param.set_param(**plot_opts) if element and not self.overlaid and not self.tabs and not self.batched: self._update_ranges(element, ranges) # Determine which stream (if any) triggered the update triggering = [stream for stream in self.streams if stream._triggering] for k, subplot in self.subplots.items(): el = None # If in Dynamic mode propagate elements to subplots if isinstance(self.hmap, DynamicMap) and element: # In batched mode NdOverlay is passed to subplot directly if self.batched: el = element # If not batched get the Element matching the subplot elif element is not None: idx, spec, exact = dynamic_update(self, subplot, k, element, items) if idx is not None: _, el = items.pop(idx) if not exact: self._update_subplot(subplot, spec) # Skip updates to subplots when its streams is not one of # the streams that initiated the update if (triggering and all(s not in triggering for s in subplot.streams) and not subplot in self.dynamic_subplots): continue subplot.update_frame(key, ranges, element=el) if not self.batched and isinstance(self.hmap, DynamicMap) and items: init_kwargs = {'plots': self.handles['plots']} if not self.tabs: init_kwargs['plot'] = self.handles['plot'] self._create_dynamic_subplots(key, items, ranges, **init_kwargs) if not self.overlaid and not self.tabs: self._process_legend() if element and not self.overlaid and not self.tabs and not self.batched: plot = self.handles['plot'] self._update_plot(key, plot, element) self._set_active_tools(plot) self._process_legend() self._execute_hooks(element)
[ "def", "update_frame", "(", "self", ",", "key", ",", "ranges", "=", "None", ",", "element", "=", "None", ")", ":", "reused", "=", "isinstance", "(", "self", ".", "hmap", ",", "DynamicMap", ")", "and", "self", ".", "overlaid", "if", "not", "reused", "and", "element", "is", "None", ":", "element", "=", "self", ".", "_get_frame", "(", "key", ")", "elif", "element", "is", "not", "None", ":", "self", ".", "current_frame", "=", "element", "self", ".", "current_key", "=", "key", "items", "=", "[", "]", "if", "element", "is", "None", "else", "list", "(", "element", ".", "data", ".", "items", "(", ")", ")", "if", "isinstance", "(", "self", ".", "hmap", ",", "DynamicMap", ")", ":", "range_obj", "=", "element", "else", ":", "range_obj", "=", "self", ".", "hmap", "if", "element", "is", "not", "None", ":", "ranges", "=", "self", ".", "compute_ranges", "(", "range_obj", ",", "key", ",", "ranges", ")", "# Update plot options", "plot_opts", "=", "self", ".", "lookup_options", "(", "element", ",", "'plot'", ")", ".", "options", "inherited", "=", "self", ".", "_traverse_options", "(", "element", ",", "'plot'", ",", "self", ".", "_propagate_options", ",", "defaults", "=", "False", ")", "plot_opts", ".", "update", "(", "*", "*", "{", "k", ":", "v", "[", "0", "]", "for", "k", ",", "v", "in", "inherited", ".", "items", "(", ")", "if", "k", "not", "in", "plot_opts", "}", ")", "self", ".", "param", ".", "set_param", "(", "*", "*", "plot_opts", ")", "if", "element", "and", "not", "self", ".", "overlaid", "and", "not", "self", ".", "tabs", "and", "not", "self", ".", "batched", ":", "self", ".", "_update_ranges", "(", "element", ",", "ranges", ")", "# Determine which stream (if any) triggered the update", "triggering", "=", "[", "stream", "for", "stream", "in", "self", ".", "streams", "if", "stream", ".", "_triggering", "]", "for", "k", ",", "subplot", "in", "self", ".", "subplots", ".", "items", "(", ")", ":", "el", "=", "None", "# If in Dynamic mode propagate elements to subplots", "if", "isinstance", "(", "self", ".", "hmap", ",", "DynamicMap", ")", "and", "element", ":", "# In batched mode NdOverlay is passed to subplot directly", "if", "self", ".", "batched", ":", "el", "=", "element", "# If not batched get the Element matching the subplot", "elif", "element", "is", "not", "None", ":", "idx", ",", "spec", ",", "exact", "=", "dynamic_update", "(", "self", ",", "subplot", ",", "k", ",", "element", ",", "items", ")", "if", "idx", "is", "not", "None", ":", "_", ",", "el", "=", "items", ".", "pop", "(", "idx", ")", "if", "not", "exact", ":", "self", ".", "_update_subplot", "(", "subplot", ",", "spec", ")", "# Skip updates to subplots when its streams is not one of", "# the streams that initiated the update", "if", "(", "triggering", "and", "all", "(", "s", "not", "in", "triggering", "for", "s", "in", "subplot", ".", "streams", ")", "and", "not", "subplot", "in", "self", ".", "dynamic_subplots", ")", ":", "continue", "subplot", ".", "update_frame", "(", "key", ",", "ranges", ",", "element", "=", "el", ")", "if", "not", "self", ".", "batched", "and", "isinstance", "(", "self", ".", "hmap", ",", "DynamicMap", ")", "and", "items", ":", "init_kwargs", "=", "{", "'plots'", ":", "self", ".", "handles", "[", "'plots'", "]", "}", "if", "not", "self", ".", "tabs", ":", "init_kwargs", "[", "'plot'", "]", "=", "self", ".", "handles", "[", "'plot'", "]", "self", ".", "_create_dynamic_subplots", "(", "key", ",", "items", ",", "ranges", ",", "*", "*", "init_kwargs", ")", "if", "not", "self", ".", "overlaid", "and", "not", 
"self", ".", "tabs", ":", "self", ".", "_process_legend", "(", ")", "if", "element", "and", "not", "self", ".", "overlaid", "and", "not", "self", ".", "tabs", "and", "not", "self", ".", "batched", ":", "plot", "=", "self", ".", "handles", "[", "'plot'", "]", "self", ".", "_update_plot", "(", "key", ",", "plot", ",", "element", ")", "self", ".", "_set_active_tools", "(", "plot", ")", "self", ".", "_process_legend", "(", ")", "self", ".", "_execute_hooks", "(", "element", ")" ]
Update the internal state of the Plot to represent the given key tuple (where integers represent frames). Returns this state.
[ "Update", "the", "internal", "state", "of", "the", "Plot", "to", "represent", "the", "given", "key", "tuple", "(", "where", "integers", "represent", "frames", ")", ".", "Returns", "this", "state", "." ]
python
train
42.333333
apache/incubator-heron
heron/tools/cli/src/python/deactivate.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/cli/src/python/deactivate.py#L34-L43
def run(command, parser, cl_args, unknown_args): ''' :param command: :param parser: :param cl_args: :param unknown_args: :return: ''' Log.debug("Deactivate Args: %s", cl_args) return cli_helper.run(command, cl_args, "deactivate topology")
[ "def", "run", "(", "command", ",", "parser", ",", "cl_args", ",", "unknown_args", ")", ":", "Log", ".", "debug", "(", "\"Deactivate Args: %s\"", ",", "cl_args", ")", "return", "cli_helper", ".", "run", "(", "command", ",", "cl_args", ",", "\"deactivate topology\"", ")" ]
:param command: :param parser: :param cl_args: :param unknown_args: :return:
[ ":", "param", "command", ":", ":", "param", "parser", ":", ":", "param", "cl_args", ":", ":", "param", "unknown_args", ":", ":", "return", ":" ]
python
valid
24.7
phaethon/kamene
kamene/layers/inet6.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/layers/inet6.py#L86-L122
def getmacbyip6(ip6, chainCC=0): """ Returns the mac address to be used for provided 'ip6' peer. neighborCache.get() method is used on instantiated neighbor cache. Resolution mechanism is described in associated doc string. (chainCC parameter value ends up being passed to sending function used to perform the resolution, if needed) """ if in6_ismaddr(ip6): # Multicast mac = in6_getnsmac(inet_pton(socket.AF_INET6, ip6)) return mac iff,a,nh = conf.route6.route(ip6, dev=conf.iface6) if iff == LOOPBACK_NAME: return "ff:ff:ff:ff:ff:ff" if nh != '::': ip6 = nh # Found next hop mac = conf.netcache.in6_neighbor.get(ip6) if mac: return mac res = neighsol(ip6, a, iff, chainCC=chainCC) if res is not None: if ICMPv6NDOptDstLLAddr in res: mac = res[ICMPv6NDOptDstLLAddr].lladdr else: mac = res.src conf.netcache.in6_neighbor[ip6] = mac return mac return None
[ "def", "getmacbyip6", "(", "ip6", ",", "chainCC", "=", "0", ")", ":", "if", "in6_ismaddr", "(", "ip6", ")", ":", "# Multicast ", "mac", "=", "in6_getnsmac", "(", "inet_pton", "(", "socket", ".", "AF_INET6", ",", "ip6", ")", ")", "return", "mac", "iff", ",", "a", ",", "nh", "=", "conf", ".", "route6", ".", "route", "(", "ip6", ",", "dev", "=", "conf", ".", "iface6", ")", "if", "iff", "==", "LOOPBACK_NAME", ":", "return", "\"ff:ff:ff:ff:ff:ff\"", "if", "nh", "!=", "'::'", ":", "ip6", "=", "nh", "# Found next hop", "mac", "=", "conf", ".", "netcache", ".", "in6_neighbor", ".", "get", "(", "ip6", ")", "if", "mac", ":", "return", "mac", "res", "=", "neighsol", "(", "ip6", ",", "a", ",", "iff", ",", "chainCC", "=", "chainCC", ")", "if", "res", "is", "not", "None", ":", "if", "ICMPv6NDOptDstLLAddr", "in", "res", ":", "mac", "=", "res", "[", "ICMPv6NDOptDstLLAddr", "]", ".", "lladdr", "else", ":", "mac", "=", "res", ".", "src", "conf", ".", "netcache", ".", "in6_neighbor", "[", "ip6", "]", "=", "mac", "return", "mac", "return", "None" ]
Returns the mac address to be used for provided 'ip6' peer. neighborCache.get() method is used on instantiated neighbor cache. Resolution mechanism is described in associated doc string. (chainCC parameter value ends up being passed to sending function used to perform the resolution, if needed)
[ "Returns", "the", "mac", "address", "to", "be", "used", "for", "provided", "ip6", "peer", ".", "neighborCache", ".", "get", "()", "method", "is", "used", "on", "instantiated", "neighbor", "cache", ".", "Resolution", "mechanism", "is", "described", "in", "associated", "doc", "string", "." ]
python
train
26.783784
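A hedged usage sketch for the getmacbyip6 record above; it assumes a kamene/scapy-style environment with a configured IPv6 route table, and the addresses are placeholders.

```python
# Hypothetical interactive session; conf.route6/conf.iface6 must be set up.
print(getmacbyip6("ff02::1"))   # multicast: MAC derived locally (33:33:...), no traffic sent
mac = getmacbyip6("fe80::1")    # unicast placeholder: may emit a Neighbor Solicitation
print(mac)                      # cached MAC string, or None if the peer never answered
```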
persandstrom/python-verisure
verisure/session.py
https://github.com/persandstrom/python-verisure/blob/babd25e7f8fb2b24f12e4109dfa8a04041e8dcb8/verisure/session.py#L529-L546
def set_heat_pump_mode(self, device_label, mode): """ Set heatpump mode Args: mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO' """ response = None try: response = requests.put( urls.set_heatpump_state(self._giid, device_label), headers={ 'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': 'vid={}'.format(self._vid)}, data=json.dumps({'mode': mode})) except requests.exceptions.RequestException as ex: raise RequestError(ex) _validate_response(response) return json.loads(response.text)
[ "def", "set_heat_pump_mode", "(", "self", ",", "device_label", ",", "mode", ")", ":", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "put", "(", "urls", ".", "set_heatpump_state", "(", "self", ".", "_giid", ",", "device_label", ")", ",", "headers", "=", "{", "'Accept'", ":", "'application/json'", ",", "'Content-Type'", ":", "'application/json'", ",", "'Cookie'", ":", "'vid={}'", ".", "format", "(", "self", ".", "_vid", ")", "}", ",", "data", "=", "json", ".", "dumps", "(", "{", "'mode'", ":", "mode", "}", ")", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "ex", ":", "raise", "RequestError", "(", "ex", ")", "_validate_response", "(", "response", ")", "return", "json", ".", "loads", "(", "response", ".", "text", ")" ]
Set heatpump mode Args: mode (str): 'HEAT', 'COOL', 'FAN' or 'AUTO'
[ "Set", "heatpump", "mode", "Args", ":", "mode", "(", "str", ")", ":", "HEAT", "COOL", "FAN", "or", "AUTO" ]
python
train
38.722222
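A hedged usage sketch for the set_heat_pump_mode record above; `session` and the device label are hypothetical, and an authenticated Verisure session (valid `_giid`/`_vid`) is assumed.

```python
# Hypothetical: 'session' is a logged-in verisure.Session and 'ABCD 1234'
# is a placeholder device label taken from the installation overview.
result = session.set_heat_pump_mode("ABCD 1234", "HEAT")
print(result)  # parsed JSON body on success; RequestError is raised on transport failure
```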
chrisjrn/registrasion
registrasion/controllers/conditions.py
https://github.com/chrisjrn/registrasion/blob/461d5846c6f9f3b7099322a94f5d9911564448e4/registrasion/controllers/conditions.py#L75-L80
def passes_filter(self, user): ''' Returns true if the condition passes the filter ''' cls = type(self.condition) qs = cls.objects.filter(pk=self.condition.id) return self.condition in self.pre_filter(qs, user)
[ "def", "passes_filter", "(", "self", ",", "user", ")", ":", "cls", "=", "type", "(", "self", ".", "condition", ")", "qs", "=", "cls", ".", "objects", ".", "filter", "(", "pk", "=", "self", ".", "condition", ".", "id", ")", "return", "self", ".", "condition", "in", "self", ".", "pre_filter", "(", "qs", ",", "user", ")" ]
Returns true if the condition passes the filter
[ "Returns", "true", "if", "the", "condition", "passes", "the", "filter" ]
python
test
39.666667
quantmind/pulsar
pulsar/apps/wsgi/wrappers.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/wsgi/wrappers.py#L367-L384
def absolute_uri(self, location=None, scheme=None, **query): """Builds an absolute URI from ``location`` and variables available in this request. If no ``location`` is specified, the relative URI is built from :meth:`full_path`. """ if not is_absolute_uri(location): if location or location is None: location = self.full_path(location, **query) if not scheme: scheme = self.is_secure and 'https' or 'http' base = '%s://%s' % (scheme, self.get_host()) return '%s%s' % (base, location) elif not scheme: return iri_to_uri(location) else: raise ValueError('Absolute location with scheme not valid')
[ "def", "absolute_uri", "(", "self", ",", "location", "=", "None", ",", "scheme", "=", "None", ",", "*", "*", "query", ")", ":", "if", "not", "is_absolute_uri", "(", "location", ")", ":", "if", "location", "or", "location", "is", "None", ":", "location", "=", "self", ".", "full_path", "(", "location", ",", "*", "*", "query", ")", "if", "not", "scheme", ":", "scheme", "=", "self", ".", "is_secure", "and", "'https'", "or", "'http'", "base", "=", "'%s://%s'", "%", "(", "scheme", ",", "self", ".", "get_host", "(", ")", ")", "return", "'%s%s'", "%", "(", "base", ",", "location", ")", "elif", "not", "scheme", ":", "return", "iri_to_uri", "(", "location", ")", "else", ":", "raise", "ValueError", "(", "'Absolute location with scheme not valid'", ")" ]
Builds an absolute URI from ``location`` and variables available in this request. If no ``location`` is specified, the relative URI is built from :meth:`full_path`.
[ "Builds", "an", "absolute", "URI", "from", "location", "and", "variables", "available", "in", "this", "request", "." ]
python
train
41.444444
niklasf/python-chess
chess/__init__.py
https://github.com/niklasf/python-chess/blob/d91f986ca3e046b300a0d7d9ee2a13b07610fe1a/chess/__init__.py#L2764-L2775
def push_san(self, san: str) -> Move: """ Parses a move in standard algebraic notation, makes the move and puts it on the move stack. Returns the move. :raises: :exc:`ValueError` if neither legal nor a null move. """ move = self.parse_san(san) self.push(move) return move
[ "def", "push_san", "(", "self", ",", "san", ":", "str", ")", "->", "Move", ":", "move", "=", "self", ".", "parse_san", "(", "san", ")", "self", ".", "push", "(", "move", ")", "return", "move" ]
Parses a move in standard algebraic notation, makes the move and puts it on the move stack. Returns the move. :raises: :exc:`ValueError` if neither legal nor a null move.
[ "Parses", "a", "move", "in", "standard", "algebraic", "notation", "makes", "the", "move", "and", "puts", "it", "on", "the", "move", "stack", "." ]
python
train
28.166667
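The push_san record above is part of python-chess's Board API; a short self-contained usage example:

```python
import chess

board = chess.Board()
move = board.push_san("e4")      # parses SAN, plays the move, returns it
print(move.uci())                # e2e4

try:
    board.push_san("Qxe4")       # illegal in this position
except ValueError as err:
    print("rejected:", err)
```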
RedHatInsights/insights-core
insights/util/__init__.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/__init__.py#L238-L273
def case_variants(*elements): """ For configs which take case-insensitive options, it is necessary to extend the list with various common case variants (all combinations are not practical). In the future, this should be removed, when parser filters are made case-insensitive. Args: *elements (str): list of elements which need case-sensitive expansion, you should use default case such as `Ciphers`, `MACs`, `UsePAM`, `MaxAuthTries` Returns: list: list of all expanded elements """ expanded_list = [] for element in elements: low = element.lower() up = element.upper() title = element.title() # Inner case conversion, such as `MACs` or `UsePAM` to `Macs` and `UsePam` converted = [] for i, letter in enumerate(element): if i == 0: converted.append(letter) else: if element[i - 1].isupper(): converted.append(letter.lower()) else: converted.append(letter) converted = "".join(converted) for new_element in (element, converted, low, up, title): if new_element not in expanded_list: expanded_list.append(new_element) return expanded_list
[ "def", "case_variants", "(", "*", "elements", ")", ":", "expanded_list", "=", "[", "]", "for", "element", "in", "elements", ":", "low", "=", "element", ".", "lower", "(", ")", "up", "=", "element", ".", "upper", "(", ")", "title", "=", "element", ".", "title", "(", ")", "# Inner case conversion, such as `MACs` or `UsePAM` to `Macs` and `UsePam`", "converted", "=", "[", "]", "for", "i", ",", "letter", "in", "enumerate", "(", "element", ")", ":", "if", "i", "==", "0", ":", "converted", ".", "append", "(", "letter", ")", "else", ":", "if", "element", "[", "i", "-", "1", "]", ".", "isupper", "(", ")", ":", "converted", ".", "append", "(", "letter", ".", "lower", "(", ")", ")", "else", ":", "converted", ".", "append", "(", "letter", ")", "converted", "=", "\"\"", ".", "join", "(", "converted", ")", "for", "new_element", "in", "(", "element", ",", "converted", ",", "low", ",", "up", ",", "title", ")", ":", "if", "new_element", "not", "in", "expanded_list", ":", "expanded_list", ".", "append", "(", "new_element", ")", "return", "expanded_list" ]
For configs which take case-insensitive options, it is necessary to extend the list with various common case variants (all combinations are not practical). In the future, this should be removed, when parser filters are made case-insensitive. Args: *elements (str): list of elements which need case-sensitive expansion, you should use default case such as `Ciphers`, `MACs`, `UsePAM`, `MaxAuthTries` Returns: list: list of all expanded elements
[ "For", "configs", "which", "take", "case", "-", "insensitive", "options", "it", "is", "necessary", "to", "extend", "the", "list", "with", "various", "common", "case", "variants", "(", "all", "combinations", "are", "not", "practical", ")", ".", "In", "the", "future", "this", "should", "be", "removed", "when", "parser", "filters", "are", "made", "case", "-", "insensitive", "." ]
python
train
35.805556
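A worked example of the expansion in the case_variants record above; both outputs follow directly from the code (the title-cased form of 'MACs' duplicates the inner-case conversion 'Macs', so it is not appended twice).

```python
print(case_variants("MACs"))
# ['MACs', 'Macs', 'macs', 'MACS']
print(case_variants("UsePAM"))
# ['UsePAM', 'UsePam', 'usepam', 'USEPAM', 'Usepam']
```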
zsethna/OLGA
olga/load_model.py
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L169-L187
def generate_cutV_genomic_CDR3_segs(self): """Add palindromic inserted nucleotides to germline V sequences. The maximum number of palindromic insertions are appended to the germline V segments so that delV can index directly for number of nucleotides to delete from a segment. Sets the attribute cutV_genomic_CDR3_segs. """ max_palindrome = self.max_delV_palindrome self.cutV_genomic_CDR3_segs = [] for CDR3_V_seg in [x[1] for x in self.genV]: if len(CDR3_V_seg) < max_palindrome: self.cutV_genomic_CDR3_segs += [cutR_seq(CDR3_V_seg, 0, len(CDR3_V_seg))] else: self.cutV_genomic_CDR3_segs += [cutR_seq(CDR3_V_seg, 0, max_palindrome)]
[ "def", "generate_cutV_genomic_CDR3_segs", "(", "self", ")", ":", "max_palindrome", "=", "self", ".", "max_delV_palindrome", "self", ".", "cutV_genomic_CDR3_segs", "=", "[", "]", "for", "CDR3_V_seg", "in", "[", "x", "[", "1", "]", "for", "x", "in", "self", ".", "genV", "]", ":", "if", "len", "(", "CDR3_V_seg", ")", "<", "max_palindrome", ":", "self", ".", "cutV_genomic_CDR3_segs", "+=", "[", "cutR_seq", "(", "CDR3_V_seg", ",", "0", ",", "len", "(", "CDR3_V_seg", ")", ")", "]", "else", ":", "self", ".", "cutV_genomic_CDR3_segs", "+=", "[", "cutR_seq", "(", "CDR3_V_seg", ",", "0", ",", "max_palindrome", ")", "]" ]
Add palindromic inserted nucleotides to germline V sequences. The maximum number of palindromic insertions are appended to the germline V segments so that delV can index directly for number of nucleotides to delete from a segment. Sets the attribute cutV_genomic_CDR3_segs.
[ "Add", "palindromic", "inserted", "nucleotides", "to", "germline", "V", "sequences", ".", "The", "maximum", "number", "of", "palindromic", "insertions", "are", "appended", "to", "the", "germline", "V", "segments", "so", "that", "delV", "can", "index", "directly", "for", "number", "of", "nucleotides", "to", "delete", "from", "a", "segment", ".", "Sets", "the", "attribute", "cutV_genomic_CDR3_segs", "." ]
python
train
40.842105
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/interactiveshell.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/interactiveshell.py#L2065-L2094
def run_cell_magic(self, magic_name, line, cell): """Execute the given cell magic. Parameters ---------- magic_name : str Name of the desired magic function, without '%' prefix. line : str The rest of the first input line as a single string. cell : str The body of the cell as a (possibly multiline) string. """ fn = self.find_cell_magic(magic_name) if fn is None: lm = self.find_line_magic(magic_name) etpl = "Cell magic function `%%%%%s` not found%s." extra = '' if lm is None else (' (But line magic `%%%s` exists, ' 'did you mean that instead?)' % magic_name ) error(etpl % (magic_name, extra)) else: # Note: this is the distance in the stack to the user's frame. # This will need to be updated if the internal calling logic gets # refactored, or else we'll be expanding the wrong variables. stack_depth = 2 magic_arg_s = self.var_expand(line, stack_depth) with self.builtin_trap: result = fn(line, cell) return result
[ "def", "run_cell_magic", "(", "self", ",", "magic_name", ",", "line", ",", "cell", ")", ":", "fn", "=", "self", ".", "find_cell_magic", "(", "magic_name", ")", "if", "fn", "is", "None", ":", "lm", "=", "self", ".", "find_line_magic", "(", "magic_name", ")", "etpl", "=", "\"Cell magic function `%%%%%s` not found%s.\"", "extra", "=", "''", "if", "lm", "is", "None", "else", "(", "' (But line magic `%%%s` exists, '", "'did you mean that instead?)'", "%", "magic_name", ")", "error", "(", "etpl", "%", "(", "magic_name", ",", "extra", ")", ")", "else", ":", "# Note: this is the distance in the stack to the user's frame.", "# This will need to be updated if the internal calling logic gets", "# refactored, or else we'll be expanding the wrong variables.", "stack_depth", "=", "2", "magic_arg_s", "=", "self", ".", "var_expand", "(", "line", ",", "stack_depth", ")", "with", "self", ".", "builtin_trap", ":", "result", "=", "fn", "(", "line", ",", "cell", ")", "return", "result" ]
Execute the given cell magic. Parameters ---------- magic_name : str Name of the desired magic function, without '%' prefix. line : str The rest of the first input line as a single string. cell : str The body of the cell as a (possibly multiline) string.
[ "Execute", "the", "given", "cell", "magic", ".", "Parameters", "----------", "magic_name", ":", "str", "Name", "of", "the", "desired", "magic", "function", "without", "%", "prefix", "." ]
python
test
39.933333
Kronuz/pyScss
scss/extension/compass/images.py
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/extension/compass/images.py#L199-L206
def inline_image(image, mime_type=None, dst_color=None, src_color=None, spacing=None, collapse_x=None, collapse_y=None): """ Embeds the contents of a file directly inside your stylesheet, eliminating the need for another HTTP request. For small files such as images or fonts, this can be a performance benefit at the cost of a larger generated CSS file. """ return _image_url(image, False, False, dst_color, src_color, True, mime_type, spacing, collapse_x, collapse_y)
[ "def", "inline_image", "(", "image", ",", "mime_type", "=", "None", ",", "dst_color", "=", "None", ",", "src_color", "=", "None", ",", "spacing", "=", "None", ",", "collapse_x", "=", "None", ",", "collapse_y", "=", "None", ")", ":", "return", "_image_url", "(", "image", ",", "False", ",", "False", ",", "dst_color", ",", "src_color", ",", "True", ",", "mime_type", ",", "spacing", ",", "collapse_x", ",", "collapse_y", ")" ]
Embeds the contents of a file directly inside your stylesheet, eliminating the need for another HTTP request. For small files such as images or fonts, this can be a performance benefit at the cost of a larger generated CSS file.
[ "Embeds", "the", "contents", "of", "a", "file", "directly", "inside", "your", "stylesheet", "eliminating", "the", "need", "for", "another", "HTTP", "request", ".", "For", "small", "files", "such", "as", "images", "or", "fonts", "this", "can", "be", "a", "performance", "benefit", "at", "the", "cost", "of", "a", "larger", "generated", "CSS", "file", "." ]
python
train
60.75
mixcloud/django-experiments
experiments/utils.py
https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/utils.py#L143-L158
def incorporate(self, other_user): """Incorporate all enrollments and goals performed by the other user. If this user is not enrolled in a given experiment, the results for the other user are incorporated. For experiments this user is already enrolled in, the results of the other user are discarded. This takes a relatively large amount of time for each experiment the other user is enrolled in.""" for enrollment in other_user._get_all_enrollments(): if not self._get_enrollment(enrollment.experiment): self._set_enrollment(enrollment.experiment, enrollment.alternative, enrollment.enrollment_date, enrollment.last_seen) goals = self.experiment_counter.participant_goal_frequencies(enrollment.experiment, enrollment.alternative, other_user._participant_identifier()) for goal_name, count in goals: self.experiment_counter.increment_goal_count(enrollment.experiment, enrollment.alternative, goal_name, self._participant_identifier(), count) other_user._cancel_enrollment(enrollment.experiment)
[ "def", "incorporate", "(", "self", ",", "other_user", ")", ":", "for", "enrollment", "in", "other_user", ".", "_get_all_enrollments", "(", ")", ":", "if", "not", "self", ".", "_get_enrollment", "(", "enrollment", ".", "experiment", ")", ":", "self", ".", "_set_enrollment", "(", "enrollment", ".", "experiment", ",", "enrollment", ".", "alternative", ",", "enrollment", ".", "enrollment_date", ",", "enrollment", ".", "last_seen", ")", "goals", "=", "self", ".", "experiment_counter", ".", "participant_goal_frequencies", "(", "enrollment", ".", "experiment", ",", "enrollment", ".", "alternative", ",", "other_user", ".", "_participant_identifier", "(", ")", ")", "for", "goal_name", ",", "count", "in", "goals", ":", "self", ".", "experiment_counter", ".", "increment_goal_count", "(", "enrollment", ".", "experiment", ",", "enrollment", ".", "alternative", ",", "goal_name", ",", "self", ".", "_participant_identifier", "(", ")", ",", "count", ")", "other_user", ".", "_cancel_enrollment", "(", "enrollment", ".", "experiment", ")" ]
Incorporate all enrollments and goals performed by the other user. If this user is not enrolled in a given experiment, the results for the other user are incorporated. For experiments this user is already enrolled in, the results of the other user are discarded. This takes a relatively large amount of time for each experiment the other user is enrolled in.
[ "Incorporate", "all", "enrollments", "and", "goals", "performed", "by", "the", "other", "user" ]
python
train
70.4375
ska-sa/katcp-python
katcp/client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/client.py#L1469-L1508
def request_check(client, exception, *msg_parms, **kwargs): """Make blocking request to client and raise exception if reply is not ok. Parameters ---------- client : DeviceClient instance exception: Exception class to raise *msg_parms : Message parameters sent to the Message.request() call **kwargs : Keyword arguments Forwards kwargs['timeout'] to client.blocking_request(). Forwards kwargs['mid'] to Message.request(). Returns ------- reply, informs : as returned by client.blocking_request Raises ------ *exception* passed as parameter is raised if reply.reply_ok() is False Notes ----- A typical use-case for this function is to use functools.partial() to bind a particular client and exception. The resulting function can then be used instead of direct client.blocking_request() calls to automate error handling. """ timeout = kwargs.get('timeout', None) req_msg = Message.request(*msg_parms) if timeout is not None: reply, informs = client.blocking_request(req_msg, timeout=timeout) else: reply, informs = client.blocking_request(req_msg) if not reply.reply_ok(): raise exception('Unexpected failure reply "{2}"\n' ' with device at {0}, request \n"{1}"' .format(client.bind_address_string, req_msg, reply)) return reply, informs
[ "def", "request_check", "(", "client", ",", "exception", ",", "*", "msg_parms", ",", "*", "*", "kwargs", ")", ":", "timeout", "=", "kwargs", ".", "get", "(", "'timeout'", ",", "None", ")", "req_msg", "=", "Message", ".", "request", "(", "*", "msg_parms", ")", "if", "timeout", "is", "not", "None", ":", "reply", ",", "informs", "=", "client", ".", "blocking_request", "(", "req_msg", ",", "timeout", "=", "timeout", ")", "else", ":", "reply", ",", "informs", "=", "client", ".", "blocking_request", "(", "req_msg", ")", "if", "not", "reply", ".", "reply_ok", "(", ")", ":", "raise", "exception", "(", "'Unexpected failure reply \"{2}\"\\n'", "' with device at {0}, request \\n\"{1}\"'", ".", "format", "(", "client", ".", "bind_address_string", ",", "req_msg", ",", "reply", ")", ")", "return", "reply", ",", "informs" ]
Make blocking request to client and raise exception if reply is not ok. Parameters ---------- client : DeviceClient instance exception: Exception class to raise *msg_parms : Message parameters sent to the Message.request() call **kwargs : Keyword arguments Forwards kwargs['timeout'] to client.blocking_request(). Forwards kwargs['mid'] to Message.request(). Returns ------- reply, informs : as returned by client.blocking_request Raises ------ *exception* passed as parameter is raised if reply.reply_ok() is False Notes ----- A typical use-case for this function is to use functools.partial() to bind a particular client and exception. The resulting function can then be used instead of direct client.blocking_request() calls to automate error handling.
[ "Make", "blocking", "request", "to", "client", "and", "raise", "exception", "if", "reply", "is", "not", "ok", "." ]
python
train
34.85
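The Notes in the request_check record above suggest binding a client and exception with functools.partial; a minimal sketch of that pattern, where `client` and `DeviceError` are assumed to exist:

```python
import functools

# Hypothetical: 'client' is a connected katcp DeviceClient and DeviceError
# is whatever exception class the caller wants raised on a failed reply.
check = functools.partial(request_check, client, DeviceError)

reply, informs = check("sensor-list", timeout=5.0)  # raises DeviceError unless the reply is ok
```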
fastai/fastai
old/fastai/dataset.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/old/fastai/dataset.py#L594-L607
def split_by_idx(idxs, *a): """ Split each array passed as *a, to a pair of arrays like this (elements selected by idxs, the remaining elements) This can be used to split multiple arrays containing training data to validation and training set. :param idxs [int]: list of indexes selected :param a list: list of np.array, each array should have same amount of elements in the first dimension :return: list of tuples, each containing a split of corresponding array from *a. First element of each tuple is an array composed from elements selected by idxs, second element is an array of remaining elements. """ mask = np.zeros(len(a[0]),dtype=bool) mask[np.array(idxs)] = True return [(o[mask],o[~mask]) for o in a]
[ "def", "split_by_idx", "(", "idxs", ",", "*", "a", ")", ":", "mask", "=", "np", ".", "zeros", "(", "len", "(", "a", "[", "0", "]", ")", ",", "dtype", "=", "bool", ")", "mask", "[", "np", ".", "array", "(", "idxs", ")", "]", "=", "True", "return", "[", "(", "o", "[", "mask", "]", ",", "o", "[", "~", "mask", "]", ")", "for", "o", "in", "a", "]" ]
Split each array passed as *a, to a pair of arrays like this (elements selected by idxs, the remaining elements) This can be used to split multiple arrays containing training data to validation and training set. :param idxs [int]: list of indexes selected :param a list: list of np.array, each array should have same amount of elements in the first dimension :return: list of tuples, each containing a split of corresponding array from *a. First element of each tuple is an array composed from elements selected by idxs, second element is an array of remaining elements.
[ "Split", "each", "array", "passed", "as", "*", "a", "to", "a", "pair", "of", "arrays", "like", "this", "(", "elements", "selected", "by", "idxs", "the", "remaining", "elements", ")", "This", "can", "be", "used", "to", "split", "multiple", "arrays", "containing", "training", "data", "to", "validation", "and", "training", "set", "." ]
python
train
54.571429
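A self-contained demonstration of the split_by_idx record above; only numpy is required.

```python
import numpy as np

xs = np.arange(10)
ys = np.arange(10) * 2
(x_val, x_trn), (y_val, y_trn) = split_by_idx([0, 3, 7], xs, ys)
print(x_val)   # [0 3 7]
print(x_trn)   # [1 2 4 5 6 8 9]
print(y_val)   # [ 0  6 14]
```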
ranaroussi/ezibpy
ezibpy/ezibpy.py
https://github.com/ranaroussi/ezibpy/blob/1a9d4bf52018abd2a01af7c991d7cf00cda53e0c/ezibpy/ezibpy.py#L1832-L1844
def cancelMarketDepth(self, contracts=None): """ Cancel streaming market data for contract https://www.interactivebrokers.com/en/software/api/apiguide/java/cancelmktdepth.htm """ if contracts is None: contracts = list(self.contracts.values()) elif not isinstance(contracts, list): contracts = [contracts] for contract in contracts: tickerId = self.tickerId(self.contractString(contract)) self.ibConn.cancelMktDepth(tickerId=tickerId)
[ "def", "cancelMarketDepth", "(", "self", ",", "contracts", "=", "None", ")", ":", "if", "contracts", "is", "None", ":", "contracts", "=", "list", "(", "self", ".", "contracts", ".", "values", "(", ")", ")", "elif", "not", "isinstance", "(", "contracts", ",", "list", ")", ":", "contracts", "=", "[", "contracts", "]", "for", "contract", "in", "contracts", ":", "tickerId", "=", "self", ".", "tickerId", "(", "self", ".", "contractString", "(", "contract", ")", ")", "self", ".", "ibConn", ".", "cancelMktDepth", "(", "tickerId", "=", "tickerId", ")" ]
Cancel streaming market data for contract https://www.interactivebrokers.com/en/software/api/apiguide/java/cancelmktdepth.htm
[ "Cancel", "streaming", "market", "data", "for", "contract", "https", ":", "//", "www", ".", "interactivebrokers", ".", "com", "/", "en", "/", "software", "/", "api", "/", "apiguide", "/", "java", "/", "cancelmktdepth", ".", "htm" ]
python
train
40.461538
suds-community/suds
suds/sax/document.py
https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/sax/document.py#L129-L146
def getChildren(self, name=None, ns=None): """ Get a list of children by (optional) name and/or (optional) namespace. @param name: The name of a child element (may contain prefix). @type name: basestring @param ns: An optional namespace used to match the child. @type ns: (I{prefix}, I{name}) @return: The list of matching children. @rtype: [L{Element},...] """ if name is None: matched = self.__root else: matched = self.getChild(name, ns) if matched is None: return [] else: return [matched,]
[ "def", "getChildren", "(", "self", ",", "name", "=", "None", ",", "ns", "=", "None", ")", ":", "if", "name", "is", "None", ":", "matched", "=", "self", ".", "__root", "else", ":", "matched", "=", "self", ".", "getChild", "(", "name", ",", "ns", ")", "if", "matched", "is", "None", ":", "return", "[", "]", "else", ":", "return", "[", "matched", ",", "]" ]
Get a list of children by (optional) name and/or (optional) namespace. @param name: The name of a child element (may contain prefix). @type name: basestring @param ns: An optional namespace used to match the child. @type ns: (I{prefix}, I{name}) @return: The list of matching children. @rtype: [L{Element},...]
[ "Get", "a", "list", "of", "children", "by", "(", "optional", ")", "name", "and", "/", "or", "(", "optional", ")", "namespace", "." ]
python
train
34.944444
ioos/compliance-checker
compliance_checker/acdd.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/acdd.py#L117-L141
def get_applicable_variables(self, ds): ''' Returns a list of variable names that are applicable to ACDD Metadata Checks for variables. This includes geophysical and coordinate variables only. :param netCDF4.Dataset ds: An open netCDF dataset ''' if self._applicable_variables is None: self.applicable_variables = cfutil.get_geophysical_variables(ds) varname = cfutil.get_time_variable(ds) # avoid duplicates by checking if already present if varname and (varname not in self.applicable_variables): self.applicable_variables.append(varname) varname = cfutil.get_lon_variable(ds) if varname and (varname not in self.applicable_variables): self.applicable_variables.append(varname) varname = cfutil.get_lat_variable(ds) if varname and (varname not in self.applicable_variables): self.applicable_variables.append(varname) varname = cfutil.get_z_variable(ds) if varname and (varname not in self.applicable_variables): self.applicable_variables.append(varname) return self.applicable_variables
[ "def", "get_applicable_variables", "(", "self", ",", "ds", ")", ":", "if", "self", ".", "_applicable_variables", "is", "None", ":", "self", ".", "applicable_variables", "=", "cfutil", ".", "get_geophysical_variables", "(", "ds", ")", "varname", "=", "cfutil", ".", "get_time_variable", "(", "ds", ")", "# avoid duplicates by checking if already present", "if", "varname", "and", "(", "varname", "not", "in", "self", ".", "applicable_variables", ")", ":", "self", ".", "applicable_variables", ".", "append", "(", "varname", ")", "varname", "=", "cfutil", ".", "get_lon_variable", "(", "ds", ")", "if", "varname", "and", "(", "varname", "not", "in", "self", ".", "applicable_variables", ")", ":", "self", ".", "applicable_variables", ".", "append", "(", "varname", ")", "varname", "=", "cfutil", ".", "get_lat_variable", "(", "ds", ")", "if", "varname", "and", "(", "varname", "not", "in", "self", ".", "applicable_variables", ")", ":", "self", ".", "applicable_variables", ".", "append", "(", "varname", ")", "varname", "=", "cfutil", ".", "get_z_variable", "(", "ds", ")", "if", "varname", "and", "(", "varname", "not", "in", "self", ".", "applicable_variables", ")", ":", "self", ".", "applicable_variables", ".", "append", "(", "varname", ")", "return", "self", ".", "applicable_variables" ]
Returns a list of variable names that are applicable to ACDD Metadata Checks for variables. This includes geophysical and coordinate variables only. :param netCDF4.Dataset ds: An open netCDF dataset
[ "Returns", "a", "list", "of", "variable", "names", "that", "are", "applicable", "to", "ACDD", "Metadata", "Checks", "for", "variables", ".", "This", "includes", "geophysical", "and", "coordinate", "variables", "only", "." ]
python
train
48.56
saltstack/salt
salt/modules/virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L2600-L2626
def full_info(**kwargs): ''' Return the node_info, vm_info and freemem :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.full_info ''' conn = __get_conn(**kwargs) info = {'freecpu': _freecpu(conn), 'freemem': _freemem(conn), 'node_info': _node_info(conn), 'vm_info': vm_info()} conn.close() return info
[ "def", "full_info", "(", "*", "*", "kwargs", ")", ":", "conn", "=", "__get_conn", "(", "*", "*", "kwargs", ")", "info", "=", "{", "'freecpu'", ":", "_freecpu", "(", "conn", ")", ",", "'freemem'", ":", "_freemem", "(", "conn", ")", ",", "'node_info'", ":", "_node_info", "(", "conn", ")", ",", "'vm_info'", ":", "vm_info", "(", ")", "}", "conn", ".", "close", "(", ")", "return", "info" ]
Return the node_info, vm_info and freemem :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.full_info
[ "Return", "the", "node_info", "vm_info", "and", "freemem" ]
python
train
24.703704
elifesciences/proofreader-python
proofreader/license_checker/__init__.py
https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/license_checker/__init__.py#L42-L64
def run_license_checker(config_path): # type: (str) -> None """Generate table of installed packages and check for license warnings based on user-defined restricted license values. :param config_path: str :return: """ whitelist_licenses = _get_whitelist_licenses(config_path) table = PrintTable(ROW_HEADERS) warnings = [] for pkg in _get_packages(): allowed = pkg.license in whitelist_licenses table.add_row((pkg.name, pkg.version, pkg.license, str(allowed))) if not allowed: warnings.append(pkg) print(table) print('{} RESTRICTED LICENSES DETECTED'.format(len(warnings)))
[ "def", "run_license_checker", "(", "config_path", ")", ":", "# type: (str) -> None", "whitelist_licenses", "=", "_get_whitelist_licenses", "(", "config_path", ")", "table", "=", "PrintTable", "(", "ROW_HEADERS", ")", "warnings", "=", "[", "]", "for", "pkg", "in", "_get_packages", "(", ")", ":", "allowed", "=", "pkg", ".", "license", "in", "whitelist_licenses", "table", ".", "add_row", "(", "(", "pkg", ".", "name", ",", "pkg", ".", "version", ",", "pkg", ".", "license", ",", "str", "(", "allowed", ")", ")", ")", "if", "not", "allowed", ":", "warnings", ".", "append", "(", "pkg", ")", "print", "(", "table", ")", "print", "(", "'{} RESTRICTED LICENSES DETECTED'", ".", "format", "(", "len", "(", "warnings", ")", ")", ")" ]
Generate table of installed packages and check for license warnings based on user-defined restricted license values. :param config_path: str :return:
[ "Generate", "table", "of", "installed", "packages", "and", "check", "for", "license", "warnings", "based", "on", "user", "-", "defined", "restricted", "license", "values", "." ]
python
train
27.826087
pyhys/minimalmodbus
minimalmodbus.py
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L1695-L1733
def _hexdecode(hexstring): """Convert a hex encoded string to a byte string. For example '4A' will return 'J', and '04' will return ``'\\x04'`` (which has length 1). Args: hexstring (str): Can be for example 'A3' or 'A3B4'. Must be of even length. Allowed characters are '0' to '9', 'a' to 'f' and 'A' to 'F' (not space). Returns: A string of half the length, with characters corresponding to all 0-255 values for each byte. Raises: TypeError, ValueError """ # Note: For Python3 the appropriate would be: raise TypeError(new_error_message) from err # but the Python2 interpreter will indicate SyntaxError. # Thus we need to live with this warning in Python3: # 'During handling of the above exception, another exception occurred' _checkString(hexstring, description='hexstring') if len(hexstring) % 2 != 0: raise ValueError('The input hexstring must be of even length. Given: {!r}'.format(hexstring)) if sys.version_info[0] > 2: by = bytes(hexstring, 'latin1') try: return str(binascii.unhexlify(by), encoding='latin1') except binascii.Error as err: new_error_message = 'Hexdecode reported an error: {!s}. Input hexstring: {}'.format(err.args[0], hexstring) raise TypeError(new_error_message) else: try: return hexstring.decode('hex') except TypeError as err: raise TypeError('Hexdecode reported an error: {}. Input hexstring: {}'.format(err.message, hexstring))
[ "def", "_hexdecode", "(", "hexstring", ")", ":", "# Note: For Python3 the appropriate would be: raise TypeError(new_error_message) from err", "# but the Python2 interpreter will indicate SyntaxError.", "# Thus we need to live with this warning in Python3:", "# 'During handling of the above exception, another exception occurred'", "_checkString", "(", "hexstring", ",", "description", "=", "'hexstring'", ")", "if", "len", "(", "hexstring", ")", "%", "2", "!=", "0", ":", "raise", "ValueError", "(", "'The input hexstring must be of even length. Given: {!r}'", ".", "format", "(", "hexstring", ")", ")", "if", "sys", ".", "version_info", "[", "0", "]", ">", "2", ":", "by", "=", "bytes", "(", "hexstring", ",", "'latin1'", ")", "try", ":", "return", "str", "(", "binascii", ".", "unhexlify", "(", "by", ")", ",", "encoding", "=", "'latin1'", ")", "except", "binascii", ".", "Error", "as", "err", ":", "new_error_message", "=", "'Hexdecode reported an error: {!s}. Input hexstring: {}'", ".", "format", "(", "err", ".", "args", "[", "0", "]", ",", "hexstring", ")", "raise", "TypeError", "(", "new_error_message", ")", "else", ":", "try", ":", "return", "hexstring", ".", "decode", "(", "'hex'", ")", "except", "TypeError", "as", "err", ":", "raise", "TypeError", "(", "'Hexdecode reported an error: {}. Input hexstring: {}'", ".", "format", "(", "err", ".", "message", ",", "hexstring", ")", ")" ]
Convert a hex encoded string to a byte string. For example '4A' will return 'J', and '04' will return ``'\\x04'`` (which has length 1). Args: hexstring (str): Can be for example 'A3' or 'A3B4'. Must be of even length. Allowed characters are '0' to '9', 'a' to 'f' and 'A' to 'F' (not space). Returns: A string of half the length, with characters corresponding to all 0-255 values for each byte. Raises: TypeError, ValueError
[ "Convert", "a", "hex", "encoded", "string", "to", "a", "byte", "string", "." ]
python
train
39.205128
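The behaviour of the _hexdecode record above can be confirmed with a short standalone equivalent; on Python 3 its core is binascii.unhexlify plus a latin1 round-trip.

```python
import binascii

def hexdecode(hexstring):
    # Standalone sketch of the Python 3 branch of minimalmodbus._hexdecode.
    if len(hexstring) % 2 != 0:
        raise ValueError('The input hexstring must be of even length.')
    return str(binascii.unhexlify(hexstring.encode('latin1')), encoding='latin1')

print(hexdecode('4A'))           # J
print(repr(hexdecode('04')))     # '\x04'
print(repr(hexdecode('4A04')))   # 'J\x04'
```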
apache/spark
python/pyspark/context.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L650-L665
def binaryFiles(self, path, minPartitions=None): """ .. note:: Experimental Read a directory of binary files from HDFS, a local file system (available on all nodes), or any Hadoop-supported file system URI as a byte array. Each file is read as a single record and returned in a key-value pair, where the key is the path of each file, the value is the content of each file. .. note:: Small files are preferred, large file is also allowable, but may cause bad performance. """ minPartitions = minPartitions or self.defaultMinPartitions return RDD(self._jsc.binaryFiles(path, minPartitions), self, PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
[ "def", "binaryFiles", "(", "self", ",", "path", ",", "minPartitions", "=", "None", ")", ":", "minPartitions", "=", "minPartitions", "or", "self", ".", "defaultMinPartitions", "return", "RDD", "(", "self", ".", "_jsc", ".", "binaryFiles", "(", "path", ",", "minPartitions", ")", ",", "self", ",", "PairDeserializer", "(", "UTF8Deserializer", "(", ")", ",", "NoOpSerializer", "(", ")", ")", ")" ]
.. note:: Experimental Read a directory of binary files from HDFS, a local file system (available on all nodes), or any Hadoop-supported file system URI as a byte array. Each file is read as a single record and returned in a key-value pair, where the key is the path of each file, the value is the content of each file. .. note:: Small files are preferred, large file is also allowable, but may cause bad performance.
[ "..", "note", "::", "Experimental" ]
python
train
47.25
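A hedged usage sketch for the PySpark binaryFiles record above; the path is a placeholder and a running SparkContext `sc` is assumed.

```python
# Hypothetical: 'sc' is an existing pyspark.SparkContext and the URI points
# at a directory of small binary files.
rdd = sc.binaryFiles("hdfs:///data/images")
sizes = rdd.mapValues(len)       # (file path, byte count) pairs
print(sizes.take(3))
```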
jason-weirather/py-seq-tools
seqtools/structure/transcript/__init__.py
https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/structure/transcript/__init__.py#L607-L630
def cmp(self,junc,tolerance=0): """ output comparison and allow for tolerance if desired * -1 if junc comes before self * 1 if junc comes after self * 0 if overlaps * 2 otherwise :param junc: :param tolerance: optional search space (default=0, no tolerance) :type junc: Junction :type tolerance: int :return: value of comparison :rtype: int """ if self.overlaps(junc,tolerance): return 0 #equal if self.left.chr == junc.right.chr: if self.left.start > junc.right.start: return -1 #comes before if self.right.chr == junc.left.chr: if self.right.start < junc.right.start: return 1 #comes after return 2
[ "def", "cmp", "(", "self", ",", "junc", ",", "tolerance", "=", "0", ")", ":", "if", "self", ".", "overlaps", "(", "junc", ",", "tolerance", ")", ":", "return", "0", "#equal", "if", "self", ".", "left", ".", "chr", "==", "junc", ".", "right", ".", "chr", ":", "if", "self", ".", "left", ".", "start", ">", "junc", ".", "right", ".", "start", ":", "return", "-", "1", "#comes before", "if", "self", ".", "right", ".", "chr", "==", "junc", ".", "left", ".", "chr", ":", "if", "self", ".", "right", ".", "start", "<", "junc", ".", "right", ".", "start", ":", "return", "1", "#comes after", "return", "2" ]
output comparison and allow for tolerance if desired * -1 if junc comes before self * 1 if junc comes after self * 0 if overlaps * 2 otherwise :param junc: :param tolerance: optional search space (default=0, no tolerance) :type junc: Junction :type tolerance: int :return: value of comparison :rtype: int
[ "output", "comparison", "and", "allow", "for", "tolerance", "if", "desired" ]
python
train
28.125
PolyJIT/benchbuild
benchbuild/likwid.py
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/likwid.py#L54-L90
def read_table(fstream): """ Read a likwid table info from the text stream. Args: fstream: Likwid's filestream. Returns (dict(str: str)): A dict containing likwid's table info as key/value pairs. """ pos = fstream.tell() line = fstream.readline().strip() fragments = line.split(",") fragments = [x for x in fragments if x is not None] partition = dict() if not len(fragments) >= 4: return None partition["table"] = fragments[0] partition["group"] = fragments[1] partition["set"] = fragments[2] partition["num_lines"] = fragments[3] struct = None if partition is not None and partition["table"] == "TABLE": num_lines = int(partition["num_lines"].strip()) struct = {} header = fetch_cols(fstream) struct.update({header[0]: header[1:]}) for _ in range(num_lines): cols = fetch_cols(fstream) struct.update({cols[0]: cols[1:]}) else: fstream.seek(pos) return struct
[ "def", "read_table", "(", "fstream", ")", ":", "pos", "=", "fstream", ".", "tell", "(", ")", "line", "=", "fstream", ".", "readline", "(", ")", ".", "strip", "(", ")", "fragments", "=", "line", ".", "split", "(", "\",\"", ")", "fragments", "=", "[", "x", "for", "x", "in", "fragments", "if", "x", "is", "not", "None", "]", "partition", "=", "dict", "(", ")", "if", "not", "len", "(", "fragments", ")", ">=", "4", ":", "return", "None", "partition", "[", "\"table\"", "]", "=", "fragments", "[", "0", "]", "partition", "[", "\"group\"", "]", "=", "fragments", "[", "1", "]", "partition", "[", "\"set\"", "]", "=", "fragments", "[", "2", "]", "partition", "[", "\"num_lines\"", "]", "=", "fragments", "[", "3", "]", "struct", "=", "None", "if", "partition", "is", "not", "None", "and", "partition", "[", "\"table\"", "]", "==", "\"TABLE\"", ":", "num_lines", "=", "int", "(", "partition", "[", "\"num_lines\"", "]", ".", "strip", "(", ")", ")", "struct", "=", "{", "}", "header", "=", "fetch_cols", "(", "fstream", ")", "struct", ".", "update", "(", "{", "header", "[", "0", "]", ":", "header", "[", "1", ":", "]", "}", ")", "for", "_", "in", "range", "(", "num_lines", ")", ":", "cols", "=", "fetch_cols", "(", "fstream", ")", "struct", ".", "update", "(", "{", "cols", "[", "0", "]", ":", "cols", "[", "1", ":", "]", "}", ")", "else", ":", "fstream", ".", "seek", "(", "pos", ")", "return", "struct" ]
Read a likwid table info from the text stream. Args: fstream: Likwid's filestream. Returns (dict(str: str)): A dict containing likwid's table info as key/value pairs.
[ "Read", "a", "likwid", "table", "info", "from", "the", "text", "stream", "." ]
python
train
27.081081
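A runnable sketch of the read_table record above; fetch_cols is not included in this record, so a plausible stand-in (read one line, split on commas, strip whitespace) is defined here as an explicit assumption.

```python
import io

def fetch_cols(fstream):
    # Assumed behaviour of likwid's fetch_cols: one CSV line -> stripped columns.
    return [c.strip() for c in fstream.readline().split(",")]

stream = io.StringIO(
    "TABLE,Group 1 Metric,Region foo,2\n"
    "Metric,core 0,core 1\n"
    "Runtime (RDTSC) [s],1.05,1.06\n"
    "CPI,0.8,0.9\n")
print(read_table(stream))
# {'Metric': ['core 0', 'core 1'],
#  'Runtime (RDTSC) [s]': ['1.05', '1.06'],
#  'CPI': ['0.8', '0.9']}
```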
autokey/autokey
lib/autokey/service.py
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/service.py#L414-L425
def phrase_contains_special_keys(expansion: model.Expansion) -> bool: """ Determine if the expansion contains any special keys, including those resulting from any processed macros (<script>, <file>, etc). If any are found, the phrase cannot be undone. Python Zen: »In the face of ambiguity, refuse the temptation to guess.« The question 'What does the phrase expansion "<ctrl>+a<shift>+<insert>" do?' cannot be answered. Because the key bindings cannot be assumed to result in the actions "select all text, then replace with clipboard content", the undo operation can not be performed. Thus always disable undo, when special keys are found. """ found_special_keys = KEY_FIND_RE.findall(expansion.string.lower()) return bool(found_special_keys)
[ "def", "phrase_contains_special_keys", "(", "expansion", ":", "model", ".", "Expansion", ")", "->", "bool", ":", "found_special_keys", "=", "KEY_FIND_RE", ".", "findall", "(", "expansion", ".", "string", ".", "lower", "(", ")", ")", "return", "bool", "(", "found_special_keys", ")" ]
Determine if the expansion contains any special keys, including those resulting from any processed macros (<script>, <file>, etc). If any are found, the phrase cannot be undone. Python Zen: »In the face of ambiguity, refuse the temptation to guess.« The question 'What does the phrase expansion "<ctrl>+a<shift>+<insert>" do?' cannot be answered. Because the key bindings cannot be assumed to result in the actions "select all text, then replace with clipboard content", the undo operation can not be performed. Thus always disable undo, when special keys are found.
[ "Determine", "if", "the", "expansion", "contains", "any", "special", "keys", "including", "those", "resulting", "from", "any", "processed", "macros", "(", "<script", ">", "<file", ">", "etc", ")", ".", "If", "any", "are", "found", "the", "phrase", "cannot", "be", "undone", "." ]
python
train
67.75
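KEY_FIND_RE is defined elsewhere in autokey and is not part of this record; a hedged sketch of the idea, with an assumed pattern that matches <key>-style tokens:

```python
import re

# Assumption: autokey's KEY_FIND_RE matches tokens such as <ctrl>, <shift>, <insert>.
KEY_FIND_RE = re.compile(r"<\w+>")

def contains_special_keys(expansion_string: str) -> bool:
    return bool(KEY_FIND_RE.findall(expansion_string.lower()))

print(contains_special_keys("<ctrl>+a<shift>+<insert>"))  # True  -> undo must be disabled
print(contains_special_keys("plain text expansion"))      # False -> safe to undo
```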