Columns: Unnamed: 0 (int64, 0-389k), code (string, lengths 26-79.6k), docstring (string, lengths 1-46.9k)
25,000
def set_command(value, parameter):
    conf = get_config_obj()
    section = "cli"
    if "." in parameter:
        section, parameter = parameter.split(".", 1)
    if section not in conf:
        conf[section] = {}
    conf[section][parameter] = value
    safeprint("Writing updated config to {}".format(conf.filename))
    conf.write()
Executor for `globus config set`
25,001
def splitAt(iterable, indices):
    iterable = iter(iterable)
    now = 0
    for to in indices:
        try:
            res = []
            for i in range(now, to):
                res.append(next(iterable))
        except StopIteration:
            yield res
            return
        yield res
        now = to
    res = list(iterable)
    if res:
        yield res
r"""Yield chunks of `iterable`, split at the points in `indices`: >>> [l for l in splitAt(range(10), [2,5])] [[0, 1], [2, 3, 4], [5, 6, 7, 8, 9]] splits past the length of `iterable` are ignored: >>> [l for l in splitAt(range(10), [2,5,10])] [[0, 1], [2, 3, 4], [5, 6, 7, 8, 9]]
25,002
def send(self):
    for name, array in iteritems(self):
        shader_id = c_int(0)
        gl.glGetIntegerv(gl.GL_CURRENT_PROGRAM, byref(shader_id))
        if shader_id.value == 0:
            raise UnboundLocalError()
        try:
            loc, shader_id_for_array = array.loc
            if shader_id.value != shader_id_for_array:
                raise Exception()
        except (AttributeError, Exception) as e:
            array.loc = (gl.glGetUniformLocation(shader_id.value, name.encode()), shader_id.value)
        if array.ndim == 2:
            try:
                pointer = array.pointer
            except AttributeError:
                array.pointer = array.ctypes.data_as(POINTER(c_float * 16)).contents
                pointer = array.pointer
            gl.glUniformMatrix4fv(array.loc[0], 1, True, pointer)
        else:
            sendfun = self._sendfuns[array.dtype.kind][len(array) - 1]
            sendfun(array.loc[0], *array)
Sends all the key-value pairs to the graphics card. These uniform variables will be available in the currently-bound shader.
25,003
def split(url):
    scheme = netloc = path = query = fragment = ''
    ip6_start = url.find('[')
    scheme_end = url.find(':')
    if ip6_start > 0 and ip6_start < scheme_end:
        scheme_end = -1
    if scheme_end > 0:
        for c in url[:scheme_end]:
            if c not in SCHEME_CHARS:
                break
        else:
            scheme = url[:scheme_end].lower()
            rest = url[scheme_end:].lstrip(':/')
    if not scheme:
        rest = url
    l_path = rest.find('/')
    l_query = rest.find('?')
    l_frag = rest.find('#')
    if l_path > 0:
        if l_query > 0 and l_frag > 0:
            netloc = rest[:l_path]
            path = rest[l_path:min(l_query, l_frag)]
        elif l_query > 0:
            if l_query > l_path:
                netloc = rest[:l_path]
                path = rest[l_path:l_query]
            else:
                netloc = rest[:l_query]
                path = ''
        elif l_frag > 0:
            netloc = rest[:l_path]
            path = rest[l_path:l_frag]
        else:
            netloc = rest[:l_path]
            path = rest[l_path:]
    else:
        if l_query > 0:
            netloc = rest[:l_query]
        elif l_frag > 0:
            netloc = rest[:l_frag]
        else:
            netloc = rest
    if l_query > 0:
        if l_frag > 0:
            query = rest[l_query + 1:l_frag]
        else:
            query = rest[l_query + 1:]
    if l_frag > 0:
        fragment = rest[l_frag + 1:]
    if not scheme:
        path = netloc + path
        netloc = ''
    return SplitResult(scheme, netloc, path, query, fragment)
Split URL into scheme, netloc, path, query and fragment. >>> split('http://www.example.com/abc?x=1&y=2#foo') SplitResult(scheme='http', netloc='www.example.com', path='/abc', query='x=1&y=2', fragment='foo')
25,004
def parse(self, element):
    result = []
    if element.text is not None and element.tag == self.identifier:
        l, k = (0, 0)
        raw = element.text.split()
        while k < len(self.values):
            dtype = self.dtype[k]
            if isinstance(self.values[k], int):
                for i in range(self.values[k]):
                    result.append(self._caster[dtype](raw[i + l]))
                l += self.values[k]
                k += 1
            else:
                rest = [self._caster[dtype](val) for val in raw[l::]]
                result.extend(rest)
                break
    else:
        msg.warn("no results for parsing {} using line {}".format(element.tag, self.identifier))
    return result
Parses the contents of the specified XML element using template info. :arg element: the XML element from the input file being converted.
25,005
def rotate_vector(evecs, old_vector, rescale_factor, index):
    temp = 0
    for i in range(len(evecs)):
        temp += (evecs[i, index] * rescale_factor) * old_vector[i]
    return temp
Function to find the position of the system(s) in one of the xi_i or mu_i directions. Parameters ----------- evecs : numpy.matrix Matrix of the eigenvectors of the metric in lambda_i coordinates. Used to rotate to a Cartesian coordinate system. old_vector : list of floats or numpy.arrays The position of the system(s) in the original coordinates rescale_factor : float Scaling factor to apply to resulting position(s) index : int The index of the final coordinate system that is being computed. Ie. if we are going from mu_i -> xi_j, this will give j. Returns -------- positions : float or numpy.array Position of the point(s) in the resulting coordinate.
25,006
def init(self):
    if not self.export_enable:
        return None
    # Server URI format strings were stripped in extraction; reconstructed approximately.
    if self.user is None:
        server_uri = 'http://{}:{}/'.format(self.host, self.port)
    else:
        server_uri = 'http://{}:{}@{}:{}/'.format(self.user, self.password, self.host, self.port)
    try:
        s = couchdb.Server(server_uri)
    except Exception as e:
        logger.critical("Cannot connect to CouchDB server %s (%s)" % (server_uri, e))
        sys.exit(2)
    else:
        logger.info("Connected to the CouchDB server %s" % server_uri)
    try:
        s[self.db]
    except Exception as e:
        s.create(self.db)
    else:
        logger.info("There is already a %s database" % self.db)
    return s
Init the connection to the CouchDB server.
25,007
def run_git(self, args, git_env=None):
    popen_kwargs = {
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
    }
    if git_env:
        popen_kwargs['env'] = git_env
    if self._git_toplevel:
        popen_kwargs['cwd'] = self._git_toplevel
    git_process = subprocess.Popen(
        [GitRunner._git_executable] + args,
        **popen_kwargs
    )
    try:
        out, err = git_process.communicate()
        git_process.wait()
    except Exception as e:
        # The original error message was garbled in extraction; reconstructed approximately.
        raise GitError("Couldn't run 'git {args}': {error}".format(args=' '.join(args), error=e))
    return out.decode('utf_8').splitlines()
Runs the git executable with the arguments given and returns a list of lines produced on its standard output.
25,008
def download_sample(job, ids, input_args, sample): if len(sample) == 2: uuid, sample_location = sample url1, url2 = None, None else: uuid, url1, url2 = sample sample_location = None sample_input = dict(input_args) sample_input[] = uuid sample_input[] = sample_location if sample_input[]: sample_input[] = os.path.join(input_args[], uuid) sample_input[] = multiprocessing.cpu_count() job_vars = (sample_input, ids) if sample_input[]: ids[] = job.fileStore.writeGlobalFile(os.path.abspath(sample_location)) elif sample_input[]: ids[] = job.fileStore.writeGlobalFile(urlparse(url1).path) ids[] = job.fileStore.writeGlobalFile(urlparse(url2).path) else: if sample_input[]: ids[] = job.addChildJobFn(download_encrypted_file, sample_input, , disk=).rv() else: ids[] = job.addChildJobFn(download_from_url, sample_input[], disk=).rv() job.addFollowOnJobFn(static_dag_launchpoint, job_vars)
Defines variables unique to a sample that are used in the rest of the pipeline.
ids: dict - Dictionary of fileStore IDs
input_args: dict - Dictionary of input arguments
sample: tuple - Contains uuid and sample_url
25,009
def _z2deriv(self, R, z, phi=0., t=0.):
    return self._R2deriv(numpy.fabs(z), R)
NAME: _z2deriv PURPOSE: evaluate the second vertical derivative for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t- time OUTPUT: the second vertical derivative HISTORY: 2012-07-26 - Written - Bovy (IAS@MPIA)
25,010
def _println(self, *args):
    # Separator literal was stripped in extraction; a space mirrors print()'s default.
    string = ' '.join([str(arg) for arg in args])
    print(string, file=self._stream)
Convenience function for the print function.
25,011
def close(self):
    with self.stop_lock:
        self.stopped = True
    return ioloop_util.submit(self._flush, io_loop=self.io_loop)
Ensure that all spans from the queue are submitted. Returns Future that will be completed once the queue is empty.
25,012
def exchange_declare(self, exchange, type, passive=False, durable=False,
                     auto_delete=True, internal=False, nowait=False,
                     arguments=None, ticket=None):
    if arguments is None:
        arguments = {}
    args = AMQPWriter()
    if ticket is not None:
        args.write_short(ticket)
    else:
        args.write_short(self.default_ticket)
    args.write_shortstr(exchange)
    args.write_shortstr(type)
    args.write_bit(passive)
    args.write_bit(durable)
    args.write_bit(auto_delete)
    args.write_bit(internal)
    args.write_bit(nowait)
    args.write_table(arguments)
    self._send_method((40, 10), args)
    if not nowait:
        return self.wait(allowed_methods=[
            (40, 11),
        ])
declare exchange, create if needed This method creates an exchange if it does not already exist, and if the exchange exists, verifies that it is of the correct and expected class. RULE: The server SHOULD support a minimum of 16 exchanges per virtual host and ideally, impose no limit except as defined by available resources. PARAMETERS: exchange: shortstr RULE: Exchange names starting with "amq." are reserved for predeclared and standardised exchanges. If the client attempts to create an exchange starting with "amq.", the server MUST raise a channel exception with reply code 403 (access refused). type: shortstr exchange type Each exchange belongs to one of a set of exchange types implemented by the server. The exchange types define the functionality of the exchange - i.e. how messages are routed through it. It is not valid or meaningful to attempt to change the type of an existing exchange. RULE: If the exchange already exists with a different type, the server MUST raise a connection exception with a reply code 507 (not allowed). RULE: If the server does not support the requested exchange type it MUST raise a connection exception with a reply code 503 (command invalid). passive: boolean do not create exchange If set, the server will not create the exchange. The client can use this to check whether an exchange exists without modifying the server state. RULE: If set, and the exchange does not already exist, the server MUST raise a channel exception with reply code 404 (not found). durable: boolean request a durable exchange If set when creating a new exchange, the exchange will be marked as durable. Durable exchanges remain active when a server restarts. Non-durable exchanges (transient exchanges) are purged if/when a server restarts. RULE: The server MUST support both durable and transient exchanges. RULE: The server MUST ignore the durable field if the exchange already exists. auto_delete: boolean auto-delete when unused If set, the exchange is deleted when all queues have finished using it. RULE: The server SHOULD allow for a reasonable delay between the point when it determines that an exchange is not being used (or no longer used), and the point when it deletes the exchange. At the least it must allow a client to create an exchange and then bind a queue to it, with a small but non-zero delay between these two actions. RULE: The server MUST ignore the auto-delete field if the exchange already exists. internal: boolean create internal exchange If set, the exchange may not be used directly by publishers, but only when bound to other exchanges. Internal exchanges are used to construct wiring that is not visible to applications. nowait: boolean do not send a reply method If set, the server will not respond to the method. The client should not wait for a reply method. If the server could not complete the method it will raise a channel or connection exception. arguments: table arguments for declaration A set of arguments for the declaration. The syntax and semantics of these arguments depends on the server implementation. This field is ignored if passive is True. ticket: short When a client defines a new exchange, this belongs to the access realm of the ticket used. All further work done with that exchange must be done with an access ticket for the same realm. RULE: The client MUST provide a valid access ticket giving "active" access to the realm in which the exchange exists or will be created, or "passive" access if the if-exists flag is set.
25,013
def checksum_identity_card_number(characters):
    weights_for_check_digit = [7, 3, 1, 0, 7, 3, 1, 7, 3]
    check_digit = 0
    for i in range(3):
        check_digit += weights_for_check_digit[i] * (ord(characters[i]) - 55)
    for i in range(4, 9):
        check_digit += weights_for_check_digit[i] * characters[i]
    check_digit %= 10
    return check_digit
Calculates and returns a control digit for given list of characters basing on Identity Card Number standards.
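Illustration (not part of the original row): the indexing in the code implies a mixed list with letter characters in positions 0-2 and digit integers in positions 4-8.

# Hypothetical input: letters 'A', 'B', 'C' and digits 1, 2, 3, 4, 5.
checksum_identity_card_number(['A', 'B', 'C', 0, 1, 2, 3, 4, 5])
# Letters contribute 7*10 + 3*11 + 1*12 = 115 (A=10, B=11, C=12),
# digits contribute 7*1 + 3*2 + 1*3 + 7*4 + 3*5 = 59; (115 + 59) % 10 == 4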
25,014
def set_user_perm(obj, perm, sid):
    info = (
        win32security.OWNER_SECURITY_INFORMATION |
        win32security.GROUP_SECURITY_INFORMATION |
        win32security.DACL_SECURITY_INFORMATION
    )
    sd = win32security.GetUserObjectSecurity(obj, info)
    dacl = sd.GetSecurityDescriptorDacl()
    ace_cnt = dacl.GetAceCount()
    found = False
    for idx in range(0, ace_cnt):
        (aceType, aceFlags), ace_mask, ace_sid = dacl.GetAce(idx)
        ace_exists = (
            aceType == ntsecuritycon.ACCESS_ALLOWED_ACE_TYPE and
            ace_mask == perm and
            ace_sid == sid
        )
        if ace_exists:
            break
    else:
        dacl.AddAccessAllowedAce(dacl.GetAclRevision(), perm, sid)
        sd.SetSecurityDescriptorDacl(1, dacl, 0)
        win32security.SetUserObjectSecurity(obj, info, sd)
Set an object permission for the given user sid
25,015
def add_filter(self, filter):
    if not isinstance(filter, AbstractFilter):
        # The error message literal was stripped in extraction; reconstructed approximately.
        err = 'Filter must be an instance of {}'.format(AbstractFilter)
        raise InvalidFilter(err)
    if filter not in self.filters:
        self.filters.append(filter)
    return self
Add filter to property :param filter: object, extending from AbstractFilter :return: None
25,016
def generate_express_checkout_redirect_url(self, token, useraction=None):
    url_vars = (self.config.PAYPAL_URL_BASE, token)
    url = "%s?cmd=_express-checkout&token=%s" % url_vars
    if useraction:
        # The literals below were stripped in extraction; 'commit' and 'continue'
        # are the documented values named in the docstring, and the warning text
        # is reconstructed approximately.
        if not useraction.lower() in ('commit', 'continue'):
            warnings.warn('useraction=%s is not a documented value' % useraction, RuntimeWarning)
        url += '&useraction=%s' % useraction
    return url
Returns the URL to redirect the user to for the Express checkout. Express Checkouts must be verified by the customer by redirecting them to the PayPal website. Use the token returned in the response from :meth:`set_express_checkout` with this function to figure out where to redirect the user to. The button text on the PayPal page can be controlled via `useraction`. The documented possible values are `commit` and `continue`. However, any other value will only result in a warning. :param str token: The unique token identifying this transaction. :param str useraction: Control the button text on the PayPal page. :rtype: str :returns: The URL to redirect the user to for approval.
25,017
def connect_table(self, table, chunk, markup):
    k = markup.find(chunk)
    i = markup.rfind("\n=", 0, k)
    j = markup.find("\n", i + 1)
    paragraph_title = markup[i:j].strip().strip("= ")
    for paragraph in self.paragraphs:
        if paragraph.title == paragraph_title:
            paragraph.tables.append(table)
            table.paragraph = paragraph
Creates a link from the table to paragraph and vice versa. Finds the first heading above the table in the markup. This is the title of the paragraph the table belongs to.
25,018
def validate_votes(self, validators_H, validators_prevH):
    "set of validators may change between heights"
    assert self.sender

    def check(lockset, validators):
        if not lockset.num_eligible_votes == len(validators):
            raise InvalidProposalError()
        for v in lockset:
            if v.sender not in validators:
                raise InvalidProposalError()

    if self.round_lockset:
        check(self.round_lockset, validators_H)
    check(self.signing_lockset, validators_prevH)
    return True
set of validators may change between heights
25,019
def raise_for_status(self, r): if self.type in (, , , ): r.raise_for_status() if 500 <= r.status_code < 600: raise RedcapError(r.content)
Given a response, raise for bad status for certain actions. Some redcap api methods don't return error messages that the user could test for or otherwise use. Therefore, we need to do the testing ourselves. Raising for everything wouldn't let the user see the (hopefully helpful) error message.
25,020
def continuityGrouping(values, limit):
    lastValue = values[0]
    lastPos = 0
    groupStartPos = 0
    groupPos = list()
    for currPos, currValue in enumerate(values):
        if currValue - lastValue > limit:
            groupPos.append((groupStartPos, lastPos))
            groupStartPos = currPos
        lastPos = currPos
        lastValue = currValue
    groupPos.append((groupStartPos, lastPos))
    return groupPos
#TODO docstring :param values: ``numpy.array`` containing ``int`` or ``float``, must be sorted :param limit: the maximal difference between two values; if this number is exceeded a new group is generated :returns: a list containing array start and end positions of continuous groups
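A small usage sketch (added for illustration, assuming only numpy and the function above):

import numpy as np

values = np.array([1.0, 1.2, 1.3, 5.0, 5.1, 9.0])
continuityGrouping(values, 0.5)
# -> [(0, 2), (3, 4), (5, 5)]  (start/end index pairs of continuous groups)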
25,021
def map_values( cr, source_column, target_column, mapping, model=None, table=None, write=): if write not in (, ): logger.exception( "map_values is called with unknown value for write param: %s", write) if not table: if not model: logger.exception("map_values is called with no table and no model") table = model._table if source_column == target_column: logger.exception( "map_values is called with the same value for source and old" " columns : %s", source_column) for old, new in mapping: new = "" % new if old is True: old = op = elif old is False: old = op = else: old = "" % old op = values = { : table, : source_column, : target_column, : old, : new, : op, } if write == : query = % values else: query = % values logged_query(cr, query, values) if write == : model.write( cr, SUPERUSER_ID, [row[0] for row in cr.fetchall()], {target_column: new})
Map old values to new values within the same model or table. Old values presumably come from a legacy column. You will typically want to use it in post-migration scripts. :param cr: The database cursor :param source_column: the database column that contains old values to be \ mapped :param target_column: the database column, or model field (if 'write' is \ 'orm') that the new values are written to :param mapping: list of tuples [(old value, new value)] Old value True represents "is set", False "is not set". :param model: used for writing if 'write' is 'orm', or to retrieve the \ table if 'table' is not given. :param table: the database table used to query the old values, and write \ the new values (if 'write' is 'sql') :param write: Either 'orm' or 'sql'. Note that old ids are always \ identified by an sql read. This method does not support mapping m2m, o2m or property fields. \ For o2m you can migrate the inverse field's column instead. .. versionadded:: 8.0
25,022
def render_table(output_dir, packages, jenv=JENV):
    destination_filename = output_dir + "/com/swiftnav/sbp/client/MessageTable.java"
    # The open() mode literal was stripped in extraction; "w" is assumed for writing.
    with open(destination_filename, "w") as f:
        print(destination_filename)
        f.write(jenv.get_template(TEMPLATE_TABLE_NAME).render(packages=packages))
Render and output dispatch table
25,023
def auto_sort(parser, token):
    "usage: {% auto_sort queryset %}"
    try:
        tag_name, queryset = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError("{0} tag requires a single argument".format(token.contents.split()[0]))
    return SortedQuerysetNode(queryset)
usage: {% auto_sort queryset %}
25,024
def _get_tls_object(self, ssl_params):
    if ssl_params is None:
        return None
    if not ssl_params["verify"] and ssl_params["ca_certs"]:
        self.warning(
            "Incorrect configuration: trying to disable server certificate validation, "
            "while also specifying a capath. No validation will be performed. Fix your "
            "configuration to remove this warning"
        )
    validate = ssl.CERT_REQUIRED if ssl_params["verify"] else ssl.CERT_NONE
    if ssl_params["ca_certs"] is None or os.path.isfile(ssl_params["ca_certs"]):
        tls = ldap3.core.tls.Tls(
            local_private_key_file=ssl_params["key"],
            local_certificate_file=ssl_params["cert"],
            ca_certs_file=ssl_params["ca_certs"],
            version=ssl.PROTOCOL_SSLv23,
            validate=validate,
        )
    elif os.path.isdir(ssl_params["ca_certs"]):
        tls = ldap3.core.tls.Tls(
            local_private_key_file=ssl_params["key"],
            local_certificate_file=ssl_params["cert"],
            ca_certs_path=ssl_params["ca_certs"],
            version=ssl.PROTOCOL_SSLv23,
            validate=validate,
        )
    else:
        # The error message literal was stripped in extraction; reconstructed approximately.
        raise ConfigurationError(
            'Invalid path {} for ssl ca_certs'.format(ssl_params['ca_certs'])
        )
    return tls
Return a TLS object to establish a secure connection to a server
25,025
def insert(self, context):
    status_code, msg = self.__endpoint.post(
        "/resources/custom-resource",
        data={
            "id": self.__name,
            "restype": self.__restype,
            "factoryclass": self.__factclass,
            "property": props_value(self.__props)
        }
    )
    self.__available = True
Create resource. :param resort.engine.execution.Context context: Current execution context.
25,026
def evaluate(contents, jsonnet_library_paths=None):
    if not jsonnet_library_paths:
        # The config key literal was stripped in extraction; reconstructed approximately.
        jsonnet_library_paths = __salt__['config.option']('jsonnet_library_paths', [])
    return salt.utils.json.loads(
        _jsonnet.evaluate_snippet(
            "snippet", contents,
            import_callback=partial(
                _import_callback, library_paths=jsonnet_library_paths)))
Evaluate a jsonnet input string. contents Raw jsonnet string to evaluate. jsonnet_library_paths List of jsonnet library paths.
25,027
def image_plot(shap_values, x, labels=None, show=True, width=20, aspect=0.2, hspace=0.2, labelpad=None): multi_output = True if type(shap_values) != list: multi_output = False shap_values = [shap_values] if labels is not None: assert labels.shape[0] == shap_values[0].shape[0], "Labels must have same row count as shap_values arrays!" if multi_output: assert labels.shape[1] == len(shap_values), "Labels must have a column for each output in shap_values!" else: assert len(labels.shape) == 1, "Labels must be a vector for single output shap_values." label_kwargs = {} if labelpad is None else {: labelpad} fig_size = np.array([3 * (len(shap_values) + 1), 2.5 * (x.shape[0] + 1)]) if fig_size[0] > width: fig_size *= width / fig_size[0] fig, axes = pl.subplots(nrows=x.shape[0], ncols=len(shap_values) + 1, figsize=fig_size) if len(axes.shape) == 1: axes = axes.reshape(1,axes.size) for row in range(x.shape[0]): x_curr = x[row].copy() if len(x_curr.shape) == 3 and x_curr.shape[2] == 1: x_curr = x_curr.reshape(x_curr.shape[:2]) if x_curr.max() > 1: x_curr /= 255. if len(x_curr.shape) == 3 and x_curr.shape[2] == 3: x_curr_gray = (0.2989 * x_curr[:,:,0] + 0.5870 * x_curr[:,:,1] + 0.1140 * x_curr[:,:,2]) else: x_curr_gray = x_curr axes[row,0].imshow(x_curr, cmap=pl.get_cmap()) axes[row,0].axis() if len(shap_values[0][row].shape) == 2: abs_vals = np.stack([np.abs(shap_values[i]) for i in range(len(shap_values))], 0).flatten() else: abs_vals = np.stack([np.abs(shap_values[i].sum(-1)) for i in range(len(shap_values))], 0).flatten() max_val = np.nanpercentile(abs_vals, 99.9) for i in range(len(shap_values)): if labels is not None: axes[row,i+1].set_title(labels[row,i], **label_kwargs) sv = shap_values[i][row] if len(shap_values[i][row].shape) == 2 else shap_values[i][row].sum(-1) axes[row,i+1].imshow(x_curr_gray, cmap=pl.get_cmap(), alpha=0.15, extent=(-1, sv.shape[0], sv.shape[1], -1)) im = axes[row,i+1].imshow(sv, cmap=colors.red_transparent_blue, vmin=-max_val, vmax=max_val) axes[row,i+1].axis() if hspace == : fig.tight_layout() else: fig.subplots_adjust(hspace=hspace) cb = fig.colorbar(im, ax=np.ravel(axes).tolist(), label="SHAP value", orientation="horizontal", aspect=fig_size[0]/aspect) cb.outline.set_visible(False) if show: pl.show()
Plots SHAP values for image inputs.
25,028
def channel_post_handler(self, *custom_filters, commands=None, regexp=None,
                         content_types=None, state=None, run_task=None, **kwargs):
    def decorator(callback):
        self.register_channel_post_handler(callback, *custom_filters,
                                            commands=commands, regexp=regexp,
                                            content_types=content_types, state=state,
                                            run_task=run_task, **kwargs)
        return callback
    return decorator
Decorator for channel post handler :param commands: list of commands :param regexp: REGEXP :param content_types: List of content types. :param state: :param custom_filters: list of custom filters :param run_task: run callback in task (no wait results) :param kwargs: :return: decorated function
25,029
def _populate_sgc_payoff_arrays(payoff_arrays):
    n = payoff_arrays[0].shape[0]
    m = (n+1)//2 - 1
    for payoff_array in payoff_arrays:
        for i in range(m):
            for j in range(m):
                payoff_array[i, j] = 0.75
            for j in range(m, n):
                payoff_array[i, j] = 0.5
        for i in range(m, n):
            for j in range(n):
                payoff_array[i, j] = 0
        payoff_array[0, m-1] = 1
        payoff_array[0, 1] = 0.5
        for i in range(1, m-1):
            payoff_array[i, i-1] = 1
            payoff_array[i, i+1] = 0.5
        payoff_array[m-1, m-2] = 1
        payoff_array[m-1, 0] = 0.5
    k = (m+1)//2
    for h in range(k):
        i, j = m + 2*h, m + 2*h
        payoff_arrays[0][i, j] = 0.75
        payoff_arrays[0][i+1, j+1] = 0.75
        payoff_arrays[1][j, i+1] = 0.75
        payoff_arrays[1][j+1, i] = 0.75
Populate the ndarrays in `payoff_arrays` with the payoff values of the SGC game. Parameters ---------- payoff_arrays : tuple(ndarray(float, ndim=2)) Tuple of 2 ndarrays of shape (4*k-1, 4*k-1). Modified in place.
25,030
def connection_made(self, transport): _LOGGER.info() _LOGGER.debug(, transport) self.transport = transport self._restart_writer = True self.restart_writing() if self._aldb.status != ALDBStatus.LOADED: asyncio.ensure_future(self._setup_devices(), loop=self._loop)
Start the Hub connection process. Called when asyncio.Protocol establishes the network connection.
25,031
def get_obsolete_user_ids(self, db_read=None): db_read = db_read or self.db_read already_awarded_ids = self.get_already_awarded_user_ids(db_read=db_read, show_log=False) current_ids = self.get_current_user_ids(db_read=db_read) obsolete_ids = list(set(already_awarded_ids) - set(current_ids)) obsolete_ids_count = len(obsolete_ids) logger.debug( , self.slug, obsolete_ids_count) return (obsolete_ids, obsolete_ids_count)
Returns obsolete users IDs to unaward.
25,032
def zdivide(a, b, null=0):
    (a, b) = unbroadcast(a, b)
    return czdivide(a, b, null=null)
zdivide(a, b) returns the quotient a / b as a numpy array object. Unlike numpy's divide function or a/b syntax, zdivide will thread over the earliest dimension possible; thus if a.shape is (4,2) and b.shape is 4, zdivide(a,b) is equivalent to [ai*zinv(bi) for (ai,bi) in zip(a,b)]. The optional argument null (default: 0) may be given to specify that zeros in the array b should instead be replaced with the given value in the result. Note that if this value is not equal to 0, then any sparse array passed as argument b must be reified. The zdivide function never raises an error due to divide-by-zero; if you desire this behavior, use the divide function instead. Note that zdivide(a,b, null=z) is not quite equivalent to a*zinv(b, null=z) unless z is 0; if z is not zero, then the same elements that are set to z in zinv(b, null=z) are set to z in the result of zdivide(a,b, null=z) rather than the equivalent element of a times z.
25,033
def _assert_lt(self, cost):
    if self.tot == None:
        self.tot = ITotalizer(lits=self.sels, ubound=cost-1, top_id=self.topv)
        self.topv = self.tot.top_id
        for cl in self.tot.cnf.clauses:
            self.oracle.add_clause(cl)
    self.oracle.add_clause([-self.tot.rhs[cost-1]])
The method enforces an upper bound on the cost of the MaxSAT solution. This is done by encoding the sum of all soft clause selectors with the use the iterative totalizer encoding, i.e. :class:`.ITotalizer`. Note that the sum is created once, at the beginning. Each of the following calls to this method only enforces the upper bound on the created sum by adding the corresponding unit size clause. Each such clause is added on the fly with no restart of the underlying SAT oracle. :param cost: the cost of the next MaxSAT solution is enforced to be *lower* than this current cost :type cost: int
25,034
def json_data(self):
    return {
        "vector_id": self.vector_id,
        "origin_id": self.origin_id,
        "destination_id": self.destination_id,
        "info_id": self.info_id,
        "network_id": self.network_id,
        "receive_time": self.receive_time,
        "status": self.status,
    }
The JSON representation of a transmission.
25,035
def _parse_docstring(fh): find_fades = re.compile(r).search for line in fh: if line.startswith("" break if line.startswith(): quote = break else: return {} if line[1] == quote: endquote = quote * 3 else: endquote = quote if endquote in line[len(endquote):]: docstring_lines = [line[:line.index(endquote)]] else: docstring_lines = [line] for line in fh: if endquote in line: docstring_lines.append(line[:line.index(endquote)]) break docstring_lines.append(line) docstring_lines = iter(docstring_lines) for doc_line in docstring_lines: if find_fades(doc_line): break else: return {} return _parse_requirement(list(docstring_lines))
Parse the docstrings of a script to find marked dependencies.
25,036
def change_default(config): config_file, cf = read_latoolscfg() if config not in cf.sections(): raise ValueError("\n is not a defined configuration.".format(config)) if config == : pstr = ( + ) else: pstr = (.format(cf[][]) + .format(config)) response = input(pstr + ) if response.lower() == : cf.set(, , config) with open(config_file, ) as f: cf.write(f) print() else: print()
Change the default configuration.
25,037
def dict_to_qs(dct):
    itms = ["%s=%s" % (key, val) for key, val in list(dct.items()) if val is not None]
    return "&".join(itms)
Takes a dictionary and uses it to create a query string.
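A quick illustration (not from the source); note that None values are dropped and values are not URL-encoded:

dict_to_qs({"api_key": "abc123", "page": 2, "filter": None})
# -> 'api_key=abc123&page=2'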
25,038
def peddy_het_check_plot(self): data = {} for s_name, d in self.peddy_data.items(): if in d and in d: data[s_name] = { : d[], : d[] } pconfig = { : , : , : , : , } self.add_section ( name = , description = "Proportion of sites that were heterozygous against median depth.", helptext = , anchor = , plot = scatter.plot(data, pconfig) )
plot the het_check scatter plot
25,039
def get_payload(self):
    return bytes(
        [self.major_version >> 8 & 255,
         self.major_version & 255,
         self.minor_version >> 8 & 255,
         self.minor_version & 255])
Return Payload.
25,040
def resolve_resource_id_refs(self, input_dict, supported_resource_id_refs):
    if not self.can_handle(input_dict):
        return input_dict
    ref_value = input_dict[self.intrinsic_name]
    if not isinstance(ref_value, string_types) or self._resource_ref_separator in ref_value:
        return input_dict
    logical_id = ref_value
    resolved_value = supported_resource_id_refs.get(logical_id)
    if not resolved_value:
        return input_dict
    return {self.intrinsic_name: resolved_value}
Updates references to the old logical id of a resource to the new (generated) logical id. Example: {"Ref": "MyLayer"} => {"Ref": "MyLayerABC123"} :param dict input_dict: Dictionary representing the Ref function to be resolved. :param dict supported_resource_id_refs: Dictionary that maps old logical ids to new ones. :return dict: Dictionary with resource references resolved.
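An illustrative call mirroring the docstring's example; "resolver" is a hypothetical instance of the class holding this method (any Ref intrinsic resolver):

resolver.resolve_resource_id_refs({"Ref": "MyLayer"}, {"MyLayer": "MyLayerABC123"})
# -> {"Ref": "MyLayerABC123"}
resolver.resolve_resource_id_refs({"Ref": "Unknown"}, {"MyLayer": "MyLayerABC123"})
# -> {"Ref": "Unknown"}  (unresolvable references are returned unchanged)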
25,041
def toimages(self):
    from thunder.images.images import Images
    # The mode string literals were stripped in extraction; 'spark' and 'local'
    # are assumed from the library's conventions.
    if self.mode == 'spark':
        values = self.values.values_to_keys((0,)).unchunk()
    if self.mode == 'local':
        values = self.values.unchunk()
    return Images(values)
Convert blocks to images.
25,042
def process(self, data):
    button_changed = False
    for name, (chan, b1, b2, flip) in self.mappings.items():
        if data[0] == chan:
            self.dict_state[name] = flip * to_int16(data[b1], data[b2]) / float(self.axis_scale)
    for button_index, (chan, byte, bit) in enumerate(self.button_mapping):
        if data[0] == chan:
            button_changed = True
            mask = 1 << bit
            self.dict_state["buttons"][button_index] = 1 if (data[byte] & mask) != 0 else 0
    self.dict_state["t"] = high_acc_clock()
    if len(self.dict_state) == 8:
        self.tuple_state = SpaceNavigator(**self.dict_state)
    if self.callback:
        self.callback(self.tuple_state)
    if self.button_callback and button_changed:
        self.button_callback(self.tuple_state, self.tuple_state.buttons)
Update the state based on the incoming data This function updates the state of the DeviceSpec object, giving values for each axis [x,y,z,roll,pitch,yaw] in range [-1.0, 1.0] The state tuple is only set when all 6 DoF have been read correctly. The timestamp (in fractional seconds since the start of the program) is written as element "t" If callback is provided, it is called on with a copy of the current state tuple. If button_callback is provided, it is called only on button state changes with the argument (state, button_state). Parameters: data The data for this HID event, as returned by the HID callback
25,043
def fit(self, X, y):
    if not isinstance(X, pd.DataFrame):
        X = pd.DataFrame(X.copy())
    if not isinstance(y, pd.Series):
        y = pd.Series(y.copy())
    relevance_table = calculate_relevance_table(
        X, y, ml_task=self.ml_task, n_jobs=self.n_jobs, chunksize=self.chunksize,
        fdr_level=self.fdr_level, hypotheses_independent=self.hypotheses_independent,
        test_for_binary_target_real_feature=self.test_for_binary_target_real_feature)
    self.relevant_features = relevance_table.loc[relevance_table.relevant].feature.tolist()
    self.feature_importances_ = 1.0 - relevance_table.p_value.values
    self.p_values = relevance_table.p_value.values
    self.features = relevance_table.index.tolist()
    return self
Extract the information, which of the features are relevant using the given target. For more information, please see the :func:`~tsfresh.feature_selection.feature_selector.check_fs_sig_bh` function. All columns in the input data sample are treated as feature. The index of all rows in X must be present in y. :param X: data sample with the features, which will be classified as relevant or not :type X: pandas.DataFrame or numpy.array :param y: target vector to be used, to classify the features :type y: pandas.Series or numpy.array :return: the fitted estimator with the information, which features are relevant :rtype: FeatureSelector
25,044
def _control_longitude(self):
    if self.lonm < 0.0:
        self.lonm = 360.0 + self.lonm
    if self.lonM < 0.0:
        self.lonM = 360.0 + self.lonM
    if self.lonm > 360.0:
        self.lonm = self.lonm - 360.0
    if self.lonM > 360.0:
        self.lonM = self.lonM - 360.0
Control on longitude values
25,045
def delete(self, request, uri):
    uri = self.decode_uri(uri)
    uris = cio.delete(uri)
    if uri not in uris:
        raise Http404
    return self.render_to_response()
Delete versioned uri and return empty text response on success.
25,046
def solve_linear_diop(total: int, *coeffs: int) -> Iterator[Tuple[int, ...]]:
    if len(coeffs) == 0:
        if total == 0:
            yield tuple()
        return
    if len(coeffs) == 1:
        if total % coeffs[0] == 0:
            yield (total // coeffs[0], )
        return
    if len(coeffs) == 2:
        yield from base_solution_linear(coeffs[0], coeffs[1], total)
        return
    remainder_gcd = math.gcd(coeffs[1], coeffs[2])
    for coeff in coeffs[3:]:
        remainder_gcd = math.gcd(remainder_gcd, coeff)
    for coeff0_solution, remainder_gcd_solution in base_solution_linear(coeffs[0], remainder_gcd, total):
        new_coeffs = [c // remainder_gcd for c in coeffs[1:]]
        for remainder_solution in solve_linear_diop(remainder_gcd_solution, *new_coeffs):
            yield (coeff0_solution, ) + remainder_solution
r"""Yield non-negative integer solutions of a linear Diophantine equation of the format :math:`c_1 x_1 + \dots + c_n x_n = total`. If there are at most two coefficients, :func:`base_solution_linear()` is used to find the solutions. Otherwise, the solutions are found recursively, by reducing the number of variables in each recursion: 1. Compute :math:`d := gcd(c_2, \dots , c_n)` 2. Solve :math:`c_1 x + d y = total` 3. Recursively solve :math:`c_2 x_2 + \dots + c_n x_n = y` for each solution for :math:`y` 4. Combine these solutions to form a solution for the whole equation Args: total: The constant of the equation. *coeffs: The coefficients :math:`c_i` of the equation. Yields: The non-negative integer solutions of the equation as a tuple :math:`(x_1, \dots, x_n)`.
25,047
def _project_perturbation(perturbation, epsilon, input_image, clip_min=None,
                          clip_max=None):
    if clip_min is None or clip_max is None:
        raise NotImplementedError("_project_perturbation currently has clipping "
                                  "hard-coded in.")
    with tf.control_dependencies([
        utils_tf.assert_less_equal(input_image,
                                   tf.cast(clip_max, input_image.dtype)),
        utils_tf.assert_greater_equal(input_image,
                                      tf.cast(clip_min, input_image.dtype))
    ]):
        clipped_perturbation = utils_tf.clip_by_value(
            perturbation, -epsilon, epsilon)
        new_image = utils_tf.clip_by_value(
            input_image + clipped_perturbation, clip_min, clip_max)
        return new_image - input_image
Project `perturbation` onto L-infinity ball of radius `epsilon`. Also project into hypercube such that the resulting adversarial example is between clip_min and clip_max, if applicable.
25,048
def start(self, contract_names, target):
    if isinstance(contract_names, str):
        contract_names = [contract_names]
    if not isinstance(contract_names, list):
        return None, "error: expecting a string, or a list of contract names"
    contract_listeners = []
    for name in contract_names:
        c, err = Contract.get(name, self)
        if err:
            EZO.log.error(red("error loading contract {}".format(name)))
            EZO.log.error(red(err))
            continue
        if not c:
            EZO.log.warn(blue("contract {} not found".format(name)))
            continue
        address, err = Contract.get_address(name, c.hash, self.db, target=target)
        if err:
            EZO.log.error(red("error obtaining address for contract {}").format(name))
            EZO.log.error(red(err))
            continue
        if not address:
            EZO.log.error(red("no address for contract {}".format(name)))
            continue
        contract_listeners.append(c.listen(address, target))
    if contract_listeners:
        loop = asyncio.get_event_loop()
        loop.run_until_complete(
            asyncio.gather(*contract_listeners)
        )
    else:
        return None, "unable to start contract listeners"
loads the contracts -- starts their event listeners :param contract_names: :return:
25,049
def _strip_marker_elem(elem_name, elements):
    extra_indexes = []
    preceding_operators = ["and"] if elem_name == "extra" else ["and", "or"]
    for i, element in enumerate(elements):
        if isinstance(element, list):
            cancelled = _strip_marker_elem(elem_name, element)
            if cancelled:
                extra_indexes.append(i)
        elif isinstance(element, tuple) and element[0].value == elem_name:
            extra_indexes.append(i)
    for i in reversed(extra_indexes):
        del elements[i]
        if i > 0 and elements[i - 1] in preceding_operators:
            del elements[i - 1]
        elif elements:
            del elements[0]
    return not elements
Remove the supplied element from the marker. This is not a comprehensive implementation, but relies on an important characteristic of metadata generation: The element's operand is always associated with an "and" operator. This means that we can simply remove the operand and the "and" operator associated with it.
25,050
def proximal_huber(space, gamma):
    gamma = float(gamma)

    class ProximalHuber(Operator):
        def __init__(self, sigma):
            self.sigma = float(sigma)
            super(ProximalHuber, self).__init__(domain=space, range=space,
                                                linear=False)

        def _call(self, x, out):
            if isinstance(self.domain, ProductSpace):
                norm = PointwiseNorm(self.domain, 2)(x)
            else:
                norm = x.ufuncs.absolute()
            mask = norm.ufuncs.less_equal(gamma + self.sigma)
            out[mask] = gamma / (gamma + self.sigma) * x[mask]
            mask.ufuncs.logical_not(out=mask)
            sign_x = x.ufuncs.sign()
            out[mask] = x[mask] - self.sigma * sign_x[mask]
            return out

    return ProximalHuber
Proximal factory of the Huber norm. Parameters ---------- space : `TensorSpace` The domain of the functional gamma : float The smoothing parameter of the Huber norm functional. Returns ------- prox_factory : function Factory for the proximal operator to be initialized. See Also -------- odl.solvers.default_functionals.Huber : the Huber norm functional Notes ----- The proximal operator is given by given by the proximal operator of ``1/(2*gamma) * L2 norm`` in points that are ``<= gamma``, and by the proximal operator of the l1 norm in points that are ``> gamma``.
25,051
def _onNextBookmark(self):
    for block in qutepart.iterateBlocksFrom(self._qpart.textCursor().block().next()):
        if self.isBlockMarked(block):
            self._qpart.setTextCursor(QTextCursor(block))
            return
Next Bookmark action triggered. Move cursor
25,052
def _get_coords(self, obj): xdim, ydim = obj.dimensions(label=True)[:2] xcoords = obj.dimension_values(xdim, False) ycoords = obj.dimension_values(ydim, False) grouped = obj.groupby(xdim, container_type=OrderedDict, group_type=Dataset).values() orderings = OrderedDict() sort = True for group in grouped: vals = group.dimension_values(ydim, False) if len(vals) == 1: orderings[vals[0]] = [vals[0]] else: for i in range(len(vals)-1): p1, p2 = vals[i:i+2] orderings[p1] = [p2] if sort: if vals.dtype.kind in (, ): sort = (np.diff(vals)>=0).all() else: sort = np.array_equal(np.sort(vals), vals) if sort or one_to_one(orderings, ycoords): ycoords = np.sort(ycoords) elif not is_cyclic(orderings): coords = list(itertools.chain(*sort_topologically(orderings))) ycoords = coords if len(coords) == len(ycoords) else np.sort(ycoords) return xcoords, ycoords
Get the coordinates of the 2D aggregate, maintaining the correct sorting order.
25,053
def _get_ipmitool_path(self, cmd='ipmitool'):
    p = subprocess.Popen(["which", cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    return out.strip()
Get full path to the ipmitool command using the unix `which` command
25,054
def get_param(self, number):
    logger.debug("retreiving param number %s" % number)
    type_ = param_types[number]
    value = type_()
    code = self.library.Cli_GetParam(self.pointer, c_int(number), byref(value))
    check_error(code)
    return value.value
Reads an internal Client object parameter.
25,055
def create_archive(
    source: Path, target: Path, interpreter: str, main: str, compressed: bool = True
) -> None:
    mod, sep, fn = main.partition(":")
    mod_ok = all(part.isidentifier() for part in mod.split("."))
    fn_ok = all(part.isidentifier() for part in fn.split("."))
    if not (sep == ":" and mod_ok and fn_ok):
        raise zipapp.ZipAppError("Invalid entry point: " + main)
    main_py = MAIN_TEMPLATE.format(module=mod, fn=fn)
    with maybe_open(target, "wb") as fd:
        write_file_prefix(fd, interpreter)
        compression = zipfile.ZIP_DEFLATED if compressed else zipfile.ZIP_STORED
        with zipfile.ZipFile(fd, "w", compression=compression) as z:
            for child in source.rglob("*"):
                # The suffix literal was stripped in extraction; ".pyc" is assumed here.
                if child.suffix == ".pyc":
                    continue
                arcname = child.relative_to(source)
                z.write(str(child), str(arcname))
            z.writestr("__main__.py", main_py.encode("utf-8"))
    target.chmod(target.stat().st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
Create an application archive from SOURCE. A slightly modified version of stdlib's `zipapp.create_archive <https://docs.python.org/3/library/zipapp.html#zipapp.create_archive>`_
25,056
def get_default_qubit_mapping(program):
    fake_qubits, real_qubits, qubits = _what_type_of_qubit_does_it_use(program)
    if real_qubits:
        warnings.warn("This program contains integer qubits, "
                      "so getting a mapping doesn't make sense.")
        return {q: q for q in qubits}
    return {qp: Qubit(i) for i, qp in enumerate(qubits)}
Takes a program which contains qubit placeholders and provides a mapping to the integers 0 through N-1. The output of this function is suitable for input to :py:func:`address_qubits`. :param program: A program containing qubit placeholders :return: A dictionary mapping qubit placeholder to an addressed qubit from 0 through N-1.
25,057
def create_multispan_plots(tag_ids): import matplotlib.gridspec as gridspec fig = plt.figure() nrows = 1 if len(tag_ids) > 1: nrows = 2 fig.set_size_inches(10, 5*nrows) gs = gridspec.GridSpec(nrows, len(tag_ids)) ax_list = [fig.add_subplot(g) for g in gs] ax_dict = {} for i, tag_dict in enumerate(tag_ids): ax_dict[tag_dict[]] = ax_list[i] ax_dict[tag_dict[]].set_title( .format(tag_dict[], tag_dict[])) if nrows > 1: ax_total = plt.subplot(gs[1, :]) title = .format(tag_ids[0][]) for i in range(1, len(tag_ids)): title = title + .format(tag_ids[i][]) ax_total.set_title(title) gs.tight_layout(fig, rect=[0, 0.03, 1, 0.95]) return fig, ax_dict, ax_total gs.tight_layout(fig, rect=[0, 0.03, 1, 0.95]) return fig, ax_dict, None
Create detail plots (first row) and total block(second row) of experiments. Args: tag_ids: list of tag-dictionaries, where the dictionaries must have fields 'name' (used for naming) and 'id' (used for numbering axis_dict) Returns: Figure element fig, ax_dict containing the first row plots (accessed via id) and ax_total containing the second row block.
25,058
def map_df(self, df):
    if len(df) == 0:
        return
    aesthetics = set(self.aesthetics) & set(df.columns)
    for ae in aesthetics:
        df[ae] = self.map(df[ae])
    return df
Map df
25,059
def _proxy(self):
    if self._context is None:
        self._context = ModelBuildContext(
            self._version,
            assistant_sid=self._solution['assistant_sid'],
            sid=self._solution['sid'],
        )
    return self._context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: ModelBuildContext for this ModelBuildInstance :rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildContext
25,060
def add_item(self, text, font=("default", 12, "bold"), backgroundcolor="yellow",
             textcolor="black", highlightcolor="blue"):
    item = self.canvas.create_text(0, 0, anchor=tk.NW, text=text, font=font,
                                   fill=textcolor, tag="item")
    rectangle = self.canvas.create_rectangle(self.canvas.bbox(item), fill=backgroundcolor)
    self.canvas.tag_lower(rectangle, item)
    self.items[item] = rectangle
    if callable(self._callback_add):
        self._callback_add(item, rectangle)
    self.item_colors[item] = (backgroundcolor, textcolor, highlightcolor)
Add a new item on the Canvas. :param text: text to display :type text: str :param font: font of the text :type font: tuple or :class:`~tkinter.font.Font` :param backgroundcolor: background color :type backgroundcolor: str :param textcolor: text color :type textcolor: str :param highlightcolor: the color of the text when the item is selected :type highlightcolor: str
25,061
def _delete_wals_before(self, segment_info): wal_key_depth = self.layout.wal_directory().count() + 1 for key in self._backup_list(prefix=self.layout.wal_directory()): key_name = self.layout.key_name(key) bucket = self._container_name(key) url = .format(scm=self.layout.scheme, bucket=bucket, name=key_name) key_parts = key_name.split() key_depth = len(key_parts) if key_depth != wal_key_depth: logger.warning( msg="skipping non-qualifying key in ", detail=( .format(url)), hint=generic_weird_key_hint_message) elif key_depth == wal_key_depth: segment_match = (re.match(storage.SEGMENT_REGEXP + r, key_parts[-1])) label_match = (re.match(storage.SEGMENT_REGEXP + r, key_parts[-1])) history_match = re.match(r, key_parts[-1]) all_matches = [segment_match, label_match, history_match] non_matches = len(list(m for m in all_matches if m is None)) assert non_matches in (len(all_matches) - 1, len(all_matches)) if non_matches == len(all_matches): logger.warning( msg="skipping non-qualifying key in ", detail=( .format(url)), hint=generic_weird_key_hint_message) elif segment_match is not None: scanned_sn = self._groupdict_to_segment_number( segment_match.groupdict()) self._delete_if_before(segment_info, scanned_sn, key, ) elif label_match is not None: scanned_sn = self._groupdict_to_segment_number( label_match.groupdict()) self._delete_if_before(segment_info, scanned_sn, key, ) elif history_match is not None: pass else: assert False else: assert False
Delete all WAL files before segment_info. Doesn't delete any base-backup data.
25,062
def has_approx_support(m, m_hat, prob=0.01):
    m_nz = np.flatnonzero(np.triu(m, 1))
    m_hat_nz = np.flatnonzero(np.triu(m_hat, 1))
    upper_diagonal_mask = np.flatnonzero(np.triu(np.ones(m.shape), 1))
    not_m_nz = np.setdiff1d(upper_diagonal_mask, m_nz)
    intersection = np.in1d(m_hat_nz, m_nz)
    not_intersection = np.in1d(m_hat_nz, not_m_nz)
    true_positive_rate = 0.0
    if len(m_nz):
        true_positive_rate = 1. * np.sum(intersection) / len(m_nz)
    true_negative_rate = 1. - true_positive_rate
    false_positive_rate = 0.0
    if len(not_m_nz):
        false_positive_rate = 1. * np.sum(not_intersection) / len(not_m_nz)
    return int(np.less_equal(true_negative_rate + false_positive_rate, prob))
Returns 1 if model selection error is less than or equal to prob rate, 0 else. NOTE: why does np.nonzero/np.flatnonzero create so much problems?
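An assumed sanity check (not in the original) with two tiny 2x2 matrices:

import numpy as np

m = np.array([[0., 1.], [0., 0.]])
has_approx_support(m, m)                  # 1: the estimated support matches exactly
has_approx_support(m, np.zeros((2, 2)))   # 0: the single true edge is missed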
25,063
def tabs_or_spaces(physical_line, indent_char):
    indent = INDENT_REGEX.match(physical_line).group(1)
    for offset, char in enumerate(indent):
        if char != indent_char:
            return offset, "E101 indentation contains mixed spaces and tabs"
r"""Never mix tabs and spaces. The most popular way of indenting Python is with spaces only. The second-most popular way is with tabs only. Code indented with a mixture of tabs and spaces should be converted to using spaces exclusively. When invoking the Python command line interpreter with the -t option, it issues warnings about code that illegally mixes tabs and spaces. When using -tt these warnings become errors. These options are highly recommended! Okay: if a == 0:\n a = 1\n b = 1 E101: if a == 0:\n a = 1\n\tb = 1
25,064
def vincenty(lon0, lat0, a1, s):
    lon0 = np.deg2rad(lon0)
    lat0 = np.deg2rad(lat0)
    a1 = np.deg2rad(a1)
    s = np.deg2rad(s)
    sina = np.cos(lat0) * np.sin(a1)
    num1 = np.sin(lat0)*np.cos(s) + np.cos(lat0)*np.sin(s)*np.cos(a1)
    den1 = np.sqrt(sina**2 + (np.sin(lat0)*np.sin(s) - np.cos(lat0)*np.cos(a1))**2)
    lat = np.rad2deg(np.arctan2(num1, den1))
    num2 = np.sin(s)*np.sin(a1)
    den2 = np.cos(lat0)*np.cos(s) - np.sin(lat0)*np.sin(s)*np.cos(a1)
    L = np.arctan2(num2, den2)
    lon = np.rad2deg(lon0 + L)
    return lon, lat
Returns the coordinates of a new point that is a given angular distance s away from a starting point (lon0, lat0) at bearing (angle from north) a1), to within a given precision Note that this calculation is a simplified version of the full vincenty problem, which solves for the coordinates on the surface on an arbitrary ellipsoid. Here we only care about the surface of a sphere. Note: All parameters are assumed to be given in DEGREES :param lon0: float, longitude of starting point :param lat0: float, latitude of starting point :param a1: float, bearing to second point, i.e. angle between due north and line connecting 2 points :param s: float, angular distance between the two points :return: coordinates of second point in degrees
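A sanity check (added illustration): starting at the origin and heading due east (bearing 90 degrees) for one degree of arc should land at roughly longitude 1, latitude 0.

lon, lat = vincenty(0.0, 0.0, 90.0, 1.0)
# lon ~= 1.0, lat ~= 0.0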
25,065
def getVolumeInformation( self, volumeNameBuffer, volumeNameSize, volumeSerialNumber, maximumComponentLength, fileSystemFlags, fileSystemNameBuffer, fileSystemNameSize, dokanFileInfo, ): ret = self.operations() ctypes.memmove( volumeNameBuffer, ret[], min( ctypes.sizeof(ctypes.c_wchar) * len(ret[]), volumeNameSize, ), ) serialNum = ctypes.c_ulong(self.serialNumber) ctypes.memmove( volumeSerialNumber, ctypes.byref(serialNum), ctypes.sizeof(ctypes.c_ulong) ) maxCompLen = ctypes.c_ulong(ret[]) ctypes.memmove( maximumComponentLength, ctypes.byref(maxCompLen), ctypes.sizeof(ctypes.c_ulong), ) fsFlags = ctypes.c_ulong(ret[]) ctypes.memmove( fileSystemFlags, ctypes.byref(fsFlags), ctypes.sizeof(ctypes.c_ulong) ) ctypes.memmove( fileSystemNameBuffer, ret[], min( ctypes.sizeof(ctypes.c_wchar) * len(ret[]), fileSystemNameSize, ), ) return d1_onedrive.impl.drivers.dokan.const.DOKAN_SUCCESS
Get information about the volume. :param volumeNameBuffer: buffer for volume name :type volumeNameBuffer: ctypes.c_void_p :param volumeNameSize: volume name buffer size :type volumeNameSize: ctypes.c_ulong :param volumeSerialNumber: buffer for volume serial number :type volumeSerialNumber: ctypes.c_void_p :param maximumComponentLength: buffer for maximum component length :type maximumComponentLength: ctypes.c_void_p :param fileSystemFlags: buffer for file system flags :type fileSystemFlags: ctypes.c_void_p :param fileSystemNameBuffer: buffer for file system name :type fileSystemNameBuffer: ctypes.c_void_p :param fileSystemNameSize: file system name buffer size :type fileSystemNameSize: ctypes.c_ulong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
25,066
def call_async(self, fn, *args, **kwargs): LOG.debug(, self, CallSpec(fn, args, kwargs)) return self.context.send_async(self.make_msg(fn, *args, **kwargs))
Arrange for `fn(*args, **kwargs)` to be invoked on the context's main thread. :param fn: A free function in module scope or a class method of a class directly reachable from module scope: .. code-block:: python # mymodule.py def my_func(): '''A free function reachable as mymodule.my_func''' class MyClass: @classmethod def my_classmethod(cls): '''Reachable as mymodule.MyClass.my_classmethod''' def my_instancemethod(self): '''Unreachable: requires a class instance!''' class MyEmbeddedClass: @classmethod def my_classmethod(cls): '''Not directly reachable from module scope!''' :param tuple args: Function arguments, if any. See :ref:`serialization-rules` for permitted types. :param dict kwargs: Function keyword arguments, if any. See :ref:`serialization-rules` for permitted types. :returns: :class:`mitogen.core.Receiver` configured to receive the result of the invocation: .. code-block:: python recv = context.call_async(os.check_output, 'ls /tmp/') try: # Prints output once it is received. msg = recv.get() print(msg.unpickle()) except mitogen.core.CallError, e: print('Call failed:', str(e)) Asynchronous calls may be dispatched in parallel to multiple contexts and consumed as they complete using :class:`mitogen.select.Select`.
25,067
def handle_one_request(self):
    self.raw_requestline = self.rfile.readline()
    if not self.raw_requestline:
        self.close_connection = 1
    elif self.parse_request():
        return self.run_wsgi()
Handle a single HTTP request.
25,068
def get_responses(self, assessment_taken_id): mgr = self._get_provider_manager(, local=True) taken_lookup_session = mgr.get_assessment_taken_lookup_session(proxy=self._proxy) taken_lookup_session.use_federated_bank_view() taken = taken_lookup_session.get_assessment_taken(assessment_taken_id) response_list = OsidListList() if in taken._my_map: for section_id in taken._my_map[]: section = get_assessment_section(Id(section_id), runtime=self._runtime, proxy=self._proxy) response_list.append(section.get_responses()) return ResponseList(response_list)
Gets the submitted responses. arg: assessment_taken_id (osid.id.Id): ``Id`` of the ``AssessmentTaken`` return: (osid.assessment.ResponseList) - the submitted answers raise: NotFound - ``assessment_taken_id`` is not found raise: NullArgument - ``assessment_taken_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
25,069
def spec_var(model, ph):
    var_dens = 2. * np.abs(ph)**2 / model.M**2
    var_dens[..., 0] /= 2
    var_dens[..., -1] /= 2
    return var_dens.sum(axis=(-1, -2))
Compute variance of ``p`` from Fourier coefficients ``ph``. Parameters ---------- model : pyqg.Model instance The model object from which `ph` originates ph : complex array The field on which to compute the variance Returns ------- var_dens : float The variance of `ph`
25,070
def to_array(self):
    array = super(ForceReply, self).to_array()
    array['force_reply'] = bool(self.force_reply)
    if self.selective is not None:
        array['selective'] = bool(self.selective)
    return array
Serializes this ForceReply to a dictionary. :return: dictionary representation of this object. :rtype: dict
25,071
def normal_case(name):
    # The regex literals were stripped in extraction; these are the standard
    # camel-case splitting patterns, reconstructed to match the docstring.
    s1 = re.sub(r'(.)([A-Z][a-z]+)', r'\1 \2', name)
    return re.sub(r'([a-z0-9])([A-Z])', r'\1 \2', s1).lower()
Converts "CamelCaseHere" to "camel case here".
25,072
def init(redis_address=None, num_cpus=None, num_gpus=None, resources=None,
         object_store_memory=None, redis_max_memory=None, log_to_driver=True,
         node_ip_address=None, object_id_seed=None, local_mode=False,
         redirect_worker_output=None, redirect_output=None,
         ignore_reinit_error=False, num_redis_shards=None,
         redis_max_clients=None, redis_password=None, plasma_directory=None,
         huge_pages=False, include_webui=False, driver_id=None,
         configure_logging=True, logging_level=logging.INFO,
         logging_format=ray_constants.LOGGER_FORMAT,
         plasma_store_socket_name=None, raylet_socket_name=None,
         temp_dir=None, load_code_from_local=False, _internal_config=None):
    if configure_logging:
        setup_logger(logging_level, logging_format)

    if local_mode:
        driver_mode = LOCAL_MODE
    else:
        driver_mode = SCRIPT_MODE

    if setproctitle is None:
        logger.warning(
            "WARNING: Not updating worker name since `setproctitle` is not "
            "installed. Install this with `pip install setproctitle` "
            "(or ray[debug]) to enable monitoring of worker processes.")

    if global_worker.connected:
        if ignore_reinit_error:
            logger.error("Calling ray.init() again after it has already been "
                         "called.")
            return
        else:
            raise Exception("Perhaps you called ray.init twice by accident? "
                            "This error can be suppressed by passing in "
                            "'ignore_reinit_error=True' or by calling "
                            "'ray.shutdown()' prior to 'ray.init()'.")

    # Convert hostnames to numerical IP addresses.
    if node_ip_address is not None:
        node_ip_address = services.address_to_ip(node_ip_address)
    if redis_address is not None:
        redis_address = services.address_to_ip(redis_address)

    global _global_node
    if driver_mode == LOCAL_MODE:
        # If starting Ray in LOCAL_MODE, don't start any other processes.
        _global_node = ray.node.LocalNode()
    elif redis_address is None:
        # In this case, we need to start a new cluster.
        ray_params = ray.parameter.RayParams(
            redis_address=redis_address,
            node_ip_address=node_ip_address,
            object_id_seed=object_id_seed,
            local_mode=local_mode,
            driver_mode=driver_mode,
            redirect_worker_output=redirect_worker_output,
            redirect_output=redirect_output,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            resources=resources,
            num_redis_shards=num_redis_shards,
            redis_max_clients=redis_max_clients,
            redis_password=redis_password,
            plasma_directory=plasma_directory,
            huge_pages=huge_pages,
            include_webui=include_webui,
            object_store_memory=object_store_memory,
            redis_max_memory=redis_max_memory,
            plasma_store_socket_name=plasma_store_socket_name,
            raylet_socket_name=raylet_socket_name,
            temp_dir=temp_dir,
            load_code_from_local=load_code_from_local,
            _internal_config=_internal_config,
        )
        _global_node = ray.node.Node(
            head=True, shutdown_at_exit=False, ray_params=ray_params)
    else:
        # In this case, we are connecting to an existing cluster.
        if num_cpus is not None or num_gpus is not None:
            raise Exception("When connecting to an existing cluster, num_cpus "
                            "and num_gpus must not be provided.")
        if resources is not None:
            raise Exception("When connecting to an existing cluster, "
                            "resources must not be provided.")
        if num_redis_shards is not None:
            raise Exception("When connecting to an existing cluster, "
                            "num_redis_shards must not be provided.")
        if redis_max_clients is not None:
            raise Exception("When connecting to an existing cluster, "
                            "redis_max_clients must not be provided.")
        if object_store_memory is not None:
            raise Exception("When connecting to an existing cluster, "
                            "object_store_memory must not be provided.")
        if redis_max_memory is not None:
            raise Exception("When connecting to an existing cluster, "
                            "redis_max_memory must not be provided.")
        if plasma_directory is not None:
            raise Exception("When connecting to an existing cluster, "
                            "plasma_directory must not be provided.")
        if huge_pages:
            raise Exception("When connecting to an existing cluster, "
                            "huge_pages must not be provided.")
        if temp_dir is not None:
            raise Exception("When connecting to an existing cluster, "
                            "temp_dir must not be provided.")
        if plasma_store_socket_name is not None:
            raise Exception("When connecting to an existing cluster, "
                            "plasma_store_socket_name must not be provided.")
        if raylet_socket_name is not None:
            raise Exception("When connecting to an existing cluster, "
                            "raylet_socket_name must not be provided.")
        if _internal_config is not None:
            raise Exception("When connecting to an existing cluster, "
                            "_internal_config must not be provided.")

        # In this case, we only need to connect to the node.
        ray_params = ray.parameter.RayParams(
            node_ip_address=node_ip_address,
            redis_address=redis_address,
            redis_password=redis_password,
            object_id_seed=object_id_seed,
            temp_dir=temp_dir,
            load_code_from_local=load_code_from_local)
        _global_node = ray.node.Node(
            ray_params, head=False, shutdown_at_exit=False, connect_only=True)

    connect(
        _global_node,
        mode=driver_mode,
        log_to_driver=log_to_driver,
        worker=global_worker,
        driver_id=driver_id)

    for hook in _post_init_hooks:
        hook()

    return _global_node.address_info
Connect to an existing Ray cluster or start one and connect to it. This method handles two cases. Either a Ray cluster already exists and we just attach this driver to it, or we start all of the processes associated with a Ray cluster and attach to the newly started cluster. To start Ray and all of the relevant processes, use this as follows: .. code-block:: python ray.init() To connect to an existing Ray cluster, use this as follows (substituting in the appropriate address): .. code-block:: python ray.init(redis_address="123.45.67.89:6379") Args: redis_address (str): The address of the Redis server to connect to. If this address is not provided, then this command will start Redis, a raylet, a plasma store, a plasma manager, and some workers. It will also kill these processes when Python exits. num_cpus (int): Number of CPUs the user wishes all raylets to be configured with. num_gpus (int): Number of GPUs the user wishes all raylets to be configured with. resources: A dictionary mapping the name of a resource to the quantity of that resource available. object_store_memory: The amount of memory (in bytes) to start the object store with. By default, this is capped at 20GB but can be set higher. redis_max_memory: The max amount of memory (in bytes) to allow each redis shard to use. Once the limit is exceeded, redis will start LRU eviction of entries. This only applies to the sharded redis tables (task, object, and profile tables). By default, this is capped at 10GB but can be set higher. log_to_driver (bool): If true, then output from all of the worker processes on all nodes will be directed to the driver. node_ip_address (str): The IP address of the node that we are on. object_id_seed (int): Used to seed the deterministic generation of object IDs. The same value can be used across multiple runs of the same driver in order to generate the object IDs in a consistent manner. However, the same ID should not be used for different drivers. local_mode (bool): True if the code should be executed serially without Ray. This is useful for debugging. ignore_reinit_error: True if we should suppress errors from calling ray.init() a second time. num_redis_shards: The number of Redis shards to start in addition to the primary Redis shard. redis_max_clients: If provided, attempt to configure Redis with this maxclients number. redis_password (str): Prevents external clients without the password from connecting to Redis if provided. plasma_directory: A directory where the Plasma memory mapped files will be created. huge_pages: Boolean flag indicating whether to start the Object Store with hugetlbfs support. Requires plasma_directory. include_webui: Boolean flag indicating whether to start the web UI, which displays the status of the Ray cluster. driver_id: The ID of the driver. configure_logging: True if logging should be configured here. Otherwise, the user may want to configure it separately. logging_level: Logging level, default will be logging.INFO. logging_format: Logging format, default contains a timestamp, filename, line number, and message. See ray_constants.py. plasma_store_socket_name (str): If provided, it will specify the socket name used by the plasma store. raylet_socket_name (str): If provided, it will specify the socket path used by the raylet process. temp_dir (str): If provided, it will specify the root temporary directory for the Ray process. load_code_from_local: Whether code should be loaded from a local module or from the GCS.
_internal_config (str): JSON configuration for overriding RayConfig defaults. For testing purposes ONLY. Returns: Address information about the started processes. Raises: Exception: An exception is raised if an inappropriate combination of arguments is passed in.
25,073
def Cylinder(center=(0., 0., 0.), direction=(1., 0., 0.), radius=0.5,
             height=1.0, resolution=100, **kwargs):
    # Accept either 'capping' or the legacy 'cap_ends' keyword.
    capping = kwargs.get('capping', kwargs.get('cap_ends', True))
    cylinderSource = vtk.vtkCylinderSource()
    cylinderSource.SetRadius(radius)
    cylinderSource.SetHeight(height)
    cylinderSource.SetCapping(capping)
    cylinderSource.SetResolution(resolution)
    cylinderSource.Update()
    surf = PolyData(cylinderSource.GetOutput())
    surf.rotate_z(-90)
    translate(surf, center, direction)
    return surf
Create the surface of a cylinder. Parameters ---------- center : list or np.ndarray Location of the centroid in [x, y, z] direction : list or np.ndarray Direction cylinder points to in [x, y, z] radius : float Radius of the cylinder. height : float Height of the cylinder. resolution : int Number of points on the circular face of the cylinder. capping : bool, optional Cap cylinder ends with polygons. Default True Returns ------- cylinder : vtki.PolyData Cylinder surface. Examples -------- >>> import vtki >>> import numpy as np >>> cylinder = vtki.Cylinder(np.array([1, 2, 3]), np.array([1, 1, 1]), 1, 1) >>> cylinder.plot() # doctest:+SKIP
25,074
def set_standard(self):
    if self.__maxrange_state:
        data = self._controller.command(self._id, 'charge_standard',
                                        wake_if_asleep=True)
        if data and data['response']['result']:
            self.__maxrange_state = False
        self.__manual_update_time = time.time()
Set the charger to standard range for daily commute.
25,075
def title(words_quantity=4):
    result = words(quantity=words_quantity)
    # Append a closing punctuation mark (the exact character set may differ).
    result += random.choice('.!?')
    return result.capitalize()
Return a random sentence to be used as e.g. an e-mail subject.
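To make the pattern concrete, here is a minimal self-contained sketch of the same idea; the word list and the helper name fake_title are illustrative stand-ins for the module's own words() helper, not part of its API:

import random

WORDS = ["lorem", "ipsum", "dolor", "sit", "amet"]  # hypothetical corpus

def fake_title(words_quantity=4):
    # Join random words, end with a random punctuation mark, then capitalize.
    result = " ".join(random.choice(WORDS) for _ in range(words_quantity))
    result += random.choice(".!?")
    return result.capitalize()

print(fake_title())  # e.g. "Dolor amet lorem sit!"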
25,076
def init(name, languages, run):
    contents = [file_name for file_name in glob.glob("*.*")
                if file_name != "brains.yaml"]
    with open(CONFIG_FILE, "w") as output:
        output.write(yaml.safe_dump({
            "run": run,
            "name": name,
            "languages": languages,
            "contents": contents,
        }, default_flow_style=False))
    print ""
    cprint("Automatically including the following files in brain contents:", "cyan")
    for file_name in contents:
        print "\t", file_name
    print ""
    cprint("done! brains.yaml created", "cyan")
Initializes your CONFIG_FILE for the current submission
25,077
def get_initial_status_brok(self, extra=None):
    data = {'uuid': self.uuid}
    self.fill_data_brok_from(data, 'full_status')
    if extra:
        data.update(extra)
    return Brok({'type': 'initial_' + self.my_type + '_status', 'data': data})
Create an initial status brok :param extra: some extra information to be added in the brok data :type extra: dict :return: Brok object :rtype: alignak.Brok
25,078
def update_file(self, path):
    try:
        result, stat = self.zoo_client.get(path, watch=self.watch_file)
    except ZookeeperError:
        self.set_valid(False)
        self.call_error(self.INVALID_GET)
        return False

    if self.pointer:
        if result is not None and len(result) > 0:
            self.pointed_at_expired = False
            self.point_path = result
            if self.compare_pointer(result):
                self.update_pointed()
        else:
            self.pointed_at_expired = True
            self.old_pointed = ''
            self.old_data = ''
            self.set_valid(False)
            self.call_error(self.INVALID_PATH)
    else:
        if self.compare_data(result):
            self.call_config(result)
        self.set_valid(True)

    return True
Updates the file watcher and calls the appropriate method for results @return: False if we need to keep trying the connection
25,079
def dump_addresses(self, network, filename=None):
    addrs = [addr.data for a in self.accounts.values()
             if a.network == network
             for addr in a.addresses]
    if filename:
        from json import dump
        with open(filename, 'w') as f:
            dump(addrs, f)
    return addrs
Return a list of address dictionaries for each address in all of the accounts in this wallet that belong to the network specified by `network`
25,080
def propagate(self, assumptions=[], phase_saving=0):
    if self.solver:
        return self.solver.propagate(assumptions, phase_saving)
The method takes a list of assumption literals and does unit propagation of each of these literals consecutively. A Boolean status is returned followed by a list of assigned (assumed and also propagated) literals. The status is ``True`` if no conflict arised during propagation. Otherwise, the status is ``False``. Additionally, a user may specify an optional argument ``phase_saving`` (``0`` by default) to enable MiniSat-like phase saving. **Note** that only MiniSat-like solvers support this functionality (e.g. :class:`Lingeling` does not support it). :param assumptions: a list of assumption literals. :param phase_saving: enable phase saving (can be ``0``, ``1``, and ``2``). :type assumptions: iterable(int) :type phase_saving: int :rtype: tuple(bool, list(int)). Usage example: .. code-block:: python >>> from pysat.solvers import Glucose3 >>> from pysat.card import * >>> >>> cnf = CardEnc.atmost(lits=range(1, 6), bound=1, encoding=EncType.pairwise) >>> g = Glucose3(bootstrap_with=cnf.clauses) >>> >>> g.propagate(assumptions=[1]) (True, [1, -2, -3, -4, -5]) >>> >>> g.add_clause([2]) >>> g.propagate(assumptions=[1]) (False, []) >>> >>> g.delete()
25,081
def on_unavailable(self, query, consistency, required_replicas, alive_replicas,
                   retry_num):
    return (self.RETRY_NEXT_HOST, None) if retry_num == 0 else (self.RETHROW, None)
This is called when the coordinator node determines that a read or write operation cannot be successful because the number of live replicas is too low to meet the requested :class:`.ConsistencyLevel`. This means that the read or write operation was never forwarded to any replicas. `query` is the :class:`.Statement` that failed. `consistency` is the :class:`.ConsistencyLevel` that the operation was attempted at. `required_replicas` is the number of replicas that would have needed to acknowledge the operation to meet the requested consistency level. `alive_replicas` is the number of replicas that the coordinator considered alive at the time of the request. `retry_num` counts how many times the operation has been retried, so the first time this method is called, `retry_num` will be 0. By default, if this is the first retry, it triggers a retry on the next host in the query plan with the same consistency level. If this is not the first retry, no retries will be attempted and the error will be re-raised.
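For illustration, a hedged sketch of a custom policy built on the same hook, assuming the DataStax Python driver's cassandra.policies.RetryPolicy base class (the retry budget of two extra hosts is an arbitrary choice for the example):

from cassandra.policies import RetryPolicy

class PatientUnavailablePolicy(RetryPolicy):
    """Try up to two other hosts before rethrowing an Unavailable error."""

    def on_unavailable(self, query, consistency, required_replicas,
                       alive_replicas, retry_num):
        if retry_num < 2:
            # Keep the same consistency level, but send the retry to the
            # next host in the query plan.
            return self.RETRY_NEXT_HOST, None
        return self.RETHROW, None

A policy like this would typically be installed as the cluster's default retry policy (for example via cluster.default_retry_policy).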
25,082
def _get_modules(path):
    lst = []
    folder_contents = os.listdir(path)
    is_python_module = "__init__.py" in folder_contents

    if is_python_module:
        for file in folder_contents:
            full_path = os.path.join(path, file)

            if is_file(full_path):
                lst.append(full_path)

            if is_folder(full_path):
                lst += _get_modules(full_path)

    return list(set(lst))
Finds modules in folder recursively :param path: directory :return: list of modules
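A quick usage sketch, assuming is_file and is_folder are thin wrappers around os.path.isfile and os.path.isdir, and using a hypothetical package directory:

for module_path in sorted(_get_modules("mypackage")):
    print(module_path)   # every file found under mypackage/, recursively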
25,083
def import_from_xml(xml, edx_video_id, resource_fs, static_dir,
                    external_transcripts=dict(), course_id=None):
    if xml.tag != 'video_asset':
        raise ValCannotCreateError()
    try:
        if not edx_video_id:
            raise Video.DoesNotExist
        video = Video.objects.get(edx_video_id=edx_video_id)
        logger.info(
            "edx_video_id '%s' present in course '%s' not imported because it exists in VAL.",
            edx_video_id,
            course_id,
        )
        create_transcript_objects(xml, edx_video_id, resource_fs, static_dir,
                                  external_transcripts)
        return edx_video_id
Imports data from a video_asset element about the given video_id. If the edx_video_id already exists, then no changes are made. If an unknown profile is referenced by an encoded video, that encoding will be ignored. Arguments: xml (Element): An lxml video_asset element containing import data edx_video_id (str): val video id resource_fs (OSFS): Import file system. static_dir (str): The Directory to retrieve transcript file. external_transcripts (dict): A dict containing the list of names of the external transcripts. Example: { 'en': ['The_Flash.srt', 'Harry_Potter.srt'], 'es': ['Green_Arrow.srt'] } course_id (str): The ID of a course to associate the video with Raises: ValCannotCreateError: if there is an error importing the video Returns: edx_video_id (str): val video id.
25,084
def delete_many(self, keys, noreply=None):
    if not keys:
        return True

    if noreply is None:
        noreply = self.default_noreply

    cmds = []
    for key in keys:
        cmds.append(
            b'delete ' + self.check_key(key) +
            (b' noreply' if noreply else b'') +
            b'\r\n')
    self._misc_cmd(cmds, b'delete', noreply)
    return True
A convenience function to delete multiple keys. Args: keys: list(str), the list of keys to delete. noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True. If an exception is raised then all, some or none of the keys may have been deleted. Otherwise all the keys have been sent to memcache for deletion and if noreply is False, they have been acknowledged by memcache.
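A short usage sketch, assuming a pymemcache-style client and a memcached server reachable on localhost:11211 (the address and keys are illustrative):

from pymemcache.client.base import Client

client = Client(("localhost", 11211))
client.set_many({"session:1": "alice", "session:2": "bob"})

# Fire-and-forget removal of both keys; with noreply=True the call returns
# without waiting for memcached to acknowledge each delete.
client.delete_many(["session:1", "session:2"], noreply=True)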
25,085
def validate_satisfied_by(self, obj):
    if self.satisfied_by(obj):
        return obj

    raise self.make_type_constraint_error(obj, self)
Return `obj` if the object satisfies this type constraint, or raise. :raises: `TypeConstraintError` if `obj` does not satisfy the constraint.
25,086
def get_function_signature(func):
    if func is None:
        return
    try:
        func_name = func.__name__
    except AttributeError:
        func_name = 'None'
    if not inspect.isfunction(func):
        raise TypeError('The argument must be a function object: '
                        '%s type is %s' % (func_name, type(func)))
    return func_name + str(inspect.signature(func))
Return the signature string of the specified function. >>> def foo(name): pass >>> get_function_signature(foo) 'foo(name)' >>> something = 'Hello' >>> get_function_signature(something) Traceback (most recent call last): ... TypeError: The argument must be a function object: None type is <class 'str'>
25,087
def get_reporters(self):
    if not hasattr(self, '_report_generators_by_key'):
        self._report_generators_by_key = {r.key: r for r in self.report_generators}
    return self._report_generators_by_key
Converts the report_generators list to a dictionary, and caches the result. :return: A dictionary with such references.
25,088
def get_relname_and_parent(self, treepos):
    node = self.dgtree[treepos]
    node_type = get_node_type(node)
    assert node_type in (TreeNodeTypes.relation_node, TreeNodeTypes.leaf_node)

    parent_pos = self.get_parent_treepos(treepos)
    if parent_pos is None:
        return None, None
    else:
        parent_label = self.get_parent_label(treepos)
        grandparent_pos = self.get_parent_treepos(parent_pos)
        if grandparent_pos is None:
            return None, None
        else:
            grandparent_id = self.get_node_id(grandparent_pos)
            grandparent_label = self.get_parent_label(parent_pos)
            reltype = self.get_reltype(grandparent_label)
            # NOTE: the literal labels below ('rst', 'N', 'S', 'span',
            # 'multinuc') follow standard RS3 conventions and are a
            # best-effort reconstruction.
            if reltype == 'rst':
                if parent_label == 'N':
                    # the nucleus points to the spanning node above it
                    return 'span', grandparent_id
                elif parent_label == 'S':
                    # the satellite points to its nucleus sibling
                    cousins_pos = self.get_cousins_treepos(treepos)
                    assert len(cousins_pos) == 1
                    cousin_id = self.get_node_id(cousins_pos[0])
                    return grandparent_label, cousin_id
            elif reltype == 'multinuc':
                return grandparent_label, grandparent_id
Return the (relation name, parent ID) tuple that a node is in. Return None if this node is not in a relation.
25,089
def build_attachment2():
    attachment = Attachment()
    attachment.content = "BwdW"
    attachment.type = "image/png"
    attachment.filename = "banner.png"
    attachment.disposition = "inline"
    attachment.content_id = "Banner"
    return attachment
Build attachment mock.
25,090
def _check_steps(a, b):
    if a.step != 1:
        raise ValueError('a.step must be equal to 1, got: %s' % a.step)
    if b.step != 1:
        raise ValueError('b.step must be equal to 1, got: %s' % b.step)
Check that the steps of ``a`` and ``b`` are both 1. Parameters ---------- a : range The first range to check. b : range The second range to check. Raises ------ ValueError Raised when either step is not 1.
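For illustration, a quick check of how the helper behaves, assuming the function above is in scope:

_check_steps(range(0, 10), range(3, 8))        # both steps are 1 -> no error

try:
    _check_steps(range(0, 10, 2), range(3, 8))
except ValueError as exc:
    print(exc)                                 # complains that a.step is not 1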
25,091
def euler_scheme(traj, diff_func):
    steps = traj.steps
    initial_conditions = traj.initial_conditions
    dimension = len(initial_conditions)

    result_array = np.zeros((steps, dimension))
    func_params_dict = traj.func_params.f_to_dict(short_names=True,
                                                  fast_access=True)
    result_array[0] = initial_conditions

    for idx in range(1, steps):
        result_array[idx] = diff_func(result_array[idx-1], **func_params_dict) * traj.dt + \
                            result_array[idx-1]
Simulation function for Euler integration. :param traj: Container for parameters and results :param diff_func: The differential equation we want to integrate
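To make the update rule concrete, here is a minimal standalone sketch of the same explicit Euler step with a simple damped-oscillator right-hand side standing in for diff_func; the names and parameter values below are illustrative and not part of the pypet API:

import numpy as np

def damped_oscillator(y, k=1.0, c=0.1):
    # y = [position, velocity]; returns dy/dt
    return np.array([y[1], -k * y[0] - c * y[1]])

dt, steps = 0.01, 1000
result = np.zeros((steps, 2))
result[0] = [1.0, 0.0]                     # initial conditions
for idx in range(1, steps):
    result[idx] = damped_oscillator(result[idx - 1]) * dt + result[idx - 1]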
25,092
def program_checks(job, input_args):
    # Command-line tools the pipeline shells out to (a representative set;
    # the exact list depends on the pipeline).
    for program in ['curl', 'docker', 'unzip', 'samtools']:
        assert which(program), 'Program "{}" must be installed on every node.'.format(program)
    job.addChildJobFn(download_shared_files, input_args)
Checks that dependency programs are installed. input_args: dict Dictionary of input arguments (from main())
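A rough standalone equivalent using only the standard library, for readers without the pipeline's which helper; the tool names are placeholders:

import shutil

def check_programs(programs=("curl", "docker")):
    missing = [p for p in programs if shutil.which(p) is None]
    if missing:
        raise RuntimeError("Missing required programs: " + ", ".join(missing))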
25,093
def run(self):
    env = self.state.document.settings.env
    modname = self.arguments[0].strip()
    noindex = 'noindex' in self.options
    env.temp_data['chpl:module'] = modname
    ret = []
    if not noindex:
        env.domaindata['chpl']['modules'][modname] = \
            (env.docname, self.options.get('synopsis', ''),
             self.options.get('platform', ''), 'deprecated' in self.options)
        # Duplicate entry in 'objects' so the module itself can be
        # cross-referenced.
        env.domaindata['chpl']['objects'][modname] = (env.docname, 'module')
        targetnode = nodes.target('', '', ids=['module-' + modname], ismod=True)
        self.state.document.note_explicit_target(targetnode)
        ret.append(targetnode)
        indextext = _('%s (module)') % modname
        inode = addnodes.index(entries=[('single', indextext,
                                         'module-' + modname, '')])
        ret.append(inode)
    return ret
Custom execution for the Chapel module directive. This class is instantiated by the directive implementation and then this method is called. It parses the options on the module directive, updates the environment accordingly, and creates an index entry for the module. Based on the Python domain module directive.
25,094
def check(codeString, filename, reporter=modReporter.Default, settings_path=None,
          **setting_overrides):
    if not settings_path and filename:
        settings_path = os.path.dirname(os.path.abspath(filename))
    settings_path = settings_path or os.getcwd()

    active_settings = settings.from_path(settings_path).copy()
    for key, value in itemsview(setting_overrides):
        access_key = key.replace('not_', '').lower()
        if type(active_settings.get(access_key)) in (list, tuple):
            if key.startswith('not_'):
                active_settings[access_key] = list(set(active_settings[access_key]).difference(value))
            else:
                active_settings[access_key] = list(set(active_settings[access_key]).union(value))
        else:
            active_settings[key] = value
    active_settings.update(setting_overrides)

    if _should_skip(filename, active_settings.get('skip', [])):
        if active_settings.get('directly_being_checked', None) == 1:
            reporter.flake(FileSkipped(filename))
            return 1
        elif active_settings.get('verbose', False):
            ignore = active_settings.get('ignore_frosted_errors', [])
            if (not "W200" in ignore and not "W201" in ignore):
                reporter.flake(FileSkipped(filename, None,
                                           verbose=active_settings.get('verbose')))
        return 0

    try:
        tree = compile(codeString, filename, "exec", _ast.PyCF_ONLY_AST)
    except SyntaxError:
        value = sys.exc_info()[1]
        msg = value.args[0]
        (lineno, offset, text) = value.lineno, value.offset, value.text

    w = checker.Checker(tree, filename, None, ignore_lines=_noqa_lines(codeString),
                        **active_settings)
    w.messages.sort(key=lambda m: m.lineno)
    for warning in w.messages:
        reporter.flake(warning)
    return len(w.messages)
Check the Python source given by codeString for unfrosted flakes.
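A hedged usage sketch, assuming this is the frosted-style api.check entry point and that the default reporter prints to stdout (the sample source string is illustrative):

source = "import os\n"      # unused import, so one flake is expected

num_flakes = check(source, "example.py")
print(num_flakes)           # number of warnings reported via the default reporter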
25,095
def do_cd(self, arglist):
    # Expect exactly one argument: the directory to change to.
    if not arglist or len(arglist) != 1:
        self.perror("cd requires exactly 1 argument:", traceback_war=False)
        self.do_help('cd')
        self._last_result = cmd2.CommandResult('', 'Bad arguments')
        return

    # Convert relative paths to absolute paths.
    path = os.path.abspath(os.path.expanduser(arglist[0]))

    # Make sure the directory exists, is a directory, and we have read access.
    out = ''
    err = None
    data = None
    if not os.path.isdir(path):
        err = '{} is not a directory'.format(path)
    elif not os.access(path, os.R_OK):
        err = 'You do not have read access to {}'.format(path)
    else:
        try:
            os.chdir(path)
        except Exception as ex:
            err = '{}'.format(ex)
        else:
            out = 'Successfully changed directory to {}\n'.format(path)
            self.stdout.write(out)
            data = path

    if err:
        self.perror(err, traceback_war=False)
    self._last_result = cmd2.CommandResult(out, err, data)
Change directory. Usage: cd <new_dir>
25,096
def build_index_name(app, *parts):
    base_index = os.path.splitext(
        '-'.join([part for part in parts if part])
    )[0]
    return prefix_index(app=app, index=base_index)
Build an index name from parts. :param parts: Parts that should be combined to make an index name.
25,097
def has_target(alias, target):
    if target == '':
        raise SaltInvocationError('target can not be an empty string')
    aliases = list_aliases()
    if alias not in aliases:
        return False
    if isinstance(target, list):
        target = ', '.join(target)
    return target == aliases[alias]
Return true if the alias/target is set CLI Example: .. code-block:: bash salt '*' aliases.has_target alias target
25,098
def semanticSimilarity(self, text1, text2, distanceMeasure="cosine"):
    return self._er.jsonRequestAnalytics("/api/v1/semanticSimilarity", {
        "text1": text1,
        "text2": text2,
        "distanceMeasure": distanceMeasure})
determine the semantic similarity of the two provided documents @param text1: first document to analyze @param text2: second document to analyze @param distanceMeasure: distance measure to use for comparing two documents. Possible values are "cosine" (default) or "jaccard" @returns: dict
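A short usage sketch, assuming the Event Registry SDK's Analytics wrapper and a valid API key (both the key and the sample texts are placeholders):

from eventregistry import EventRegistry, Analytics

er = EventRegistry(apiKey="YOUR_API_KEY")
analytics = Analytics(er)

sim = analytics.semanticSimilarity(
    "The central bank raised interest rates again.",
    "Borrowing costs climbed after the latest rate decision.",
    distanceMeasure="cosine")
print(sim)   # dict describing the similarity of the two texts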
25,099
def _generate(self, size=None):
    "Generates a new word"
    corpus_letters = list(self.vectors.keys())
    current_letter = random.choice(corpus_letters)
    if size is None:
        size = int(random.normalvariate(self.avg, self.std_dev))
    letters = [current_letter]
    for _ in range(size):
        if current_letter not in corpus_letters:
            break
        found_letter = self.vectors[current_letter].choose()
        letters.append(found_letter)
        current_letter = found_letter
    return ''.join(letters)
Generates a new word
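A self-contained sketch of the underlying idea: a first-order letter chain where each next letter is drawn from a weighted distribution conditioned on the previous one; the tiny hand-built table below is a stand-in for the trained self.vectors:

import random

# letter -> (candidate next letters, weights); a toy substitute for self.vectors
VECTORS = {
    "a": (["n", "r", "t"], [3, 2, 1]),
    "n": (["a", "e"], [1, 2]),
    "r": (["a", "o"], [2, 1]),
    "t": (["a", "e", "o"], [1, 1, 1]),
    "e": (["r", "n"], [1, 1]),
    "o": (["n", "r"], [1, 1]),
}

def generate_word(size=6):
    current = random.choice(list(VECTORS))
    letters = [current]
    for _ in range(size - 1):
        if current not in VECTORS:
            break
        nxt, weights = VECTORS[current]
        current = random.choices(nxt, weights=weights)[0]
        letters.append(current)
    return "".join(letters)

print(generate_word())   # e.g. "anaran"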