Dataset schema: "Unnamed: 0" (int64, values 0–389k), "code" (string, lengths 26–79.6k), "docstring" (string, lengths 1–46.9k).
22,800
def assemble(input_):
    global MEMORY
    if MEMORY is None:
        MEMORY = Memory()
    parser.parse(input_, lexer=LEXER, debug=OPTIONS.Debug.value > 2)
    if len(MEMORY.scopes):
        # The error message literal was stripped from this sample; a plausible reconstruction:
        error(MEMORY.scopes[-1], 'Missing ENDP to close this scope')
    return gl.has_errors
Assembles the input string and leaves the result in the MEMORY global object
22,801
def next(self):
    try:
        entry = {}
        row = self._csv_reader.next()
        for i in range(0, len(row)):
            entry[self._headers[i]] = row[i]
        return entry
    except Exception as e:
        self._file.close()
        raise e
Gets next entry as a dictionary. Returns: object - Object key/value pair representing a row. {key1: value1, key2: value2, ...}
22,802
def forum_list(context, forum_visibility_contents):
    # The string literals below were stripped from this sample; the key names
    # are reconstructed from the values they hold and are assumptions.
    request = context.get('request')
    tracking_handler = TrackingHandler(request=request)
    data_dict = {
        'forum_contents': forum_visibility_contents,
        'unread_forums': tracking_handler.get_unread_forums_from_list(
            request.user, forum_visibility_contents.forums),
        'user': request.user,
        'request': request,
    }
    root_level = forum_visibility_contents.root_level
    if root_level is not None:
        data_dict['root_level'] = root_level
        data_dict['root_level_middle'] = root_level + 1
        data_dict['root_level_sub'] = root_level + 2
    return data_dict
Renders the considered forum list. This will render the given list of forums by respecting the order and the depth of each forum in the forums tree. Usage:: {% forum_list my_forums %}
22,803
def to_aws_name(self, name):
    name = self.absolute_name(name)
    # Literals reconstructed from the docstring: absolute names start with '/',
    # '_' is escaped as '__' and '/' becomes the component separator '_'.
    assert name.startswith('/')
    return name[1:].replace('_', '__').replace('/', '_')
Returns a transliteration of the name that is safe to use for resource names on AWS. If the given name is relative, it is converted to its absolute form before the transliteration. The transliteration uses two consecutive '_' to encode a single '_' and a single '_' to separate the name components. AWS-safe names are by definition absolute such that the leading separator can be removed. This leads to fairly readable AWS-safe names, especially for names in the root namespace, where the transliteration is the identity function if the input does not contain any '_'. This scheme only works if name components don't start with '_'. Without that condition, '/_' would become '___', the inverse of which is '_/'.
>>> ctx = Context( 'us-west-1b', namespace='/' )
>>> ctx.to_aws_name( 'foo' )
'foo'
>>> ctx.from_aws_name( 'foo' )
'foo'
Illegal paths that would introduce ambiguity need to raise an exception
>>> ctx.to_aws_name('/_') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidPathError: Invalid path '/_'
>>> ctx.to_aws_name('/_/') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidPathError: Invalid path '/_/'
>>> ctx.from_aws_name('___') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidPathError: Invalid path '/_/'
>>> ctx.to_aws_name( 'foo_bar')
'foo__bar'
>>> ctx.from_aws_name( 'foo__bar')
'foo_bar'
>>> ctx.to_aws_name( '/sub_ns/foo_bar')
'sub__ns_foo__bar'
>>> ctx.to_aws_name( 'sub_ns/foo_bar')
'sub__ns_foo__bar'
>>> ctx.from_aws_name( 'sub__ns_foo__bar' )
'sub_ns/foo_bar'
>>> ctx.to_aws_name( 'g_/' )
'g___'
>>> ctx.from_aws_name( 'g___' )
'g_/'
>>> ctx = Context( 'us-west-1b', namespace='/this_ns/' )
>>> ctx.to_aws_name( 'foo' )
'this__ns_foo'
>>> ctx.from_aws_name( 'this__ns_foo' )
'foo'
>>> ctx.to_aws_name( 'foo_bar')
'this__ns_foo__bar'
>>> ctx.from_aws_name( 'this__ns_foo__bar')
'foo_bar'
>>> ctx.to_aws_name( '/other_ns/foo_bar' )
'other__ns_foo__bar'
>>> ctx.from_aws_name( 'other__ns_foo__bar' )
'/other_ns/foo_bar'
>>> ctx.to_aws_name( 'other_ns/foo_bar' )
'this__ns_other__ns_foo__bar'
>>> ctx.from_aws_name( 'this__ns_other__ns_foo__bar' )
'other_ns/foo_bar'
>>> ctx.to_aws_name( '/this_ns/foo_bar' )
'this__ns_foo__bar'
>>> ctx.from_aws_name( 'this__ns_foo__bar' )
'foo_bar'
22,804
def stoichiometry_coefficients(compound, elements):
    stoichiometry = parse_compound(compound.strip()).count()
    return [stoichiometry[element] for element in elements]
Determine the stoichiometry coefficients of the specified elements in the specified chemical compound. :param compound: Formula of a chemical compound, e.g. 'SiO2'. :param elements: List of elements, e.g. ['Si', 'O', 'C']. :returns: List of stoichiometry coefficients.
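A minimal usage sketch for the function above, using the example inputs from its own docstring (assumes parse_compound is importable from the same package):

coeffs = stoichiometry_coefficients('SiO2', ['Si', 'O', 'C'])
print(coeffs)  # [1, 2, 0] -- SiO2 has one Si, two O, and no C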
22,805
def replace(self, html):
    self.html = html
    text = html.text()
    positions = []

    def perform_replacement(match):
        offset = sum(positions)
        start, stop = match.start() + offset, match.end() + offset
        s = self.html[start:stop]
        if self._is_replacement_allowed(s):
            repl = match.expand(self.replacement)
            self.html[start:stop] = repl
        else:
            repl = match.group()
        positions.append(match.end())
        return repl

    while True:
        if positions:
            text = text[positions[-1]:]
        text, n = self.pattern.subn(perform_replacement, text, count=1)
        if not n:
            break
Perform replacements on given HTML fragment.
22,806
def addGene(self, gene_id, gene_label, gene_type=None, gene_description=None):
    if gene_type is None:
        # The translation-table key was stripped from this sample; 'gene' is a
        # plausible default class for this method.
        gene_type = self.globaltt['gene']
    self.model.addClassToGraph(gene_id, gene_label, gene_type, gene_description)
    return
genes are classes
22,807
def main():
    # The argparse literals were stripped from this sample; '-f'/'--file' are
    # inferred from the later use of args.file, the description is assumed.
    parser = argparse.ArgumentParser(description='Validate patterns.')
    parser.add_argument('-f', '--file',
                        help="Specify this arg to read patterns from a file.",
                        type=argparse.FileType("r"))
    args = parser.parse_args()
    pass_count = fail_count = 0
    import functools
    if args.file:
        nextpattern = args.file.readline
    else:
        nextpattern = functools.partial(six.moves.input,
                                        "Enter a pattern to validate: ")
    try:
        while True:
            pattern = nextpattern()
            if not pattern:
                break
            tests_passed, err_strings = validate(pattern, True)
            if tests_passed:
                print("\nPASS: %s" % pattern)
                pass_count += 1
            else:
                for err in err_strings:
                    print(err)
                fail_count += 1
    except (EOFError, KeyboardInterrupt):
        pass
    finally:
        if args.file:
            args.file.close()
    print("\nPASSED:", pass_count, " patterns")
    print("FAILED:", fail_count, " patterns")
Continues to validate patterns until it encounters EOF within a pattern file or Ctrl-C is pressed by the user.
22,808
def update(self, pid, session, **kwargs):
    # The request name and field tuples were stripped from this sample; the
    # method name is taken from the docstring, the rest is reconstructed.
    request = TOPRequest('taobao.fenxiao.product.update')
    request['pid'] = pid
    for k, v in kwargs.iteritems():
        # The original guarded on a long tuple of optional parameter names
        # (stripped here) before skipping None values.
        if v is None:
            continue
        request[k] = v
    self.create(self.execute(request, session),
                fields=['pid', 'modified'], models={'modified': TOPDate})
    return self
taobao.fenxiao.product.update — Update a product. Updates product data on the distribution (fenxiao) platform; the call fails if no update data is passed. When adding or deleting SKUs, the original sku_ids field is ignored; use sku_properties and sku_properties_del instead.
22,809
def _do_setup(self):
    (self._base, self._power,
     self._name_singular, self._name_plural) = self._setup()
    self._unit_value = self._base ** self._power
Setup basic parameters for this class. `base` is the numeric base which when raised to `power` is equivalent to 1 unit of the corresponding prefix. I.e., base=2, power=10 represents 2^10, which is the NIST Binary Prefix for 1 Kibibyte. Likewise, for the SI prefix classes `base` will be 10, and the `power` for the Kilobyte is 3.
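A quick sketch of the relationship the docstring describes (plain Python, independent of the class above):

base, power = 2, 10
kibibyte = base ** power   # 1024 -- NIST binary prefix (1 KiB)
base, power = 10, 3
kilobyte = base ** power   # 1000 -- SI prefix (1 kB)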
22,810
def many(cls, filter=None, **kwargs):
    from mongoframes.queries import Condition, Group, to_refs

    # The kwarg name was stripped from this sample; 'projection' is assumed
    # from the _flatten_projection/_default_projection calls.
    kwargs['projection'], references, subs = \
        cls._flatten_projection(
            kwargs.get('projection', cls._default_projection)
        )
    if isinstance(filter, (Condition, Group)):
        filter = filter.to_dict()
    documents = list(cls.get_collection().find(to_refs(filter), **kwargs))
    if references:
        cls._dereference(documents, references)
    if subs:
        cls._apply_sub_frames(documents, subs)
    return [cls(d) for d in documents]
Return a list of documents matching the filter
22,811
def __ProcessHttpResponse(self, method_config, http_response, request):
    if http_response.status_code not in (http_client.OK,
                                         http_client.CREATED,
                                         http_client.NO_CONTENT):
        raise exceptions.HttpError.FromResponse(
            http_response, method_config=method_config, request=request)
    # This assignment was missing from the sample; reconstructed so the name
    # `content` used below resolves.
    content = http_response.content
    if http_response.status_code == http_client.NO_CONTENT:
        return content
    response_type = _LoadClass(method_config.response_type_name,
                               self.__client.MESSAGES_MODULE)
    return self.__client.DeserializeMessage(response_type, content)
Process the given http response.
22,812
def deltaW(N, m, h): return np.random.normal(0.0, np.sqrt(h), (N, m))
Generate sequence of Wiener increments for m independent Wiener processes W_j(t) j=0..m-1 for each of N time intervals of length h. Returns: dW (array of shape (N, m)): The [n, j] element has the value W_j((n+1)*h) - W_j(n*h)
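A small usage sketch for deltaW above (assumes numpy as np): cumulatively summing the increments reconstructs sample paths of the Wiener processes.

import numpy as np
dW = deltaW(N=1000, m=3, h=0.01)   # 1000 steps of 3 independent Wiener processes
W = np.cumsum(dW, axis=0)          # W[n, j] approximates W_j((n+1)*h)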
22,813
def transitive_subgraph_of_addresses(self, addresses, *vargs, **kwargs):
    ret = OrderedSet()
    self.walk_transitive_dependency_graph(addresses, ret.add, *vargs, **kwargs)
    return ret
Returns all transitive dependencies of `addresses`. Note that this uses `walk_transitive_dependency_graph` and the predicate is passed through, hence it trims graphs rather than just filtering out Targets that do not match the predicate. See `walk_transitive_dependency_graph` for more detail on `predicate`. :API: public :param list<Address> addresses: The root addresses to transitively close over. :param function predicate: If this parameter is not given, no Targets will be filtered out of the closure. If it is given, any Target which fails the predicate will not be walked, nor will its dependencies. Thus the predicate effectively trims out any subgraph that would only be reachable through Targets that fail the predicate. :param bool postorder: When ``True``, the traversal order is postorder (children before parents), else it is preorder (parents before children). :param function dep_predicate: Takes two parameters, the current target and the dependency of the current target. If this parameter is not given, no dependencies will be filtered when traversing the closure. If it is given, when the predicate fails, the edge to the dependency will not be expanded.
22,814
def _bit_is_one(self, n, hash_bytes):
    scale = 16
    if not hash_bytes[int(n / (scale / 2))] >> int(
            (scale / 2) - ((n % (scale / 2)) + 1)) & 1 == 1:
        return False
    return True
Check whether bit n (by index) of hash_bytes is 1 or 0.
22,815
def validate(
    message,
    get_certificate=lambda url: urlopen(url).read(),
    certificate_url_regex=DEFAULT_CERTIFICATE_URL_REGEX,
    max_age=DEFAULT_MAX_AGE,
):
    SigningCertURLValidator(certificate_url_regex).validate(message)
    if not isinstance(max_age, datetime.timedelta):
        raise ValueError("max_age must be None or a timedelta object")
    MessageAgeValidator(max_age).validate(message)
    certificate = get_certificate(message["SigningCertURL"])
    SignatureValidator(certificate).validate(message)
Validate a decoded SNS message. Parameters: message: Decoded SNS message. get_certificate: Function that receives a URL, and returns the certificate from that URL as a string. The default doesn't implement caching. certificate_url_regex: Regex that validates the signing certificate URL. Default value checks it's hosted on an AWS-controlled domain, in the format "https://sns.<data-center>.amazonaws.com/" max_age: Maximum age of an SNS message before it fails validation, expressed as a `datetime.timedelta`. Defaults to one hour, the max. lifetime of an SNS message.
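A usage sketch for the validator above (the message dict is a decoded SNS HTTP POST body; `raw_post_body` is an illustrative name):

import json

body = json.loads(raw_post_body)  # raw_post_body: bytes of the SNS POST, illustrative
validate(body)                    # raises if the signature, age, or cert URL check fails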
22,816
def handleButtonClick(self, button):
    if self.signalsBlocked():
        return
    if button == self._buttonBox.button(QDialogButtonBox.Reset):
        self.resetRequested.emit()
    self.buttonClicked.emit(button)
Handles the button click for this widget. If the Reset button was clicked, then the resetRequested signal will be emitted. All buttons will emit the buttonClicked signal. :param button | <QAbstractButton>
22,817
def add_package_origins(self, modpath):
    # Separator literals reconstructed from the docstring's 'import a.b.c' example.
    parts = modpath.split('.')
    parent = parts[0]
    for part in parts[1:]:
        child = parent + '.' + part
        if self.find_module(child):
            self.add(parent, part, child)
        parent = child
Whenever you 'import a.b.c', Python automatically binds 'b' in a to the a.b module and binds 'c' in a.b to the a.b.c module.
22,818
def bm3_g(p, v0, g0, g0p, k0, k0p): return cal_g_bm3(p, [g0, g0p], [v0, k0, k0p])
calculate shear modulus at given pressure. not fully tested with mdaap. :param p: pressure :param v0: volume at reference condition :param g0: shear modulus at reference condition :param g0p: pressure derivative of shear modulus at reference condition :param k0: bulk modulus at reference condition :param k0p: pressure derivative of bulk modulus at reference condition :return: shear modulus at high pressure
22,819
def get_feed_list(opml_obj: OPML) -> List[str]:
    rv = list()

    def collect(obj):
        for outline in obj.outlines:
            # The type literal was stripped from this sample; 'rss' is the
            # usual OPML feed outline type.
            if outline.type == 'rss' and outline.xml_url:
                rv.append(outline.xml_url)
            if outline.outlines:
                collect(outline)

    collect(opml_obj)
    return rv
Walk an OPML document to extract the list of feeds it contains.
22,820
def QA_indicator_DMI(DataFrame, M1=14, M2=6):
    HIGH = DataFrame.high
    LOW = DataFrame.low
    CLOSE = DataFrame.close
    OPEN = DataFrame.open
    TR = SUM(MAX(MAX(HIGH - LOW, ABS(HIGH - REF(CLOSE, 1))),
                 ABS(LOW - REF(CLOSE, 1))), M1)
    HD = HIGH - REF(HIGH, 1)
    LD = REF(LOW, 1) - LOW
    DMP = SUM(IFAND(HD > 0, HD > LD, HD, 0), M1)
    DMM = SUM(IFAND(LD > 0, LD > HD, LD, 0), M1)
    DI1 = DMP * 100 / TR
    DI2 = DMM * 100 / TR
    ADX = MA(ABS(DI2 - DI1) / (DI1 + DI2) * 100, M2)
    ADXR = (ADX + REF(ADX, M2)) / 2
    # Dict keys were stripped from this sample; named after the variables they hold.
    return pd.DataFrame({
        'DI1': DI1, 'DI2': DI2, 'ADX': ADX, 'ADXR': ADXR
    })
Trend indicator: DMI (Directional Movement Index)
22,821
def letternum(letter):
    if not isinstance(letter, str):
        raise TypeError("Invalid letter provided.")
    if not len(letter) == 1:
        raise ValueError("Invalid letter length provided.")
    letter = letter.lower()
    alphaletters = string.ascii_lowercase
    for i in range(len(alphaletters)):
        if letter[0] == alphaletters[i]:
            return i + 1
Get the number corresponding to a letter (a=1 ... z=26)
22,822
def stringpatterns(table, field):
    counter = stringpatterncounter(table, field)
    # The header tuple was stripped from this sample; reconstructed from the
    # docstring's example table.
    output = [('pattern', 'count', 'frequency')]
    counter = counter.most_common()
    total = sum(c[1] for c in counter)
    cnts = [(c[0], c[1], float(c[1]) / total) for c in counter]
    output.extend(cnts)
    return wrap(output)
Profile string patterns in the given field, returning a table of patterns, counts and frequencies. E.g.::

>>> import petl as etl
>>> table = [['foo', 'bar'],
...          ['Mr. Foo', '123-1254'],
...          ['Mrs. Bar', '234-1123'],
...          ['Mr. Spo', '123-1254'],
...          [u'Mr. Baz', u'321 1434'],
...          [u'Mrs. Baz', u'321 1434'],
...          ['Mr. Quux', '123-1254-XX']]
>>> etl.stringpatterns(table, 'foo')
+------------+-------+---------------------+
| pattern    | count | frequency           |
+============+=======+=====================+
| 'Aa. Aaa'  | 3     | 0.5                 |
+------------+-------+---------------------+
| 'Aaa. Aaa' | 2     | 0.3333333333333333  |
+------------+-------+---------------------+
| 'Aa. Aaaa' | 1     | 0.16666666666666666 |
+------------+-------+---------------------+
>>> etl.stringpatterns(table, 'bar')
+---------------+-------+---------------------+
| pattern       | count | frequency           |
+===============+=======+=====================+
| '999-9999'    | 3     | 0.5                 |
+---------------+-------+---------------------+
| '999 9999'    | 2     | 0.3333333333333333  |
+---------------+-------+---------------------+
| '999-9999-AA' | 1     | 0.16666666666666666 |
+---------------+-------+---------------------+
22,823
def return_port(port):
    if port in _random_ports:
        _random_ports.remove(port)
    elif port in _owned_ports:
        _owned_ports.remove(port)
        _free_ports.add(port)
    elif port in _free_ports:
        logging.info("Returning a port that was already returned: %s", port)
    else:
        logging.info("Returning a port that wasn't given by portpicker: %s", port)
Return a port that is no longer being used so it can be reused.
22,824
def delete_credential(self, identifier, credential_id=None):
    # The key and the service/method string literals were stripped from this
    # sample; 'id' and SoftLayer's Cleversafe account service are assumptions.
    credential = {'id': credential_id}
    return self.client.call('SoftLayer_Network_Storage_Hub_Cleversafe_Account',
                            'credentialDelete', credential, id=identifier)
Delete the object storage credential. :param int identifier: The object storage account identifier. :param int credential_id: The credential id to be deleted.
22,825
def extract_data(self):
    data = PackageData(
        local_file=self.local_file,
        name=self.name,
        pkg_name=self.rpm_name or self.name_convertor.rpm_name(
            self.name, pkg_name=True),
        version=self.version,
        srcname=self.srcname)
    with self.archive:
        data.set_from(self.data_from_archive)
    if self.venv_extraction_disabled and getattr(data, "packages") == []:
        data.packages = [data.name]
    return data
Extracts data from archive. Returns: PackageData object containing the extracted data.
22,826
def import_committees(src):
    committees = []
    subcommittees = []
    with open("{0}/{1}/committees-current.yaml".format(src, LEGISLATOR_DIR), 'r') as stream:
        committees += yaml.load(stream)
    with open("{0}/{1}/committees-historical.yaml".format(src, LEGISLATOR_DIR), 'r') as stream:
        committees += yaml.load(stream)
    committees_df = pd.DataFrame(committees)
    subcommittees_df = pd.DataFrame(subcommittees)
    return [committees_df, subcommittees_df]
Read the committees from the YAML files into a single DataFrame. Intended for importing new data.
22,827
def to_uint(self):
    num = 0
    for i, f in enumerate(self._items):
        if f.is_zero():
            pass
        elif f.is_one():
            num += 1 << i
        else:
            fstr = "expected all functions to be a constant (0 or 1) form"
            raise ValueError(fstr)
    return num
Convert vector to an unsigned integer, if possible. This is only useful for arrays filled with zero/one entries.
22,828
def _connect(self):
    future = concurrent.Future()
    try:
        connection = self._pool_manager.get(self.pid, self)
        self._connections[connection.fileno()] = connection
        future.set_result(connection)
        self._ioloop.add_handler(connection.fileno(),
                                 self._on_io_events,
                                 ioloop.IOLoop.WRITE)
    except pool.NoIdleConnectionsError:
        self._create_connection(future)
    return future
Connect to PostgreSQL, either by reusing a connection from the pool if possible, or by creating the new connection. :rtype: psycopg2.extensions.connection :raises: pool.NoIdleConnectionsError
22,829
def finish(self):
    self.lines.reverse()
    # The join separator was stripped from this sample; '' is assumed
    # (lines presumably keep their own newlines).
    self._content = ''.join(self.lines)
    self.lines = None
Creates block of content with lines belonging to fragment.
22,830
def _map_arg(arg):
    if isinstance(arg, _ast.Str):
        return repr(arg.s)
    elif isinstance(arg, _ast.Num):
        return arg.n
    elif isinstance(arg, _ast.Name):
        name = arg.id
        # Name literals reconstructed from the constants they map to.
        if name == 'True':
            return True
        elif name == 'False':
            return False
        elif name == 'None':
            return None
        return name
    else:
        return Unparseable()
Return `arg` appropriately parsed or mapped to a usable value.
22,831
def closest_point(mesh, points):
    points = np.asanyarray(points, dtype=np.float64)
    if not util.is_shape(points, (-1, 3)):
        # Message literal stripped from this sample; reconstructed from the shape check.
        raise ValueError('points must be (n, 3)!')
    candidates = nearby_faces(mesh, points)
    triangles = mesh.triangles.view(np.ndarray)
    query_point = deque()
    query_tri = deque()
    for triangle_ids, point in zip(candidates, points):
        query_point.append(np.tile(point, (len(triangle_ids), 1)))
        query_tri.append(triangles[triangle_ids])
    query_point = np.vstack(query_point)
    query_tri = np.vstack(query_tri)
    query_close = closest_point_corresponding(query_tri, query_point)
    query_group = np.cumsum(np.array([len(i) for i in candidates]))[:-1]
    distance_2 = ((query_close - query_point) ** 2).sum(axis=1)
    result_close = np.zeros((len(points), 3), dtype=np.float64)
    result_tid = np.zeros(len(points), dtype=np.int64)
    result_distance = np.zeros(len(points), dtype=np.float64)
    for i, close_points, distance, candidate in zip(
            np.arange(len(points)),
            np.array_split(query_close, query_group),
            np.array_split(distance_2, query_group),
            candidates):
        idx = distance.argmin()
        if len(candidate) > 1:
            idxs = distance.argsort()[:2]
            check_distance = distance[idxs].ptp() < tol.merge
            check_magnitude = (np.abs(distance[idxs]) > tol.merge).all()
            if check_distance and check_magnitude:
                normals = mesh.face_normals[np.array(candidate)[idxs]]
                vectors = ((points[i] - close_points[idxs]) /
                           distance[idxs, np.newaxis] ** 0.5)
                dots = util.diagonal_dot(normals, vectors)
                idx = idxs[dots.argmax()]
        result_close[i] = close_points[idx]
        result_tid[i] = candidate[idx]
        result_distance[i] = distance[idx]
    result_distance **= .5
    return result_close, result_distance, result_tid
Given a mesh and a list of points, find the closest point on any triangle. Parameters ---------- mesh : Trimesh object points : (m,3) float, points in space Returns ---------- closest : (m,3) float, closest point on triangles for each point distance : (m,) float, distance triangle_id : (m,) int, index of triangle containing closest point
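A hedged usage sketch with trimesh (assumes trimesh is installed; this functionality is also exposed as `mesh.nearest.on_surface` in trimesh's proximity module):

import numpy as np
import trimesh

mesh = trimesh.creation.icosphere()
points = np.array([[2.0, 0.0, 0.0], [0.0, 0.5, 0.0]])
closest, distance, tri_id = mesh.nearest.on_surface(points)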
22,832
async def start(self):
    try:
        self.reader, self.writer = await asyncio.open_connection(
            self.ip_address, self.port, loop=self.loop)
    except OSError:
        print("Can't open connection to " + self.ip_address)
        sys.exit(0)
This method opens an IP connection on the IP device :return: None
22,833
def resolvefaults(self, definitions, op):
    ptop = self.type.operation(op.name)
    if ptop is None:
        # The %s placeholders were stripped from these format strings; reconstructed.
        raise Exception("operation '%s' not defined in portType" % op.name)
    soap = op.soap
    for fault in soap.faults:
        for f in ptop.faults:
            if f.name == fault.name:
                fault.parts = f.message.parts
                continue
        # Attribute name stripped from this sample; 'parts' matches the assignment above.
        if hasattr(fault, 'parts'):
            continue
        raise Exception("fault '%s' not defined in portType '%s'"
                        % (fault.name, self.type.name))
Resolve soap fault I{message} references by cross-referencing with operation defined in port type. @param definitions: A definitions object. @type definitions: L{Definitions} @param op: An I{operation} object. @type op: I{operation}
22,834
def _source_is_newer(src_fs, src_path, dst_fs, dst_path):
    try:
        if dst_fs.exists(dst_path):
            namespace = ("details", "modified")
            src_modified = src_fs.getinfo(src_path, namespace).modified
            if src_modified is not None:
                dst_modified = dst_fs.getinfo(dst_path, namespace).modified
                return dst_modified is None or src_modified > dst_modified
        return True
    except FSError:
        return True
Determine if source file is newer than destination file. Arguments: src_fs (FS): Source filesystem (instance or URL). src_path (str): Path to a file on the source filesystem. dst_fs (FS): Destination filesystem (instance or URL). dst_path (str): Path to a file on the destination filesystem. Returns: bool: `True` if the source file is newer than the destination file or file modification time cannot be determined, `False` otherwise.
22,835
def forwardSlash(listOfFiles):
    for i, fname in enumerate(listOfFiles):
        listOfFiles[i] = fname.replace("\\", "/")
    return listOfFiles
convert silly C:\\names\\like\\this.txt to c:/names/like/this.txt
22,836
def eval_advs(self, x, y, preds_adv, X_test, Y_test, att_type):
    end = (len(X_test) // self.batch_size) * self.batch_size
    if self.hparams.fast_tests:
        end = 10 * self.batch_size
    acc = model_eval(self.sess, x, y, preds_adv, X_test[:end],
                     Y_test[:end], args=self.eval_params)
    # The label strings were stripped from this sample; plausible reconstructions.
    self.log_value('test_accuracy_%s' % att_type, acc,
                   'Test accuracy on adversarial examples')
    return acc
Evaluate the accuracy of the model on adversarial examples :param x: symbolic input to model. :param y: symbolic variable for the label. :param preds_adv: symbolic variable for the prediction on an adversarial example. :param X_test: NumPy array of test set inputs. :param Y_test: NumPy array of test set labels. :param att_type: name of the attack.
22,837
def mzminus(df, minus=0, noise=10000):
    mol_ions = ((df.values > noise) * df.columns).max(axis=1) - minus
    # The original condition `np.abs(mol_ions) < 0` is never true; clamping
    # negative values is the apparent intent.
    mol_ions[mol_ions < 0] = 0
    d = np.abs(np.ones(df.shape) * df.columns -
               (mol_ions[np.newaxis].T * np.ones(df.shape))) < 1
    d = (df.values * d).sum(axis=1)
    # The name prefix was stripped from this sample; 'm-' is assumed.
    return Trace(d, df.index, name='m-' + str(minus))
The abundances of ions at m/z values `minus` units below the molecular ion.
22,838
def raise_http_error(cls, response):
    response_xml = response.read()
    logging.getLogger().debug(response_xml)
    exc_class = recurly.errors.error_class_for_http_status(response.status)
    raise exc_class(response_xml)
Raise a `ResponseError` of the appropriate subclass in reaction to the given `http_client.HTTPResponse`.
22,839
def daemonize_posix(self):
    # Log and error message literals were stripped from this sample; the
    # messages below are plausible reconstructions of the double-fork pattern.
    logger.info('daemonizing process')
    try:
        pid = os.fork()
        if pid > 0:
            logger.debug('first fork: parent, child pid %s' % (pid,))
            return pid
        logger.debug('first fork: child, pid %s' % (pid,))
    except OSError as error:
        logger.exception('first fork failed')
        sys.stderr.write("fork #1 failed: %s\n" % (error,))
        sys.exit(1)
    os.chdir("/")
    os.setsid()
    os.umask(0)
    try:
        pid = os.fork()
        if pid > 0:
            logger.debug('second fork: parent, child pid %s, exiting' % (pid,))
            sys.exit(0)
    except OSError as error:
        logger.exception('second fork failed')
        sys.stderr.write("fork #2 failed: %s\n" % (error,))
        sys.exit(1)
    logger.info('daemonized, pid %s' % (pid,))
    sys.stdin.flush()
    sys.stdout.flush()
    sys.stderr.flush()
    os.dup2(self.stdin.fileno(), sys.stdin.fileno())
    os.dup2(self.stdout.fileno(), sys.stdout.fileno())
    os.dup2(self.stderr.fileno(), sys.stderr.fileno())
    atexit.register(self.delpid)
    pid = str(os.getpid())
    with open(self.pidfile, 'w+') as fd:
        fd.write("%s\n" % pid)
do the UNIX double-fork magic, see Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177) http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
22,840
def to_utf8(text):
    if not text:
        if isinstance(text, string_types):
            text = ""
        return text
    try:
        return text.encode("utf8")
    except UnicodeDecodeError:
        try:
            if text.startswith(codecs.BOM_UTF8):
                text = text[len(codecs.BOM_UTF8):]
            return text.decode("utf8").encode("utf8")
        except UnicodeDecodeError:
            if text.startswith(codecs.BOM_UTF16_LE):
                encoding = "utf-16le"
                text = text[len(codecs.BOM_UTF16_LE):]
            elif text.startswith(codecs.BOM_UTF16_BE):
                encoding = "utf-16be"
                text = text[len(codecs.BOM_UTF16_BE):]
            else:
                encoding = "cp1252"
            try:
                return text.decode(encoding).encode("utf8")
            except UnicodeDecodeError as exc:
                for line in text.splitlines():
                    try:
                        line.decode(encoding).encode("utf8")
                    except UnicodeDecodeError:
                        log.warn("Cannot transcode the following into UTF8 "
                                 "cause of %s: %r" % (exc, line))
                        break
                return text
Enforce UTF8 encoding.
22,841
from contextlib import contextmanager  # the decorator is implied by the with-statement usage in the docstring

@contextmanager
def setattr_context(obj, **kwargs):
    old_kwargs = dict([(key, getattr(obj, key)) for key in kwargs])
    [setattr(obj, key, val) for key, val in kwargs.items()]
    try:
        yield
    finally:
        [setattr(obj, key, val) for key, val in old_kwargs.items()]
Context manager to temporarily change the values of object attributes while executing a function. Example ------- >>> class Foo: pass >>> f = Foo(); f.attr = 'hello' >>> with setattr_context(f, attr='goodbye'): ... print(f.attr) goodbye >>> print(f.attr) hello
22,842
def commit(
        self,
        message: str,
        files_to_add: typing.Optional[typing.Union[typing.List[str], str]] = None,
        allow_empty: bool = False,
):
    # Log format strings and the env-var name were stripped from this sample;
    # the versions below are plausible reconstructions.
    message = str(message)
    LOGGER.debug('commit message: %s', message)
    files_to_add = self._sanitize_files_to_add(files_to_add)
    LOGGER.debug('files to add: %s', files_to_add)
    if not message:
        LOGGER.error('empty commit message')
        sys.exit(-1)
    if os.getenv('APPVEYOR'):  # a CI environment flag is assumed here
        LOGGER.info('committing from CI; adding skip-CI tag')
        message = self.add_skip_ci_to_commit_msg(message)
    if files_to_add is None:
        self.stage_all()
    else:
        self.reset_index()
        self.stage_subset(*files_to_add)
    if self.index_is_empty() and not allow_empty:
        LOGGER.error('empty commit index')
        sys.exit(-1)
    self.repo.index.commit(message=message)
Commits changes to the repo :param message: first line of the message :type message: str :param files_to_add: files to commit :type files_to_add: optional list of str :param allow_empty: allow dummy commit :type allow_empty: bool
22,843
def load_prefix(s3_loc, success_only=None, recent_versions=None,
                exclude_regex=None, just_sql=False):
    bucket_name, prefix = _get_bucket_and_prefix(s3_loc)
    datasets = _get_common_prefixes(bucket_name, prefix)
    bash_cmd = ''
    for dataset in datasets:
        dataset = _remove_trailing_backslash(dataset)
        try:
            # The S3 URL format string was stripped from this sample;
            # reconstructed from the docstring's location format.
            bash_cmd += get_bash_cmd('s3://{}/{}'.format(bucket_name, dataset),
                                     success_only=success_only,
                                     recent_versions=recent_versions,
                                     exclude_regex=exclude_regex,
                                     just_sql=just_sql)
        except Exception as e:
            # Message format stripped from this sample; plausible reconstruction.
            sys.stderr.write('Error loading {}: {}\n'.format(dataset, str(e)))
    return bash_cmd
Get a bash command which will load every dataset in a bucket at a prefix. For this to work, all datasets must be of the form `s3://$BUCKET_NAME/$PREFIX/$DATASET_NAME/v$VERSION/$PARTITIONS`. Any other formats will be ignored. :param s3_loc: S3 location of the bucket and prefix, e.g. `s3://$BUCKET_NAME/$PREFIX`
22,844
def decipher(self, string):
    # The filter pattern was stripped from this sample; a character-class
    # whitelist over self.chars is assumed.
    string = self.remove_punctuation(string, filter='[^' + self.chars + ']')
    ret = ''
    for i in range(0, len(string), 3):
        ind = tuple([int(string[i + k]) for k in [0, 1, 2]])
        ret += IND2L[ind]
    return ret
Decipher string using Delastelle cipher according to initialised key. Example:: plaintext = Delastelle('APCZ WRLFBDKOTYUQGENHXMIVS').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string. The plaintext will be 1/3 the length of the ciphertext.
22,845
def generate_random_perovskite(lat=None):
    if not lat:
        lat = round(random.uniform(
            3.5, Perovskite_tilting.OCTAHEDRON_BOND_LENGTH_LIMIT * 2), 3)
    A_site = random.choice(Perovskite_Structure.A)
    B_site = random.choice(Perovskite_Structure.B)
    Ci_site = random.choice(Perovskite_Structure.C)
    Cii_site = random.choice(Perovskite_Structure.C)
    while covalent_radii[chemical_symbols.index(A_site)] - \
            covalent_radii[chemical_symbols.index(B_site)] < 0.05 or \
            covalent_radii[chemical_symbols.index(A_site)] - \
            covalent_radii[chemical_symbols.index(B_site)] > 0.5:
        A_site = random.choice(Perovskite_Structure.A)
        B_site = random.choice(Perovskite_Structure.B)
    return crystal(
        [A_site, B_site, Ci_site, Cii_site],
        [(0.5, 0.25, 0.0), (0.0, 0.0, 0.0), (0.0, 0.25, 0.0), (0.25, 0.0, 0.75)],
        spacegroup=62,
        cellpar=[lat * math.sqrt(2), 2 * lat, lat * math.sqrt(2), 90, 90, 90]
    )
This generates a random valid perovskite structure in ASE format. Useful for testing. Binary and organic perovskites are not considered.
22,846
def check_grad(f_df, xref, stepsize=1e-6, tol=1e-6, width=15, style='round',
               out=sys.stdout):
    # The check/cross mark literals were stripped from this sample; assumed.
    CORRECT = u'\u2713'
    INCORRECT = u'\u2717'
    obj, grad = wrap(f_df, xref, size=0)
    x0 = destruct(xref)
    df = grad(x0)
    out.write(tp.header(["Numerical", "Analytic", "Error"],
                        width=width, style=style) + "\n")
    out.flush()

    def parse_error(error):
        failure = "\033[91m"
        passing = "\033[92m"
        warning = "\033[93m"
        end = "\033[0m"
        base = "{}{:0.3e}{}"
        if error < 0.1 * tol:
            return base.format(passing, error, end)
        elif error < tol:
            return base.format(warning, error, end)
        else:
            return base.format(failure, error, end)

    num_errors = 0
    for j in range(x0.size):
        dx = np.zeros(x0.size)
        dx[j] = stepsize
        df_approx = (obj(x0 + dx) - obj(x0 - dx)) / (2 * stepsize)
        df_analytic = df[j]
        abs_error = np.linalg.norm(df_approx - df_analytic)
        error = abs_error if np.allclose(abs_error, 0) else abs_error / \
            (np.linalg.norm(df_analytic) + np.linalg.norm(df_approx))
        num_errors += error >= tol
        errstr = CORRECT if error < tol else INCORRECT
        out.write(tp.row([df_approx, df_analytic,
                          parse_error(error) + ' ' + errstr],
                         width=width, style=style) + "\n")
        out.flush()
    out.write(tp.bottom(3, width=width, style=style) + "\n")
    return num_errors
Compares the numerical gradient to the analytic gradient Parameters ---------- f_df : function The analytic objective and gradient function to check xref : array_like Parameter values to check the gradient at stepsize : float, optional Stepsize for the numerical gradient. Too big and this will poorly estimate the gradient. Too small and you will run into precision issues (default: 1e-6) tol : float, optional Tolerance to use when coloring correct/incorrect gradients (default: 1e-6) width : int, optional Width of the table columns (default: 15) style : string, optional Style of the printed table, see tableprint for a list of styles (default: 'round')
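A usage sketch for check_grad above (assumes numpy as np; the quadratic's gradient is exact, so every row should pass):

import numpy as np

def f_df(x):
    obj = 0.5 * np.sum(x ** 2)   # objective value
    grad = x                     # its analytic gradient
    return obj, grad

num_bad = check_grad(f_df, np.random.randn(5))
print(num_bad)  # 0 expected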
22,847
def _match_real(filename, include, exclude, follow, symlinks):
    # Separator literals reconstructed from the platform check.
    sep = '\\' if util.platform() == "windows" else '/'
    if isinstance(filename, bytes):
        sep = os.fsencode(sep)
    if not filename.endswith(sep) and os.path.isdir(filename):
        filename += sep
    matched = False
    for pattern in include:
        if _fs_match(pattern, filename, sep, follow, symlinks):
            matched = True
            break
    if matched:
        if exclude:
            for pattern in exclude:
                if _fs_match(pattern, filename, sep, follow, symlinks):
                    matched = False
                    break
    return matched
Match real filename includes and excludes.
22,848
def cublasSsyr2(handle, uplo, n, alpha, x, incx, y, incy, A, lda):
    status = _libcublas.cublasSsyr2_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo], n,
                                       ctypes.byref(ctypes.c_float(alpha)),
                                       int(x), incx, int(y), incy,
                                       int(A), lda)
    cublasCheckStatus(status)
Rank-2 operation on real symmetric matrix.
22,849
def update_resource_assignments(self, id_or_uri, resource_assignments, timeout=-1):
    uri = self._client.build_uri(id_or_uri) + "/resource-assignments"
    # The header literals were stripped from this sample; a JSON content type
    # is assumed.
    headers = {'Content-Type': 'application/json'}
    return self._client.patch_request(uri, resource_assignments,
                                      timeout=timeout, custom_headers=headers)
Modifies scope membership by adding or removing resource assignments. Args: id_or_uri: Can be either the resource ID or the resource URI. resource_assignments (dict): A dict object with a list of resource URIs to be added and a list of resource URIs to be removed. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: Updated resource.
22,850
def get_pyxb(self):
    dataone_exception_pyxb = dataoneErrors.error()
    dataone_exception_pyxb.name = self.__class__.__name__
    dataone_exception_pyxb.errorCode = self.errorCode
    dataone_exception_pyxb.detailCode = self.detailCode
    if self.description is not None:
        dataone_exception_pyxb.description = self.description
    dataone_exception_pyxb.traceInformation = self.traceInformation
    if self.identifier is not None:
        dataone_exception_pyxb.identifier = self.identifier
    if self.nodeId is not None:
        dataone_exception_pyxb.nodeId = self.nodeId
    return dataone_exception_pyxb
Generate a DataONE Exception PyXB object. The PyXB object supports directly reading and writing the individual values that may be included in a DataONE Exception.
22,851
def _run_cmd(self, command):
    if self.throttle:
        tables = self.engine.describe_all(False)
        limiter = self.throttle.get_limiter(tables)
    else:
        limiter = None
    self.engine.rate_limit = limiter
    results = self.engine.execute(command)
    if results is None:
        pass
    elif isinstance(results, basestring):
        print(results)
    else:
        with self.display() as ostream:
            formatter = FORMATTERS[self.conf["format"]](
                results, ostream,
                pagesize=self.conf["pagesize"],
                width=self.conf["width"],
            )
            formatter.display()
    print_count = 0
    total = None
    for (cmd_fragment, capacity) in self.engine.consumed_capacities:
        # The source began with total = None; guarded here so the first
        # addition doesn't fail.
        if total is None:
            total = capacity
        else:
            total += capacity
        print(cmd_fragment)
        print(indent(str(capacity)))
        print_count += 1
    if print_count > 1:
        print("TOTAL")
        print(indent(str(total)))
Run a DQL command
22,852
def normalize(url, strip=False):
    "RFC3986 normalize URL & Optionally removing url-query/fragment string"
    if strip:
        p = _urltools.parse(url)
        # The separator literal was stripped from this sample; '://' assumed.
        url = p.scheme + '://' + p.subdomain + p.domain + p.path
    return _urltools.normalize(url)
RFC3986 normalize URL & Optionally removing url-query/fragment string
22,853
def configure_specials_key(self, keyboard):
    special_row = VKeyRow()
    max_length = self.max_length
    i = len(self.rows) - 1
    current_row = self.rows[i]
    special_keys = [VBackKey()]
    if self.allow_uppercase:
        special_keys.append(VUppercaseKey(keyboard))
    if self.allow_special_chars:
        special_keys.append(VSpecialCharKey(keyboard))
    while len(special_keys) > 0:
        first = False
        while len(special_keys) > 0 and len(current_row) < max_length:
            current_row.add_key(special_keys.pop(0), first=first)
            first = not first
        if i > 0:
            i -= 1
            current_row = self.rows[i]
        else:
            break
    if self.allow_space:
        space_length = len(current_row) - len(special_keys)
        special_row.add_key(VSpaceKey(space_length))
    first = True
    while len(special_keys) > 0:
        special_row.add_key(special_keys.pop(0), first=first)
        first = not first
    if len(special_row) > 0:
        self.rows.append(special_row)
Configures specials key if needed. :param keyboard: Keyboard instance this layout belongs to.
22,854
def unget_service(self, reference):
    return self.__framework._registry.unget_service(
        self.__bundle, reference
    )
Disables a reference to the service :return: True if the bundle was using this reference, else False
22,855
def _normalize_lang_attrs(self, text, strip):
    uninitialized = -1
    # (The remainder of this function was truncated in the source sample; the
    #  docstring below describes the intended behavior.)
Remove embedded bracketed attributes. This (potentially) bitwise-ands bracketed attributes together and adds to the end. This is applied to a single alternative at a time -- not to a parenthesized list. It removes all embedded bracketed attributes, logically-ands them together, and places them at the end. However if strip is true, this can indeed remove embedded bracketed attributes from a parenthesized list. Parameters ---------- text : str A Beider-Morse phonetic encoding (in progress) strip : bool Remove the bracketed attributes (and throw away) Returns ------- str A Beider-Morse phonetic code Raises ------ ValueError No closing square bracket
22,856
def _CheckGitkitError(self, raw_response):
    try:
        response = simplejson.loads(raw_response)
        # Key literals reconstructed from the docstring: 4xx codes raise a
        # client error, everything else a server error.
        if 'error' not in response:
            return response
        else:
            error = response['error']
            if 'code' in error:
                code = error['code']
                if str(code).startswith('4'):
                    raise errors.GitkitClientError(error['message'])
                else:
                    raise errors.GitkitServerError(error['message'])
    except simplejson.JSONDecodeError:
        pass
    # The final message literal was stripped from this sample; assumed.
    raise errors.GitkitServerError('null error code from Gitkit server')
Raises error if API invocation failed. Args: raw_response: string, the http response. Raises: GitkitClientError: if the error code is 4xx. GitkitServerError: if the response if malformed. Returns: Successful response as dict.
22,857
def unprotect_response(self, response, **kwargs):
    body = response.content
    # (The statements that parsed `body` into a JWS object were lost from this
    #  sample; `jws` and `data` below come from that missing code.)
    self.server_signature_key.verify(signature=_b64_to_bstr(jws.signature),
                                     data=data)
    decrypted = self._unprotect_payload(jws.payload)
    response._content = decrypted
    # Header literals stripped from this sample; a JSON content type is assumed.
    response.headers['Content-Type'] = 'application/json'
    return response
Removes protection from the specified response :param response: response from the key vault service :return: unprotected response with any security protocol encryption removed
22,858
def write_recording(recording, save_path):
    assert HAVE_MREX, "To use the MEArec extractors, install MEArec: \n\n pip install MEArec\n\n"
    save_path = Path(save_path)
    if save_path.is_dir():
        print("The file will be saved as recording.h5 in the provided folder")
        save_path = save_path / 'recording.h5'
    if save_path.suffix == '.h5' or save_path.suffix == '.hdf5':
        # Dict key literals were stripped from this sample; the MEArec-style
        # field names below are assumptions.
        info = {'recordings': {'fs': recording.get_sampling_frequency()}}
        rec_dict = {'recordings': recording.get_traces()}
        if 'location' in recording.get_channel_property_names():
            positions = np.array([recording.get_channel_property(chan, 'location')
                                  for chan in recording.get_channel_ids()])
            rec_dict['channel_positions'] = positions
        recgen = mr.RecordingGenerator(rec_dict=rec_dict, info=info)
        mr.save_recording_generator(recgen, str(save_path), verbose=False)
    else:
        raise Exception("Provide a folder or an .h5/.hdf5 as 'save_path'")
Save recording extractor to MEArec format. Parameters ---------- recording: RecordingExtractor Recording extractor object to be saved save_path: str .h5 or .hdf5 path
22,859
def record_evaluation(eval_result):
    if not isinstance(eval_result, dict):
        # Message literal stripped from this sample; plausible reconstruction.
        raise TypeError('eval_result should be a dictionary')
    eval_result.clear()

    def _init(env):
        for data_name, _, _, _ in env.evaluation_result_list:
            eval_result.setdefault(data_name, collections.defaultdict(list))

    def _callback(env):
        if not eval_result:
            _init(env)
        for data_name, eval_name, result, _ in env.evaluation_result_list:
            eval_result[data_name][eval_name].append(result)

    _callback.order = 20
    return _callback
Create a callback that records the evaluation history into ``eval_result``. Parameters ---------- eval_result : dict A dictionary to store the evaluation results. Returns ------- callback : function The callback that records the evaluation history into the passed dictionary.
22,860
def resizeEvent(self, event):
    super(XNavigationEdit, self).resizeEvent(event)
    w = self.width()
    h = self.height()
    self._scrollWidget.resize(w - 4, h - 4)
    if self._scrollWidget.width() < self._partsWidget.width():
        self.scrollParts(self._partsWidget.width() - self._scrollWidget.width())
Resizes the current widget and its parts widget. :param event | <QResizeEvent>
22,861
def _save_pys(self, filepath):
    try:
        with Bz2AOpen(filepath, "wb", main_window=self.main_window) as outfile:
            interface = Pys(self.grid.code_array, outfile)
            interface.from_code_array()
    except (IOError, ValueError) as err:
        try:
            post_command_event(self.main_window, self.StatusBarMsg, text=err)
            return
        except TypeError:
            pass
        return
    return not outfile.aborted
Saves file as pys file and returns True if save success Parameters ---------- filepath: String \tTarget file path for pys file
22,862
def accept(self, message=None, expires_at=None):
    with db.session.begin_nested():
        if self.status != RequestStatus.PENDING:
            raise InvalidRequestStateError(RequestStatus.PENDING)
        self.status = RequestStatus.ACCEPTED
    request_accepted.send(self, message=message, expires_at=expires_at)
Accept request.
22,863
def dict_merge(a, b, path=None): return dict_selective_merge(a, b, b.keys(), path)
merges b into a
22,864
def iter_query_indexes(self):
    if self._table.range_key is None:
        range_key = None
    else:
        range_key = self._table.range_key.name
    yield QueryIndex("TABLE", True, self._table.hash_key.name, range_key)
    for index in self._table.indexes:
        yield QueryIndex.from_table_index(self._table, index)
    for index in self._table.global_indexes:
        yield QueryIndex.from_table_index(self._table, index)
Iterator that constructs :class:`~dql.models.QueryIndex` for all global and local indexes, and a special one for the default table hash & range key with the name 'TABLE'
22,865
def settings(self):
    # The endpoint, guard key, and id field were stripped from this sample;
    # the values below are assumptions based on the docstring.
    result = self.client.get('/account/settings')
    if 'managed' not in result:
        raise UnexpectedResponseError('Unexpected response when getting account settings!',
                                      json=result)
    s = AccountSettings(self.client, result['managed'], result)
    return s
Returns the account settings data for this account. This is not a listing endpoint.
22,866
def set_doc(self, doc: str):
    self.__doc__ = doc
    # The attribute name and the reference format were stripped from this
    # sample; 'module' matches the use below, and `self.qualname` is a
    # hypothetical stand-in for the owner-qualified key.
    if hasattr(self, 'module'):
        ref = f'{self.qualname}'
        self.module.__dict__['__test__'][ref] = doc
Assign the given docstring to the property instance and, if possible, to the `__test__` dictionary of the module of its owner class.
22,867
def save_screenshot(driver, name):
    # Literals reconstructed from the docstring: the env var is SCREENSHOT_DIR
    # and the attribute checked is the save_screenshot method.
    if hasattr(driver, 'save_screenshot'):
        screenshot_dir = os.environ.get('SCREENSHOT_DIR')
        if not screenshot_dir:
            LOGGER.warning('SCREENSHOT_DIR is not set; not saving a screenshot')
            return
        elif not os.path.exists(screenshot_dir):
            os.makedirs(screenshot_dir)
        image_name = os.path.join(screenshot_dir, name + '.png')
        driver.save_screenshot(image_name)
    else:
        msg = (
            u"Browser does not support screenshots. "
            u"Could not save screenshot '{name}'"
        ).format(name=name)
        LOGGER.warning(msg)
Save a screenshot of the browser. The location of the screenshot can be configured by the environment variable `SCREENSHOT_DIR`. If the variable is not set, no screenshot is saved and a warning is logged. Args: driver (selenium.webdriver): The Selenium-controlled browser. name (str): A name for the screenshot, which will be used in the output file name. Returns: None
22,868
def dataframe(self):
    temp_index = self._index
    rows = []
    indices = []
    if not self._season:
        return None
    for season in self._season:
        self._index = self._season.index(season)
        rows.append(self._dataframe_fields())
        indices.append(season)
    self._index = temp_index
    return pd.DataFrame(rows, index=[indices])
Returns a ``pandas DataFrame`` containing all other relevant class properties and values where each index is a different season plus the career stats.
22,869
def build_result(data):
    more = {}
    for key, value in data.items():
        # The literal key was stripped from this sample; RESULT_NODE_KEY is a
        # hypothetical stand-in for the one key whose value is a node list.
        if key != RESULT_NODE_KEY:
            newnode = value
        else:
            newnode = {}
            for el in value:
                nkey, nvalue = process_node(el)
                newnode[nkey] = nvalue
        more[key] = newnode
    return more
Create a dictionary with the contents of result.json
22,870
def cache_clear(self):
    for f in os.listdir(self.directory):
        f = os.path.join(self.directory, f)
        if os.path.isfile(f):
            os.remove(f)
        elif os.path.isdir(f):
            shutil.rmtree(f)
    self.cache = {}
Clear local cache by deleting all cached resources and their downloaded files.
22,871
def read_cifar10(filename_queue):
    class CIFAR10Record(object):
        pass

    result = CIFAR10Record()
    label_bytes = 1
    result.height = 32
    result.width = 32
    result.depth = 3
    image_bytes = result.height * result.width * result.depth
    record_bytes = label_bytes + image_bytes
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    result.key, value = reader.read(filename_queue)
    record_bytes = tf.decode_raw(value, tf.uint8)
    result.label = tf.cast(
        tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)
    depth_major = tf.reshape(
        tf.strided_slice(record_bytes, [label_bytes],
                         [label_bytes + image_bytes]),
        [result.depth, result.height, result.width])
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])
    return result
Reads and parses examples from CIFAR10 data files. Recommendation: if you want N-way read parallelism, call this function N times. This will give you N independent Readers reading different files & positions within those files, which will give better mixing of examples. Args: filename_queue: A queue of strings with the filenames to read from. Returns: An object representing a single example, with the following fields: height: number of rows in the result (32) width: number of columns in the result (32) depth: number of color channels in the result (3) key: a scalar string Tensor describing the filename & record number for this example. label: an int32 Tensor with the label in the range 0..9. uint8image: a [height, width, depth] uint8 Tensor with the image data
22,872
def create_group(self, attrs, members, folder_id=None, tags=None):
    cn = {}
    # Element/attribute name literals were stripped from this sample; the
    # 'm'/'l'/'tn'/'a'/'n'/'_content' names follow Zimbra's ContactSpec and
    # are assumptions.
    cn['m'] = members
    if folder_id:
        cn['l'] = str(folder_id)
    if tags:
        cn['tn'] = tags
    attrs = [{'n': k, '_content': v} for k, v in attrs.items()]
    attrs.append({'n': 'type', '_content': 'group'})
    cn['a'] = attrs
    resp = self.request_single('CreateContact', {'cn': cn})
    return zobjects.Contact.from_dict(resp)
Create a contact group XML example : <cn l="7"> ## ContactSpec <a n="lastName">MARTIN</a> <a n="firstName">Pierre</a> <a n="email">[email protected]</a> </cn> Which would be in zimsoap : attrs = { 'lastname': 'MARTIN', 'firstname': 'Pierre', 'email': '[email protected]' } folder_id = 7 :param folder_id: a string of the ID's folder where to create contact. Default '7' :param tags: comma-separated list of tag names :param members: list of dict. Members with their type. Example {'type': 'I', 'value': '[email protected]'}. :param attrs: a dictionary of attributes to set ({key:value,...}). At least one attr is required :returns: the created zobjects.Contact
22,873
def to_dict(self):
    # Dict keys were stripped from this sample; named after the attributes
    # they expose.
    return {
        'task_name': self.task_name,
        'dag_name': self.dag_name,
        'workflow_name': self.workflow_name,
        'workflow_id': self.workflow_id,
        'worker_hostname': self.worker_hostname
    }
Return the task context content as a dictionary.
22,874
def _find_pair(self, protocol, remote_candidate):
    for pair in self._check_list:
        if (pair.protocol == protocol and
                pair.remote_candidate == remote_candidate):
            return pair
    return None
Find a candidate pair in the check list.
22,875
def copytree(source_directory, destination_directory, ignore=None):
    if os.path.isdir(source_directory):
        if not os.path.isdir(destination_directory):
            os.makedirs(destination_directory)
        files = os.listdir(source_directory)
        if ignore is not None:
            ignored = ignore(source_directory, files)
        else:
            ignored = set()
        for f in files:
            if f not in ignored:
                copytree(
                    os.path.join(source_directory, f),
                    os.path.join(destination_directory, f),
                    ignore
                )
    else:
        shutil.copyfile(source_directory, destination_directory)
Recursively copy the contents of a source directory into a destination directory. Both directories must exist. This function does not copy the root directory ``source_directory`` into ``destination_directory``. Since ``shutil.copytree(src, dst)`` requires ``dst`` not to exist, we cannot use for our purposes. Code adapted from http://stackoverflow.com/a/12686557 :param string source_directory: the source directory, already existing :param string destination_directory: the destination directory, already existing
22,876
def _calculate(cls):
    percentages = {
        "up": PyFunceble.INTERN["counter"]["number"]["up"],
        "down": PyFunceble.INTERN["counter"]["number"]["down"],
        "invalid": PyFunceble.INTERN["counter"]["number"]["invalid"],
    }
    for percentage in percentages:
        calculation = (
            percentages[percentage]
            * 100
            // PyFunceble.INTERN["counter"]["number"]["tested"]
        )
        PyFunceble.INTERN["counter"]["percentage"].update({percentage: calculation})
Calculate the percentage of each status.
22,877
def _get_paging_controls(request):
    # Query-key literals reconstructed from the variables they populate; the
    # debug messages were stripped from this sample and are assumptions.
    start = request.url.query.get('start', None)
    limit = request.url.query.get('limit', None)
    controls = {}
    if limit is not None:
        try:
            controls['limit'] = int(limit)
        except ValueError:
            LOGGER.debug('Request query had an invalid limit: %s', limit)
            raise errors.CountInvalid()
        if controls['limit'] <= 0:
            LOGGER.debug('Request query had a non-positive limit: %s', limit)
            raise errors.CountInvalid()
    if start is not None:
        controls['start'] = start
    return controls
Parses start and/or limit queries into a paging controls dict.
22,878
def _check_raising_stopiteration_in_generator_next_call(self, node):
    def _looks_like_infinite_iterator(param):
        inferred = utils.safe_infer(param)
        if inferred:
            return inferred.qname() in KNOWN_INFINITE_ITERATORS
        return False

    if isinstance(node.func, astroid.Attribute):
        # A method call named next, not the next builtin.
        return
    inferred = utils.safe_infer(node.func)
    if getattr(inferred, "name", "") == "next":
        frame = node.frame()
        # A second argument to next() acts as a sentinel value.
        has_sentinel_value = len(node.args) > 1
        if (
            isinstance(frame, astroid.FunctionDef)
            and frame.is_generator()
            and not has_sentinel_value
            and not utils.node_ignores_exception(node, StopIteration)
            and not _looks_like_infinite_iterator(node.args[0])
        ):
            self.add_message("stop-iteration-return", node=node)
Check if a StopIteration exception is raised by the call to next function If the next value has a default value, then do not add message. :param node: Check to see if this Call node is a next function :type node: :class:`astroid.node_classes.Call`
22,879
def is_open(location, now=None):
    if now is None:
        now = get_now()
    if has_closing_rule_for_now(location):
        return False
    now_time = datetime.time(now.hour, now.minute, now.second)
    if location:
        ohs = OpeningHours.objects.filter(company=location)
    else:
        ohs = Company.objects.first().openinghours_set.all()
    for oh in ohs:
        is_open = False
        # Standard case: opens and closes on the same day.
        if (oh.weekday == now.isoweekday() and
                oh.from_hour <= now_time and
                now_time <= oh.to_hour):
            is_open = oh
        # Open through midnight (closing time is past 23:59:59).
        if (oh.weekday == now.isoweekday() and
                oh.from_hour <= now_time and
                ((oh.to_hour < oh.from_hour) and
                 (now_time < datetime.time(23, 59, 59)))):
            is_open = oh
        # Opened yesterday and still open past midnight.
        if (oh.weekday == (now.isoweekday() - 1) % 7 and
                oh.from_hour >= now_time and
                oh.to_hour >= now_time and
                oh.to_hour < oh.from_hour):
            is_open = oh
        if is_open is not False:
            return oh
    return False
Is the company currently open? Pass "now" to test with a specific timestamp. Can be used stand-alone or as a helper.
22,880
def _add_request_parameters(func):
    async def decorated_func(*args, handle_ratelimit=None, max_tries=None,
                             request_timeout=None, **kwargs):
        return await func(*args, handle_ratelimit=handle_ratelimit,
                          max_tries=max_tries,
                          request_timeout=request_timeout, **kwargs)
    return decorated_func
Adds the ratelimit and request timeout parameters to a function.
22,881
def reduce_annotations(self, annotations, options):
    getfnc_qual_ev = options.getfnc_qual_ev()
    return [nt for nt in annotations
            if getfnc_qual_ev(nt.Qualifier, nt.Evidence_Code)]
Reduce annotations to ones used to identify enrichment (normally exclude ND and NOT).
22,882
def load_obj(self, jref, getter=None, parser=None):
    obj = self.__resolver.resolve(jref, getter)
    # The temporary key and message literals were stripped from this sample;
    # '_tmp_' and the messages below are assumptions. The version strings
    # follow the Swagger 1.2 / 2.0 split implied by the two context classes.
    tmp = {'_tmp_': {}}
    version = utils.get_swagger_version(obj)
    if version == '1.2':
        with ResourceListContext(tmp, '_tmp_') as ctx:
            ctx.parse(obj, jref, self.__resolver, getter)
    elif version == '2.0':
        with SwaggerContext(tmp, '_tmp_') as ctx:
            ctx.parse(obj)
    elif version is None and parser:
        with parser(tmp, '_tmp_') as ctx:
            ctx.parse(obj)
        version = tmp['_tmp_'].__swagger_version__ \
            if hasattr(tmp['_tmp_'], '__swagger_version__') else version
    else:
        raise NotImplementedError(
            'unsupported version: {0}, {1}'.format(version, jref))
    if not tmp['_tmp_']:
        raise Exception('Unable to parse object from {0}'.format(jref))
    logger.info('version: {0}'.format(version))
    return tmp['_tmp_'], version
load a object(those in spec._version_.objects) from a JSON reference.
22,883
def process_alias_create_namespace(namespace):
    namespace = filter_alias_create_namespace(namespace)
    _validate_alias_name(namespace.alias_name)
    _validate_alias_command(namespace.alias_command)
    _validate_alias_command_level(namespace.alias_name, namespace.alias_command)
    _validate_pos_args_syntax(namespace.alias_name, namespace.alias_command)
Validate input arguments when the user invokes 'az alias create'. Args: namespace: argparse namespace object.
22,884
def verify(self):
    # The f-string message bodies were stripped from this sample; the messages
    # below are plausible reconstructions.
    if self.firstdate >= self.lastdate:
        raise ValueError(
            f'The given first date ({self.firstdate}) is not earlier '
            f'than the given last date ({self.lastdate}).')
    if (self.lastdate - self.firstdate) % self.stepsize:
        raise ValueError(
            f'The interval between the first date ({self.firstdate}) '
            f'and the last date ({self.lastdate}) is not a whole '
            f'multiple of the step size ({self.stepsize}).')
Raise an |ValueError| if the dates or the step size of the time frame are inconsistent.
22,885
def set_entry(self, filename, obj):
    self.entries[filename] = obj
    self.dirty = True
Set the entry.
22,886
def add(self, event, pk, ts=None, ttl=None):
    key = self._keygen(event, ts)
    try:
        self._zadd(key, pk, ts, ttl)
        return True
    except redis.ConnectionError as e:
        self.logger.error(
            "redis event store failed with connection error %r" % e)
        return False
Add an event to event store. All events were stored in a sorted set in redis with timestamp as rank score. :param event: the event to be added, format should be ``table_action`` :param pk: the primary key of event :param ts: timestamp of the event, default to redis_server's current timestamp :param ttl: the expiration time of event since the last update :return: bool
22,887
def estimator_cov(self, method):
    Y = np.array([reg[self.lags:] for reg in self.data])
    Z = self._create_Z(Y)
    # The method-name literal was stripped from this sample; 'OLS' matches the
    # ols_covariance branch.
    if method == 'OLS':
        sigma = self.ols_covariance()
    else:
        sigma = self.custom_covariance(self.latent_variables.get_z_values())
    return np.kron(np.linalg.inv(np.dot(Z, np.transpose(Z))), sigma)
Creates covariance matrix for the estimators Parameters ---------- method : str Estimation method Returns ---------- A Covariance Matrix
22,888
def group_info(name):
    # String literals below were stripped from this sample; the group
    # categories, pacman command, and messages are reconstructed assumptions.
    pkgtypes = ('mandatory', 'optional', 'default', 'conditional')
    ret = {}
    for pkgtype in pkgtypes:
        ret[pkgtype] = set()
    cmd = ['pacman', '-Sgg', name]
    out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
    for line in salt.utils.itertools.split(out, '\n'):
        if not line:
            continue
        try:
            pkg = line.split()[1]
        except ValueError:
            log.error('Problem parsing line \'%s\'', line)
        else:
            ret['default'].add(pkg)
    for pkgtype in pkgtypes:
        ret[pkgtype] = sorted(ret[pkgtype])
    return ret
.. versionadded:: 2016.11.0

Lists all packages in the specified group

CLI Example:

.. code-block:: bash

    salt '*' pkg.group_info 'xorg'
22,889
def _run_psql(cmd, runas=None, password=None, host=None, port=None, user=None):
    # String literals below were stripped from this sample; the option names,
    # run-as users, and messages are reconstructed from Salt conventions and
    # are assumptions.
    kwargs = {
        'reset_system_locale': False,
        'clean_env': True,
    }
    if runas is None:
        if not host:
            host = __salt__['config.option']('postgres.host')
        if not host or host.startswith('/'):
            if 'FreeBSD' in __grains__['os_family']:
                runas = 'pgsql'
            elif 'OpenBSD' in __grains__['os_family']:
                runas = '_postgresql'
            else:
                runas = 'postgres'
    if user is None:
        user = runas
    if runas:
        kwargs['runas'] = runas
    if password is None:
        password = __salt__['config.option']('postgres.pass')
    if password is not None:
        pgpassfile = salt.utils.files.mkstemp(text=True)
        with salt.utils.files.fopen(pgpassfile, 'w') as fp_:
            fp_.write(salt.utils.stringutils.to_str('{0}:{1}:*:{2}:{3}\n'.format(
                'localhost' if not host or host.startswith('/') else host,
                port if port else '*',
                user if user else '*',
                password,
            )))
        __salt__['file.chown'](pgpassfile, runas, '')
        kwargs['env'] = {'PGPASSFILE': pgpassfile}
    ret = __salt__['cmd.run_all'](cmd, python_shell=False, **kwargs)
    if ret.get('retcode', 0) != 0:
        log.error('Error connecting to PostgreSQL server')
    if password is not None and not __salt__['file.remove'](pgpassfile):
        log.warning('Failed to remove PGPASSFILE')
    return ret
Helper function to call psql, because the password requirement makes this too much code to be repeated in each function below
22,890
def x_axis_properties(self, title_size=None, title_offset=None,
                      label_angle=None, label_align=None, color=None):
    # Axis-name literal reconstructed from the function name.
    self._axis_properties('x', title_size, title_offset, label_angle,
                          label_align, color)
    return self
Change x-axis title font size and label angle Parameters ---------- title_size: int, default None Title size, in px title_offset: int, default None Pixel offset from given axis label_angle: int, default None label angle in degrees label_align: str, default None Label alignment color: str, default None Hex color
22,891
def to_deeper_graph(graph):
    weighted_layer_ids = graph.deep_layer_ids()
    if len(weighted_layer_ids) >= Constant.MAX_LAYERS:
        return None
    deeper_layer_ids = sample(weighted_layer_ids, 1)
    for layer_id in deeper_layer_ids:
        layer = graph.layer_list[layer_id]
        new_layer = create_new_layer(layer, graph.n_dim)
        graph.to_deeper_model(layer_id, new_layer)
    return graph
Return a deeper graph by inserting a new layer after a randomly sampled layer.
22,892
def subscribe(self):
    self.conn("POST",
              "{0}/users/ME/endpoints/{1}/subscriptions".format(self.conn.msgsHost, self.id),
              auth=SkypeConnection.Auth.RegToken,
              json={"interestedResources": ["/v1/threads/ALL",
                                            "/v1/users/ME/contacts/ALL",
                                            "/v1/users/ME/conversations/ALL/messages",
                                            "/v1/users/ME/conversations/ALL/properties"],
                    "template": "raw",
                    "channelType": "httpLongPoll"})
    self.subscribed = True
Subscribe to contact and conversation events. These are accessible through :meth:`getEvents`.
22,893
def get_resource_children(raml_resource): path = raml_resource.path return [res for res in raml_resource.root.resources if res.parent and res.parent.path == path]
Get children of :raml_resource:. :param raml_resource: Instance of ramlfications.raml.ResourceNode.
22,894
def get(self, sent_id, **kwargs):
    if sent_id is not None and not isinstance(sent_id, int):
        sent_id = int(sent_id)
    if sent_id is None or not self.has_id(sent_id):
        # Key literal reconstructed from the docstring: a 'default' kwarg
        # suppresses the KeyError.
        if 'default' in kwargs:
            return kwargs['default']
        else:
            raise KeyError("Invalid sentence ID ({})".format(sent_id))
    return self.__sent_map[sent_id]
If sent_id exists, return the associated sentence object, else return default. If no default is provided, KeyError will be raised.
22,895
def record(self):
    if not self._initialized:
        # Message literal stripped from this sample; plausible reconstruction.
        raise pycdlibexception.PyCdlibInternalError(
            'Extended Attribute Record not initialized')
    # The bytes literals were stripped from this sample; b'XA' and five NUL
    # pad bytes match pycdlib's XA record layout and are assumptions.
    return struct.pack(self.FMT, self._group_id, self._user_id,
                       self._attributes, b'XA', self._filenum, b'\x00' * 5)
Record this Extended Attribute Record. Parameters: None. Returns: A string representing this Extended Attribute Record.
22,896
def supported_auth_methods(self) -> List[str]: return [auth for auth in self.AUTH_METHODS if auth in self.server_auth_methods]
Get all AUTH methods supported both by the server and by us.
22,897
def _get_all_eip_addresses(addresses=None, allocation_ids=None, region=None,
                           key=None, keyid=None, profile=None):
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        return conn.get_all_addresses(addresses=addresses,
                                      allocation_ids=allocation_ids)
    except boto.exception.BotoServerError as e:
        log.error(e)
        return []
Get all EIP's associated with the current credentials. addresses (list) - Optional list of addresses. If provided, only those in the list will be returned. allocation_ids (list) - Optional list of allocation IDs. If provided, only the addresses associated with the given allocation IDs will be returned. returns (list) - The requested Addresses as a list of :class:`boto.ec2.address.Address`
22,898
def set_simple_fault_geometry_3D(w, src):
    assert "simpleFaultSource" in src.tag
    geometry_node = src.nodes[get_taglist(src).index("simpleFaultGeometry")]
    fault_attrs = parse_simple_fault_geometry(geometry_node)
    build_polygon_from_fault_attrs(w, fault_attrs)
Builds a 3D polygon from a node instance
22,899
def _extract_nn_info(self, structure, nns):
    if self.targets is None:
        targets = structure.composition.elements
    else:
        targets = self.targets
    siw = []
    max_weight = max(nn[self.weight] for nn in nns.values())
    for nstats in nns.values():
        # Dict key literals were stripped from this sample; the
        # 'site'/'image'/'weight'/'site_index'/'poly_info' names follow
        # pymatgen's VoronoiNN conventions and are assumptions.
        site = nstats['site']
        if nstats[self.weight] > self.tol * max_weight \
                and self._is_in_targets(site, targets):
            nn_info = {'site': site,
                       'image': self._get_image(structure, site),
                       'weight': nstats[self.weight] / max_weight,
                       'site_index': self._get_original_site(
                           structure, site)}
            if self.extra_nn_info:
                poly_info = nstats
                del poly_info['site']
                nn_info['poly_info'] = poly_info
            siw.append(nn_info)
    return siw
Given Voronoi NNs, extract the NN info in the form needed by NearestNeighbors Args: structure (Structure): Structure being evaluated nns ([dicts]): Nearest neighbor information for a structure Returns: (list of tuples (Site, array, float)): See nn_info