375,900
def non_blocking(func):
    from functools import wraps

    @wraps(func)
    def non_blocking_version(*args, **kwargs):
        t = ReturnThread(target=func, args=args, kwargs=kwargs)
        t.start()
        return t
    return non_blocking_version
Decorator to run a function in a different thread. It can be used to execute a command in a non-blocking way like this:: @non_blocking def add_one(n): print 'starting' import time time.sleep(2) print 'ending' return n+1 thread = add_one(5) # Starts the function result = thread.join() # Waits for it to complete print result
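ReturnThread is not defined in this entry; a minimal sketch of what it might look like (the name and the join()-returns-result behavior are inferred from the decorator and docstring above) is:

import threading

class ReturnThread(threading.Thread):
    # Thread whose join() hands back the target's return value.
    def __init__(self, target=None, args=(), kwargs=None):
        super(ReturnThread, self).__init__()
        self._func, self._args, self._kw = target, args, kwargs or {}
        self._result = None

    def run(self):
        self._result = self._func(*self._args, **self._kw)

    def join(self, timeout=None):
        super(ReturnThread, self).join(timeout)
        return self._result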
375,901
def verify_rsa_sha1(request, rsa_public_key):
    norm_params = normalize_parameters(request.params)
    bs_uri = base_string_uri(request.uri)
    sig_base_str = signature_base_string(request.http_method, bs_uri,
                                         norm_params).encode()
    sig = binascii.a2b_base64(request.signature.encode())
    alg = _jwt_rs1_signing_algorithm()
    key = _prepare_key_plus(alg, rsa_public_key)
    verify_ok = alg.verify(sig_base_str, key, sig)
    if not verify_ok:
        # log message string was stripped in extraction; reconstructed
        log.debug('Verify failed: signature base string: %s', sig_base_str)
    return verify_ok
Verify a RSASSA-PKCS #1 v1.5 base64 encoded signature. Per `section 3.4.3`_ of the spec. Note this method requires the jwt and cryptography libraries. .. _`section 3.4.3`: https://tools.ietf.org/html/rfc5849#section-3.4.3 To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri attribute MUST be an absolute URI whose netloc part identifies the origin server or gateway on which the resource resides. Any Host item of the request argument's headers dict attribute will be ignored. .. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
375,902
def parse_message(message, nodata=False):
    header = read_machine_header(message)
    h_len = __get_machine_header_length(header)
    # the header dict keys were stripped in extraction;
    # 'meta_len' and 'data_len' are plausible guesses
    meta_raw = message[h_len:h_len + header['meta_len']]
    meta = __parse_meta(meta_raw, header)
    data_start = h_len + header['meta_len']
    data = b''
    if not nodata:
        data = __decompress(
            meta,
            message[data_start:data_start + header['data_len']]
        )
    return header, meta, data
Parse df message from bytearray. @message - message data @nodata - do not load data @return - [binary header, metadata, binary data]
375,903
def _get_missing_trees(self, path, root_tree):
    dirpath = posixpath.split(path)[0]
    dirs = dirpath.split('/')
    if not dirs or dirs == ['']:
        return []

    def get_tree_for_dir(tree, dirname):
        for name, mode, id in tree.iteritems():
            if name == dirname:
                obj = self.repository._repo[id]
                if isinstance(obj, objects.Tree):
                    return obj
                else:
                    raise RepositoryError("Cannot create directory %s "
                        "at tree %s as path is occupied and is not a "
                        "Tree" % (dirname, tree))
        return None

    trees = []
    parent = root_tree
    for dirname in dirs:
        tree = get_tree_for_dir(parent, dirname)
        if tree is None:
            tree = objects.Tree()
            dirmode = 040000  # Python 2 octal literal, as in the source
            parent.add(dirmode, dirname, tree.id)
        parent = tree
        trees.append(tree)
    return trees
Creates missing ``Tree`` objects for the given path. :param path: path given as a string. It may be a path to a file node (i.e. ``foo/bar/baz.txt``) or directory path - in that case it must end with slash (i.e. ``foo/bar/``). :param root_tree: ``dulwich.objects.Tree`` object from which we start traversing (should be commit's root tree)
375,904
def compile_datetime(rule):
    if isinstance(rule.value, datetime.datetime):
        return rule
    try:
        return DatetimeRule(datetime.datetime.fromtimestamp(float(rule.value)))
    except (TypeError, ValueError):
        pass
    res = TIMESTAMP_RE.match(str(rule.value))
    if res is not None:
        year, month, day, hour, minute, second = (
            int(n or 0) for n in res.group(*range(1, 7)))
        us_str = (res.group(7) or "0")[:6].ljust(6, "0")
        us_int = int(us_str)
        zonestr = res.group(8)
        # the stripped membership test most likely covered the "no offset"
        # cases; (None, 'Z') is a reconstruction
        zonespl = (0, 0) if zonestr in (None, 'Z') \
            else [int(i) for i in zonestr.split(":")]
        zonediff = datetime.timedelta(minutes=zonespl[0] * 60 + zonespl[1])
        return DatetimeRule(datetime.datetime(
            year, month, day, hour, minute, second, us_int) - zonediff)
    raise ValueError("Wrong datetime format {}".format(rule.value))
Compiler helper method: attempt to compile constant into object representing datetime object to enable relations and thus simple comparisons using Python operators.
375,905
def get_stp_mst_detail_output_msti_port_link_type(self, **kwargs):
    config = ET.Element("config")
    get_stp_mst_detail = ET.Element("get_stp_mst_detail")
    config = get_stp_mst_detail
    output = ET.SubElement(get_stp_mst_detail, "output")
    msti = ET.SubElement(output, "msti")
    instance_id_key = ET.SubElement(msti, "instance-id")
    instance_id_key.text = kwargs.pop('instance_id')
    port = ET.SubElement(msti, "port")
    link_type = ET.SubElement(port, "link-type")
    link_type.text = kwargs.pop('link_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
375,906
def pixel_array_to_image(self, width, height, channels,
                         undefined_on_failure=True, allow_rounding=False):
    if (self.dtype != array.array):
        raise TypeError("array_to_img expects SArray of arrays as input SArray")
    num_to_test = 10
    num_test = min(len(self), num_to_test)
    mod_values = [val % 1 for x in range(num_test) for val in self[x]]
    out_of_range_values = [(val > 255 or val < 0)
                           for x in range(num_test) for val in self[x]]
    if sum(mod_values) != 0.0 and not allow_rounding:
        raise ValueError("There are non-integer values in the array data. "
                         "Images only support integer data values between 0 and 255. "
                         "To permit rounding, set the parameter to 1.")
    if sum(out_of_range_values) != 0:
        raise ValueError("There are values outside the range of 0 to 255. "
                         "Images only support integer data values between 0 and 255.")
    from .. import extensions
    return extensions.vector_sarray_to_image_sarray(
        self, width, height, channels, undefined_on_failure)
Create a new SArray with all the values cast to :py:class:`turicreate.image.Image` of uniform size. Parameters ---------- width: int The width of the new images. height: int The height of the new images. channels: int. Number of channels of the new images. undefined_on_failure: bool , optional , default True If True, return None type instead of Image type in failure instances. If False, raises error upon failure. allow_rounding: bool, optional , default False If True, rounds non-integer values when converting to Image type. If False, raises error upon rounding. Returns ------- out : SArray[turicreate.Image] The SArray converted to the type 'turicreate.Image'. See Also -------- astype, str_to_datetime, datetime_to_str Examples -------- The MNIST data is scaled from 0 to 1, but our image type only loads integer pixel values from 0 to 255. If we just convert without scaling, all values below one would be cast to 0. >>> mnist_array = turicreate.SArray('https://static.turi.com/datasets/mnist/mnist_vec_sarray') >>> scaled_mnist_array = mnist_array * 255 >>> mnist_img_sarray = tc.SArray.pixel_array_to_image(scaled_mnist_array, 28, 28, 1, allow_rounding = True)
375,907
def pytype_to_ctype(t):
    # the C++ template strings below were stripped in extraction and are
    # reconstructed from Pythran's pythonic::types naming conventions
    if isinstance(t, List):
        return 'pythonic::types::list<{0}>'.format(
            pytype_to_ctype(t.__args__[0])
        )
    elif isinstance(t, Set):
        return 'pythonic::types::set<{0}>'.format(
            pytype_to_ctype(t.__args__[0])
        )
    elif isinstance(t, Dict):
        tkey, tvalue = t.__args__
        return 'pythonic::types::dict<{0},{1}>'.format(pytype_to_ctype(tkey),
                                                       pytype_to_ctype(tvalue))
    elif isinstance(t, Tuple):
        return 'decltype(pythonic::types::make_tuple({0}))'.format(
            ", ".join('std::declval<{0}>()'.format(pytype_to_ctype(p))
                      for p in t.__args__)
        )
    elif isinstance(t, NDArray):
        dtype = pytype_to_ctype(t.__args__[0])
        ndim = len(t.__args__) - 1
        shapes = ', '.join(('long' if s.stop == -1 or s.stop is None
                            else 'std::integral_constant<long, {}>'.format(s.stop))
                           for s in t.__args__[1:])
        pshape = 'pythonic::types::pshape<{0}>'.format(shapes)
        arr = 'pythonic::types::ndarray<{0}, {1}>'.format(dtype, pshape)
        if t.__args__[1].start == -1:
            return 'pythonic::types::numpy_texpr<{0}>'.format(arr)
        elif any(s.step is not None and s.step < 0 for s in t.__args__[1:]):
            slices = ", ".join(['pythonic::types::normalized_slice'] * ndim)
            return 'pythonic::types::numpy_gexpr<{0}, {1}>'.format(arr, slices)
        else:
            return arr
    elif isinstance(t, Pointer):
        return 'pythonic::types::pointer<{0}>'.format(
            pytype_to_ctype(t.__args__[0])
        )
    elif isinstance(t, Fun):
        return 'pythonic::types::cfun<{0}({1})>'.format(
            pytype_to_ctype(t.__args__[-1]),
            ", ".join(pytype_to_ctype(arg) for arg in t.__args__[:-1]),
        )
    elif t in PYTYPE_TO_CTYPE_TABLE:
        return PYTYPE_TO_CTYPE_TABLE[t]
    else:
        raise NotImplementedError("{0}:{1}".format(type(t), t))
Python -> pythonic type binding.
375,908
def load_XAML(file_obj, *args, **kwargs):
    # several string literals and a chunk of the parsing loop were lost in
    # extraction; attribute names ('Color', 'Opacity', 'TriangleIndices',
    # 'MatrixTransform3D') are reconstructed guesses from XAML conventions
    def element_to_color(element):
        if element is None:
            return visual.DEFAULT_COLOR
        hexcolor = int(element.attrib['Color'].replace('#', ''), 16)
        opacity = float(element.attrib['Opacity'])
        rgba = [(hexcolor >> 16) & 0xFF,
                (hexcolor >> 8) & 0xFF,
                (hexcolor & 0xFF),
                opacity * 0xFF]
        rgba = np.array(rgba, dtype=np.uint8)
        return rgba

    def element_to_transform(element):
        try:
            matrix = next(element.iter(
                tag=ns + 'MatrixTransform3D')).attrib['Matrix']
            matrix = np.array(matrix.split(),
                              dtype=np.float64).reshape((4, 4)).T
            return matrix
        except StopIteration:
            return np.eye(4)  # reconstructed fallback

    # ... (lines lost in extraction: namespace lookup, XML parsing and the
    # per-geometry loop that defines geometry, g, c_vertices, c_normals,
    # diffuse, transform, vertices, faces, colors and normals) ...
    current = geometry
    transforms = collections.deque()
    while current is not None:
        transform_element = current.find(ns + 'Transform')
        if transform_element is not None:
            transforms.appendleft(element_to_transform(transform_element))
        current = current.getparent()  # reconstructed: walk up the tree
    c_vertices = transformations.transform_points(c_vertices, transform)
    c_faces = np.array(
        g.attrib['TriangleIndices'].replace(',', ' ').split(),
        dtype=np.int64).reshape((-1, 3))
    vertices.append(c_vertices)
    faces.append(c_faces)
    colors.append(np.tile(diffuse, (len(c_faces), 1)))
    normals.append(c_normals)

    result = dict()
    result['vertices'], result['faces'] = util.append_faces(vertices, faces)
    result['face_colors'] = np.vstack(colors)
    result['vertex_normals'] = np.vstack(normals)
    return result
Load a 3D XAML file. Parameters ---------- file_obj : file object Open, containing XAML file Returns ---------- result : dict kwargs for a trimesh constructor, including: vertices: (n,3) np.float64, points in space faces: (m,3) np.int64, indices of vertices face_colors: (m,4) np.uint8, RGBA colors vertex_normals: (n,3) np.float64, vertex normals
375,909
def _do_perform_delete_on_model(self):
    if self._force_deleting:
        return self.with_trashed().where(
            self.get_key_name(), self.get_key()).force_delete()
    return self._run_soft_delete()
Perform the actual delete query on this model instance.
375,910
def get_trace(self, project_id, trace_id):
    trace_pb = self._gapic_api.get_trace(project_id, trace_id)
    trace_mapping = _parse_trace_pb(trace_pb)
    return trace_mapping
Gets a single trace by its ID. Args: trace_id (str): ID of the trace to return. project_id (str): Required. ID of the Cloud project where the trace data is stored. Returns: A Trace dict.
375,911
def applyKeyMapping(self, mapping):
    for coltype, colname in zip(self.columntypes, self.columnnames):
        if coltype in ligolwtypes.IDTypes and (
                self.next_id is None or colname != self.next_id.column_name):
            column = self.getColumnByName(colname)
            for i, old in enumerate(column):
                try:
                    column[i] = mapping[old]
                except KeyError:
                    pass
Used as the second half of the key reassignment algorithm. Loops over each row in the table, replacing references to old row keys with the new values from the mapping.
375,912
def thanksgiving(year, country='usa'):
    # country literals were stripped in extraction;
    # 'usa'/'canada' are reconstructed from the docstring
    if country == 'usa':
        if year in [1940, 1941]:
            return nth_day_of_month(3, THU, NOV, year)
        elif year == 1939:
            return nth_day_of_month(4, THU, NOV, year)
        else:
            return nth_day_of_month(0, THU, NOV, year)
    if country == 'canada':
        return nth_day_of_month(2, MON, OCT, year)
USA: last Thurs. of November, Canada: 2nd Mon. of October
375,913
def download(client, target_dir):
    # the path fragment, dict keys and file mode were stripped in
    # extraction; 'products', 'packageName', 'sku' and '.json' are guesses
    print()
    print("download inappproducts")
    print()
    products = client.list_inappproducts()
    for product in products:
        path = os.path.join(target_dir, 'products')
        del product['packageName']
        mkdir_p(path)
        with open(os.path.join(path, product['sku'] + '.json'), 'w') as outfile:
            print("save product for {0}".format(product['sku']))
            json.dump(product, outfile, sort_keys=True, indent=4,
                      separators=(',', ': '))
Download inappproducts from play store.
375,914
def supported(cls, stream=sys.stdout):
    if not stream.isatty():
        return False
    try:
        import curses
    except ImportError:
        return False
    else:
        try:
            try:
                return curses.tigetnum("colors") > 2
            except curses.error:
                curses.setupterm()
                return curses.tigetnum("colors") > 2
        except:
            raise
    return False  # unreachable in practice; kept from the flattened source
A class method that returns True if the current platform supports coloring terminal output using this method. Returns False otherwise.
375,915
def get_tournament(self, tag: crtag, **params: keys):
    url = self.api.TOURNAMENT + '/' + tag
    return self._get_model(url, **params)
Get a tournament information Parameters ---------- tag: str A valid tournament tag. Minimum length: 3 Valid characters: 0289PYLQGRJCUV \*\*keys: Optional[list] = None Filter which keys should be included in the response \*\*exclude: Optional[list] = None Filter which keys should be excluded from the response \*\*timeout: Optional[int] = None Custom timeout that overwrites Client.timeout
375,916
def loading(self):
    # presumably decorated with @contextlib.contextmanager in the source;
    # the stripped attribute name is assumed to be '_initialized'
    if getattr(self, '_initialized', False):
        raise ValueError("Already loading")
    self._initialized = False
    yield
    self._initialized = True
Context manager for when you need to instantiate entities upon unpacking
375,917
def ast_to_code(ast, indent=0):
    code = []

    def append(line):
        code.append((" " * indent) + line)

    if isinstance(ast, Node):
        append("ast.{}(".format(ast.__class__.__name__))
        indent += 1
        for i, k in enumerate(ast._fields, 1):
            v = getattr(ast, k)
            append("{}={},".format(k, ast_to_code(v, indent)))
        if ast.loc:
            append("loc={}".format(ast_to_code(ast.loc, indent)))
        indent -= 1
        append(")")
    elif isinstance(ast, Loc):
        append("loc({}, {})".format(ast.start, ast.end))
    elif isinstance(ast, list):
        if ast:
            append("[")
            indent += 1
            for i, it in enumerate(ast, 1):
                is_last = i == len(ast)
                append(ast_to_code(it, indent) + ("," if not is_last else ""))
            indent -= 1
            append("]")
        else:
            append("[]")
    else:
        append(repr(ast))
    return "\n".join(code).strip()
Converts an ast into a python code representation of the AST.
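As a rough illustration, for a hypothetical Name node with a single value field the renderer would emit the following (indentation grows one space per nesting level, per the indent arithmetic above):

>>> print(ast_to_code(Name(value='x', loc=Loc(0, 1))))
ast.Name(
 value='x',
 loc=loc(0, 1)
)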
375,918
def list_all(self):
    # the URL and HTTP-method literals were stripped in extraction;
    # 'equipamento/list/' follows the networkapi naming convention
    url = 'equipamento/list/'
    code, xml = self.submit(None, 'GET', url)
    return self.response(code, xml)
Return all equipments in database :return: Dictionary with the following structure: :: {'equipaments': {'name' :< name_equipament >}, {... demais equipamentos ...} } :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
375,919
def calc_time_step(self):
    system = self.system
    config = self.config
    convergence = self.convergence
    niter = self.niter
    t = self.t
    if t == 0:
        self._calc_time_step_first()
        return
    if convergence:
        if niter >= 15:
            config.deltat = max(config.deltat * 0.5, config.deltatmin)
        elif niter <= 6:
            config.deltat = min(config.deltat * 1.1, config.deltatmax)
        else:
            config.deltat = max(config.deltat * 0.95, config.deltatmin)
        if config.fixt:
            config.deltat = min(config.tstep, config.deltat)
    else:
        config.deltat *= 0.9
        if config.deltat < config.deltatmin:
            config.deltat = 0
    if system.Fault.is_time(t) or system.Breaker.is_time(t):
        config.deltat = min(config.deltat, 0.002778)
    elif system.check_event(t):
        config.deltat = min(config.deltat, 0.002778)
    if config.method == 'fwdeuler':  # method name lost in extraction; a guess
        config.deltat = min(config.deltat, config.tstep)
    if self.t + config.deltat > config.tf:
        config.deltat = config.tf - self.t
    for fixed_t in self.fixed_times:
        if (fixed_t > self.t) and (fixed_t <= self.t + config.deltat):
            config.deltat = fixed_t - self.t
            self.switch = True
            break
    self.h = config.deltat
Set the time step during time domain simulations Parameters ---------- convergence: bool truth value of the convergence of the last step niter: int current iteration count t: float current simulation time Returns ------- float computed time step size
375,920
def get_script_args(dist, executable=sys_executable, wininst=False):
    spec = str(dist.as_requirement())
    header = get_script_header("", executable, wininst)
    # string literals below were stripped in extraction and are restored
    # from the classic setuptools easy_install implementation
    for group in 'console_scripts', 'gui_scripts':
        for name, ep in dist.get_entry_map(group).items():
            script_text = (
                "# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r\n"
                "__requires__ = %(spec)r\n"
                "import sys\n"
                "from pkg_resources import load_entry_point\n"
                "\n"
                "if __name__ == '__main__':"
                "\n"
                "    sys.exit(\n"
                "        load_entry_point(%(spec)r, %(group)r, %(name)r)()\n"
                "    )\n"
            ) % locals()
            if sys.platform == 'win32' or wininst:
                if group == 'gui_scripts':
                    ext, launcher = '-script.pyw', 'gui.exe'
                    old = ['.pyw']
                    new_header = re.sub('(?i)python.exe', 'pythonw.exe', header)
                else:
                    ext, launcher = '-script.py', 'cli.exe'
                    old = ['.py', '.pyc', '.pyo']
                    new_header = re.sub('(?i)pythonw.exe', 'python.exe', header)
                if is_64bit():
                    launcher = launcher.replace(".", "-64.")
                else:
                    launcher = launcher.replace(".", "-32.")
                if os.path.exists(new_header[2:-1]) or sys.platform != 'win32':
                    hdr = new_header
                else:
                    hdr = header
                yield (name + ext, hdr + script_text, 't',
                       [name + x for x in old])
                yield (name + '.exe',
                       resource_string('setuptools', launcher), 'b')
            else:
                yield (name, header + script_text)
Yield write_script() argument tuples for a distribution's entrypoints
375,921
def coupling_matrix_2j(j1, j2):
    M1 = [-j1 + i for i in range(2*j1 + 1)]
    M2 = [-j2 + i for i in range(2*j2 + 1)]
    j1j2nums = [(j1, m1, j2, m2) for m1 in M1 for m2 in M2]
    Jper = perm_j(j1, j2)
    jmjnums = [(J, MJ - J) for J in Jper for MJ in range(2*J + 1)]
    U = zeros((2*j1 + 1)*(2*j2 + 1))
    for ii, numj in enumerate(jmjnums):
        j, mj = numj
        for jj, numi in enumerate(j1j2nums):
            j1, m1, j2, m2 = numi
            U[ii, jj] = clebsch_gordan(j1, j2, j, m1, m2, mj)
    return U
ur"""For angular momenta $j_1, j_2$ the unitary transformation from the \ uncoupled basis into the $j = j_1 \oplus j_2$ coupled basis. >>> from sympy import Integer, pprint >>> L = 0 >>> S = 1/Integer(2) >>> pprint(coupling_matrix_2j(L, S)) ⎑1 0⎀ ⎒ βŽ₯ ⎣0 1⎦ >>> L = 1 >>> S = 1/Integer(2) >>> pprint(coupling_matrix_2j(L, S)) ⎑ -√6 √3 ⎀ ⎒0 ──── ── 0 0 0βŽ₯ ⎒ 3 3 βŽ₯ ⎒ βŽ₯ ⎒ -√3 √6 βŽ₯ ⎒0 0 0 ──── ── 0βŽ₯ ⎒ 3 3 βŽ₯ ⎒ βŽ₯ ⎒1 0 0 0 0 0βŽ₯ ⎒ βŽ₯ ⎒ √3 √6 βŽ₯ ⎒0 ── ── 0 0 0βŽ₯ ⎒ 3 3 βŽ₯ ⎒ βŽ₯ ⎒ √6 √3 βŽ₯ ⎒0 0 0 ── ── 0βŽ₯ ⎒ 3 3 βŽ₯ ⎒ βŽ₯ ⎣0 0 0 0 0 1⎦
375,922
def upload_file(self, file_or_path, obj_name=None, content_type=None,
                etag=None, return_none=False, content_encoding=None,
                ttl=None, content_length=None, headers=None):
    return self.create(file_or_path=file_or_path, obj_name=obj_name,
                       content_type=content_type, etag=etag,
                       content_encoding=content_encoding, headers=headers,
                       content_length=content_length, ttl=ttl,
                       return_none=return_none)
Uploads the specified file to this container. If no name is supplied, the file's name will be used. Either a file path or an open file-like object may be supplied. A StorageObject reference to the uploaded file will be returned, unless 'return_none' is set to True. You may optionally set the `content_type` and `content_encoding` parameters; pyrax will create the appropriate headers when the object is stored. If the size of the file is known, it can be passed as `content_length`. If you wish for the object to be temporary, specify the time it should be stored in seconds in the `ttl` parameter. If this is specified, the object will be deleted after that number of seconds. The 'extra_info' parameter is included for backwards compatibility. It is no longer used at all, and will not be modified with swiftclient info, since swiftclient is not used any more.
375,923
def unit(self):
    unit = ncVarUnit(self._ncVar)
    fieldNames = self._ncVar.dtype.names
    # the stripped attribute name is almost certainly '__len__'
    if hasattr(unit, '__len__') and len(unit) == len(fieldNames):
        idx = fieldNames.index(self.nodeName)
        return unit[idx]
    else:
        return unit
Returns the unit attribute of the underlying ncdf variable. If the unit has a length (e.g. is a list) and has precisely one element per field, the unit for this field is returned.
375,924
def detect_protocol(cls, message):
    main = cls._message_to_payload(message)

    def protocol_for_payload(payload):
        if not isinstance(payload, dict):
            return JSONRPCLoose
        # version strings and keys were stripped in extraction; they are
        # restored per JSON-RPC usage
        version = payload.get('jsonrpc')
        if version == '2.0':
            return JSONRPCv2
        if version == '1.0':
            return JSONRPCv1
        if 'result' in payload and 'error' in payload:
            return JSONRPCv1
        return JSONRPCLoose

    if isinstance(main, list):
        parts = set(protocol_for_payload(payload) for payload in main)
        if len(parts) == 1:
            return parts.pop()
        for protocol in (JSONRPCv2, JSONRPCv1):
            if protocol in parts:
                return protocol
        return JSONRPCLoose
    return protocol_for_payload(main)
Attempt to detect the protocol from the message.
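For illustration, assuming this classmethod lives on a JSONRPC base class whose _message_to_payload parses JSON bytes, the rules above behave like:

>>> JSONRPC.detect_protocol(b'{"jsonrpc": "2.0", "method": "ping", "id": 0}')
<class 'JSONRPCv2'>
>>> JSONRPC.detect_protocol(b'{"result": 1, "error": null, "id": 0}')
<class 'JSONRPCv1'>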
375,925
def create_archive(archive, filenames, verbosity=0, program=None,
                   interactive=True):
    util.check_new_filename(archive)
    util.check_archive_filelist(filenames)
    if verbosity >= 0:
        util.log_info("Creating %s ..." % archive)
    res = _create_archive(archive, filenames, verbosity=verbosity,
                          interactive=interactive, program=program)
    if verbosity >= 0:
        util.log_info("... %s created." % archive)
    return res
Create given archive with given files.
375,926
def is_web_url(string):
    assert isinstance(string, basestring)
    parsed_url = urllib.parse.urlparse(string)
    return (
        (
            parsed_url.scheme.lower() == 'http' or
            parsed_url.scheme.lower() == 'https'
        ) and
        parsed_url.netloc
    )
Check to see if string is a validly-formatted web url.
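For example:

>>> bool(is_web_url('https://example.com/page'))
True
>>> bool(is_web_url('ftp://example.com/file'))
False

Note that on success the function actually returns the (truthy) netloc string rather than a strict boolean.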
375,927
def scan_config_argument(ctx, param, value, config_dir=None):
    if callable(config_dir):
        config_dir = config_dir()
    if not value:  # the flattened source read 'config'; 'value' is the parameter
        click.echo("Enter at least one CONFIG")
        click.echo(ctx.get_help(), color=ctx.color)
        ctx.exit()
    if isinstance(value, string_types):
        value = scan_config(value, config_dir=config_dir)
    elif isinstance(value, tuple):
        value = tuple([scan_config(v, config_dir=config_dir) for v in value])
    return value
Validate / translate config name/path values for click config arg. Wrapper on top of :func:`cli.scan_config`.
375,928
def compare(jaide, commands):
    # the color-name literal was stripped in extraction; 'yel' is a guess
    output = color("show | compare:\n", 'yel')
    return output + color_diffs(jaide.compare_config(commands))
Perform a show | compare with some set commands. @param jaide: The jaide connection to the device. @type jaide: jaide.Jaide object @param commands: The set commands to send to the device to compare with. @type commands: str or list @returns: The output from the device. @rtype str
375,929
def offset(self):
    if not self._registry_key and self._registry:
        self._GetKeyFromRegistry()
    if not self._registry_key:
        return None
    return self._registry_key.offset
int: offset of the key within the Windows Registry file or None.
375,930
def collect_results(self) -> Optional[Tuple[int, Dict[str, float]]]:
    self.wait_to_finish()
    if self.decoder_metric_queue.empty():
        if self._results_pending:
            self._any_process_died = True
        self._results_pending = False
        return None
    decoded_checkpoint, decoder_metrics = self.decoder_metric_queue.get()
    assert self.decoder_metric_queue.empty()
    self._results_pending = False
    logger.info("Decoder-%d finished: %s", decoded_checkpoint, decoder_metrics)
    return decoded_checkpoint, decoder_metrics
Returns the decoded checkpoint and the decoder metrics or None if the queue is empty.
375,931
def currentDateTime(self):
    view = self.uiGanttVIEW
    scene = view.scene()
    point = view.mapToScene(0, 0)
    return scene.datetimeAt(point.x())
Returns the current date time for this widget. :return <datetime.datetime>
375,932
def _create_ret_object(self, status=SUCCESS, data=None, error=False,
                       error_message=None, error_cause=None):
    # dict keys were stripped in extraction; 'status', 'data', 'error',
    # 'message' and 'cause' are inferred from the parameter names
    ret = {}
    if status == self.FAILURE:
        ret['status'] = self.FAILURE
    else:
        ret['status'] = self.SUCCESS
    ret['data'] = data
    if error:
        ret['error'] = {}
        if error_message is not None:
            ret['error']['message'] = error_message
        if error_cause is not None:
            ret['error']['cause'] = error_cause
    else:
        ret['error'] = None
    return ret
Create generic response objects. :param str status: The SUCCESS or FAILURE of the request :param obj data: The data to return :param bool error: Set to True to add Error response :param str error_message: The generic error message :param str error_cause: The cause of the error :returns: A dictionary of values
375,933
def __configure_interior(self, *args):
    (size_x, size_y) = (self.interior.winfo_reqwidth(),
                        self.interior.winfo_reqheight())
    self._canvas.config(scrollregion="0 0 {0} {1}".format(size_x, size_y))
    if self.interior.winfo_reqwidth() is not self._canvas.winfo_width():
        self._canvas.config(width=self.interior.winfo_reqwidth())
Private function to configure the interior Frame. :param args: Tkinter event
375,934
def get_connection(self):
    if not self.open:
        raise exc.ResourceClosedError()
    return Connection(self._engine.connect())
Get a connection to this Database. Connections are retrieved from a pool.
375,935
def add(self, nb=1, name=None, xid=None):
    for x in xrange(nb):
        self.count_lock.acquire()
        if self.workers >= self.max_workers:
            self.count_lock.release()
            continue
        self.workers += 1
        if xid is None:
            xid = self.workers
        self.count_lock.release()
        self.kill_event.clear()
        w = WorkerThread(xid, self)
        w.setName(self.get_name(xid, name))
        w.start()
Create one or many workers.
375,936
def fetch(self, wait=0):
    if self.started:
        return fetch(self.id, wait=wait, cached=self.cached)
get the task result objects. :param int wait: how many milliseconds to wait for a result :return: an unsorted list of task objects
375,937
def extend(validator, validators=(), version=None, type_checker=None):
    all_validators = dict(validator.VALIDATORS)
    all_validators.update(validators)
    if type_checker is None:
        type_checker = validator.TYPE_CHECKER
    elif validator._CREATED_WITH_DEFAULT_TYPES:
        raise TypeError(
            "Cannot extend a validator created with default_types "
            "with a type_checker. Update the validator to use a "
            "type_checker when created."
        )
    return create(
        meta_schema=validator.META_SCHEMA,
        validators=all_validators,
        version=version,
        type_checker=type_checker,
        id_of=validator.ID_OF,
    )
Create a new validator class by extending an existing one. Arguments: validator (jsonschema.IValidator): an existing validator class validators (collections.Mapping): a mapping of new validator callables to extend with, whose structure is as in `create`. .. note:: Any validator callables with the same name as an existing one will (silently) replace the old validator callable entirely, effectively overriding any validation done in the "parent" validator class. If you wish to instead extend the behavior of a parent's validator callable, delegate and call it directly in the new validator function by retrieving it using ``OldValidator.VALIDATORS["validator_name"]``. version (str): a version for the new validator class type_checker (jsonschema.TypeChecker): a type checker, used when applying the :validator:`type` validator. If unprovided, the type checker of the extended `jsonschema.IValidator` will be carried along.` Returns: a new `jsonschema.IValidator` class extending the one provided .. note:: Meta Schemas The new validator class will have its parent's meta schema. If you wish to change or extend the meta schema in the new validator class, modify ``META_SCHEMA`` directly on the returned class. Note that no implicit copying is done, so a copy should likely be made before modifying it, in order to not affect the old validator.
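A short usage sketch against the public jsonschema API (Draft7Validator and the validator-callable signature are standard jsonschema; the isPositive keyword is made up for the example):

from jsonschema import Draft7Validator
from jsonschema.exceptions import ValidationError
from jsonschema.validators import extend

def is_positive(validator, value, instance, schema):
    # custom validator callable: yield errors for failing instances
    if value and isinstance(instance, (int, float)) and instance <= 0:
        yield ValidationError("%r is not positive" % (instance,))

PositiveValidator = extend(Draft7Validator,
                           validators={"isPositive": is_positive})
PositiveValidator({"isPositive": True}).validate(5)   # passes silently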
375,938
def list_same_dimensions(self, unit_object):
    equiv = [k for k, v in self.lut.items()
             if v[1] is unit_object.dimensions]
    equiv = list(sorted(set(equiv)))
    return equiv
Return a list of base unit names that this registry knows about that are of equivalent dimensions to *unit_object*.
375,939
def MeshLines(*inputobj, **options):
    scale = options.pop("scale", 1)
    lw = options.pop("lw", 1)
    c = options.pop("c", None)
    alpha = options.pop("alpha", 1)
    mesh, u = _inputsort(inputobj)
    startPoints = mesh.coordinates()
    u_values = np.array([u(p) for p in mesh.coordinates()])
    if not utils.isSequence(u_values[0]):
        printc("~times Error: cannot show Lines for 1D scalar values!", c=1)
        exit()
    endPoints = mesh.coordinates() + u_values
    if u_values.shape[1] == 2:
        u_values = np.insert(u_values, 2, 0, axis=1)
        startPoints = np.insert(startPoints, 2, 0, axis=1)
        endPoints = np.insert(endPoints, 2, 0, axis=1)
    actor = shapes.Lines(startPoints, endPoints, scale=scale,
                         lw=lw, c=c, alpha=alpha)
    actor.mesh = mesh
    actor.u = u
    actor.u_values = u_values
    return actor
Build the line segments between two lists of points `startPoints` and `endPoints`. `startPoints` can be also passed in the form ``[[point1, point2], ...]``. A dolfin ``Mesh`` that was deformed/modified by a function can be passed together as inputs. :param float scale: apply a rescaling factor to the length
375,940
def dt(self, start_node=None):
    if start_node is None:
        return self.root2tree(start_node=start_node)
    elem_id = start_node
    if elem_id not in self.elem_dict:
        return []
    elem = self.elem_dict[elem_id]
    # the stripped key and type literals are assumed to be 'element_type',
    # 'segment' and 'group'
    elem_type = elem['element_type']
    assert elem_type in ('segment', 'group')
    if elem_type == 'segment':
        return self.segment2tree(elem_id, elem, elem_type,
                                 start_node=start_node)
    else:
        return self.group2tree(elem_id, elem, elem_type,
                               start_node=start_node)
main method to create an RSTTree from the output of get_rs3_data(). TODO: add proper documentation
375,941
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
         allow_nan=True, cls=None, indent=None, **kw):
    if cls is None:
        cls = JSONEncoder
    iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
                   check_circular=check_circular, allow_nan=allow_nan,
                   indent=indent, **kw).iterencode(obj)
    for chunk in iterable:
        fp.write(chunk)
Serialize ``obj`` as a JSON formatted stream to ``fp`` (a ``.write()``-supporting file-like object). If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) will be skipped instead of raising a ``TypeError``. If ``ensure_ascii`` is ``False``, then some chunks written to ``fp`` may be ``unicode`` instances, subject to normal Python ``str`` to ``unicode`` coercion rules. Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter()``) this is likely to cause an error. If ``check_circular`` is ``False``, then the circular reference check for container types will be skipped and a circular reference will result in an ``OverflowError`` (or worse). If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in strict compliance of the JSON specification, instead of using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). If ``indent`` is a non-negative integer, then JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. ``None`` is the most compact representation. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the ``.default()`` method to serialize additional types), specify it with the ``cls`` kwarg.
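A minimal round trip, using an in-memory buffer in place of a file:

>>> import io
>>> buf = io.StringIO()
>>> dump({'a': [1, 2]}, buf)
>>> buf.getvalue()
'{"a": [1, 2]}'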
375,942
def shared_databases(self):
    # URL path segments and the response key were stripped in extraction;
    # the values below are guesses following Cloudant's _api/v2 conventions
    endpoint = '/'.join((
        self.server_url, '_api', 'v2', 'user', 'shared_databases'))
    resp = self.r_session.get(endpoint)
    resp.raise_for_status()
    data = response_to_json_dict(resp)
    return data.get('shared_databases', [])
Retrieves a list containing the names of databases shared with this account. :returns: List of database names
375,943
def _file_in_patch(self, filename, patch, ignore):
    file = self.quilt_pc + File(os.path.join(patch.get_name(), filename))
    if file.exists():
        if ignore:
            return True
        else:
            raise QuiltError("File %s is already in patch %s" %
                             (filename, patch.get_name()))
    return False
Checks if a backup file of the filename in the current patch exists
375,944
def _get_magnitude_term(self, C, mag):
    dmag = mag - 8.0
    return C["c0"] + C["c3"] * dmag + C["c4"] * (dmag ** 2.)
Returns the magnitude scaling term provided in Equation (5)
375,945
def matches_to_marker_results(df):
    assert isinstance(df, pd.DataFrame)
    from collections import defaultdict
    d = defaultdict(list)
    # the column names ('marker', 'allele', 'slen', 'sseq'), the sort key
    # and the log-message strings were stripped in extraction and are
    # reconstructed guesses
    for idx, row in df.iterrows():
        marker = row['marker']
        d[marker].append(row)
    marker_results = {}
    for k, v in d.items():
        if len(v) > 1:
            logging.debug('%s matches for marker %s', len(v), k)
            df_marker = pd.DataFrame(v)
            df_marker.sort_values('slen', ascending=False, inplace=True)
            for i, r in df_marker.iterrows():
                allele = r['allele']
                slen = r['slen']
                logging.debug('allele=%s slen=%s', allele, slen)
                seq = r['sseq']
                if '-' in seq:
                    logging.warning('Gaps in alignment for %s', r)
                seq = seq.replace('-', '').upper()
                allele = allele_name(seq)
                marker_results[k] = allele_result_dict(allele, seq, r.to_dict())
                break
        elif len(v) == 1:
            row = v[0]
            seq = row['sseq']
            if '-' in seq:
                logging.warning('Gaps in alignment for %s', row)
            seq = seq.replace('-', '').upper()
            allele = allele_name(seq)
            marker_results[k] = allele_result_dict(allele, seq, row.to_dict())
        else:
            err_msg = 'Empty list of matches for marker {}'.format(k)
            logging.error(err_msg)
            raise Exception(err_msg)
    return marker_results
Perfect BLAST matches to marker results dict Parse perfect BLAST matches to marker results dict. Args: df (pandas.DataFrame): DataFrame of perfect BLAST matches Returns: dict: cgMLST330 marker names to matching allele numbers
375,946
async def release(
        self, *, comment: str = None, erase: bool = None,
        secure_erase: bool = None, quick_erase: bool = None,
        wait: bool = False, wait_interval: int = 5):
    params = remove_None({
        "system_id": self.system_id,
        "comment": comment,
        "erase": erase,
        "secure_erase": secure_erase,
        "quick_erase": quick_erase,
    })
    self._data = await self._handler.release(**params)
    if not wait:
        return self
    else:
        while self.status in [
                NodeStatus.RELEASING, NodeStatus.DISK_ERASING]:
            await asyncio.sleep(wait_interval)
            try:
                self._data = await self._handler.read(
                    system_id=self.system_id)
            except CallError as error:
                if error.status == HTTPStatus.NOT_FOUND:
                    return self
                else:
                    raise
        if self.status == NodeStatus.FAILED_RELEASING:
            msg = "{hostname} failed to be released.".format(
                hostname=self.hostname)
            raise FailedReleasing(msg, self)
        elif self.status == NodeStatus.FAILED_DISK_ERASING:
            msg = "{hostname} failed to erase disk.".format(
                hostname=self.hostname)
            raise FailedDiskErasing(msg, self)
        return self
Release the machine. :param comment: Reason machine was released. :type comment: `str` :param erase: Erase the disk when releasing. :type erase: `bool` :param secure_erase: Use the drive's secure erase feature if available. :type secure_erase: `bool` :param quick_erase: Wipe just the beginning and end of the disk. This is not secure. :param wait: If specified, wait until the deploy is complete. :type wait: `bool` :param wait_interval: How often to poll, defaults to 5 seconds. :type wait_interval: `int`
375,947
def balance(self):
    self.check()
    if not sum(map(lambda x: x.amount, self.src)) == -self.amount:
        raise XnBalanceError("Sum of source amounts "
                             "not equal to transaction amount")
    if not sum(map(lambda x: x.amount, self.dst)) == self.amount:
        raise XnBalanceError("Sum of destination amounts "
                             "not equal to transaction amount")
    return True
Check this transaction for correctness
375,948
def wv45(msg):
    d = hex2bin(data(msg))
    if d[12] == '0':  # status bit; the literal was stripped in extraction
        return None
    ws = bin2int(d[13:15])
    return ws
Wake vortex. Args: msg (String): 28 bytes hexadecimal message string Returns: int: Wake vortex level. 0=NIL, 1=Light, 2=Moderate, 3=Severe
375,949
def _full_name(self, record_name):
    if not record_name:
        return self.domain
    return super(Provider, self)._full_name(record_name)
Returns full domain name of a sub-domain name
375,950
def _format_finite(negative, digits, dot_pos):
    olddigits = digits
    digits = digits.lstrip('0')
    dot_pos -= len(olddigits) - len(digits)
    use_exponent = dot_pos <= -4 or dot_pos > len(digits)
    if use_exponent:
        exp = dot_pos - 1 if digits else dot_pos
        dot_pos -= exp
    if dot_pos <= 0:
        digits = '0' * (1 - dot_pos) + digits
        dot_pos += 1 - dot_pos
    assert 1 <= dot_pos <= len(digits)
    if dot_pos < len(digits):
        digits = digits[:dot_pos] + '.' + digits[dot_pos:]
    if use_exponent:
        digits += "e{0:+03d}".format(exp)
    return '-' + digits if negative else digits
Given a (possibly empty) string of digits and an integer dot_pos indicating the position of the decimal point relative to the start of that string, output a formatted numeric string with the same value and same implicit exponent.
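Two doctest-style traces, worked by hand through the branches above: with dot_pos inside the digit string a point is simply inserted, and with dot_pos <= -4 the exponent branch fires.

>>> _format_finite(False, '12345', 2)
'12.345'
>>> _format_finite(False, '1', -4)
'1e-05'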
375,951
def get_product(id=None, name=None):
    content = get_product_raw(id, name)
    if content:
        return utils.format_json(content)
Get a specific Product by name or ID
375,952
def wirevector_subset(self, cls=None, exclude=tuple()):
    if cls is None:
        initial_set = self.wirevector_set
    else:
        initial_set = (x for x in self.wirevector_set if isinstance(x, cls))
    if exclude == tuple():
        return set(initial_set)
    else:
        return set(x for x in initial_set if not isinstance(x, exclude))
Return set of wirevectors, filtered by the type or tuple of types provided as cls. If no cls is specified, the full set of wirevectors associated with the Block are returned. If cls is a single type, or a tuple of types, only those wirevectors of the matching types will be returned. This is helpful for getting all inputs, outputs, or registers of a block for example.
375,953
def create(url, filename):
    # the form-field name, file mode, error text and JSON key were stripped
    # in extraction and are reconstructed guesses
    files = {'file': open(filename, 'rb')}
    response = requests.post(url, files=files)
    if response.status_code != 201:
        raise ValueError('Upload failed for ' + filename)
    return references_to_dict(response.json()['links'])[REF_SELF]
Create new fMRI for given experiment by uploading local file. Expects a tar-archive. Parameters ---------- url : string Url to POST fMRI create request filename : string Path to tar-archive on local disk Returns ------- string Url of created functional data resource
375,954
def one_hot_encoding(input_tensor, num_labels):
    xview = input_tensor.view(-1, 1).to(torch.long)
    onehot = torch.zeros(xview.size(0), num_labels,
                         device=input_tensor.device, dtype=torch.float)
    onehot.scatter_(1, xview, 1)
    return onehot.view(list(input_tensor.shape) + [-1])
One-hot encode labels from input
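For example, with three classes:

>>> import torch
>>> one_hot_encoding(torch.tensor([0, 2, 1]), 3)
tensor([[1., 0., 0.],
        [0., 0., 1.],
        [0., 1., 0.]])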
375,955
def tf_action_exploration(self, action, exploration, action_spec):
    # dict keys and type literals ('shape', 'type', 'bool', 'int', 'float',
    # 'num_actions', 'min_value', 'max_value') were stripped in extraction
    # and are restored from Tensorforce's action-spec conventions
    action_shape = tf.shape(input=action)
    exploration_value = exploration.tf_explore(
        episode=self.global_episode,
        timestep=self.global_timestep,
        shape=action_spec['shape']
    )
    exploration_value = tf.expand_dims(input=exploration_value, axis=0)
    if action_spec['type'] == 'bool':
        action = tf.where(
            condition=(tf.random_uniform(shape=action_shape) < exploration_value),
            x=(tf.random_uniform(shape=action_shape) < 0.5),
            y=action
        )
    elif action_spec['type'] == 'int':
        action = tf.where(
            condition=(tf.random_uniform(shape=action_shape) < exploration_value),
            x=tf.random_uniform(shape=action_shape,
                                maxval=action_spec['num_actions'],
                                dtype=util.tf_dtype('int')),
            y=action
        )
    elif action_spec['type'] == 'float':
        noise = tf.random_normal(shape=action_shape,
                                 dtype=util.tf_dtype('float'))
        action += noise * exploration_value
        if 'min_value' in action_spec:
            action = tf.clip_by_value(
                t=action,
                clip_value_min=action_spec['min_value'],
                clip_value_max=action_spec['max_value']
            )
    return action
Applies optional exploration to the action (post-processor for action outputs). Args: action (tf.Tensor): The original output action tensor (to be post-processed). exploration (Exploration): The Exploration object to use. action_spec (dict): Dict specifying the action space. Returns: The post-processed action output tensor.
375,956
def _element_keywords(cls, backend, elements=None):
    "Returns a dictionary of element names to allowed keywords"
    if backend not in Store.loaded_backends():
        return {}
    mapping = {}
    backend_options = Store.options(backend)
    elements = elements if elements is not None else backend_options.keys()
    for element in elements:
        if '.' in element:
            continue
        element = element if isinstance(element, tuple) else (element,)
        element_keywords = []
        options = backend_options['.'.join(element)]
        for group in Options._option_groups:
            element_keywords.extend(options[group].allowed_keywords)
        mapping[element[0]] = element_keywords
    return mapping
Returns a dictionary of element names to allowed keywords
375,957
def reset(self):
    self._allocated_node_names = set()
    self._nodes = {}
    self._links = {}
    self._drawings = {}
    self._snapshots = {}
    snapshot_dir = os.path.join(self.path, "snapshots")
    if os.path.exists(snapshot_dir):
        for snap in os.listdir(snapshot_dir):
            if snap.endswith(".gns3project"):
                snapshot = Snapshot(self, filename=snap)
                self._snapshots[snapshot.id] = snapshot
    self._project_created_on_compute = set()
Called when opening/closing a project. Cleans up internal state.
375,958
def eth_getStorageAt(self, address, position=0, block=BLOCK_TAG_LATEST):
    block = validate_block(block)
    return hex_to_dec((yield from self.rpc_call(
        'eth_getStorageAt', [address, hex(position), block])))
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getstorageat :param address: Storage address :type address: str :param position: Position in storage (optional) :type position: int :param block: Block tag or number (optional) :type block: int or BLOCK_TAGS :rtype: int
375,959
def read_memory(self, addr, transfer_size=32, now=True):
    assert transfer_size in (8, 16, 32)
    if transfer_size == 32:
        result = conversion.byte_list_to_u32le_list(
            self._link.read_mem32(addr, 4, self._apsel))[0]
    elif transfer_size == 16:
        result = conversion.byte_list_to_u16le_list(
            self._link.read_mem16(addr, 2, self._apsel))[0]
    elif transfer_size == 8:
        result = self._link.read_mem8(addr, 1, self._apsel)[0]

    def read_callback():
        return result
    return result if now else read_callback
! @brief Read a memory location. By default, a word will be read.
375,960
def print_matrix(X, decimals=1):
    for row in np.round(X, decimals=decimals):
        print(row)
Pretty printing for numpy matrix X
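For example (row formatting shown for a recent NumPy):

>>> import numpy as np
>>> print_matrix(np.array([[1.234, 5.678]]))
[1.2 5.7]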
375,961
def diam_swamee(FlowRate, HeadLossFric, Length, Nu, PipeRough):
    ut.check_range([FlowRate, ">0", "Flow rate"], [Length, ">0", "Length"],
                   [HeadLossFric, ">0", "Headloss due to friction"],
                   [Nu, ">0", "Nu"], [PipeRough, "0-1", "Pipe roughness"])
    a = ((PipeRough ** 1.25)
         * ((Length * FlowRate**2)
            / (gravity.magnitude * HeadLossFric)
            )**4.75
         )
    b = (Nu * FlowRate**9.4
         * (Length / (gravity.magnitude * HeadLossFric)) ** 5.2
         )
    return 0.66 * (a + b)**0.04
Return the inner diameter of a pipe. The Swamee Jain equation is dimensionally correct and returns the inner diameter of a pipe given the flow rate and the head loss due to shear on the pipe walls. The Swamee Jain equation does NOT take minor losses into account. This equation ONLY applies to turbulent flow.
375,962
def save_config(
    self,
    cmd="copy running-configuration startup-configuration",
    confirm=False,
    confirm_response="",
):
    return super(DellForce10SSH, self).save_config(
        cmd=cmd, confirm=confirm, confirm_response=confirm_response
    )
Saves Config
375,963
def drop_duplicates(self, subset=None, keep='+'):
    # the default for `keep` was stripped in extraction; '+' (the first
    # option listed in the docstring) is a guess
    subset = check_and_obtain_subset_columns(subset, self)
    df = self.reset_index()
    df_names = df._gather_column_names()
    subset_indices = [df_names.index(col_name) for col_name in subset]
    weld_objects = weld_drop_duplicates(df._gather_data_for_weld(),
                                        df._gather_weld_types(),
                                        subset_indices,
                                        keep)
    index_data = self.index._gather_data(name=None)
    new_index = [Index(weld_objects[i], v.dtype, k)
                 for i, k, v in zip(list(range(len(index_data))),
                                    index_data.keys(),
                                    index_data.values())]
    if len(new_index) > 1:
        new_index = MultiIndex(new_index, self.index._gather_names())
    else:
        new_index = new_index[0]
    new_data = OrderedDict(
        (sr.name, Series(obj, new_index, sr.dtype, sr.name))
        for sr, obj in zip(self._iter(), weld_objects[len(index_data):]))
    return DataFrame(new_data, new_index)
Return DataFrame with duplicate rows (excluding index) removed, optionally only considering subset columns. Note that the row order is NOT maintained due to hashing. Parameters ---------- subset : list of str, optional Which columns to consider keep : {'+', '*', 'min', 'max'}, optional What to select from the duplicate rows. These correspond to the possible merge operations in Weld. Note that '+' and '-' might produce unexpected results for strings. Returns ------- DataFrame DataFrame without duplicate rows.
375,964
def _pycall_path_simple(
    x1: int, y1: int, x2: int, y2: int, handle: Any
) -> float:
    return ffi.from_handle(handle)(x1, y1, x2, y2)
Does less and should run faster, just calls the handle function.
375,965
def annotate_op(self, op):
    if isinstance(op, Label):
        return op
    else:
        return AnnotatedOp(self, op.name, op.arg)
Takes a bytecode operation (:class:`Op`) and annotates it using the data contained in this code object. Arguments: op(Op): An :class:`Op` instance. Returns: AnnotatedOp: An annotated bytecode operation.
375,966
def _set_config(self, config=None):
    if not config:
        config = {}
    try:
        self.config = self.componentmodel(config)
        try:
            name = self.config.name
            self.log("Name set to: ", name, lvl=verbose)
        except (AttributeError, KeyError):
            self.log("Has no name.", lvl=verbose)
            try:
                self.config.name = self.uniquename
            except (AttributeError, KeyError) as e:
                self.log("Cannot set component name for configuration: ",
                         e, type(e), self.name, exc=True, lvl=critical)
        try:
            uuid = self.config.uuid
            self.log("UUID set to: ", uuid, lvl=verbose)
        except (AttributeError, KeyError):
            self.log("Has no UUID", lvl=verbose)
            self.config.uuid = str(uuid4())
        try:
            notes = self.config.notes
            self.log("Notes set to: ", notes, lvl=verbose)
        except (AttributeError, KeyError):
            self.log("Has no notes, trying docstring", lvl=verbose)
            notes = self.__doc__
            if notes is None:
                notes = "No notes."
            else:
                notes = notes.lstrip().rstrip()
                self.log(notes)
            self.config.notes = notes
        try:
            componentclass = self.config.componentclass
            self.log("Componentclass set to: ", componentclass, lvl=verbose)
        except (AttributeError, KeyError):
            self.log("Has no component class", lvl=verbose)
            self.config.componentclass = self.name
    except ValidationError as e:
        self.log("Not setting invalid component configuration: ", e,
                 type(e), exc=True, lvl=error)
Set this component's initial configuration
375,967
def wrap_penalty(p, fit_linear, linear_penalty=0.):
    def wrapped_p(n, *args):
        if fit_linear:
            # sparse format string was stripped in extraction; 'csc' assumed
            if n == 1:
                return sp.sparse.block_diag([linear_penalty], format='csc')
            return sp.sparse.block_diag([linear_penalty, p(n - 1, *args)],
                                        format='csc')
        else:
            return p(n, *args)
    return wrapped_p
tool to account for unity penalty on the linear term of any feature. example: p = wrap_penalty(derivative, fit_linear=True)(n, coef) Parameters ---------- p : callable. penalty-matrix-generating function. fit_linear : boolean. whether the current feature has a linear term or not. linear_penalty : float, default: 0. penalty on the linear term Returns ------- wrapped_p : callable modified penalty-matrix-generating function
375,968
def f2p(phrase, max_word_size=15, cutoff=3):
    results = f2p_list(phrase, max_word_size, cutoff)
    return ' '.join(i[0][0] for i in results)
Convert a Finglish phrase to the most probable Persian phrase.
375,969
def _unpack_zipfile(filename, extract_dir):
    try:
        import zipfile
    except ImportError:
        raise ReadError('zlib not supported, cannot unpack this archive.')
    if not zipfile.is_zipfile(filename):
        raise ReadError("%s is not a zip file" % filename)
    zip = zipfile.ZipFile(filename)
    try:
        for info in zip.infolist():
            name = info.filename
            # ... (lines lost in extraction: skip unsafe names, create the
            # target directories, read `data` and open the output file `f`)
            try:
                f.write(data)
            finally:
                f.close()
                del data
    finally:
        zip.close()
Unpack zip `filename` to `extract_dir`
375,970
def Decrypt(self, encrypted_data):
    index_split = -(len(encrypted_data) % AES.block_size)
    if index_split:
        remaining_encrypted_data = encrypted_data[index_split:]
        encrypted_data = encrypted_data[:index_split]
    else:
        remaining_encrypted_data = b''
    decrypted_data = self._aes_cipher.decrypt(encrypted_data)
    return decrypted_data, remaining_encrypted_data
Decrypts the encrypted data. Args: encrypted_data (bytes): encrypted data. Returns: tuple[bytes, bytes]: decrypted data and remaining encrypted data.
375,971
def allocate_objects(self, eps=0.01, noise_size=1):
    if self.__object_segment_analysers is None:
        return []
    segments = []
    for object_segment_analyser in self.__object_segment_analysers:
        # the stripped dict keys are assumed to be 'indexes' and 'analyser'
        indexes = object_segment_analyser['indexes']
        analyser = object_segment_analyser['analyser']
        segments += analyser.allocate_clusters(eps, indexes)
    real_segments = [segment for segment in segments
                     if len(segment) > noise_size]
    return real_segments
! @brief Allocates object segments. @param[in] eps (double): Tolerance level that define maximal difference between phases of oscillators in one segment. @param[in] noise_size (uint): Threshold that defines noise - segments size (in pixels) that is less then the threshold is considered as a noise. @return (list) Object segments where each object segment consists of indexes of pixels that forms object segment.
375,972
def verify_certificate_issuer(self, certificate_issuer_id, **kwargs):
    # the stripped kwarg literals follow the generated-SDK convention
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.verify_certificate_issuer_with_http_info(
            certificate_issuer_id, **kwargs)
    else:
        (data) = self.verify_certificate_issuer_with_http_info(
            certificate_issuer_id, **kwargs)
        return data
Verify certificate issuer. # noqa: E501 A utility API that can be used to validate the user configuration before activating a certificate issuer. Verifies that the certificate issuer is accessible and can be used to generate certificates by Device Management. <br> **Note:** The API requests the 3rd party CA to sign a test certificate. For some 3rd party CAs, this operation may make use of the account quota. <br> **Example usage:** ``` curl -X POST \\ -H 'authorization: <valid access token>' \\ -H 'content-type: application/json;charset=UTF-8' \\ https://api.us-east-1.mbedcloud.com/v3/certificate-issuers/01621a36719d507b9d48a91b00000000/verify ``` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.verify_certificate_issuer(certificate_issuer_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str certificate_issuer_id: Certificate issuer ID. <br> The ID of the certificate issuer. (required) :return: CertificateIssuerVerifyResponse If the method is called asynchronously, returns the request thread.
375,973
def evaluate(self):
    X = mp_Z_Y
    Xf = mp_Zf
    Df = mp_Df
    Sf = mp_Sf
    Ef = sl.inner(Df[np.newaxis, ...], Xf,
                  axis=self.xstep.cri.axisM + 1) - Sf
    Ef = np.swapaxes(Ef, 0, self.xstep.cri.axisK + 1)[0]
    dfd = sl.rfl2norm2(Ef, self.xstep.S.shape,
                       axis=self.xstep.cri.axisN) / 2.0
    rl1 = np.sum(np.abs(X))
    obj = dfd + self.xstep.lmbda * rl1
    return (obj, dfd, rl1)
Evaluate functional value of previous iteration.
375,974
def elapsed(self):
    if self.count == self.total:
        elapsed = self.last_update - self.start
    else:
        elapsed = time.time() - self.start
    return elapsed
Get elapsed time in seconds (float)
375,975
def set_editor(self, editor):
    if self._editor is not None:
        try:
            self._editor.offset_calculator.pic_infos_available.disconnect(
                self._update)
        except (AttributeError, RuntimeError, ReferenceError):
            pass
    self._editor = weakref.proxy(editor) if editor else editor
    try:
        self._editor.offset_calculator.pic_infos_available.connect(
            self._update)
    except AttributeError:
        pass
Sets the associated editor, when the editor's offset calculator mode emit the signal pic_infos_available, the table is automatically refreshed. You can also refresh manually by calling :meth:`update_pic_infos`.
375,976
def parse_model_specifier(specifier):
    # doctest residue removed from this line; the error-message literals
    # were stripped in extraction and are reconstructed
    values = specifier.split('.')
    if len(values) == 2:
        values.append(None)
    elif len(values) != 3:
        raise ValueError(
            'Expected a specifier of the form app.model or '
            'app.model.field, got {} dotted parts'.format(len(values))
        )
    app_name, model_name, field_name = values
    model = get_model(app_name, model_name)
    if not model:
        raise ValueError(
            'Model {} not found in app {}'.format(model_name, app_name)
        )
    return model, field_name
Parses a string that specifies either a model or a field. The string should look like ``app.model.[field]``. >>> print parse_model_specifier('tests.TestModel') (<class 'tests.models.TestModel'>, None) >>> print parse_model_specifier('tests.TestModel.image') (<class 'tests.models.TestModel'>, 'image') :return: model and (optionally) field name :rtype: tuple of :py:class:`~django.db.models.Model` and str or None
375,977
def addSynapse(self, srcCellCol, srcCellIdx, perm):
    self.syns.append(
        [int(srcCellCol), int(srcCellIdx), numpy.float32(perm)])
Add a new synapse :param srcCellCol: source cell column :param srcCellIdx: source cell index within the column :param perm: initial permanence
375,978
def setdefault(self, k, d=None):
    return super(HeaderDict, self).setdefault(k.title(), d)
Override dict.setdefault() to title-case keys.
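For example, assuming HeaderDict subclasses dict and title-cases keys in its other methods as well:

>>> h = HeaderDict()
>>> h.setdefault('content-type', 'text/html')
'text/html'

The key is stored title-cased, as 'Content-Type'.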
375,979
def _define_output_buffers(self):
    self.target_buffers = {
        None: [(step, self.buffers[step])
               for step in self._get_input_steps()]
    }
    for step in self.steps_sorted:
        if step != self:
            child_steps = [edge[1] for edge in self.graph.out_edges(step)]
            self.target_buffers[step] = [
                (child_step, self.buffers[child_step])
                for child_step in child_steps]
Prepare a dictionary so we know what buffers have to be updated with the output of every step.
375,980
def parse_for(control_line):
    # the error text and the regex literal were garbled in extraction; both
    # are reconstructed to match the docstring's 'for $var in expression'
    # and word_in forms
    error = 'Syntax error in control line, expected '\
            '"for $var in expression": ' + control_line
    regex = re.compile(
        r'^for\s+(\$\{?\w+\}?)(?:\s*,\s*(\$\{?\w+\}?))?\s+'
        r'(in|word_in)\s+(.+)$')
    res = regex.match(control_line)
    if not res:
        raise exceptions.YamlSyntaxError(error)
    groups = res.groups()
    control_vars = []
    control_vars.append(get_var_name(groups[0]))
    if groups[1]:
        control_vars.append(get_var_name(groups[1]))
    iter_type = groups[2]
    expr = groups[3]
    return (control_vars, iter_type, expr)
Returns name of loop control variable(s), iteration type (in/word_in) and expression to iterate on. For example: - given "for $i in $foo", returns (['i'], '$foo') - given "for ${i} in $(ls $foo)", returns (['i'], '$(ls $foo)') - given "for $k, $v in $foo", returns (['k', 'v'], '$foo')
375,981
def global_var(self, name):
    self.newline_label(name, False, True)
    self.newline_text("WORD\t1", True)
Inserts a new static (global) variable definition
375,982
def crop(self, extent, copy=False):
    try:
        if extent[0] is None:
            extent = (self.start.z, extent[1])
        if extent[1] is None:
            extent = (extent[0], self.stop.z)
    except:
        m = "You must provide a 2-tuple for the new extents. Use None for"
        m += " the existing start or stop."
        raise StriplogError(m)
    first_ix = self.read_at(extent[0], index=True)
    last_ix = self.read_at(extent[1], index=True)
    first = self[first_ix].split_at(extent[0])[1]
    last = self[last_ix].split_at(extent[1])[0]
    new_list = self.__list[first_ix:last_ix + 1].copy()
    new_list[0] = first
    new_list[-1] = last
    if copy:
        return Striplog(new_list)
    else:
        self.__list = new_list
        return
Crop to a new depth range. Args: extent (tuple): The new start and stop depth. Must be 'inside' existing striplog. copy (bool): Whether to operate in place or make a copy. Returns: Operates in place by default; if copy is True, returns a striplog.
375,983
def OnInsertCols(self, event):
    bbox = self.grid.selection.get_bbox()
    if bbox is None or bbox[1][1] is None:
        ins_point = self.grid.actions.cursor[1] - 1
        no_cols = 1
    else:
        ins_point = bbox[0][1] - 1
        no_cols = self._get_no_rowscols(bbox)[1]
    with undo.group(_("Insert columns")):
        self.grid.actions.insert_cols(ins_point, no_cols)
    self.grid.GetTable().ResetView()
    self.grid.actions.zoom()
    event.Skip()
Inserts the maximum of 1 and the number of selected columns
375,984
def get_issns_for_journal(nlm_id):
    # the stripped params and XPath literals are reconstructed from the
    # NLM Catalog E-utilities conventions
    params = {'db': 'nlmcatalog', 'retmode': 'xml', 'id': nlm_id}
    tree = send_request(pubmed_fetch, params)
    if tree is None:
        return None
    issn_list = tree.findall('.//ISSN')
    issn_linking = tree.findall('.//ISSNLinking')
    issns = issn_list + issn_linking
    if not issns:
        return None
    else:
        return [issn.text for issn in issns]
Get a list of the ISSN numbers for a journal given its NLM ID. Information on NLM XML DTDs is available at https://www.nlm.nih.gov/databases/dtd/
375,985
def towgs84(E, N, pkm=False, presentation=None):
    _lng0 = lng0pkm if pkm else lng0
    E /= 1000.0
    N /= 1000.0
    epsilon = (N - N0) / (k0 * A)
    eta = (E - E0) / (k0 * A)
    epsilonp = epsilon - beta1*sin(2*1*epsilon)*cosh(2*1*eta) - \
        beta2*sin(2*2*epsilon)*cosh(2*2*eta) - \
        beta3*sin(2*3*epsilon)*cosh(2*3*eta)
    etap = eta - beta1*cos(2*1*epsilon)*sinh(2*1*eta) - \
        beta2*cos(2*2*epsilon)*sinh(2*2*eta) - \
        beta3*cos(2*3*epsilon)*sinh(2*3*eta)
    sigmap = 1 - 2*1*beta1*cos(2*1*epsilon)*cosh(2*1*eta) - \
        2*2*beta2*cos(2*2*epsilon)*cosh(2*2*eta) - \
        2*3*beta3*cos(2*3*epsilon)*cosh(2*3*eta)
    taup = 2*1*beta1*sin(2*1*epsilon)*sinh(2*1*eta) + \
        2*2*beta2*sin(2*2*epsilon)*sinh(2*2*eta) + \
        2*3*beta3*sin(2*3*epsilon)*sinh(2*3*eta)
    chi = asin(sin(epsilonp) / cosh(etap))
    latitude = chi + delta1*sin(2*1*chi) + \
        delta2*sin(2*2*chi) + \
        delta3*sin(2*3*chi)
    longitude = _lng0 + atan(sinh(etap) / cos(epsilonp))
    func = None
    # the presentation-prefix literal and the default-formatter check were
    # stripped in extraction; the 'to<name>' lookup is a reconstruction
    presentation = 'to%s' % presentation if presentation else None
    if presentation in presentations:
        func = getattr(sys.modules[__name__], presentation)
    if func and presentation != 'todegdec':
        return func(degrees(latitude)), func(degrees(longitude))
    return (degrees(latitude), degrees(longitude))
Convert coordinates from TWD97 to WGS84 The east and north coordinates should be in meters and in float pkm true for Penghu, Kinmen and Matsu area You can specify one of the following presentations of the returned values: dms - A tuple with degrees (int), minutes (int) and seconds (float) dmsstr - [+/-]DDDΒ°MMM'DDD.DDDDD" (unicode) mindec - A tuple with degrees (int) and minutes (float) mindecstr - [+/-]DDDΒ°MMM.MMMMM' (unicode) (default)degdec - DDD.DDDDD (float)
375,986
def state_get(self):
    virtual_names = list(self.virtual_columns.keys()) + list(self.variables.keys())
    units = {key: str(value) for key, value in self.units.items()}
    ucds = {key: value for key, value in self.ucds.items()
            if key in virtual_names}
    descriptions = {key: value for key, value in self.descriptions.items()}
    import vaex.serialize

    def check(key, value):
        if not vaex.serialize.can_serialize(value.f):
            # warning text reconstructed; the literal was stripped
            warnings.warn('Cannot serialize function: {}'.format(key))
            return False
        return True

    def clean(value):
        return vaex.serialize.to_dict(value.f)
    functions = {key: clean(value) for key, value in self.functions.items()
                 if check(key, value)}
    virtual_columns = {key: value for key, value in self.virtual_columns.items()}
    selections = {name: self.get_selection(name)
                  for name, history in self.selection_histories.items()}
    selections = {name: selection.to_dict() if selection is not None else None
                  for name, selection in selections.items()}
    state = dict(virtual_columns=virtual_columns,
                 column_names=self.column_names,
                 renamed_columns=self._renamed_columns,
                 variables=self.variables,
                 functions=functions,
                 selections=selections,
                 ucds=ucds,
                 units=units,
                 descriptions=descriptions,
                 description=self.description,
                 active_range=[self._index_start, self._index_end])
    return state
Return the internal state of the DataFrame in a dictionary Example: >>> import vaex >>> df = vaex.from_scalars(x=1, y=2) >>> df['r'] = (df.x**2 + df.y**2)**0.5 >>> df.state_get() {'active_range': [0, 1], 'column_names': ['x', 'y', 'r'], 'description': None, 'descriptions': {}, 'functions': {}, 'renamed_columns': [], 'selections': {'__filter__': None}, 'ucds': {}, 'units': {}, 'variables': {}, 'virtual_columns': {'r': '(((x ** 2) + (y ** 2)) ** 0.5)'}}
375,987
def new(self, request):
    form = (self.form or generate_form(self.model))()
    # the template name and context key were stripped in extraction;
    # 'new' and 'form' are reconstructed guesses
    return self._render(
        request=request,
        template='new',
        context={'form': form},
        status=200
    )
Render a form to create a new object.
375,988
def connect_cloudfront(self):
    "Connect to Cloud Front. This is done automatically for you when needed."
    self.conn_cloudfront = connect_cloudfront(
        self.AWS_ACCESS_KEY_ID,
        self.AWS_SECRET_ACCESS_KEY,
        debug=self.S3UTILS_DEBUG_LEVEL)
Connect to Cloud Front. This is done automatically for you when needed.
375,989
def filter_paragraphs(paragraphs, contains=None):
    if contains is None:
        pattern = ''
    else:
        if isinstance(contains, str):
            contains = [contains]
        # the regex literal was stripped in extraction; this word-boundary
        # form is a reconstruction
        pattern = '|'.join(r'(?<![a-zA-Z])%s(?![a-zA-Z])' % shortform
                           for shortform in contains)
    paragraphs = [p for p in paragraphs if re.search(pattern, p)]
    return '\n'.join(paragraphs) + '\n'
Filter paragraphs to only those containing one of a list of strings Parameters ---------- paragraphs : list of str List of plaintext paragraphs from an article contains : str or list of str Exclude paragraphs not containing this string as a token, or at least one of the strings in contains if it is a list Returns ------- str Plaintext consisting of all input paragraphs containing at least one of the supplied tokens.
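With the word-boundary pattern reconstructed above, a minimal example:

>>> paras = ['TNF binds to its receptor.', 'It was a sunny day.']
>>> filter_paragraphs(paras, contains='TNF')
'TNF binds to its receptor.\n'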
375,990
def begin_transaction(self, transaction_type, trace_parent=None):
    return self.tracer.begin_transaction(
        transaction_type, trace_parent=trace_parent)
Register the start of a transaction on the client
375,991
def resolve_freezer(freezer):
    if not freezer:
        return _Default()
    if isinstance(freezer, six.string_types):
        cls = _freezer_lookup(freezer)
        return cls()
    if freezer.__class__ == type.__class__:
        return freezer()
    if freezer not in FREEZER.ALL:
        warn(u"Using custom freezer implementation: {0}".format(freezer))
    return freezer
Locate the appropriate freezer given FREEZER or string input from the programmer. :param freezer: FREEZER constant or string for the freezer that is requested. (None = FREEZER.DEFAULT) :return:
375,992
def parse_rst(text: str) -> docutils.nodes.document:
    parser = docutils.parsers.rst.Parser()
    components = (docutils.parsers.rst.Parser,)
    settings = docutils.frontend.OptionParser(
        components=components).get_default_values()
    # the source-path literal was stripped; '<rst-doc>' is the usual placeholder
    document = docutils.utils.new_document('<rst-doc>', settings=settings)
    parser.parse(text, document)
    return document
Parse text assuming it's an RST markup.
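Usage, roughly:

doc = parse_rst('Title\n=====\n\nSome *emphasis* here.')
print(doc.astext())   # plain-text rendering of the parsed document tree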
375,993
def use(parser, token):
    args, kwargs = parser.parse_args(token)
    # the assert message and the _a.Name identifiers were stripped in
    # extraction; 'self' and 'context' are reconstructed guesses
    assert isinstance(args[0], ast.Str), 'block name must be a string'
    name = args[0].s
    action = ast.YieldFrom(
        value=_a.Call(_a.Attribute(_a.Name('self'), name), [
            _a.Name('context'),
        ])
    )
    if kwargs:
        kwargs = _wrap_kwargs(kwargs)
        return _create_with_scope([ast.Expr(value=action)], kwargs)
    return action
Counterpart to `macro`, lets you render any block/macro in place.
375,994
def read_binary(self, num, item_type='B'):
    # the stripped literals are reconstructed from the struct module's
    # format conventions; the default item_type is a guess
    if 's' in item_type:
        return self.read(num)
    if item_type[0] in ('@', '=', '<', '>', '!'):
        order = item_type[0]
        item_type = item_type[1:]
    else:
        order = '<'
    return list(self.read_struct(
        Struct(order + '{}'.format(int(num)) + item_type)))
Parse the current buffer offset as the specified code.
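Assuming the reconstructed literals above, reading four little-endian unsigned 32-bit integers would look like:

values = reader.read_binary(4, '<I')   # builds Struct('<4I') internally

(`reader` is a hypothetical instance of the surrounding class); read_binary(8, 's') would instead return 8 raw bytes via self.read.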
375,995
def setup(app):
    # config-value names and event names were stripped in extraction; they
    # are restored from sphinx-gallery's conventions
    app.add_config_value('plot_gallery', True, 'html')
    app.add_config_value('abort_on_example_error', False, 'html')
    app.add_config_value('sphinx_gallery_conf', gallery_conf, 'html')
    app.add_stylesheet('gallery.css')
    app.connect('builder-inited', generate_gallery_rst)
    app.connect('build-finished', embed_code_links)
Setup sphinx-gallery sphinx extension
375,996
def search_stack_for_var(varname, verbose=util_arg.NOT_QUIET):
    # the print-message literals were stripped in extraction and are
    # reconstructed
    curr_frame = inspect.currentframe()
    if verbose:
        print('Searching stack for: ' + six.text_type(varname))
    frame_no = 0
    while curr_frame.f_back is not None:
        if varname in curr_frame.f_locals.keys():
            if verbose:
                print('Found in locals of frame: ' + six.text_type(frame_no))
            return curr_frame.f_locals[varname]
        if varname in curr_frame.f_globals.keys():
            if verbose:
                print('Found in globals of frame: ' + six.text_type(frame_no))
            return curr_frame.f_globals[varname]
        frame_no += 1
        curr_frame = curr_frame.f_back
    if verbose:
        print('Not found in any of the ' + six.text_type(frame_no) + ' frames')
    return None
Finds a variable (local or global) somewhere in the stack and returns the value Args: varname (str): variable name Returns: None if varname is not found else its value
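For example, from anywhere deeper in the call stack:

>>> foo = 42
>>> search_stack_for_var('foo')
42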
375,997
def init_storage(self):
    # dict keys reconstructed from the docstring: current term and voted_for
    if not self.storage.exists():
        self.storage.update({
            'term': 0,
        })
        self.storage.update({
            'voted_for': None
        })
Set current term to zero upon initialization & voted_for to None
375,998
def to_dot(self, path: str, title: Optional[str] = None):
    # Graphviz attribute literals were stripped in extraction and are
    # reconstructed: 'svg' output, an invisible fake root node, and
    # doublecircle shapes for accepting states
    g = graphviz.Digraph(format='svg')
    g.node('fake', style='invisible')
    for state in self._states:
        if state == self._initial_state:
            if state in self._accepting_states:
                g.node(str(state), root='true', shape='doublecircle')
            else:
                g.node(str(state), root='true')
        elif state in self._accepting_states:
            g.node(str(state), shape='doublecircle')
        else:
            g.node(str(state))
    g.edge('fake', str(self._initial_state), style='bold')
    for start in self._transition_function:
        for symbol, end in self._transition_function[start].items():
            g.edge(str(start), str(end), label=str(symbol))
    if title:
        g.attr(label=title)
        g.attr(fontsize='20')
    g.render(filename=path)
    return
Print the automaton to a dot file :param path: the path where to save the file. :param title: :return:
375,999
def connected(self, node_id):
    conn = self._conns.get(node_id)
    if conn is None:
        return False
    return conn.connected()
Return True iff the node_id is connected.