Dataset columns:
- Unnamed: 0 — int64, values 0 to 389k
- code — string, lengths 26 to 79.6k characters
- docstring — string, lengths 1 to 46.9k characters
12,200
def visited(self):
    try:
        binfo = self.binfo
    except AttributeError:
        pass
    else:
        self.ninfo.update(self)
        SCons.Node.store_info_map[self.store_info](self)
Called just after this node has been visited (with or without a build).
12,201
def save_csv(p, sheet):
    # the open mode, join separator and progress label strings were stripped
    # in the source; write mode and an empty-string join are assumed here
    with p.open_text(mode='w') as fp:
        cw = csv.writer(fp, **csvoptions())
        colnames = [col.name for col in sheet.visibleCols]
        if ''.join(colnames):  # only write a header row if at least one column has a name
            cw.writerow(colnames)
        for r in Progress(sheet.rows):  # progress-gauge label stripped in source
            cw.writerow([col.getDisplayValue(r) for col in sheet.visibleCols])
Save as single CSV file, handling column names as first line.
12,202
def main():
    desc = ''  # description text stripped in source
    parser = argparse.ArgumentParser(description=desc)
    # The option strings, defaults and help texts of the six arguments were
    # stripped in the source; the dest names below are inferred from how
    # `args` is used further down and should be treated as assumptions.
    parser.add_argument('', dest='src_dir', default='', help='')
    parser.add_argument('', dest='compiler', default='', help='')
    parser.add_argument('', dest='include', default='', help='')
    parser.add_argument('', dest='boost_headers', default='', help='')
    parser.add_argument('', dest='out_dir', default='', help='')
    parser.add_argument('', dest='repeat_count', type=int, default=5, help='')
    args = parser.parse_args()
    compiler = compiler_info(args.compiler)
    results = benchmark(
        args.src_dir,
        args.compiler,
        [args.include, args.boost_headers],
        args.repeat_count
    )
    plot_diagrams(results, configs_in(args.src_dir), compiler, args.out_dir)
The main function of the script
12,203
def cat(src_filename, dst_file):
    (dev, dev_filename) = get_dev_and_path(src_filename)
    if dev is None:
        with open(dev_filename, 'r') as txtfile:  # open mode stripped in source; text mode assumed
            for line in txtfile:
                dst_file.write(line)
    else:
        filesize = dev.remote_eval(get_filesize, dev_filename)
        return dev.remote(send_file_to_host, dev_filename, dst_file, filesize,
                          xfer_func=recv_file_from_remote)
Copies the contents of the indicated file to an already opened file.
12,204
def _create_messages(self, names, data, isDms=False):
    chats = {}
    empty_dms = []
    formatter = SlackFormatter(self.__USER_DATA, data)
    for name in names:
        dir_path = os.path.join(self._PATH, name)
        messages = []
        day_files = glob.glob(os.path.join(dir_path, "*.json"))
        if not day_files:
            if isDms:
                empty_dms.append(name)
            continue
        for day in sorted(day_files):
            with io.open(os.path.join(self._PATH, day), encoding="utf8") as f:
                day_messages = json.load(f)
                messages.extend([Message(formatter, d) for d in day_messages])
        chats[name] = messages
    if isDms:
        self._EMPTY_DMS = empty_dms
    return chats
Creates object of arrays of messages from each json file specified by the names or ids

:param [str] names: names of each group of messages
:param [object] data: array of objects detailing where to get the messages from in the directory structure
:param bool isDms: boolean value used to tell if the data is dm data so the function can collect the empty dm directories and store them in memory only
:return: object of arrays of messages
:rtype: object
12,205
def _settle_message(self, message_number, response):
    if not response or isinstance(response, errors.MessageAlreadySettled):
        return
    if isinstance(response, errors.MessageAccepted):
        self._receiver.settle_accepted_message(message_number)
    elif isinstance(response, errors.MessageReleased):
        self._receiver.settle_released_message(message_number)
    elif isinstance(response, errors.MessageRejected):
        self._receiver.settle_rejected_message(
            message_number,
            response.error_condition,
            response.error_description)
    elif isinstance(response, errors.MessageModified):
        self._receiver.settle_modified_message(
            message_number,
            response.failed,
            response.undeliverable,
            response.annotations)
    else:
        raise ValueError("Invalid message response type: {}".format(response))
Send a settle disposition for a received message.

:param message_number: The delivery number of the message to settle.
:type message_number: int
:param response: The type of disposition to respond with, e.g. whether the message was accepted, rejected or abandoned.
:type response: ~uamqp.errors.MessageResponse
12,206
def positions(self, word):
    right = len(word) - self.right
    return [i for i in self.hd.positions(word) if self.left <= i <= right]
Returns a list of positions where the word can be hyphenated. See also Hyph_dict.positions. The points that are too far to the left or right are removed.
12,207
def setup(self, config_file=None, aws_config=None, gpg_config=None,
          decrypt_gpg=True, decrypt_kms=True):
    if aws_config is not None:
        self.aws_config = aws_config
    if gpg_config is not None:
        self.gpg_config = gpg_config
    if decrypt_kms is not None:
        self.decrypt_kms = decrypt_kms
    if decrypt_gpg is not None:
        self.decrypt_gpg = decrypt_gpg
    if config_file is not None:
        self.config_file = config_file
    return self
Make setup easier by providing a constructor method.

Move to config_file. File can be located with a filename only, relative path, or absolute path. If only a name or relative path is provided, look in this order:

1. current directory
2. `~/.config/<file_name>`
3. `/etc/<file_name>`

It is a good idea to include your __package__ in the file name. For example, `cfg = Config(os.path.join(__package__, 'config.yaml'))`. This way it will look for your_package/config.yaml, ~/.config/your_package/config.yaml, and /etc/your_package/config.yaml.
12,208
def delete(gandi, resource, background, force):
    resource = sorted(tuple(set(resource)))
    possible_resources = gandi.ip.resource_list()
    # the user-facing message templates and the join separator were stripped
    # in the source; minimal placeholders are used here
    for item in resource:
        if item not in possible_resources:
            gandi.echo('%s' % item)
            gandi.echo('%s' % possible_resources)
            return
    if not force:
        proceed = click.confirm('%s' % ', '.join(resource))
        if not proceed:
            return
    return gandi.ip.delete(resource, background, force)
Delete one or more IPs (after detaching them from VMs if necessary). resource can be an ip id or ip.
12,209
def dataframe(self, force_refresh=False):
    if force_refresh:
        self.clear_cache()
    if self._dataframe is None:
        self._dataframe = self._fetch_dataframe()
    return self._dataframe
A pandas dataframe with lots of interesting results about this object. Created by calling SageMaker List and Describe APIs and converting them into a convenient tabular summary. Args: force_refresh (bool): Set to True to fetch the latest data from SageMaker API.
12,210
def require(method):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # `callmethod` (the attribute name that records whether `method`
            # has already run) is not defined in this snippet; it presumably
            # comes from the enclosing module in the original source
            if not getattr(args[0], callmethod):
                getattr(args[0], method)()
                setattr(args[0], callmethod, True)
            return func(*args, **kwargs)
        return wrapper
    return decorator
Decorator for managing chained dependencies of different class properties. The @require decorator allows developers to specify that a function call must be operated on before another property or function call is accessed, so that data and processing for an entire class can be evaluated in a lazy way (i.e. not all upon instantiation).

Examples:
    >>> class Foo(Bar):
    >>>
    >>>     def a(self):
    >>>         print 'a!'
    >>>         return 1
    >>>
    >>>     @require('a')
    >>>     @property
    >>>     def b(self):
    >>>         print 'b!'
    >>>         return self.a + 1
    >>>
    >>> foo = Foo()
    >>> print foo.b
    'a!'
    'b!'
    2
12,211
def get_python_logger():
    global _python_logger
    if _python_logger is None:
        fn = "a99.log"
        l = logging.Logger("a99", level=a99.logging_level)
        if a99.flag_log_file:
            add_file_handler(l, fn)
        if a99.flag_log_console:
            ch = logging.StreamHandler()
            ch.setFormatter(_fmtr)
            l.addHandler(ch)
        _python_logger = l
        for line in a99.format_box("a99 logging session started @ {}".format(a99.now_str())):
            l.info(line)
        if a99.flag_log_console:  # the source checked flag_log_file here, which looks like a copy-paste slip
            l.info("$ Logging to console $")
        if a99.flag_log_file:
            l.info("$ Logging to file '{}' $".format(fn))  # placeholder added so the filename is actually formatted
    return _python_logger
Returns logger to receive Python messages (as opposed to Fortran). At first call, _python_logger is created. At subsequent calls, _python_logger is returned. Therefore, if you want to change `a99.flag_log_file` or `a99.flag_log_console`, do so before calling get_python_logger(), otherwise these changes will be ineffective.
12,212
def generate_main_h(directory, xml):
    f = open(os.path.join(directory, xml.basename + ".h"), mode='w')  # mode stripped in source; write mode assumed
    t.write(f, '', xml)  # template name stripped in source; placeholder kept
    f.close()
generate main header per XML file
12,213
def module_path(name, path):
    define = Define(name, path)
    assert os.path.isdir(path), "%r doesn't exist" % path
    name = "malcolm.modules.%s" % name
    import_package_from_path(name, path)
    return define
Load an external malcolm module (e.g. ADCore/etc/malcolm)
12,214
def _add_vertex_attributes(self, genes: List[Gene],
                           disease_associations: Optional[dict] = None) -> None:
    self._set_default_vertex_attributes()
    self._add_vertex_attributes_by_genes(genes)
    up_regulated = self.get_upregulated_genes()
    down_regulated = self.get_downregulated_genes()
    self.graph.vs(up_regulated.indices)["diff_expressed"] = True
    self.graph.vs(up_regulated.indices)["up_regulated"] = True
    self.graph.vs(down_regulated.indices)["diff_expressed"] = True
    self.graph.vs(down_regulated.indices)["down_regulated"] = True
    self._add_disease_associations(disease_associations)
    logger.info("Number of all differentially expressed genes is: {}".
                format(len(up_regulated) + len(down_regulated)))
Add attributes to vertices. :param genes: A list of genes containing attribute information.
12,215
def _blocks_to_samples(sig_data, n_samp, fmt):
    # The format identifiers and dtype strings were stripped in the source;
    # the two branches clearly handle two different packed formats, so the
    # names and dtypes below are assumptions.
    if fmt == '212':  # assumed: 2 samples packed into 3 bytes
        if n_samp % 2:
            n_samp += 1
            added_samps = 1
            sig_data = np.append(sig_data, np.zeros(1, dtype='uint8'))
        else:
            added_samps = 0
        sig_data = sig_data.astype('int16')
        sig = np.zeros(n_samp, dtype='int16')
        sig[0::2] = sig_data[0::3] + 256 * np.bitwise_and(sig_data[1::3], 0x0f)
        sig[1::2] = sig_data[2::3] + 256 * np.bitwise_and(sig_data[1::3] >> 4, 0x0f)
        if added_samps:
            sig = sig[:-added_samps]
        sig[sig > 511] -= 1024
    elif fmt == '310':  # assumed: 3 samples packed into 4 bytes
        if n_samp % 3:
            n_samp = upround(n_samp, 3)
            added_samps = n_samp % 3
            sig_data = np.append(sig_data, np.zeros(added_samps, dtype='uint8'))
        else:
            added_samps = 0
        sig_data = sig_data.astype('int16')
        sig = np.zeros(n_samp, dtype='int16')
        sig[0::3] = sig_data[0::4][0:len(sig[0::3])] + 256 * np.bitwise_and(sig_data[1::4], 0x03)[0:len(sig[0::3])]
        sig[1::3] = (sig_data[1::4] >> 2)[0:len(sig[1::3])] + 64 * np.bitwise_and(sig_data[2::4], 0x0f)[0:len(sig[1::3])]
        sig[2::3] = (sig_data[2::4] >> 4)[0:len(sig[2::3])] + 16 * np.bitwise_and(sig_data[3::4], 0x7f)[0:len(sig[2::3])]
        if added_samps:
            sig = sig[:-added_samps]
        sig[sig > 511] -= 1024
    return sig
Convert uint8 blocks into signal samples for unaligned dat formats.

Parameters
----------
sig_data : numpy array
    The uint8 data blocks.
n_samp : int
    The number of samples contained in the bytes.

Returns
-------
signal : numpy array
    The numpy array of digital samples
12,216
def get_xml_parser(encoding=None):
    parser = etree.ETCompatXMLParser(
        huge_tree=True,
        remove_comments=True,
        strip_cdata=False,
        remove_blank_text=True,
        resolve_entities=False,
        encoding=encoding
    )
    return parser
Returns an ``etree.ETCompatXMLParser`` instance.
12,217
def _set_sample_rate_cpu(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={: []}, int_size=32), restriction_dict={: [u]}), is_leaf=True, yang_name="sample-rate-cpu", rest_name="sample-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "uint32", : , }) self.__sample_rate_cpu = t if hasattr(self, ): self._set()
Setter method for sample_rate_cpu, mapped from YANG variable /resource_monitor/cpu/sample_rate_cpu (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_sample_rate_cpu is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sample_rate_cpu() directly.
12,218
def delete_feature_base(dbpath, set_object, name):
    # the dialect prefix was stripped in the source; SQLite is assumed since
    # the docstring describes dbpath as a path to an SQLite database file
    engine = create_engine('sqlite:///' + dbpath)
    session_cl = sessionmaker(bind=engine)
    session = session_cl()
    tmp_object = session.query(set_object).get(1)
    if tmp_object.features is not None and name in tmp_object.features:
        for i in session.query(set_object).order_by(set_object.id):
            del i.features[name]
    session.commit()
    session.close()
    return None
Generic function which deletes a feature from a database

Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
name : string, name of the feature to be deleted

Returns
-------
None
12,219
def execute_process_synchronously_or_raise(self, execute_process_request, name, labels=None):
    fallible_result = self.execute_process_synchronously_without_raising(execute_process_request, name, labels)
    return fallible_to_exec_result_or_raise(
        fallible_result,
        execute_process_request
    )
Execute process synchronously, and throw if the return code is not 0. See execute_process_synchronously for the api docs.
12,220
def bz2_pack(source):
    import bz2, base64
    # The line that actually compresses `source` and the self-extracting
    # preamble strings were lost in the source; the compression step is
    # reconstructed here so that `compressed_source` is defined before use.
    compressed_source = bz2.compress(source.encode('utf-8'))
    out = ""
    out += base64.b64encode(compressed_source).decode()
    out += "')))\n"
    return out
Returns 'source' as a bzip2-compressed, self-extracting python script. .. note:: This method uses up more space than the zip_pack method but it has the advantage in that the resulting .py file can still be imported into a python program.
12,221
def _set_overlay_gateway(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("name",overlay_gateway.overlay_gateway, yang_name="overlay-gateway", rest_name="overlay-gateway", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: u, u: None, u: None, u: u, u: u}}), is_container=, yang_name="overlay-gateway", rest_name="overlay-gateway", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: None, u: None, u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "list", : , }) self.__overlay_gateway = t if hasattr(self, ): self._set()
Setter method for overlay_gateway, mapped from YANG variable /overlay_gateway (list) If this variable is read-only (config: false) in the source YANG file, then _set_overlay_gateway is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_overlay_gateway() directly.
12,222
def merge_odd_even_csu_configurations(conf_odd, conf_even):
    merged_conf = deepcopy(conf_odd)
    for i in range(EMIR_NBARS):
        ibar = i + 1
        if ibar % 2 == 0:
            merged_conf._csu_bar_left[i] = conf_even._csu_bar_left[i]
            merged_conf._csu_bar_right[i] = conf_even._csu_bar_right[i]
            merged_conf._csu_bar_slit_center[i] = \
                conf_even._csu_bar_slit_center[i]
            merged_conf._csu_bar_slit_width[i] = \
                conf_even._csu_bar_slit_width[i]
    return merged_conf
Merge CSU configuration using odd- and even-numbered values.

The returned CSU configuration includes the odd-numbered values from 'conf_odd' and the even-numbered values from 'conf_even'.

Parameters
----------
conf_odd : CsuConfiguration instance
    CSU configuration corresponding to odd-numbered slitlets.
conf_even : CsuConfiguration instance
    CSU configuration corresponding to even-numbered slitlets.

Returns
-------
merged_conf : CsuConfiguration instance
    CSU configuration resulting from the merging process.
12,223
def get_station_year_text(WMO, WBAN, year): if WMO is None: WMO = 999999 if WBAN is None: WBAN = 99999 station = str(int(WMO)) + + str(int(WBAN)) gsod_year_dir = os.path.join(data_dir, , str(year)) path = os.path.join(gsod_year_dir, station + ) if os.path.exists(path): data = open(path).read() if data and data != : return data else: raise Exception(data) toget = ( + str(year) + + station + + str(year) +) try: data = urlopen(toget, timeout=5) except Exception as e: if not os.path.exists(gsod_year_dir): os.makedirs(gsod_year_dir) open(path, ).write() raise Exception( %(e)) data = data.read() data_thing = StringIO(data) f = gzip.GzipFile(fileobj=data_thing, mode="r") year_station_data = f.read() try: year_station_data = year_station_data.decode() except: pass if not os.path.exists(gsod_year_dir): os.makedirs(gsod_year_dir) open(path, ).write(year_station_data) return year_station_data
Basic method to download data from the GSOD database, given a station identifier and year. Parameters ---------- WMO : int or None World Meteorological Organization (WMO) identifiers, [-] WBAN : int or None Weather Bureau Army Navy (WBAN) weather station identifier, [-] year : int Year data should be retrieved from, [year] Returns ------- data : str Downloaded data file
12,224
def default(self, o):
    if isinstance(o, Atom) or isinstance(o, Bond):
        return o._ctab_data
    else:
        return o.__dict__
Default encoder. :param o: Atom or Bond instance. :type o: :class:`~ctfile.ctfile.Atom` or :class:`~ctfile.ctfile.Bond`. :return: Dictionary that contains information required for atom and bond block of ``Ctab``. :rtype: :py:class:`collections.OrderedDict`
12,225
def timeout(delay, handler=None):
    # Presumably decorated with contextlib.contextmanager in the original
    # source (the docstring calls this a context manager and the function
    # body yields); the decorator line did not survive in this snippet.
    delay = int(delay)
    if handler is None:
        def default_handler(signum, frame):
            raise RuntimeError("{:d} seconds timeout expired".format(delay))
        handler = default_handler
    prev_sigalrm_handler = signal.getsignal(signal.SIGALRM)
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(delay)
    yield
    signal.alarm(0)
    signal.signal(signal.SIGALRM, prev_sigalrm_handler)
Context manager to run code and deliver a SIGALRM signal after `delay` seconds. Note that `delay` must be a whole number; otherwise it is converted to an integer by Python's `int()` built-in function. For floating-point numbers, that means rounding off to the nearest integer from below. If the optional argument `handler` is supplied, it must be a callable that is invoked if the alarm triggers while the code is still running. If no `handler` is provided (default), then a `RuntimeError` with message ``Timeout`` is raised.
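A minimal usage sketch, assuming the function is exposed as a context manager as the docstring describes; the 5-second delay and the slow call are illustrative only:

# Unix-only: the implementation relies on SIGALRM
try:
    with timeout(5):
        result = do_slow_network_call()  # hypothetical long-running operation
except RuntimeError as err:
    result = None
    print("aborted:", err)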
12,226
def bounds(self, pixelbuffer=0):
    left = self._left
    bottom = self._bottom
    right = self._right
    top = self._top
    if pixelbuffer:
        offset = self.pixel_x_size * float(pixelbuffer)
        left -= offset
        bottom -= offset
        right += offset
        top += offset
    if self.tp.grid.is_global:
        top = min([top, self.tile_pyramid.top])
        bottom = max([bottom, self.tile_pyramid.bottom])
    return Bounds(left, bottom, right, top)
Return Tile boundaries. - pixelbuffer: tile buffer in pixels
12,227
def fit(self, X):
    D = self._initialize(X)
    for i in range(self.max_iter):
        gamma = self._transform(D, X)
        e = np.linalg.norm(X - gamma.dot(D))
        if e < self.tol:
            break
        D, gamma = self._update_dict(X, D, gamma)
    self.components_ = D
    return self
Parameters ---------- X: shape = [n_samples, n_features]
12,228
def completed_work_items(self):
    "Iterable of `(work-item, result)`s for all completed items."
    completed = self._conn.execute(
        "SELECT * FROM work_items, results WHERE work_items.job_id == results.job_id"
    )
    return ((_row_to_work_item(result), _row_to_work_result(result))
            for result in completed)
Iterable of `(work-item, result)`s for all completed items.
12,229
def get_requirements(*args):
    requirements = set()
    contents = get_contents(*args)
    for line in contents.splitlines():
        # the regular expression patterns were stripped in the source; they
        # originally removed comments and surrounding whitespace from each line
        line = re.sub(r'', '', line)
        if line and not line.isspace():
            requirements.add(re.sub(r'', '', line))
    return sorted(requirements)
Get requirements from pip requirement files.
12,230
def grad_local_log_likelihood(self, x):
    C, D, u, y = self.C, self.D, self.inputs, self.data
    psi = x.dot(C.T) + u.dot(D.T)
    p = 1. / (1 + np.exp(-psi))
    return (y - p).dot(C)
d/d\psi [y \psi - log(1 + exp(\psi))]
    = y - exp(\psi) / (1 + exp(\psi))
    = y - sigma(\psi)
    = y - p

d\psi / dx = C

d/dx = (y - sigma(\psi)) * C
12,231
def add_conversion_steps(self, converters: List[Converter], inplace: bool = False):
    check_var(converters, var_types=list, min_len=1)
    if inplace:
        for converter in converters:
            self.add_conversion_step(converter, inplace=True)
    else:
        new = copy(self)
        new.add_conversion_steps(converters, inplace=True)
        return new
Utility method to add converters to this chain. If inplace is True, this object is modified and None is returned. Otherwise, a copy is returned :param converters: the list of converters to add :param inplace: boolean indicating whether to modify this object (True) or return a copy (False) :return: None or a copy with the converters added
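A brief sketch of the inplace/copy contract described above; `chain`, `conv_a` and `conv_b` are placeholder names standing in for instances from the surrounding library:

new_chain = chain.add_conversion_steps([conv_a, conv_b])       # returns a modified copy
chain.add_conversion_steps([conv_a, conv_b], inplace=True)     # modifies chain in place, returns None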
12,232
def linear(X, n, *args, **kwargs):
    hyper_deriv = kwargs.pop('hyper_deriv', None)  # key name stripped in source; inferred from the variable it feeds
    m = scipy.asarray(args[:-1])
    b = args[-1]
    if sum(n) > 1:
        return scipy.zeros(X.shape[0])
    elif sum(n) == 0:
        if hyper_deriv is not None:
            if hyper_deriv < len(m):
                return X[:, hyper_deriv]
            elif hyper_deriv == len(m):
                return scipy.ones(X.shape[0])
            else:
                raise ValueError("Invalid value for hyper_deriv, " + str(hyper_deriv))
        else:
            return (m * X).sum(axis=1) + b
    else:
        if hyper_deriv is not None:
            if n[hyper_deriv] == 1:
                return scipy.ones(X.shape[0])
            else:
                return scipy.zeros(X.shape[0])
        return m[n == 1] * scipy.ones(X.shape[0])
Linear mean function of arbitrary dimension, suitable for use with :py:class:`MeanFunction`. The form is :math:`m_0 * X[:, 0] + m_1 * X[:, 1] + \dots + b`. Parameters ---------- X : array, (`M`, `D`) The points to evaluate the model at. n : array of non-negative int, (`D`) The derivative order to take, specified as an integer order for each dimension in `X`. *args : num_dim+1 floats The slopes for each dimension, plus the constant term. Must be of the form `m0, m1, ..., b`.
12,233
def main():
    check_python_version()
    check_python_modules()
    check_executables()
    home = os.path.expanduser("~")
    print("\033[1mCheck files\033[0m")
    rcfile = os.path.join(home, ".hwrtrc")
    if os.path.isfile(rcfile):
        print("~/.hwrtrc... %sFOUND%s" % (Bcolors.OKGREEN, Bcolors.ENDC))
    else:
        print("~/.hwrtrc... %sNOT FOUND%s" % (Bcolors.FAIL, Bcolors.ENDC))
    # package and resource names were stripped in the source; the values
    # below are guesses based on the surrounding hwrt references
    misc_path = pkg_resources.resource_filename('hwrt', 'misc/')
    print("misc-path: %s" % misc_path)
Execute all checks.
12,234
def available_actions(self, obs):
    available_actions = set()
    hide_specific_actions = self._agent_interface_format.hide_specific_actions
    for i, func in six.iteritems(actions.FUNCTIONS_AVAILABLE):
        if func.avail_fn(obs):
            available_actions.add(i)
    for a in obs.abilities:
        if a.ability_id not in actions.ABILITY_IDS:
            logging.warning("Unknown ability %s seen as available.", a.ability_id)
            continue
        for func in actions.ABILITY_IDS[a.ability_id]:
            if func.function_type in actions.POINT_REQUIRED_FUNCS[a.requires_point]:
                if func.general_id == 0 or not hide_specific_actions:
                    available_actions.add(func.id)
                if func.general_id != 0:
                    for general_func in actions.ABILITY_IDS[func.general_id]:
                        if general_func.function_type is func.function_type:
                            available_actions.add(general_func.id)
                            break
    return list(available_actions)
Return the list of available action ids.
12,235
def extract_edges(self, feature_angle=30, boundary_edges=True,
                  non_manifold_edges=True, feature_edges=True,
                  manifold_edges=True, inplace=False):
    surf = self.extract_surface()
    return surf.extract_edges(feature_angle, boundary_edges,
                              non_manifold_edges, feature_edges,
                              manifold_edges, inplace=inplace)
Extracts edges from the surface of the grid. From vtk documentation: These edges are either 1) boundary (used by one polygon) or a line cell; 2) non-manifold (used by three or more polygons) 3) feature edges (edges used by two triangles and whose dihedral angle > feature_angle) 4) manifold edges (edges used by exactly two polygons). Parameters ---------- feature_angle : float, optional Defaults to 30 degrees. boundary_edges : bool, optional Defaults to True non_manifold_edges : bool, optional Defaults to True feature_edges : bool, optional Defaults to True manifold_edges : bool, optional Defaults to True inplace : bool, optional Return new mesh or overwrite input. Returns ------- edges : vtki.vtkPolyData Extracted edges
12,236
def mk_token(self, load): if not self.authenticate_eauth(load): return {} if self._allow_custom_expire(load): token_expire = load.pop(, self.opts[]) else: _ = load.pop(, None) token_expire = self.opts[] tdata = {: time.time(), : time.time() + token_expire, : self.load_name(load), : load[]} if self.opts[]: acl_ret = self.__get_acl(load) tdata[] = acl_ret groups = self.get_groups(load) if groups: tdata[] = groups return self.tokens["{0}.mk_token".format(self.opts[])](self.opts, tdata)
Run time_auth and create a token. Return False or the token
12,237
def flatten(self):
    for key in self.keys:
        try:
            arr = self.__dict__[key]
            shape = arr.shape
            if shape[2] == 1:
                self.__dict__[key] = arr.reshape(shape[0], shape[1])
        except:
            pass
Flattens any np.array of column vectors into 1D arrays. Basically, this makes data readable for humans if you are just inspecting via the REPL. For example, if you have saved a KalmanFilter object with 89 epochs, self.x will be shape (89, 9, 1) (for example). After flatten is run, self.x.shape == (89, 9), which displays nicely from the REPL. There is no way to unflatten, so it's a one way trip.
12,238
def shifted(self, rows, cols):
    shifted_block_tl = \
        [(row + rows, col + cols) for row, col in self.block_tl]
    shifted_block_br = \
        [(row + rows, col + cols) for row, col in self.block_br]
    shifted_rows = [row + rows for row in self.rows]
    shifted_cols = [col + cols for col in self.cols]
    shifted_cells = [(row + rows, col + cols) for row, col in self.cells]
    return Selection(shifted_block_tl, shifted_block_br, shifted_rows,
                     shifted_cols, shifted_cells)
Returns a new selection that is shifted by rows and cols.

Negative values for rows and cols may result in a selection that addresses negative cells.

Parameters
----------
rows: Integer
    Number of rows that the new selection is shifted down
cols: Integer
    Number of columns that the new selection is shifted right
12,239
def update_role(self, service_name, deployment_name, role_name,
                os_virtual_hard_disk=None, network_config=None,
                availability_set_name=None, data_virtual_hard_disks=None,
                role_size=None, role_type='PersistentVMRole',
                resource_extension_references=None, provision_guest_agent=None):
    # The stripped string literals (the parameter names passed to
    # _validate_not_none and the role_type default) are reconstructed from
    # context; the docstring states PersistentVMRole is the only supported value.
    _validate_not_none('service_name', service_name)
    _validate_not_none('deployment_name', deployment_name)
    _validate_not_none('role_name', role_name)
    return self._perform_put(
        self._get_role_path(service_name, deployment_name, role_name),
        _XmlSerializer.update_role_to_xml(
            role_name, os_virtual_hard_disk, role_type, network_config,
            availability_set_name, data_virtual_hard_disks, role_size,
            resource_extension_references, provision_guest_agent),
        as_async=True)
Updates the specified virtual machine. service_name: The name of the service. deployment_name: The name of the deployment. role_name: The name of the role. os_virtual_hard_disk: Contains the parameters Windows Azure uses to create the operating system disk for the virtual machine. network_config: Encapsulates the metadata required to create the virtual network configuration for a virtual machine. If you do not include a network configuration set you will not be able to access the VM through VIPs over the internet. If your virtual machine belongs to a virtual network you can not specify which subnet address space it resides under. availability_set_name: Specifies the name of an availability set to which to add the virtual machine. This value controls the virtual machine allocation in the Windows Azure environment. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. data_virtual_hard_disks: Contains the parameters Windows Azure uses to create a data disk for a virtual machine. role_size: The size of the virtual machine to allocate. The default value is Small. Possible values are: ExtraSmall, Small, Medium, Large, ExtraLarge. The specified value must be compatible with the disk selected in the OSVirtualHardDisk values. role_type: The type of the role for the virtual machine. The only supported value is PersistentVMRole. resource_extension_references: Optional. Contains a collection of resource extensions that are to be installed on the Virtual Machine. This element is used if provision_guest_agent is set to True. provision_guest_agent: Optional. Indicates whether the VM Agent is installed on the Virtual Machine. To run a resource extension in a Virtual Machine, this service must be installed.
12,240
def processData(config, stats): if not in stats or not in stats: stats.total_time = [0.0] stats.total_clock = [0.0] stats.total_time = sum([float(number) for number in stats.total_time]) stats.total_clock = sum([float(number) for number in stats.total_clock]) collatedStatsTag = Expando(total_run_time=stats.total_time, total_clock=stats.total_clock, batch_system=config.batchSystem, default_memory=str(config.defaultMemory), default_cores=str(config.defaultCores), max_cores=str(config.maxCores) ) worker = [_f for _f in getattr(stats, , []) if _f] jobs = [_f for _f in getattr(stats, , []) if _f] jobs = [item for sublist in jobs for item in sublist] def fn4(job): try: return list(jobs) except TypeError: return [] buildElement(collatedStatsTag, worker, "worker") createSummary(buildElement(collatedStatsTag, jobs, "jobs"), getattr(stats, , []), "worker", fn4) jobNames = set() for job in jobs: jobNames.add(job.class_name) jobTypesTag = Expando() collatedStatsTag.job_types = jobTypesTag for jobName in jobNames: jobTypes = [ job for job in jobs if job.class_name == jobName ] buildElement(jobTypesTag, jobTypes, jobName) collatedStatsTag.name = "collatedStatsTag" return collatedStatsTag
Collate the stats and report
12,241
def rhymes(word):
    phones = phones_for_word(word)
    combined_rhymes = []
    if phones:
        for element in phones:
            combined_rhymes.append([w for w in rhyme_lookup.get(rhyming_part(
                element), []) if w != word])
        combined_rhymes = list(chain.from_iterable(combined_rhymes))
        unique_combined_rhymes = sorted(set(combined_rhymes))
        return unique_combined_rhymes
    else:
        return []
Get words rhyming with a given word.

This function may return an empty list if no rhyming words are found in the dictionary, or if the word you pass to the function is itself not found in the dictionary.

.. doctest::

    >>> import pronouncing
    >>> pronouncing.rhymes("conditioner")
    ['commissioner', 'parishioner', 'petitioner', 'practitioner']

:param word: a word
:returns: a list of rhyming words
12,242
def fromtimestamp(cls, ts, tzi=None):
    if tzi is None:
        tzi = MinutesFromUTC(cls.get_local_utcoffset())
    return cls(datetime.fromtimestamp(ts, tzi))
Factory method that returns a new :class:`~pywbem.CIMDateTime` object from a POSIX timestamp value and optional timezone information. A POSIX timestamp value is the number of seconds since "the epoch", i.e. 1970-01-01 00:00:00 UTC. Thus, a POSIX timestamp value is unambiguous w.r.t. the timezone, but it is not timezone-aware. The optional timezone information is used to convert the CIM datetime value into the desired timezone. That does not change the point in time that is represented by the value, but it changes the value of the ``hhmmss`` components of the CIM datetime value to compensate for changes in the timezone offset component. Parameters: ts (:term:`integer`): POSIX timestamp value. tzi (:class:`~pywbem.MinutesFromUTC`): Timezone information. `None` means that the current local timezone is used. Returns: A new :class:`~pywbem.CIMDateTime` object representing the specified point in time.
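A short usage sketch, assuming `CIMDateTime` and `MinutesFromUTC` are importable from pywbem as the docstring indicates; the chosen offset is arbitrary:

import time
from pywbem import CIMDateTime, MinutesFromUTC

# interpret the current POSIX timestamp in a UTC+02:00 timezone;
# omitting tzi would use the local timezone offset instead
dt = CIMDateTime.fromtimestamp(time.time(), MinutesFromUTC(120))
print(dt)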
12,243
def update(self, track=values.unset, publisher=values.unset, kind=values.unset, status=values.unset): data = values.of({: track, : publisher, : kind, : status, }) payload = self._version.update( , self._uri, data=data, ) return SubscribedTrackInstance( self._version, payload, room_sid=self._solution[], subscriber_sid=self._solution[], )
Update the SubscribedTrackInstance :param unicode track: The track :param unicode publisher: The publisher :param SubscribedTrackInstance.Kind kind: The kind :param SubscribedTrackInstance.Status status: The status :returns: Updated SubscribedTrackInstance :rtype: twilio.rest.video.v1.room.room_participant.room_participant_subscribed_track.SubscribedTrackInstance
12,244
def to_array(self): array = super(StickerMessage, self).to_array() if isinstance(self.sticker, InputFile): array[] = self.sticker.to_array() elif isinstance(self.sticker, str): array[] = u(self.sticker) else: raise TypeError() if self.receiver is not None: if isinstance(self.receiver, None): array[] = None(self.receiver) array[] = u(self.receiver) elif isinstance(self.receiver, int): array[] = int(self.receiver) raise TypeError() if self.reply_id is not None: if isinstance(self.reply_id, DEFAULT_MESSAGE_ID): array[] = DEFAULT_MESSAGE_ID(self.reply_id) array[] = int(self.reply_id) raise TypeError() if self.disable_notification is not None: array[] = bool(self.disable_notification) if self.reply_markup is not None: if isinstance(self.reply_markup, InlineKeyboardMarkup): array[] = self.reply_markup.to_array() elif isinstance(self.reply_markup, ReplyKeyboardMarkup): array[] = self.reply_markup.to_array() elif isinstance(self.reply_markup, ReplyKeyboardRemove): array[] = self.reply_markup.to_array() elif isinstance(self.reply_markup, ForceReply): array[] = self.reply_markup.to_array() else: raise TypeError() return array
Serializes this StickerMessage to a dictionary. :return: dictionary representation of this object. :rtype: dict
12,245
def installedRequirements(self, target):
    myDepends = dependentsOf(self.__class__)
    for dc in self.store.query(_DependencyConnector,
                               _DependencyConnector.target == target):
        if dc.installee.__class__ in myDepends:
            yield dc.installee
Return an iterable of things installed on the target that this item requires.
12,246
def minimum_needs_extractor(impact_report, component_metadata): context = {} extra_args = component_metadata.extra_args analysis_layer = impact_report.analysis analysis_keywords = analysis_layer.keywords[] use_rounding = impact_report.impact_function.use_rounding header = resolve_from_dictionary(extra_args, ) context[] = header try: displaced_field_name = analysis_keywords[displaced_field[]] total_displaced = value_from_field_name( displaced_field_name, analysis_layer) if total_displaced == 0: zero_displaced_message = resolve_from_dictionary( extra_args, ) context[] = { : True, : zero_displaced_message } return context except KeyError: pass have_minimum_needs_field = False for field_key in analysis_keywords: if field_key.startswith(minimum_needs_namespace): have_minimum_needs_field = True break if not have_minimum_needs_field: return context frequencies = {} for field in (minimum_needs_fields + additional_minimum_needs): need_parameter = field.get() if isinstance(need_parameter, ResourceParameter): frequency = need_parameter.frequency else: frequency = field.get() if frequency: if frequency not in frequencies: frequencies[frequency] = [field] else: frequencies[frequency].append(field) needs = [] analysis_feature = next(analysis_layer.getFeatures()) header_frequency_format = resolve_from_dictionary( extra_args, ) total_header = resolve_from_dictionary(extra_args, ) need_header_format = resolve_from_dictionary( extra_args, ) for key, frequency in list(frequencies.items()): group = { : header_frequency_format.format(frequency=tr(key)), : total_header, : [] } for field in frequency: field_idx = analysis_layer.fields( ).lookupField(field[]) if field_idx == -1: return context
Extracting minimum needs of the impact layer. :param impact_report: the impact report that acts as a proxy to fetch all the data that extractor needed :type impact_report: safe.report.impact_report.ImpactReport :param component_metadata: the component metadata. Used to obtain information about the component we want to render :type component_metadata: safe.report.report_metadata. ReportComponentsMetadata :return: context for rendering phase :rtype: dict .. versionadded:: 4.0
12,247
def _readLoop(self):
    try:
        readTermSeq = list(self.RX_EOL_SEQ)
        readTermLen = len(readTermSeq)
        rxBuffer = []
        while self.alive:
            data = self.serial.read(1)
            if data != '':  # comparison literal stripped in source; "nothing read" sentinel assumed
                rxBuffer.append(data)
                if rxBuffer[-readTermLen:] == readTermSeq:
                    line = ''.join(rxBuffer[:-readTermLen])  # join separator stripped in source; empty string assumed
                    rxBuffer = []
                    if len(line) > 0:
                        self._handleLineRead(line)
                elif self._expectResponseTermSeq:
                    if rxBuffer[-len(self._expectResponseTermSeq):] == self._expectResponseTermSeq:
                        line = ''.join(rxBuffer)
                        rxBuffer = []
                        self._handleLineRead(line, checkForResponseTerm=False)
    except serial.SerialException as e:
        self.alive = False
        try:
            self.serial.close()
        except Exception:
            pass
        self.fatalErrorCallback(e)
Read thread main loop Reads lines from the connected device
12,248
def _standardize_data(
    model: pd.DataFrame,
    data: pd.DataFrame,
    batch_key: str,
) -> Tuple[pd.DataFrame, pd.DataFrame, np.ndarray, np.ndarray]:
    batch_items = model.groupby(batch_key).groups.items()
    batch_levels, batch_info = zip(*batch_items)
    n_batch = len(batch_info)
    n_batches = np.array([len(v) for v in batch_info])
    n_array = float(sum(n_batches))
    design = _design_matrix(model, batch_key, batch_levels)
    B_hat = np.dot(np.dot(la.inv(np.dot(design.T, design)), design.T), data.T)
    grand_mean = np.dot((n_batches / n_array).T, B_hat[:n_batch, :])
    var_pooled = (data - np.dot(design, B_hat).T)**2
    var_pooled = np.dot(var_pooled, np.ones((int(n_array), 1)) / int(n_array))
    if np.sum(var_pooled == 0) > 0:
        # the warning text was stripped in the source; it reported how many
        # genes have zero pooled variance
        print('{} genes have zero variance.'.format(np.sum(var_pooled == 0)))
    stand_mean = np.dot(grand_mean.T.reshape((len(grand_mean), 1)),
                        np.ones((1, int(n_array))))
    tmp = np.array(design.copy())
    tmp[:, :n_batch] = 0
    stand_mean += np.dot(tmp, B_hat).T
    s_data = np.where(var_pooled == 0, 0, (
        (data - stand_mean) /
        np.dot(np.sqrt(var_pooled), np.ones((1, int(n_array))))
    ))
    s_data = pd.DataFrame(s_data, index=data.index, columns=data.columns)
    return s_data, design, var_pooled, stand_mean
Standardizes the data per gene.

The aim here is to make mean and variance be comparable across batches.

Parameters
----------
model
    Contains the batch annotation
data
    Contains the Data
batch_key
    Name of the batch column in the model matrix

Returns
-------
s_data : pandas.DataFrame
    Standardized Data
design : pandas.DataFrame
    Batch assignment as one-hot encodings
var_pooled : numpy.ndarray
    Pooled variance per gene
stand_mean : numpy.ndarray
    Gene-wise mean
12,249
def plot_fit(self, **kwargs): import matplotlib.pyplot as plt import seaborn as sns figsize = kwargs.get(,(10,7)) if self.latent_variables.estimated is False: raise Exception("No latent variables estimated!") else: date_index = self.index.copy() mu, Y, scores, coefficients = self._model(self.latent_variables.get_z_values()) if self.model_name2 == "Exponential": values_to_plot = 1.0/self.link(mu) elif self.model_name2 == "Skewt": t_params = self.transform_z() model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_params) m1 = (np.sqrt(model_shape)*sp.gamma((model_shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(model_shape/2.0)) additional_loc = (model_skewness - (1.0/model_skewness))*model_scale*m1 values_to_plot = mu + additional_loc else: values_to_plot = self.link(mu) plt.figure(figsize=figsize) plt.subplot(len(self.X_names)+1, 1, 1) plt.title(self.y_name + " Filtered") plt.plot(date_index,Y,label=) plt.plot(date_index,values_to_plot,label=,c=) plt.legend(loc=2) for coef in range(0,len(self.X_names)): plt.subplot(len(self.X_names)+1, 1, 2+coef) plt.title("Beta " + self.X_names[coef]) plt.plot(date_index,coefficients[coef,0:-1],label=) plt.legend(loc=2) plt.show()
Plots the fit of the model Notes ---------- Intervals are bootstrapped as follows: take the filtered values from the algorithm (thetas). Use these thetas to generate a pseudo data stream from the measurement density. Use the GAS algorithm and estimated latent variables to filter the pseudo data. Repeat this N times. Returns ---------- None (plots data and the fit)
12,250
def partial_to_complete_sha_hex(self, partial_hexsha):
    try:
        hexsha, typename, size = self._git.get_object_header(partial_hexsha)
        return hex_to_bin(hexsha)
    except (GitCommandError, ValueError):
        raise BadObject(partial_hexsha)
:return: Full binary 20 byte sha from the given partial hexsha :raise AmbiguousObjectName: :raise BadObject: :note: currently we only raise BadObject as git does not communicate AmbiguousObjects separately
12,251
def clean(self, point_merging=True, merge_tol=None, lines_to_points=True,
          polys_to_lines=True, strips_to_polys=True, inplace=False):
    clean = vtk.vtkCleanPolyData()
    clean.SetConvertLinesToPoints(lines_to_points)
    clean.SetConvertPolysToLines(polys_to_lines)
    clean.SetConvertStripsToPolys(strips_to_polys)
    if merge_tol:
        clean.ToleranceIsAbsoluteOn()
        clean.SetAbsoluteTolerance(merge_tol)
    clean.SetInputData(self)
    clean.Update()
    output = _get_output(clean)
    if output.n_points < 1:
        raise AssertionError('Cleaning removed all points')  # message stripped in source; reconstructed from context
    if inplace:
        self.overwrite(output)
    else:
        return output
Cleans mesh by merging duplicate points, remove unused points, and/or remove degenerate cells. Parameters ---------- point_merging : bool, optional Enables point merging. On by default. merge_tol : float, optional Set merging tolarance. When enabled merging is set to absolute distance lines_to_points : bool, optional Turn on/off conversion of degenerate lines to points. Enabled by default. polys_to_lines : bool, optional Turn on/off conversion of degenerate polys to lines. Enabled by default. strips_to_polys : bool, optional Turn on/off conversion of degenerate strips to polys. inplace : bool, optional Updates mesh in-place while returning nothing. Default True. Returns ------- mesh : vtki.PolyData Cleaned mesh. None when inplace=True
12,252
def QA_util_sql_async_mongo_setting(uri='mongodb://localhost:27017/quantaxis'):
    # default URI recovered from the docstring below
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    return AsyncIOMotorClient(uri, io_loop=loop)
Asynchronous MongoDB client setup (async mongo example).

Keyword Arguments:
    uri {str} -- [description] (default: {'mongodb://localhost:27017/quantaxis'})

Returns:
    [type] -- [description]
12,253
def sg_init(sess):
    sess.run(tf.group(tf.global_variables_initializer(),
                      tf.local_variables_initializer()))
Initializes session variables.

Args:
    sess: Session to initialize.
12,254
def import_ohm(filename, verbose=False, reciprocals=False): if verbose: print(("Reading in %s... \n" % filename)) file = open(filename) eleccount = int(file.readline().split(" elecs_str = file.readline().split(" elecs_dim = len(elecs_str.split()) elecs_ix = elecs_str.split() elecs = np.zeros((eleccount, elecs_dim), ) for i in range(eleccount): line = file.readline().split(" elecs[i] = line.rsplit() datacount = int(file.readline().split(" data_str = file.readline().split(" data_dim = len(data_str.split()) data_ix = data_str.split() _string_ = % (eleccount, elecs_dim, elecs_str, datacount, data_str) data = np.zeros((datacount, data_dim), ) for i in range(datacount): line = file.readline() data[i] = line.rsplit() file.close() data = pd.DataFrame(data, columns=data_ix) data_reda = data.rename( index=str, columns={ : , } ) if ( not in data_reda.keys()) and \ ( in data_reda.keys() and in data_reda.keys()): data_reda[] = data_reda[] / data_reda[] print( "Calculating resistance from apparent resistivity and " "geometric factors. (r = rhoa_ / k)") elecs = pd.DataFrame(elecs, columns=elecs_ix) elecs.columns = elecs.columns.str.upper() if type(reciprocals) == int: print() data_reda[[, , , ]] = reciprocals + 1 - data_reda[ [, , , ]] if verbose: print((_string_)) return data_reda, elecs, None
Construct pandas data frame from BERT`s unified data format (.ohm). Parameters ---------- filename : string File path to .ohm file verbose : bool, optional Enables extended debug output reciprocals : int, optional if provided, then assume that this is a reciprocal measurement where only the electrode cables were switched. The provided number N is treated as the maximum electrode number, and denotations are renamed according to the equation :math:`X_n = N - (X_a - 1)` Returns ------- data : :class:`pandas.DataFrame` The measurement data elecs : :class:`pandas.DataFrame` Electrode positions (columns: X, Y, Z) topography : None No topography information is provided at the moment
12,255
def get_single_review_comments(self, id):
    assert isinstance(id, (int, long)), id
    return github.PaginatedList.PaginatedList(
        github.PullRequestComment.PullRequestComment,
        self._requester,
        self.url + "/reviews/" + str(id) + "/comments",
        None
    )
:calls: `GET /repos/:owner/:repo/pulls/:number/review/:id/comments <https://developer.github.com/v3/pulls/reviews/>`_ :param id: integer :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
12,256
def kill(self):
    try:
        logger.info('')  # log message stripped in source
        self.process.terminate()
        time_waited_seconds = 0
        while self.process.poll() is None and time_waited_seconds < CONSTANTS.SECONDS_TO_KILL_AFTER_SIGTERM:
            time.sleep(0.5)
            time_waited_seconds += 0.5
        if self.process.poll() is None:
            self.process.kill()
            logger.warning('Process did not exit within %s seconds of SIGTERM, sending SIGKILL',
                           CONSTANTS.SECONDS_TO_KILL_AFTER_SIGTERM)  # message stripped in source; reconstructed from context
    except OSError, e:  # Python 2 syntax kept as in the original
        logger.error('Error while killing process: %s', e)  # message stripped in source; reconstructed from context
If run_step needs to be killed, this method will be called :return: None
12,257
def lookup_by_number(errno):
    for key, val in globals().items():
        if errno == val:
            print(key)
Used for development only
12,258
def sum_from(zero: T1 = None) -> Callable[[ActualIterable[T1]], T1]:
    def _(collection: Iterable[T1]) -> T1:
        if zero is None:
            collection = iter(collection)
            _zero = next(collection)
            return builtins.sum(collection, _zero)
        return builtins.sum(collection, zero)
    return _
>>> from Redy.Collections import Traversal, Flow
>>> lst: Iterable[int] = [1, 2, 3]
>>> x = Flow(lst)[Traversal.sum_from(0)].unbox
>>> assert x is 6
>>> x = Flow(lst)[Traversal.sum_from()].unbox
>>> assert x is 6
12,259
def parse_identifier(source, start, throw=True):
    start = pass_white(source, start)
    end = start
    # the SyntaxError message strings were stripped in the source and are
    # reconstructed from context below
    if not end < len(source):
        if throw:
            raise SyntaxError('Unexpected end of source, expected identifier')
        return None
    if source[end] not in IDENTIFIER_START:
        if throw:
            raise SyntaxError('Invalid identifier start: %s' % source[end])
        return None
    end += 1
    while end < len(source) and source[end] in IDENTIFIER_PART:
        end += 1
    if not is_valid_lval(source[start:end]):
        if throw:
            raise SyntaxError('Invalid identifier name: %s' % source[start:end])
        return None
    return source[start:end], end
passes white space from start and returns first identifier, if identifier invalid and throw raises SyntaxError otherwise returns None
12,260
def copy_file_to_remote(self, local_path, remote_path):
    sftp_client = self.transport.open_sftp_client()
    # the log message templates and their mapping keys were stripped in the
    # source; minimal reconstructions are used here
    LOG.debug('Copying %(local)s to %(remote)s'
              % {'local': local_path, 'remote': remote_path})
    try:
        sftp_client.put(local_path, remote_path)
    except Exception as ex:
        LOG.error('SFTP copy failed: %s' % six.text_type(ex))
        raise SFtpExecutionError(err=ex)
scp the local file to remote folder. :param local_path: local path :param remote_path: remote path
12,261
def add_handler(self, name=, level=, formatter=, **kwargs): if name == and not in kwargs: kwargs.update({: self.logfilename}) if name == and not in kwargs: kwargs.update({: StringIO.StringIO()}) handler = types[name](**kwargs) self.add_handler_raw(handler, name, level=level, formatter=formatter)
Add another handler to the logging system if not present already. Available handlers are currently: ['console-bw', 'console-color', 'rotating-log']
12,262
def splitext(self):
    filename, ext = self.module.splitext(self)
    return self._next_class(filename), ext
p.splitext() -> Return ``(p.stripext(), p.ext)``. Split the filename extension from this path and return the two parts. Either part may be empty. The extension is everything from ``'.'`` to the end of the last path segment. This has the property that if ``(a, b) == p.splitext()``, then ``a + b == p``. .. seealso:: :func:`os.path.splitext`
12,263
def _compileRegExp(string, insensitive, minimal):
    flags = 0
    if insensitive:
        flags = re.IGNORECASE
    # the replacement pairs were stripped in the source; they translated a few
    # non-Python regex constructs before compiling, so these are placeholders
    string = string.replace('', '')
    string = string.replace('', '')
    string = string.replace('', '')
    try:
        return re.compile(string, flags)
    except (re.error, AssertionError) as ex:
        # a second placeholder is added so both arguments are actually formatted
        _logger.warning("Invalid pattern '%s': %s", string, str(ex))
        return None
Compile regular expression. Python function, used by C code NOTE minimal flag is not supported here, but supported on PCRE
12,264
def convert2geojson(jsonfile, src_srs, dst_srs, src_file): if os.path.exists(jsonfile): os.remove(jsonfile) if sysstr == : exepath = % sys.exec_prefix else: exepath = FileClass.get_executable_fullpath() s = % ( exepath, src_srs, dst_srs, jsonfile, src_file) UtilClass.run_command(s)
convert shapefile to geojson file
12,265
def get_token(self):
    token = None
    if self.token_path.exists():
        with self.token_path.open() as token_file:
            token = self.token_constructor(self.serializer.load(token_file))
    self.token = token
    return token
Retrieves the token from the File System :return dict or None: The token if exists, None otherwise
12,266
def search(): pattern = flask.request.args.get(, "*").strip().lower() collections = [c["name"].lower() for c in current_app.kwdb.get_collections()] words = [] filters = [] if pattern.startswith("name:"): pattern = pattern[5:].strip() mode = "name" else: mode="both" for word in pattern.split(" "): if word.lower().startswith("in:"): filters.extend([name for name in collections if name.startswith(word[3:])]) else: words.append(word) pattern = " ".join(words) keywords = [] for keyword in current_app.kwdb.search(pattern, mode): kw = list(keyword) collection_id = kw[0] collection_name = kw[1].lower() if len(filters) == 0 or collection_name in filters: url = flask.url_for(".doc_for_library", collection_id=kw[0], keyword=kw[2]) row_id = "row-%s.%s" % (keyword[1].lower(), keyword[2].lower().replace(" ","-")) keywords.append({"collection_id": keyword[0], "collection_name": keyword[1], "name": keyword[2], "synopsis": keyword[3], "version": __version__, "url": url, "row_id": row_id }) keywords.sort(key=lambda kw: kw["name"]) return flask.render_template("search.html", data={"keywords": keywords, "version": __version__, "pattern": pattern })
Show all keywords that match a pattern
12,267
def compute_stats2(arrayNR, stats, weights):
    newshape = list(arrayNR.shape)
    if newshape[1] != len(weights):
        # the error message template was stripped in the source; reconstructed from context
        raise ValueError('Got %d weights but %d realizations'
                         % (len(weights), newshape[1]))
    newshape[1] = len(stats)
    newarray = numpy.zeros(newshape, arrayNR.dtype)
    data = [arrayNR[:, i] for i in range(len(weights))]
    for i, func in enumerate(stats):
        newarray[:, i] = apply_stat(func, data, weights)
    return newarray
:param arrayNR: an array of (N, R) elements :param stats: a sequence of S statistic functions :param weights: a list of R weights :returns: an array of (N, S) elements
12,268
def generate_login(self, min_length=6, max_length=10, digits=True):
    chars = string.ascii_lowercase
    if digits:
        chars += string.digits
    length = random.randint(min_length, max_length)
    return ''.join(random.choice(chars) for x in range(length))  # join separator stripped in source; empty string assumed
Generate string for email address login with defined length and alphabet. :param min_length: (optional) min login length. Default value is ``6``. :param max_length: (optional) max login length. Default value is ``10``. :param digits: (optional) use digits in login generation. Default value is ``True``.
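A hypothetical call sketch; `factory` stands in for whatever object exposes this method in the original library:

# an 8-to-12 character login made only of lowercase letters
login = factory.generate_login(min_length=8, max_length=12, digits=False)
print(login)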
12,269
def delete_local_docker_cache(docker_tag): history_cmd = [, , , docker_tag] try: image_ids_b = subprocess.check_output(history_cmd) image_ids_str = image_ids_b.decode().strip() layer_ids = [id.strip() for id in image_ids_str.split() if id != ] delete_cmd = [, , , ] delete_cmd.extend(layer_ids) subprocess.check_call(delete_cmd) except subprocess.CalledProcessError as error: logging.debug(, error)
Delete the local docker cache for the entire docker image chain :param docker_tag: Docker tag :return: None
12,270
def move(self, target): if isinstance(target, Folder): target_id = target.object_id elif isinstance(target, Drive): root_folder = target.get_root_folder() if not root_folder: return False target_id = root_folder.object_id elif isinstance(target, str): target_id = target else: raise ValueError() if not self.object_id or not target_id: raise ValueError( ) if target_id == : raise ValueError("When moving, target id canrootitemparentReferenceid': target_id}} response = self.con.patch(url, data=data) if not response: return False self.parent_id = target_id return True
Moves this DriveItem to another Folder. Can't move between different Drives. :param target: a Folder, Drive item or Item Id string. If it's a drive the item will be moved to the root folder. :type target: drive.Folder or DriveItem or str :return: Success / Failure :rtype: bool
12,271
def _ring_2d(m, n):
    if m == 1:
        return [(0, i) for i in range(n)]
    if n == 1:
        return [(i, 0) for i in range(m)]
    if m % 2 != 0:
        tf.logging.warning("Odd dimension")
        return [(i % m, i // m) for i in range(n * m)]
    ret = [(0, 0)]
    for i in range(m // 2):
        for j in range(1, n):
            ret.append((2 * i, j))
        for j in range(n - 1, 0, -1):
            ret.append((2 * i + 1, j))
    for i in range(m - 1, 0, -1):
        ret.append((i, 0))
    return ret
Ring-order of a mxn mesh.

Args:
    m: an integer
    n: an integer

Returns:
    a list of mxn pairs
12,272
def vol_tetra(vt1, vt2, vt3, vt4):
    vol_tetra = np.abs(np.dot((vt1 - vt4),
                              np.cross((vt2 - vt4), (vt3 - vt4)))) / 6
    return vol_tetra
Calculate the volume of a tetrahedron, given the four vertices of vt1, vt2, vt3 and vt4.

Args:
    vt1 (array-like): coordinates of vertex 1.
    vt2 (array-like): coordinates of vertex 2.
    vt3 (array-like): coordinates of vertex 3.
    vt4 (array-like): coordinates of vertex 4.

Returns:
    (float): volume of the tetrahedron.
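As a quick check of the scalar-triple-product formula above, the tetrahedron spanned by the origin and the three unit vectors has volume 1/6:

import numpy as np

vt1 = np.array([0.0, 0.0, 0.0])
vt2 = np.array([1.0, 0.0, 0.0])
vt3 = np.array([0.0, 1.0, 0.0])
vt4 = np.array([0.0, 0.0, 1.0])
print(vol_tetra(vt1, vt2, vt3, vt4))  # 0.16666... == 1/6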
12,273
def get_bgp_config(self, group="", neighbor=""): bgp_config = {} def build_prefix_limit(af_table, limit, prefix_percent, prefix_timeout): prefix_limit = {} inet = False inet6 = False preifx_type = "inet" if isinstance(af_table, list): af_table = str(af_table) if "ipv4" in af_table.lower(): inet = True if "ipv6" in af_table.lower(): inet6 = True preifx_type = "inet6" if len(af_table.split()) == 2: safi = "unicast" else: safi = af_table.split()[-1] if inet or inet6: prefix_limit = { preifx_type: { safi: { "limit": limit, "teardown": { "threshold": prefix_percent, "timeout": prefix_timeout, }, } } } return prefix_limit cfg = self.get_config(retrieve="running") cfg = cfg["running"].splitlines() bgp_config_text = napalm.base.helpers.cisco_conf_parse_objects( "router bgp", cfg ) bgp_asn = napalm.base.helpers.regex_find_txt( r"router bgp (\d+)", bgp_config_text, default=0 ) all_neighbors = set() all_groups = set() bgp_group_neighbors = {} all_groups.add("_") for line in bgp_config_text: if " neighbor " in line: if re.search(IP_ADDR_REGEX, line) is not None: all_neighbors.add(re.search(IP_ADDR_REGEX, line).group()) elif re.search(IPV6_ADDR_REGEX_2, line) is not None: all_neighbors.add(re.search(IPV6_ADDR_REGEX_2, line).group()) else: bgp_group = re.search(r" neighbor [^\s]+", line).group() bgp_group = bgp_group.split()[1] all_groups.add(bgp_group) for bgp_neighbor in all_neighbors: if neighbor: if bgp_neighbor != neighbor: continue afi_list = napalm.base.helpers.cisco_conf_parse_parents( r"\s+address-family.*", bgp_neighbor, bgp_config_text ) afi = afi_list[0] if "vrf" in str(afi_list): continue else: neighbor_config = napalm.base.helpers.cisco_conf_parse_objects( bgp_neighbor, bgp_config_text ) group_name = napalm.base.helpers.regex_find_txt( " peer-group ([^]+)\]+\]+)\'", neighbor_config ) local_as = napalm.base.helpers.regex_find_txt( r"local-as (\d+)", neighbor_config, default=0 ) import_policy = napalm.base.helpers.regex_find_txt( r"route-map ([^\s]+) in", neighbor_config ) export_policy = napalm.base.helpers.regex_find_txt( r"route-map ([^\s]+) out", neighbor_config ) local_address = napalm.base.helpers.regex_find_txt( r" update-source (\w+)", neighbor_config ) multihop_ttl = napalm.base.helpers.regex_find_txt( r"ebgp-multihop {\d+}", neighbor_config, default=0 ) peer_as = napalm.base.helpers.regex_find_txt( r" remote-as (\d+)", neighbor_config, default=0 ) remove_private_as = bool( napalm.base.helpers.regex_find_txt( r"remove-private-as", neighbor_config ) ) prefix_limit = napalm.base.helpers.regex_find_txt( r"maximum-prefix (\d+) \d+ \w+ \d+", neighbor_config, default=0 ) prefix_percent = napalm.base.helpers.regex_find_txt( r"maximum-prefix \d+ (\d+) \w+ \d+", neighbor_config, default=0 ) prefix_timeout = napalm.base.helpers.regex_find_txt( r"maximum-prefix \d+ \d+ \w+ (\d+)", neighbor_config, default=0 ) bgp_type = "external" if local_as: if local_as == peer_as: bgp_type = "internal" elif bgp_asn == peer_as: bgp_type = "internal" bgp_config[group_name] = { "apply_groups": [], "description": description, "local_as": local_as, "type": bgp_type, "import_policy": import_policy, "export_policy": export_policy, "local_address": local_address, "multipath": multipath, "multihop_ttl": multihop_ttl, "remote_as": peer_as, "remove_private_as": remove_private_as, "prefix_limit": build_prefix_limit( afi, prefix_limit, prefix_percent, prefix_timeout ), "neighbors": bgp_group_neighbors.get(group_name, {}), } return bgp_config
Parse BGP config params into a dict :param group='': :param neighbor='':
12,274
def get_core(self):
    if self.maplesat and self.status == False:
        return pysolvers.maplesat_core(self.maplesat)
Get an unsatisfiable core if the formula was previously unsatisfied.
12,275
def _add_element(self, element, parent_node): if element.tag == : element_node_id = element.attrib[]++element.attrib[] node_layers = {self.ns, self.ns+, self.ns++element.attrib[]} elif element.tag == : element_node_id = element.attrib[]+ node_layers = {self.ns, self.ns+} elif element.tag == : element_node_id = node_layers = {self.ns} else: element_node_id = element.getparent().attrib[]++element.tag node_layers = {self.ns, self.ns+} self.add_node(element_node_id, layers=node_layers) self.add_edge(parent_node, element_node_id, layers={self.ns}, edge_type=EdgeTypes.dominance_relation) if element.text: if self.tokenize: for token in element.text.split(): self._add_token(token, element_node_id) else: element_text = sanitize_string(element.text) self.node[element_node_id].update( {: u"{0}: {1}...".format(element_node_id, element_text[:20])}) for child_element in element.iterchildren(): self._add_element(child_element, element_node_id) if element.tail: if self.tokenize: for token in element.tail.split(): self._add_token(token, parent_node) else: tail_text = sanitize_string(element.tail) self.node[parent_node].update( {: u"{0}: {1}...".format(parent_node, tail_text[:20])})
add an element (i.e. a unit/connective/discourse or modifier) to the docgraph.
12,276
def loadRecords(self, records): self.setChildIndicatorPolicy(self.DontShowIndicatorWhenChildless) self._loaded = True if records is None: return if self._nextLevels and RecordSet.typecheck(records): level = self._nextLevels[0] sublevels = self._nextLevels[1:] records = records.grouped(level) elif RecordSet.typecheck(records): sublevels = None records = records.all() else: sublevels = None if type(records) == dict: try: generator = self.treeWidget().createGroupItem cls = None except AttributeError: generator = None cls = type(self) for subgroup, subrecords in records.items(): if generator: generator(subgroup, subrecords, sublevels, self) elif cls: cls(self, subgroup, subrecords, sublevels) else: try: generator = self.treeWidget().createRecordItem cls = None except AttributeError: generator = None cls = XOrbRecordItem cls = self.treeWidget().createRecordItem for record in records: if generator: generator(record, self) elif cls: cls(self, record)
Loads the given records as children of this item. :param records | [<orb.Table>, ..] || {<str> sub: <variant>, .. }
12,277
def get_listing(path): if path != ".": listing = sorted([".."] + os.listdir(path)) else: listing = sorted(os.listdir(path)) return listing
Returns the list of files and directories in a path. Prepends ".." (a parent-directory link) if path is not the current dir.
12,278
def mins(self): return np.array([self.x_min, self.y_min, self.z_min])
Returns the minimum values of x, y, z as a numpy array.
12,279
def find_vulnerabilities( cfg_list, blackbox_mapping_file, sources_and_sinks_file, interactive=False, nosec_lines=defaultdict(set) ): vulnerabilities = list() definitions = parse(sources_and_sinks_file) with open(blackbox_mapping_file) as infile: blackbox_mapping = json.load(infile) for cfg in cfg_list: find_vulnerabilities_in_cfg( cfg, definitions, Lattice(cfg.nodes), blackbox_mapping, vulnerabilities, interactive, nosec_lines ) if interactive: with open(blackbox_mapping_file, 'w') as outfile: json.dump(blackbox_mapping, outfile, indent=4) return vulnerabilities
Find vulnerabilities in a list of CFGs from a trigger_word_file. Args: cfg_list(list[CFG]): the list of CFGs to scan. blackbox_mapping_file(str) sources_and_sinks_file(str) interactive(bool): determines if we ask the user about blackbox functions not in the mapping file. Returns: A list of vulnerabilities.
12,280
def raw(self, from_, to, body): if isinstance(to, string_types): raise TypeError() return self._session.post(.format(self._url), json={ : from_, : to, : body, }).json()
Send a raw MIME message.
12,281
def absent(name, user=None, signal=None): ret = {: name, : {}, : False, : } if __opts__[]: running = __salt__[](name, user=user) ret[] = None if running: ret[] = ( ).format(len(running)) else: ret[] = return ret if signal: status = __salt__[](name, user=user, signal=signal, full=True) else: status = __salt__[](name, user=user, full=True) ret[] = True if status: ret[] = .format(len(status[])) ret[] = status else: ret[] = return ret
Ensures that the named command is not running. name The pattern to match. user The user to which the process belongs signal Signal to send to the process(es).
12,282
def multi_plot_time(DataArray, SubSampleN=1, units=, xlim=None, ylim=None, LabelArray=[], show_fig=True): unit_prefix = units[:-1] if LabelArray == []: LabelArray = ["DataSet {}".format(i) for i in _np.arange(0, len(DataArray), 1)] fig = _plt.figure(figsize=properties[]) ax = fig.add_subplot(111) for i, data in enumerate(DataArray): ax.plot(unit_conversion(data.time.get_array()[::SubSampleN], unit_prefix), data.voltage[::SubSampleN], alpha=0.8, label=LabelArray[i]) ax.set_xlabel("time (s)") if xlim != None: ax.set_xlim(xlim) if ylim != None: ax.set_ylim(ylim) ax.grid(which="major") legend = ax.legend(loc="best", frameon = 1) frame = legend.get_frame() frame.set_facecolor() frame.set_edgecolor() ax.set_ylabel("voltage (V)") if show_fig == True: _plt.show() return fig, ax
Plot the time trace for multiple data sets on the same axes. Parameters ---------- DataArray : array-like array of DataObject instances for which to plot the time traces SubSampleN : int, optional Number of intervals between points to remove (to sub-sample the data so that you effectively have a lower sample rate, making plotting easier and quicker) units : str, optional units of the time data; the prefix (everything before the trailing 's') is used for unit conversion of the time axis xlim : array-like, optional 2 element array specifying the lower and upper x limit for which to plot the time signal ylim : array-like, optional 2 element array specifying the lower and upper y limit LabelArray : array-like, optional array of labels for each data-set to be plotted show_fig : bool, optional If True runs plt.show() before returning the figure; if False it just returns the figure object (the default is True, it shows the figure) Returns ------- fig : matplotlib.figure.Figure object The figure object created ax : matplotlib.axes.Axes object The axes object created
12,283
def set_membership(self, membership): _c_leiden._MutableVertexPartition_set_membership(self._partition, list(membership)) self._update_internal_membership()
Set membership.
12,284
def infer_shape(self, *args, **kwargs): try: res = self._infer_shape_impl(False, *args, **kwargs) if res[1] is None: arg_shapes, _, _ = self._infer_shape_impl(True, *args, **kwargs) arg_names = self.list_arguments() unknowns = [] for name, shape in zip(arg_names, arg_shapes): if is_np_compat(): shape_is_none = not shape or -1 in shape else: shape_is_none = not shape or 0 in shape if shape_is_none: if len(unknowns) >= 10: unknowns.append('...') break unknowns.append('%s: %s' % (name, str(shape))) warnings.warn( "Cannot decide shape for the following arguments " + "(0s in shape means unknown dimensions). " + "Consider providing them as input:\n\t" + "\n\t".join(unknowns), stacklevel=2) return res except MXNetError: print("infer_shape error. Arguments:") for i, arg in enumerate(args): print(" #%d: %s" % (i, arg)) for k, v in kwargs.items(): print(" %s: %s" % (k, v)) raise
Infers the shapes of all arguments and all outputs given the known shapes of some arguments. This function takes the known shapes of some arguments in either positional way or keyword argument way as input. It returns a tuple of `None` values if there is not enough information to deduce the missing shapes. Example ------- >>> a = mx.sym.var('a') >>> b = mx.sym.var('b') >>> c = a + b >>> arg_shapes, out_shapes, aux_shapes = c.infer_shape(a=(3,3)) >>> arg_shapes [(3L, 3L), (3L, 3L)] >>> out_shapes [(3L, 3L)] >>> aux_shapes [] >>> c.infer_shape(a=(0,3)) # 0s in shape means unknown dimensions. So, returns None. (None, None, None) Inconsistencies in the known shapes will cause an error to be raised. See the following example: >>> data = mx.sym.Variable('data') >>> out = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=1000) >>> out = mx.sym.Activation(data=out, act_type='relu') >>> out = mx.sym.FullyConnected(data=out, name='fc2', num_hidden=10) >>> weight_shape= (1, 100) >>> data_shape = (100, 100) >>> out.infer_shape(data=data_shape, fc1_weight=weight_shape) Error in operator fc1: Shape inconsistent, Provided=(1,100), inferred shape=(1000,100) Parameters ---------- *args : Shape of arguments in a positional way. Unknown shape can be marked as None. **kwargs : Keyword arguments of the known shapes. Returns ------- arg_shapes : list of tuple or None List of argument shapes. The order is same as the order of list_arguments(). out_shapes : list of tuple or None List of output shapes. The order is same as the order of list_outputs(). aux_shapes : list of tuple or None List of auxiliary state shapes. The order is same as the order of list_auxiliary_states().
12,285
def get_below_threshold(umi_quals, quality_encoding, quality_filter_threshold): umi_quals = [x - RANGES[quality_encoding][0] for x in map(ord, umi_quals)] below_threshold = [x < quality_filter_threshold for x in umi_quals] return below_threshold
Test whether each UMI base quality is below the quality threshold.
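A self-contained sketch of the same ASCII-offset arithmetic, assuming the common Phred+33 ('phred33') encoding; the RANGES table from the source is not reproduced here.

PHRED33_OFFSET = 33   # assumption: Sanger / Illumina 1.8+ encoding

def below_threshold(quality_string, threshold):
    scores = [ord(ch) - PHRED33_OFFSET for ch in quality_string]
    return [score < threshold for score in scores]

print(below_threshold("II#!I", 20))   # [False, False, True, True, False]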
12,286
def apply_patch(self, patch): history_file = File(self.__history_file) patches_history = history_file.cache() and [line.strip() for line in history_file.content] or [] if patch.uid not in patches_history: LOGGER.debug("> Applying '{0}' patch!".format(patch.name)) if patch.apply(): history_file.content = ["{0}\n".format(patch.uid)] history_file.append() else: raise umbra.exceptions.PatchApplyError("{0} | '{1}' patch failed to apply!".format( self.__class__.__name__, patch.path)) else: LOGGER.debug("> '{0}' patch is already applied!".format(patch.name)) return True
Applies given patch. :param patch: Patch. :type patch: Patch :return: Method success. :rtype: bool
12,287
def log_state(self, state): results = [] for field_idx, field in enumerate(self.fields): parent, stat = None, state for f in field: parent, stat = stat, stat[f] results.append(stat) self.log(*results)
Gathers the stats listed in self.fields from the given state (e.g. self.trainer.stats) and passes them to self.log as positional arguments.
12,288
def sort_trigger_set(triggers, exclude_previous=True, say=None): if say is None: say = lambda x: x trigger_object_list = [] for index, trig in enumerate(triggers): if exclude_previous and trig[1]["previous"]: continue pattern = trig[0] match, weight = re.search(RE.weight, trig[0]), 0 if match: weight = int(match.group(1)) match = re.search(RE.inherit, pattern) if match: inherit = int(match.group(1)) say("\t\t\tTrigger belongs to a topic which inherits other topics: level=" + str(inherit)) triggers[index][0] = pattern = re.sub(RE.inherit, "", pattern) else: inherit = sys.maxsize trigger_object_list.append(TriggerObj(pattern, index, weight, inherit)) sorted_list = sorted(trigger_object_list, key=attrgetter(, , , , , , , , , )) return [triggers[item.index] for item in sorted_list]
Sort a group of triggers in optimal sorting order. The optimal sorting order is, briefly: * Atomic triggers (containing nothing but plain words and alternation groups) are on top, with triggers containing the most words coming first. Triggers with equal word counts are sorted by length, and then alphabetically if they have the same length. * Triggers containing optionals are sorted next, by word count like atomic triggers. * Triggers containing wildcards are next, with ``_`` (alphabetic) wildcards on top, then ``#`` (numeric) and finally ``*``. * At the bottom of the sorted list are triggers consisting of only a single wildcard, in the order: ``_``, ``#``, ``*``. Triggers that have ``{weight}`` tags are grouped together by weight value and sorted amongst themselves. Higher weighted groups are then ordered before lower weighted groups regardless of the normal sorting algorithm. Triggers that come from topics which inherit other topics are also sorted with higher priority than triggers from the inherited topics. Arguments: triggers ([]str): Array of triggers to sort. exclude_previous (bool): Create a sort buffer for 'previous' triggers. say (function): A reference to ``RiveScript._say()`` or provide your own function.
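A simplified, self-contained illustration of just the atomic-trigger portion of that ordering (most words first, then longest, then alphabetical); the wildcard, weight and inheritance handling above is not reproduced.

def sort_atomic(triggers):
    # more words first, then longer strings, then alphabetical
    return sorted(triggers, key=lambda t: (-len(t.split()), -len(t), t))

print(sort_atomic(["hello bot", "hi", "hello there bot", "hey"]))
# ['hello there bot', 'hello bot', 'hey', 'hi']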
12,289
def cycle_app(parser, cmd, args): parser.add_argument(, , type=int, default=4, help=) parser.add_argument(, type=int, help=) args = parser.parse_args(args) return cycle(args.length, args.width)
Generate a de Bruijn sequence of a given length.
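The cycle() helper it calls is not shown, so as an illustrative stand-in, here is a standard recursive de Bruijn construction over a chosen alphabet and subsequence width:

def de_bruijn(alphabet, n):
    """Return a de Bruijn sequence containing every length-n string over alphabet."""
    k = len(alphabet)
    a = [0] * (k * n)
    sequence = []

    def db(t, p):
        if t > n:
            if n % p == 0:
                sequence.extend(a[1:p + 1])
        else:
            a[t] = a[t - p]
            db(t + 1, p)
            for j in range(a[t - p] + 1, k):
                a[t] = j
                db(t + 1, t)

    db(1, 1)
    return "".join(alphabet[i] for i in sequence)

print(de_bruijn("ab", 3))   # 'aaababbb' -- every length-3 string over {a, b} appears cyclically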
12,290
def get_env_dirs(self): repo_dirs = next(os.walk(self.env_root))[1] if in repo_dirs: repo_dirs.remove() return repo_dirs
Return list of directories in env_root.
12,291
def build_pmid_exclusion_filter(pmids: Strings) -> EdgePredicate: if isinstance(pmids, str): @edge_predicate def pmid_exclusion_filter(data: EdgeData) -> bool: return has_pubmed(data) and data[CITATION][CITATION_REFERENCE] != pmids elif isinstance(pmids, Iterable): pmids = set(pmids) @edge_predicate def pmid_exclusion_filter(data: EdgeData) -> bool: return has_pubmed(data) and data[CITATION][CITATION_REFERENCE] not in pmids else: raise TypeError return pmid_exclusion_filter
Fail for edges with citations whose references are one of the given PubMed identifiers. :param pmids: A PubMed identifier or list of PubMed identifiers to filter against
12,292
def get_header(headers, name, default=None): name = name.lower() for header in headers: if header[0].lower() == name: return header[1] return default
Return the value of header *name*. The *headers* argument must be a list of ``(name, value)`` tuples. If the header is found its associated value is returned, otherwise *default* is returned. Header names are matched case insensitively.
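Quick usage example of the helper above, assuming it is in scope:

headers = [("Content-Type", "text/html"), ("Content-Length", "42")]
print(get_header(headers, "content-type"))            # 'text/html'
print(get_header(headers, "X-Missing", default=""))   # ''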
12,293
def get_room_history( self, room_id, oldest=None, latest=datetime.now(), inclusive=False, count=20, unreads=False, **kwargs ): return GetRoomHistory(settings=self.settings, **kwargs).call( room_id=room_id, oldest=oldest, latest=latest, inclusive=inclusive, count=count, unreads=unreads, **kwargs )
Get the message history of a specific channel/room. :param room_id: ID of the channel/room :param kwargs: :return:
12,294
def _pquery(scheduler, data, ndata, ndim, leafsize, x, nx, d, i, k, eps, p, dub, ierr): try: _data = shmem_as_nparray(data).reshape((ndata, ndim)) _x = shmem_as_nparray(x).reshape((nx, ndim)) _d = shmem_as_nparray(d).reshape((nx, k)) _i = shmem_as_nparray(i).reshape((nx, k)) kdtree = cKDTree(_data, leafsize=leafsize) for s in scheduler: d_out, i_out = kdtree.query(_x[s, :], k=k, eps=eps, p=p, distance_upper_bound=dub) m_d = d_out.shape[0] m_i = i_out.shape[0] _d[s, :], _i[s, :] = d_out.reshape(m_d, 1), i_out.reshape(m_i, 1) except: ierr.value += 1
Worker function that queries the k-d tree in parallel, over the chunks of query points handed out by the scheduler.
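A single-process sketch of the underlying SciPy call each worker performs, without the shared-memory plumbing; the array sizes are arbitrary.

import numpy as np
from scipy.spatial import cKDTree

data = np.random.rand(1000, 3)        # reference points
queries = np.random.rand(10, 3)       # query points
tree = cKDTree(data, leafsize=10)
d, i = tree.query(queries, k=4, eps=0, p=2)   # distances / indices of the 4 nearest neighbours
print(d.shape, i.shape)               # (10, 4) (10, 4)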
12,295
def validate(self, value): try: length = len(value) except TypeError: length = 0 if self.min_length is not None: min_length = self.min_length() if callable(self.min_length) else self.min_length if length < min_length: raise exceptions.ValidationError(self.min_err.format(min_length)) if self.max_length is not None: max_length = self.max_length() if callable(self.max_length) else self.max_length if length > max_length: raise exceptions.ValidationError(self.max_err.format(max_length))
Validate the length of a list. :param value: List of values. :raises: :class:`halogen.exception.ValidationError` exception when length of the list is less than minimum or greater than maximum.
12,296
def is_base64(string): return (not re.match(, string)) and \ (len(string) % 4 == 0) and \ re.match(, string)
Determines whether or not a string is likely to be base64 encoded binary nonsense
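An alternative, self-contained sketch that leans on the standard library instead of regexes; it is not the exact check the function above performs, just an equivalent-in-spirit heuristic.

import base64, binascii

def looks_like_base64(s):
    if len(s) % 4 != 0:
        return False
    try:
        base64.b64decode(s, validate=True)
        return True
    except (binascii.Error, ValueError):
        return False

print(looks_like_base64("aGVsbG8="))   # True
print(looks_like_base64("hello!"))     # False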
12,297
def efficiency(self): mywaveunits = self.waveunits.name self.convert() wave = self.wave thru = self.throughput self.convert(mywaveunits) ans = self.trapezoidIntegration(wave, thru/wave) return ans
Calculate :ref:`pysynphot-formula-qtlam`. Returns ------- ans : float Bandpass dimensionless efficiency.
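A toy sketch of the integral the method computes, the trapezoidal integration of T(lam)/lam over wavelength, using NumPy on a made-up Gaussian bandpass rather than a pysynphot object:

import numpy as np

wave = np.linspace(4000.0, 7000.0, 500)                  # wavelengths (toy values)
thru = np.exp(-0.5 * ((wave - 5500.0) / 400.0) ** 2)     # toy Gaussian throughput

efficiency = np.trapz(thru / wave, wave)                 # dimensionless efficiency
print(efficiency)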
12,298
def _save_trace(self): stack_trace = stack() try: self.trace = [] for frm in stack_trace[5:]: self.trace.insert(0, frm[1:]) finally: del stack_trace
Save current stack trace as formatted string.
12,299
async def fire(self, name, payload=None, *, dc=None, node=None, service=None, tag=None): params = { "dc": dc, "node": extract_pattern(node), "service": extract_pattern(service), "tag": extract_pattern(tag) } payload = encode_value(payload) if payload else None response = await self._api.put( "/v1/event/fire", name, data=payload, params=params, headers={"Content-Type": "application/octet-stream"}) result = format_event(response.body) return result
Fires a new event Parameters: name (str): Event name payload (Payload): Opaque data node (Filter): Regular expression to filter by node name service (Filter): Regular expression to filter by service tag (Filter): Regular expression to filter by service tags dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: Object: where value is event ID The return body is like:: { "ID": "b54fe110-7af5-cafc-d1fb-afc8ba432b1c", "Name": "deploy", "Payload": None, "NodeFilter": re.compile("node-\d+"), "ServiceFilter": "", "TagFilter": "", "Version": 1, "LTime": 0 } The **ID** field uniquely identifies the newly fired event.
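A hypothetical usage sketch; the client/endpoint object named events below and the way it is constructed are assumptions, only the fire() coroutine documented above is taken as given.

import asyncio
import re

async def deploy(events):
    result = await events.fire(
        "deploy",
        payload=b"release-1.2.3",
        node=re.compile(r"node-\d+"),   # only nodes matching this pattern receive it
    )
    print(result["ID"], result["Name"])

# asyncio.run(deploy(events))   # 'events' would be an instance of the endpoint above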