Dataset preview columns: Unnamed: 0 (int64, values 0 to 389k); code (string, lengths 26 to 79.6k); docstring (string, lengths 1 to 46.9k).
10,000
def register_job_from_link(self, link, key, **kwargs):
    # Keyword names below were lost in extraction; restored from context.
    job_config = kwargs.get('job_config', None)
    if job_config is None:
        job_config = link.args
    status = kwargs.get('status', JobStatus.unknown)
    job_details = JobDetails(jobname=link.linkname,
                             jobkey=key,
                             appname=link.appname,
                             logfile=kwargs.get('logfile'),
                             jobconfig=job_config,
                             timestamp=get_timestamp(),
                             file_dict=copy.deepcopy(link.files),
                             sub_file_dict=copy.deepcopy(link.sub_files),
                             status=status)
    self.register_job(job_details)
    return job_details
Register a job in the `JobArchive` from a `Link` object
10,001
def peek_64(library, session, address):
    value_64 = ViUInt64()
    ret = library.viPeek64(session, address, byref(value_64))
    return value_64.value, ret
Read a 64-bit value from the specified address. Corresponds to the viPeek64 function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param address: Source address to read the value. :return: Data read from bus, return value of the library call. :rtype: bytes, :class:`pyvisa.constants.StatusCode`
10,002
def add_input_opt(self, opt, inp):
    self.add_opt(opt, inp._dax_repr())
    self._add_input(inp)
Add an option that determines an input
10,003
def parse_options(self, arg):
    # The '-' and '=' literals were lost in extraction; restored from context.
    if not arg.startswith('-'):
        return False
    value = None
    if '=' in arg:
        arg, value = arg.split('=')
    for option in self._option_list:
        if arg not in (option.shortname, option.longname):
            continue
        action = option.action
        if action:
            action()
        if option.key == option.shortname:
            self._results[option.key] = True
            return True
        if option.boolean and option.default:
            self._results[option.key] = False
            return True
        if option.boolean:
            self._results[option.key] = True
            return True
        if not value:
            if self._argv:
                value = self._argv[0]
                self._argv = self._argv[1:]
            if not value:
                # error message reconstructed; original literal lost
                raise RuntimeError('%s requires a value' % option.name)
        self._results[option.key] = option.to_python(value)
        return True
    return False
Parse options with the argv :param arg: one arg from argv
10,004
def last(self):
    if len(self._range) == 0:
        raise IndexError("range is empty")
    if self._idx == -1:
        return self._range[0]
    return self._get(self._idx)
Returns the last element accessed via next() or prev(). Returns the first element of range() if neither of these was called.
10,005
def _clear_entity_type_registry(entity, **kwargs):
    codecopts = entity.codec_options.with_options(type_registry=None)
    return entity.with_options(codec_options=codecopts, **kwargs)
Clear the given database/collection object's type registry.
10,006
def get_any_node(self, addr):
    for n in self.graph.nodes():
        if n.addr == addr:
            return n
Get any VFG node corresponding to the basic block at @addr. Note that depending on the context sensitivity level, there might be multiple nodes corresponding to different contexts. This function will return the first one it encounters, which might not be what you want.
10,007
def remove_release(self, username, package_name, version):
    # URL template reconstructed; the original literal was lost in extraction.
    url = '%s/release/%s/%s/%s' % (self.domain, username, package_name, version)
    res = self.session.delete(url)
    self._check_response(res, [201])
    return
remove a release and all files under it :param username: the login of the package owner :param package_name: the name of the package :param version: the version of the release
10,008
def get(self, list_id, merge_id):
    self.list_id = list_id
    self.merge_id = merge_id
    # 'merge-fields' path segment restored from the MailChimp API; original literal lost.
    return self._mc_client._get(url=self._build_path(list_id, 'merge-fields', merge_id))
Get information about a specific merge field in a list. :param list_id: The unique id for the list. :type list_id: :py:class:`str` :param merge_id: The id for the merge field. :type merge_id: :py:class:`str`
10,009
def list(self, log_level=values.unset, start_date=values.unset, end_date=values.unset, limit=None, page_size=None):
    return list(self.stream(
        log_level=log_level,
        start_date=start_date,
        end_date=end_date,
        limit=limit,
        page_size=page_size,
    ))
Lists AlertInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param unicode log_level: Only show alerts for this log-level. :param date start_date: Only show Alerts on or after this date. :param date end_date: Only show Alerts on or before this date. :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.monitor.v1.alert.AlertInstance]
10,010
def _check_reset_and_type_change(self, name, orig_ctr):
    if name in orig_ctr:
        tf.logging.warning("Overwriting hparam %s", name)
    ctr_names = [
        (self._categorical_params, "categorical"),
        (self._discrete_params, "discrete"),
        (self._float_params, "float"),
        (self._int_params, "int"),
    ]
    ctrs, names = list(zip(*ctr_names))
    orig_name = names[ctrs.index(orig_ctr)]
    for ctr, ctr_name in ctr_names:
        if ctr is orig_ctr:
            continue
        if name in ctr:
            raise ValueError("Setting hyperparameter %s as type %s, but a "
                             "hyperparameter of the same name was originally "
                             "registered as type %s" % (name, ctr_name, orig_name))
Check if name is in orig_ctr or in one of the other type containers.
10,011
def _parametersToDefaults(self, parameters):
    defaults = {}
    for p in parameters:
        if isinstance(p, liveform.ChoiceParameter):
            selected = []
            for choice in p.choices:
                if choice.selected:
                    selected.append(choice.value)
            defaults[p.name] = selected
        else:
            defaults[p.name] = p.default
    return defaults
Extract the defaults from C{parameters}, constructing a dictionary mapping parameter names to default values, suitable for passing to L{ListChangeParameter}. @type parameters: C{list} of L{liveform.Parameter} or L{liveform.ChoiceParameter}. @rtype: C{dict}
10,012
def _parse_permission(self, obj): if isinstance(obj, str): if obj == : return A.ALL elif obj in A.ALL: return obj, else: logger.warning(, obj) elif isinstance(obj, (list, tuple)): for i in obj: if i not in A.ALL: logger.warning(, i) return obj elif isinstance(obj, dict): return self._parse_permission(obj.get())
Extract the permissions from obj. :param obj: :return: [A.QUERY, A.WRITE, ...]
10,013
def clear_from(self, timestamp):
    block_size = self.config.block_size
    offset, remainder = timestamp // block_size, timestamp % block_size
    if remainder:
        raise ValueError()
    self.driver.clear_from(offset, timestamp)
Clear all data from `timestamp` onwards. Note that the timestamp is rounded down to the nearest block boundary
10,014
def _init_default_values(self):
    self["filtering"]["remove invalid events"] = False
    self["filtering"]["enable filters"] = True
    self["filtering"]["limit events"] = 0
    self["filtering"]["polygon filters"] = []
    self["filtering"]["hierarchy parent"] = "none"
    for item in dfn.scalar_feature_names:
        appends = [" min", " max"]
        for a in appends:
            self["filtering"][item + a] = 0
Set default initial values The default values are hard-coded for backwards compatibility and for several functionalities in dclab.
10,015
def fs_cleansed_attachments(self):
    if exists(self.fs_cleansed_attachment_container):
        return [join(self.fs_cleansed_attachment_container, attachment)
                for attachment in listdir(self.fs_cleansed_attachment_container)]
    else:
        return []
returns a list of absolute paths to the cleansed attachments
10,016
def average_last_builds(connection, package, limit=5): defer.returnValue(average)
Find the average duration time for the last couple of builds. :param connection: txkoji.Connection :param package: package name :returns: deferred that when fired returns a datetime.timedelta object, or None if there were no previous builds for this package.
10,017
def _PrintTasksStatus(self, processing_status): if processing_status and processing_status.tasks_status: tasks_status = processing_status.tasks_status table_view = views.CLITabularTableView( column_names=[, , , , , ], column_sizes=[15, 7, 15, 15, 15, 0]) table_view.AddRow([ , tasks_status.number_of_queued_tasks, tasks_status.number_of_tasks_processing, tasks_status.number_of_tasks_pending_merge, tasks_status.number_of_abandoned_tasks, tasks_status.total_number_of_tasks]) self._output_writer.Write() table_view.Write(self._output_writer)
Prints the status of the tasks. Args: processing_status (ProcessingStatus): processing status.
10,018
def saveVizGithub(contents, ontouri): title = "Ontospy: ontology export" readme = % str(ontouri) files = { : { : contents }, : { : readme }, : { : } } urls = save_anonymous_gist(title, files) return urls
DEPRECATED on 2016-11-16. Was working but had a dependency on the package 'uritemplate.py' which caused problems at installation time
10,019
def update(self, other):
    if type(self) != type(other):
        return NotImplemented
    else:
        if other.bad:
            self.error = other.error
            self.bad = True
        self._fieldDict.update(other._fieldDict)
Adds all the tag-entry pairs from _other_ to the `Grant`. If there is a conflict _other_ takes precedence. # Parameters _other_ : `Grant` > Another `Grant` of the same type as _self_
10,020
def load_ply(fileobj):
    # PLY header literals below were lost in extraction and are restored from
    # the standard ASCII PLY format; the exact property names are assumptions.
    def nextline():
        while True:
            line = fileobj.readline()
            assert line != ''
            if not line.startswith('comment'):
                return line.strip()
    assert nextline() == 'ply'
    assert nextline() == 'format ascii 1.0'
    line = nextline()
    assert line.startswith('element vertex')
    nverts = int(line.split()[2])
    assert nextline() == 'property float x'
    assert nextline() == 'property float y'
    assert nextline() == 'property float z'
    line = nextline()
    assert line.startswith('element face')
    nfaces = int(line.split()[2])
    assert nextline() == 'property list uchar int vertex_indices'
    line = nextline()
    has_texcoords = line == 'property list uchar float texcoord'
    if has_texcoords:
        assert nextline() == 'end_header'
    else:
        assert line == 'end_header'
    verts = np.zeros((nverts, 3))
    for i in range(nverts):
        vals = nextline().split()
        verts[i, :] = [float(v) for v in vals[:3]]
    faces = []
    faces_uv = []
    for i in range(nfaces):
        vals = nextline().split()
        assert int(vals[0]) == 3
        faces.append([int(v) for v in vals[1:4]])
        if has_texcoords:
            assert len(vals) == 11
            assert int(vals[4]) == 6
            faces_uv.append([(float(vals[5]), float(vals[6])),
                             (float(vals[7]), float(vals[8])),
                             (float(vals[9]), float(vals[10]))])
        else:
            assert len(vals) == 4
    return verts, faces, faces_uv
Same as load_ply, but takes a file-like object
10,021
def _create_results_summary(self):
    needed_attributes = ["params", "standard_errors", "tvalues", "pvalues",
                         "robust_std_errs", "robust_t_stats", "robust_p_vals"]
    try:
        assert all([hasattr(self, attr) for attr in needed_attributes])
        assert all([isinstance(getattr(self, attr), pd.Series)
                    for attr in needed_attributes])
    except AssertionError:
        msg = "Call this function only after setting/calculating all other"
        msg_2 = " estimation results attributes"
        raise NotImplementedError(msg + msg_2)
    self.summary = pd.concat((self.params, self.standard_errors, self.tvalues,
                              self.pvalues, self.robust_std_errs,
                              self.robust_t_stats, self.robust_p_vals), axis=1)
    return None
Create the dataframe that displays the estimation results, and store it on the model instance. Returns ------- None.
10,022
def call_handlers(self, msg):
    # Key and type names restored from the Jupyter message format; assumptions.
    self.message_received.emit(msg)
    msg_type = msg['header']['msg_type']
    if msg_type == 'input_request':
        self.input_requested.emit(msg)
Reimplemented to emit signals instead of making callbacks.
10,023
def decode(self, binSequence):
    try:
        binSeq = iter(binSequence[0])
    except TypeError:
        binSeq = binSequence
    ret = ''
    for b in binSeq:
        ch = ''
        for c in self.charToBin:
            if b & self.forma[self.charToBin[c]] > 0:
                ch += c + '/'  # separator character is an assumption; original literal lost
        if ch == '':
            raise KeyError('unable to decode %s' % b)  # message reconstructed
        ret += ch[:-1]
    return ret
decodes a binary sequence to return a string
10,024
def not_empty(value, allow_empty=False, **kwargs):
    if not value and allow_empty:
        return None
    elif not value:
        raise errors.EmptyValueError()
    return value
Validate that ``value`` is not empty. :param value: The value to validate. :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :returns: ``value`` / :obj:`None <python:None>` :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
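A quick usage sketch derived directly from the code above (hypothetical session):
>>> not_empty('hello')
'hello'
>>> not_empty('', allow_empty=True) is None
True
>>> not_empty('')   # raises errors.EmptyValueError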
10,025
def get_media_detail_input_interface_name(self, **kwargs):
    # kwargs key names restored from the surrounding auto-generated pattern; assumptions.
    config = ET.Element("config")
    get_media_detail = ET.Element("get_media_detail")
    config = get_media_detail
    input = ET.SubElement(get_media_detail, "input")
    interface_name = ET.SubElement(input, "interface-name")
    interface_name.text = kwargs.pop('interface_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
10,026
def _on_message(self, socket, message):
    # Message keys, type names, and log texts restored from the ActionCable
    # protocol; treat them as assumptions.
    data = json.loads(message)
    message_type = None
    identifier = None
    subscription = None
    if 'type' in data:
        message_type = data['type']
    if 'identifier' in data:
        identifier = json.loads(data['identifier'])
    if identifier is not None:
        subscription = self.find_subscription(identifier)
    if subscription is not None:
        subscription.received(data)
    elif message_type == 'welcome':
        self.logger.debug('Welcome message received.')
        for subscription in self.subscriptions.values():
            if subscription.state == 'connection_pending':
                subscription.create()
    elif message_type == 'ping':
        if self.log_ping:
            self.logger.debug('Ping received.')
    else:
        self.logger.warning('Message not supported. (Message: {})'.format(message))
Called whenever a message arrives.
10,027
def count(self):
    references = self.conn.client.get(self.refcount_key)
    if references is None:
        return 0
    return int(references)
:returns: The total number of elements in the reference list. :rtype: int
10,028
def update_job_by_id(user, job_id): if_match_etag = utils.check_and_get_etag(flask.request.headers) values = schemas.job.put(flask.request.json) job = v1_utils.verify_existence_and_get(job_id, _TABLE) job = dict(job) if not user.is_in_team(job[]): raise dci_exc.Unauthorized() status = values.get() if status and job.get() != status: jobstates.insert_jobstate(user, { : status, : job_id }) if status in models.FINAL_STATUSES: jobs_events.create_event(job_id, status, job[]) where_clause = sql.and_(_TABLE.c.etag == if_match_etag, _TABLE.c.id == job_id) values[] = utils.gen_etag() query = _TABLE.update().returning(*_TABLE.columns).\ where(where_clause).values(**values) result = flask.g.db_conn.execute(query) if not result.rowcount: raise dci_exc.DCIConflict(, job_id) return flask.Response( json.dumps({: result.fetchone()}), 200, headers={: values[]}, content_type= )
Update a job
10,029
def DeleteGRRUser(self, username):
    try:
        del self.approvals_by_username[username]
    except KeyError:
        pass
    for approvals in itervalues(self.approvals_by_username):
        for approval in itervalues(approvals):
            grants = [g for g in approval.grants if g.grantor_username != username]
            if len(grants) != len(approval.grants):
                approval.grants = grants
    try:
        del self.notifications_by_username[username]
    except KeyError:
        pass
    try:
        del self.users[username]
    except KeyError:
        raise db.UnknownGRRUserError(username)
Deletes the user and all related metadata with the given username.
10,030
def sanitize_filename(filename):
    token = generate_drop_id()
    name, extension = splitext(filename)
    if extension:
        # format string reconstructed; splitext keeps the leading dot in extension
        return '%s%s' % (token, extension)
    else:
        return token
preserve the file ending, but replace the name with a random token
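A hypothetical session illustrating the documented behavior; the token value is random and is shown here only for shape:
>>> sanitize_filename('report.pdf')
'a3f9c2d4e5b6.pdf'
>>> sanitize_filename('README')
'a3f9c2d4e5b6'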
10,031
def get_sizestr(img):
    n_x, n_y, n_slices = img.shape[:3]
    import numpy as np
    voxel_dims = np.array(img.header.get_zooms()[:3])
    # 'x' separators restored from the docstring examples (e.g. '2x2x2', '128x128')
    matrix_size = '{0}x{1}'.format(num_to_str(n_x), num_to_str(n_y))
    voxel_size = 'x'.join([num_to_str(s) for s in voxel_dims])
    fov = [n_x, n_y] * voxel_dims[:2]
    fov = 'x'.join([num_to_str(s) for s in fov])
    return n_slices, voxel_size, matrix_size, fov
Extract and reformat voxel size, matrix size, field of view, and number of slices into pretty strings. Parameters ---------- img : :obj:`nibabel.Nifti1Image` Image from scan from which to derive parameters. Returns ------- n_slices : :obj:`int` Number of slices. voxel_size : :obj:`str` Voxel size string (e.g., '2x2x2') matrix_size : :obj:`str` Matrix size string (e.g., '128x128') fov : :obj:`str` Field of view string (e.g., '256x256')
10,032
def decode(packet): offset = 0 _ = struct.unpack_from(, packet, offset) s = _[1] state = dict() state[] = s & 1 state[] = s >> 1 & 1 state[] = s >> 2 & 1 state[] = s >> 3 & 1 state[] = s >> 4 & 1 state[] = s >> 5 & 1 state[] = s >> 6 & 1 state[] = s >> 7 & 1 state[] = s >> 8 & 1 state[] = s >> 9 & 1 state[] = s >> 10 & 1 state[] = s >> 11 & 1 state[] = s >> 12 & 1 state[] = s >> 13 & 1 state[] = s >> 15 & 1 state[] = s >> 16 & 1 state[] = s >> 17 & 1 state[] = s >> 19 & 1 state[] = s >> 21 & 1 state[] = s >> 22 & 1 state[] = s >> 23 & 1 state[] = s >> 24 & 1 state[] = s >> 25 & 1 state[] = s >> 26 & 1 state[] = s >> 27 & 1 state[] = s >> 28 & 1 state[] = s >> 29 & 1 state[] = s >> 30 & 1 state[] = s >> 31 & 1 data = dict() data[] = state data[] = _[0] data[] = _[2] data[] = _[3] offset += struct.calcsize() demo_fields = [ , , , , , , , , , ] angles = [, , ] while True: try: id_nr, size = struct.unpack_from(, packet, offset) offset += struct.calcsize() except struct.error: break values = [] for i in range(size - struct.calcsize()): values.append(struct.unpack_from(, packet, offset)[0]) offset += struct.calcsize() if id_nr == 0: values = struct.unpack_from(, b.join(values)) demo = dict(zip(demo_fields, values)) for a in angles: demo[a] = int(demo[a] / 1000) data[] = demo return data
Decode a navdata packet.
10,033
def value_dp_matrix(self):
    if self.__value_dp_matrix is None:
        self.__value_dp_matrix = self.__dp_extractor.to_dp_matrix(
            to_value_matrix(self.headers, self.rows)
        )
    return self.__value_dp_matrix
:return: DataProperty for table data. :rtype: list
10,034
def get_rules(license):
    can = []
    cannot = []
    must = []
    req = requests.get("{base_url}/licenses/{license}".format(
        base_url=BASE_URL, license=license), headers=_HEADERS)
    if req.status_code == requests.codes.ok:
        data = req.json()
        can = data["permitted"]
        cannot = data["forbidden"]
        must = data["required"]
    return can, cannot, must
Gets can, cannot and must rules from github license API
10,035
def flush_incoming(self):
    while True:
        try:
            stanza_obj = self._incoming_queue.get_nowait()
        except asyncio.QueueEmpty:
            break
        self._process_incoming(None, stanza_obj)
Flush all incoming queues to the respective processing methods. The handlers are called as usual, thus it may require at least one iteration through the asyncio event loop before effects can be seen. The incoming queues are empty after a call to this method. It is legal (but pretty useless) to call this method while the stream is :attr:`running`.
10,036
def _ensure_allow_rp(rp_pyxb):
    if not rp_pyxb.replicationAllowed:
        rp_pyxb.replicationAllowed = True
    if not rp_pyxb.numberReplicas:
        rp_pyxb.numberReplicas = 3
Ensure that RP allows replication.
10,037
def XORPS(cpu, dest, src): res = dest.write(dest.read() ^ src.read())
Performs a bitwise logical XOR operation on the source operand (second operand) and the destination operand (first operand) and stores the result in the destination operand. The source operand can be an MMX technology register or a 64-bit memory location or it can be an XMM register or a 128-bit memory location. The destination operand can be an MMX technology register or an XMM register. Each bit of the result is set to 1 if the corresponding bits of the first and second operands differ; otherwise, it is set to 0.
10,038
def _validate_max_staleness(max_staleness):
    if max_staleness == -1:
        return -1
    if not isinstance(max_staleness, integer_types):
        raise TypeError(_invalid_max_staleness_msg(max_staleness))
    if max_staleness <= 0:
        raise ValueError(_invalid_max_staleness_msg(max_staleness))
    return max_staleness
Validate max_staleness.
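Behavior sketch derived directly from the code above (hypothetical session):
>>> _validate_max_staleness(-1)
-1
>>> _validate_max_staleness(90)
90
>>> _validate_max_staleness(0)     # raises ValueError
>>> _validate_max_staleness('90')  # raises TypeError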
10,039
def appname(path=None):
    if path is None:
        path = sys.argv[0]
    name = os.path.basename(os.path.splitext(path)[0])
    if name == 'mod_wsgi':  # literal restored from the docstring below
        name = 'webapp'  # replacement name is an assumption; original literal lost
    return name
Return a useful application name based on the program argument. A special case maps 'mod_wsgi' to a more appropriate name so web applications show up as our own.
10,040
def spatialimg_to_hdfgroup(h5group, spatial_img):
    # Dataset names 'data', 'affine', 'extra' restored from the docstring;
    # the hasattr target and the error message are assumptions.
    try:
        h5group['data'] = spatial_img.get_data()
        h5group['affine'] = spatial_img.get_affine()
        if hasattr(spatial_img, 'get_extra'):
            h5group['extra'] = spatial_img.get_extra()
        hdr = spatial_img.get_header()
        for k in list(hdr.keys()):
            h5group['data'].attrs[k] = hdr[k]
    except ValueError as ve:
        raise Exception('Error saving image to ' + h5group.name) from ve
Saves a Nifti1Image into an HDF5 group. Parameters ---------- h5group: h5py Group Output HDF5 file path spatial_img: nibabel SpatialImage Image to be saved h5path: str HDF5 group path where the image data will be saved. Datasets will be created inside the given group path: 'data', 'extra', 'affine', the header information will be set as attributes of the 'data' dataset.
10,041
def run(input_path=None, output_path=None, verbose=True, plot=True, hist_sheet=False): if input_path is None: input_path = show_open_file_dialog(filetypes=[(, )]) if not input_path: if verbose: print("No input file selected.") return input_dir, input_filename = os.path.split(input_path) input_filename_no_ext, __ = os.path.splitext(input_filename) if verbose: print("Reading {}...".format(input_filename)) instruments_table = read_table(input_path, sheetname=, index_col=) beads_table = read_table(input_path, sheetname=, index_col=) samples_table = read_table(input_path, sheetname=, index_col=) beads_samples, mef_transform_fxns, mef_outputs = process_beads_table( beads_table, instruments_table, base_dir=input_dir, verbose=verbose, plot=plot, plot_dir=, full_output=True) if verbose: print("") print("Calculating statistics for beads...") add_beads_stats(beads_table, beads_samples, mef_outputs) samples = process_samples_table( samples_table, instruments_table, mef_transform_fxns=mef_transform_fxns, beads_table=beads_table, base_dir=input_dir, verbose=verbose, plot=plot, plot_dir=) if verbose: print("") print("Calculating statistics for all samples...") add_samples_stats(samples_table, samples) if hist_sheet: if verbose: print("Generating histograms table...") histograms_table = generate_histograms_table(samples_table, samples) about_table = generate_about_table({: input_path}) table_list = [] table_list.append((, instruments_table)) table_list.append((, beads_table)) table_list.append((, samples_table)) if hist_sheet: table_list.append((, histograms_table)) table_list.append((, about_table)) if verbose: print("Saving output Excel file...") if output_path is None: output_filename = "{}_output.xlsx".format(input_filename_no_ext) output_path = os.path.join(input_dir, output_filename) write_workbook(output_path, table_list) if verbose: print("\nDone.")
Run the MS Excel User Interface. This function performs the following: 1. If `input_path` is not specified, show a dialog to choose an input Excel file. 2. Extract data from the Instruments, Beads, and Samples tables. 3. Process all the bead samples specified in the Beads table. 4. Generate statistics for each bead sample. 5. Process all the cell samples in the Samples table. 6. Generate statistics for each sample. 7. If requested, generate a histogram table for each fluorescent channel specified for each sample. 8. Generate a table with run time, date, FlowCal version, among others. 9. Save statistics and (if requested) histograms in an output Excel file. Parameters ---------- input_path : str Path to the Excel file to use as input. If None, show a dialog to select an input file. output_path : str Path to which to save the output Excel file. If None, use "<input_path>_output". verbose : bool, optional Whether to print information messages during the execution of this function. plot : bool, optional Whether to generate and save density/histogram plots of each sample, and each beads sample. hist_sheet : bool, optional Whether to generate a sheet in the output Excel file specifying histogram bin information.
10,042
def register_sigma_task(self, *args, **kwargs):
    kwargs["task_class"] = SigmaTask
    return self.register_task(*args, **kwargs)
Register a sigma task.
10,043
def authorization_documents(self):
    if self._authorization_documents is None:
        self._authorization_documents = AuthorizationDocumentList(self)
    return self._authorization_documents
:rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentList
10,044
def get_readonly_fields(self, request, obj=None): fields = list(super(CreateUpdateAdmin, self).get_readonly_fields( request=request, obj=obj)) for k, v in self.ownership_info[].items(): if (hasattr(self.model, k) and ( in v and v[]) and k not in fields and (not self.exclude or (self.exclude and k not in self.exclude))): fields.append(k) return tuple(fields)
Makes `created_by`, `create_date` & `update_date` readonly when editing. Author: Himanshu Shankar (https://himanshus.com)
10,045
def property_present(properties, admin_username=, admin_password=, host=None, **kwargs): ret = {: host, : {: host}, : True, : {}, : } if host is None: output = __salt__[]() stdout = output[] reg = re.compile(r) for line in stdout: result = reg.match(line) if result is not None: host = result.group(1) break if not host: ret[] = False ret[] = return ret properties_get = {} for key, value in properties.items(): response = __salt__[](host, admin_username, admin_password, key) if response is False or response[] != 0: ret[] = False ret[] = return ret properties_get[key] = response[].split()[-1].split()[-1] if __opts__[]: for key, value in properties.items(): if properties_get[key] == value: ret[][key] = t be changedchangesWill be changed to {0}dracr.set_propertyretcoderesultcommentFailed to set property from idracchangeswill be changed - old value {0} , new value {1}'.format(properties_get[key], value) return ret
properties = {}
10,046
def recv(self, tab_key, message_id=None, timeout=30):
    # The 'id' key literals were lost in extraction; restored from the
    # surrounding "id" membership checks.
    self.__check_open_socket(tab_key)
    for idx in range(len(self.messages[tab_key])):
        if self.messages[tab_key][idx]:
            if "id" in self.messages[tab_key][idx] and message_id:
                if self.messages[tab_key][idx]['id'] == message_id:
                    return self.messages[tab_key].pop(idx)
    def check_func(message):
        if message_id is None:
            return True
        if not message:
            self.log.debug("Message is not true (%s)!", message)
            return False
        if "id" in message:
            return message['id'] == message_id
        return False
    return self.recv_filtered(check_func, tab_key, timeout)
Receive a message, optionally filtering for a specified message id. If `message_id` is None, the first command in the receive queue is returned. If `message_id` is not None, the command waits until a message is received with the specified id, or it times out. Timeout is the number of seconds to wait for a response; `None` is returned if the timeout expires with no response.
10,047
def col_frequencies(col, weights=None, gap_chars='-.'):
    # gap_chars default is an assumption; the original literal was lost.
    counts = col_counts(col, weights, gap_chars)
    scale = 1.0 / sum(counts.values())
    return dict((aa, cnt * scale) for aa, cnt in counts.items())
Frequencies of each residue type (totaling 1.0) in a single column.
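A minimal sketch of the expected output, assuming col_counts returns plain residue counts for the column (hypothetical session):
>>> col_frequencies('AAAB')
{'A': 0.75, 'B': 0.25}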
10,048
def interval_to_milliseconds(interval):
    ms = None
    seconds_per_unit = {
        "m": 60,
        "h": 60 * 60,
        "d": 24 * 60 * 60,
        "w": 7 * 24 * 60 * 60
    }
    unit = interval[-1]
    if unit in seconds_per_unit:
        try:
            ms = int(interval[:-1]) * seconds_per_unit[unit] * 1000
        except ValueError:
            pass
    return ms
Convert a Binance interval string to milliseconds :param interval: Binance interval string 1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w :type interval: str :return: None if unit not one of m, h, d or w None if string not in correct format int value of interval in milliseconds
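Usage examples derived from the conversion table in the code (hypothetical session):
>>> interval_to_milliseconds('1m')
60000
>>> interval_to_milliseconds('4h')
14400000
>>> interval_to_milliseconds('3x') is None   # unknown unit
True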
10,049
def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs):
    # Literals restored from the SaltStack sources; treat them as assumptions.
    log.debug('Checking whether jid %s is still running', jid)
    timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout']))
    pub_data = self.run_job(tgt,
                            'saltutil.find_job',
                            arg=[jid],
                            tgt_type=tgt_type,
                            timeout=timeout,
                            listen=listen,
                            **kwargs)
    if 'jid' in pub_data:
        self.event.subscribe(pub_data['jid'])
    return pub_data
Return the information about a given job
10,050
def save_bed(cls, query, filename=sys.stdout):
    # 'w' mode and newline literal restored; originals lost in extraction.
    out = _open(filename, 'w')
    for o in query:
        out.write(o.bed() + '\n')
write a bed12 file of the query. Parameters ---------- query : query a table or query to save to file filename : file string or filehandle to write output
10,051
def _get_name(self): if self.url.count() > 1: base_name = self.url.split(, 3)[-1].replace(, )[1:] if base_name.startswith(): base_name = base_name.split()[-1] base_name = re.sub(, , base_name) if base_name.endswith(): base_name = base_name[:-1] base_name = ( + self._method[] ).join(base_name.rsplit(, 1)) else: base_name = self._method[] if base_name == : base_name = if self._apipie_resource != self.resource: return % (self._apipie_resource, base_name) else: return base_name
There are three cases, because apipie definitions can have multiple signatures but python does not For example, the api endpoint: /api/myres/:myres_id/subres/:subres_id/subres2 for method *index* will be translated to the api method name: subres_index_subres2 So when you want to call it from v2 object, you'll have: myres.subres_index_subres2
10,052
def _initialize_counter(self):
    if self._counter is not None:
        return
    if self.counter_reference is self:
        self._counter = _Counter(seq=self.factory._setup_next_sequence())
    else:
        self.counter_reference._initialize_counter()
        self._counter = self.counter_reference._counter
Initialize our counter pointer. If we're the top-level factory, instantiate a new counter Otherwise, point to the top-level factory's counter.
10,053
def version(self, value): self.bytearray[self._get_slicers(1)] = bytearray(c_uint8(value or 0))
Version setter.
10,054
def has_default_privileges(name, object_name, object_type, defprivileges=None, grant_option=None, prepend=, maintenance_db=None, user=None, host=None, port=None, password=None, runas=None): * object_type, defprivileges, _defprivs = _mod_defpriv_opts(object_type, defprivileges) _validate_default_privileges(object_type, _defprivs, defprivileges) if object_type != : owner = _get_object_owner(object_name, object_type, prepend=prepend, maintenance_db=maintenance_db, user=user, host=host, port=port, password=password, runas=runas) if owner is not None and name == owner: return True _defprivileges = default_privileges_list(object_name, object_type, prepend=prepend, maintenance_db=maintenance_db, user=user, host=host, port=port, password=password, runas=runas) if name in _defprivileges: if object_type == : if grant_option: retval = _defprivileges[name] else: retval = True return retval else: _defperms = _DEFAULT_PRIVILEGE_TYPE_MAP[object_type] if grant_option: defperms = dict((_DEFAULT_PRIVILEGES_MAP[defperm], True) for defperm in _defperms) retval = defperms == _defprivileges[name] else: defperms = [_DEFAULT_PRIVILEGES_MAP[defperm] for defperm in _defperms] if in _defprivs: retval = sorted(defperms) == sorted(_defprivileges[name].keys()) else: retval = set(_defprivs).issubset( set(_defprivileges[name].keys())) return retval return False
.. versionadded:: 2019.0.0 Check if a role has the specified privileges on an object CLI Example: .. code-block:: bash salt '*' postgres.has_default_privileges user_name table_name table \\ SELECT,INSERT maintenance_db=db_name name Name of the role whose privileges should be checked on object_type object_name Name of the object on which the check is to be performed object_type The object type, which can be one of the following: - table - sequence - schema - group - function privileges Comma separated list of privileges to check, from the list below: - INSERT - CREATE - TRUNCATE - TRIGGER - SELECT - USAGE - UPDATE - EXECUTE - REFERENCES - DELETE - ALL grant_option If grant_option is set to True, the grant option check is performed prepend Table and Sequence object types live under a schema so this should be provided if the object is not under the default `public` schema maintenance_db The database to connect to user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of
10,055
def create_zone(server, token, domain, identifier, dtype, master=None): method = uri = + server + obj = JSONConverter(domain) obj.generate_zone(domain, identifier, dtype, master) connect.tonicdns_client(uri, method, token, obj.zone)
Create zone records. Arguments: server: TonicDNS API server token: TonicDNS API authentication token domain: Specify domain name identifier: Template ID dtype: MASTER|SLAVE|NATIVE (default: MASTER) master: master server ip address when dtype is SLAVE (default: None) ContentType: application/json x-authentication-token: token
10,056
def get_flops(): from sys import stdout from re import compile filename = "linpack.out" fpnum = r fpnum_1 = fpnum + r pattern = compile(r + fpnum_1 + fpnum_1 + fpnum_1 + r + fpnum + r + fpnum_1 + fpnum + r) speeds = [0.0, 1.0e75, 0.0] file = open(filename) count = 0 while file : line = file.readline() if not line : break if pattern.match(line) : count = count + 1 x = float(pattern.sub(r, line)) if x < 1.0 : print(count) speeds[0] = speeds[0] + x speeds[1] = min(speeds[1], x) speeds[2] = max(speeds[2], x) file.close() if count != 0 : speeds[0] = speeds[0] / count stdout.write("%6.1f MFlops (%d from %.1f to %.1f)\n" % (speeds[0], count, speeds[1], speeds[2]))
# DOESNT WORK
10,057
def make_masks(self, template): from copy import deepcopy nmasks = len(tables.mask_patterns) masks = [] * nmasks count = 0 for n in range(nmasks): cur_mask = deepcopy(template) masks[n] = cur_mask self.add_type_pattern(cur_mask, tables.type_bits[self.error][n]) pattern = tables.mask_patterns[n] bits = iter(self.buffer.getvalue()) row_start = itertools.cycle([len(cur_mask)-1, 0]) row_stop = itertools.cycle([-1,len(cur_mask)]) direction = itertools.cycle([-1, 1]) for column in range(len(cur_mask)-1, 0, -2): if column <= 6: column = column - 1 column_pair = itertools.cycle([column, column-1]) for row in range(next(row_start), next(row_stop), next(direction)): for i in range(2): col = next(column_pair) if cur_mask[row][col] != : continue try: bit = int(next(bits)) except: bit = 0 if pattern(row, col): cur_mask[row][col] = bit ^ 1 else: cur_mask[row][col] = bit return masks
This method generates all seven masks so that the best mask can be determined. The template parameter is a code matrix that will serve as the base for all the generated masks.
10,058
def intervention_strategies(df, filepath=None): logger = logging.getLogger("caspo") LIMIT = 50 if len(df) > LIMIT: msg = "Too many intervention strategies to visualize. A sample of %s strategies will be considered." % LIMIT logger.warning(msg) df = df.sample(LIMIT) values = np.unique(df.values.flatten()) if len(values) == 3: rwg = matplotlib.colors.ListedColormap([, , ]) elif 1 in values: rwg = matplotlib.colors.ListedColormap([, ]) else: rwg = matplotlib.colors.ListedColormap([, ]) plt.figure(figsize=(max((len(df.columns)-1) * .5, 4), max(len(df)*0.6, 2.5))) df.columns = [c[3:] for c in df.columns] ax = sns.heatmap(df, linewidths=.5, cbar=False, cmap=rwg, linecolor=) ax.set_xlabel("Species") ax.set_ylabel("Intervention strategy") for tick in ax.get_xticklabels(): tick.set_rotation(90) plt.tight_layout() if filepath: plt.savefig(os.path.join(filepath, )) return ax
Plots all intervention strategies Parameters ---------- df: `pandas.DataFrame`_ DataFrame with columns starting with `TR:` filepath: str Absolute path to a folder where to write the plot Returns ------- plot Generated plot .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
10,059
def _submit_task_with_template(self, task_ids): runtime = self.config runtime.update({ : os.getcwd(), : os.getcwd(), : env.verbosity, : env.config.get(, ), : env.config.get(, ), : os.path.expanduser() }) if in env.sos_dict: runtime.update({ x: env.sos_dict[][x] for x in (, , , , ) if x in env.sos_dict[] }) if not in runtime: runtime[] = 1 if not in runtime: runtime[] = 1 job_text = for task_id in task_ids: runtime[] = task_id try: job_text += cfg_interpolate(self.job_template, runtime) job_text += except Exception as e: raise ValueError( f) filename = task_ids[0] + ( if len(task_ids) == 1 else f) job_file = os.path.join( os.path.expanduser(), , , filename) with open(job_file, , newline=) as job: job.write(job_text) self.agent.send_task_file(job_file) try: cmd = f self.agent.run_command(cmd, wait_for_task=self.wait_for_task) except Exception as e: raise RuntimeError(f) return True
Submit tasks by interpolating a shell script defined in job_template
10,060
def create_mask(self): ntags = len(self._drawn_tags) if ntags == 0: return old_image = self.fitsimage.get_image() if old_image is None: return mask = None obj_kinds = set() for tag in self._drawn_tags: obj = self.canvas.get_object_by_tag(tag) try: cur_mask = old_image.get_shape_mask(obj) except Exception as e: self.logger.error(.format(str(e))) continue if mask is not None: mask |= cur_mask else: mask = cur_mask obj_kinds.add(obj.kind) image = dp.make_image(mask.astype(), old_image, {}, pfx=self._mask_prefix) imname = image.get() self.fv.gui_call(self.fv.add_image, imname, image, chname=self.chname) s = .format( ntags, .join(sorted(obj_kinds))) info = dict(time_modified=datetime.utcnow(), reason_modified=s) self.fv.update_image_info(image, info) self.logger.info(s)
Create boolean mask from drawing. All areas enclosed by all the shapes drawn will be set to 1 (True) in the mask. Otherwise, the values will be set to 0 (False). The mask will be inserted as a new image buffer, like ``Mosaic``.
10,061
def clean(self, py_value):
    try:
        import bleach
        return bleach.clean(py_value, **self.__bleachOptions)
    except ImportError:
        # warning text reconstructed; original literal lost in extraction
        warnings.warn('bleach is not installed; value returned uncleaned')
        return py_value
Cleans the value before storing it. :param: py_value : <str> :return: <str>
10,062
def TNRS(self, names, context_name=None, id_list=None, fuzzy_matching=False, include_deprecated=False, include_dubious=False, do_approximate_matching=None, wrap_response=None): if do_approximate_matching is not None: fuzzy_matching = do_approximate_matching if context_name and context_name not in self.valid_contexts: raise ValueError(.format(context_name)) if not (isinstance(names, list) or isinstance(names, tuple)): names = [names] for name in names: if len(name) < 2: raise ValueError(.format(name)) if id_list and len(id_list) != len(names): raise ValueError() data = {: names} if self.use_v1: uri = .format(p=self.prefix) else: uri = .format(p=self.prefix) if context_name: if self.use_v1: data[] = context_name else: data[] = context_name data[] = bool(fuzzy_matching) if id_list: data[] = list(id_list) if include_deprecated: data[] = True if include_dubious: data[] = True resp = self.json_http_post(uri, data=anyjson.dumps(data)) if wrap_response is None or wrap_response is False: return resp if wrap_response is True: return TNRSResponse(self._wr, resp, query_data=data) return wrap_response(resp, query_data=data)
Takes a name and an optional contextName and returns a list of matches. `wrap_response` can be True to return a TNRSResponse object, None to return the "raw" response dict, or a function/class that takes (response, query_data=dict) as its arguments. Each match is a dict with: 'higher' boolean DEF??? 'exact' boolean for exact match 'ottId' int 'name' name (or uniqname???) for the taxon in OTT 'nodeId' int ID of node in the taxomachine db. Probably not of use to anyone...
10,063
def item_fields(self):
    if self.templates.get("item_fields") and not self._updated(
        "/itemFields", self.templates["item_fields"], "item_fields"
    ):
        return self.templates["item_fields"]["tmplt"]
    query_string = "/itemFields"
    retrieved = self._retrieve_data(query_string)
    return self._cache(retrieved, "item_fields")
Get all available item fields
10,064
def load(self, filename):
    try:
        f = open(filename, mode='r')  # mode literal restored; original lost
    except Exception:
        return False
    while True:
        line = f.readline()
        if not line:
            break
        line = line.rstrip()
        eq = line.find('=')  # '=' restored from the name=value parsing below
        if eq == -1:
            continue
        name = line[:eq]
        value = line[eq+1:]
        self.set(name, value)
    f.close()
    return True
load settings from a file. Return True/False on success/failure
10,065
def add(self, element):
    key = self._transform(element)
    if key not in self._elements:
        self._elements[key] = element
Add an element to this set.
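A minimal self-contained sketch of the same pattern, assuming _transform canonicalizes elements (here: lowercasing, a hypothetical choice) so the first-added variant wins:
class TransformSet:
    def __init__(self, transform=str.lower):
        self._transform = transform
        self._elements = {}

    def add(self, element):
        key = self._transform(element)
        if key not in self._elements:
            self._elements[key] = element

s = TransformSet()
s.add('Foo')
s.add('FOO')                         # ignored: same key under the transform
print(list(s._elements.values()))    # ['Foo']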
10,066
def process_upload(upload_file, instance, form, event, request): caption = form.cleaned_data.get() upload_name = upload_file.name.lower() if upload_name.endswith() or upload_name.endswith(): try: upload = Image( event=event, image=upload_file, caption=caption, ) upload.save() instance.photos.add(upload) except Exception as error: messages.error(request, .format(error))
Helper function that actually processes and saves the upload(s). Segregated out for readability.
10,067
def _read_local_kwalitee_configuration(directory="."):
    # '.kwalitee.yaml' restored from the docstring; 'r' mode is an assumption.
    filepath = os.path.abspath(os.path.join(directory, '.kwalitee.yaml'))
    data = {}
    if os.path.exists(filepath):
        with open(filepath, 'r') as file_read:
            data = yaml.load(file_read.read())
    return data
Check if the repo has a ``.kwalitee.yaml`` file.
10,068
def set_op_version(version):
    # Command string and XML field names restored from the SaltStack glusterfs
    # module; treat them as assumptions.
    cmd = 'volume set all cluster.op-version {0}'.format(version)
    root = _gluster_xml(cmd)
    if not _gluster_ok(root):
        return False, root.find('opErrstr').text
    return root.find('output').text
.. versionadded:: 2019.2.0 Set the glusterfs volume op-version version Version to set the glusterfs volume op-version CLI Example: .. code-block:: bash salt '*' glusterfs.set_op_version <volume>
10,069
def verify(self, h, sig, sig_fmt=SER_BINARY):
    s = deserialize_number(sig, sig_fmt)
    return self.p._ECDSA_verify(h, s)
Verifies that `sig' is a signature for a message with SHA-512 hash `h'.
10,070
def _dev_api(cls):
    gh = github3.GitHub()
    gh.set_client_id(cls.remote.consumer_key, cls.remote.consumer_secret)
    return gh
Get a developer instance for GitHub API access.
10,071
def set_debug(self, set_to=True):
    if set_to:
        StreamHandler(sys.stdout).push_application()
        self._log.level = logbook.DEBUG
    self.debug = set_to
Sets the capture to debug mode (or turns it off if specified).
10,072
async def patch_entries(self, entry, **kwargs): params = {: self.token, : , : []} if in kwargs: params[] = kwargs[] if in kwargs and isinstance(kwargs[], list): params[] = .join(kwargs[]) params[] = self.__get_attr(what=, type_attr=int, value_attr=(0, 1), **kwargs) params[] = self.__get_attr(what=, type_attr=int, value_attr=(0, 1), **kwargs) params[] = self.__get_attr(what=, type_attr=str, value_attr=(, ), **kwargs) path = .format( entry=entry, ext=self.format) return await self.query(path, "patch", **params)
PATCH /api/entries/{entry}.{_format} Change several properties of an entry :param entry: the entry to 'patch' / update :param kwargs: can contain one of the following title: string tags: a list of tags tag1,tag2,tag3 archive: '0' or '1', default '0' archived the entry. starred: '0' or '1', default '0' starred the entry In case that you don't want to *really* remove it.. :return data related to the ext
10,073
def _get_device_by_label(devices, label):
    device_labels = [d for d in devices if d.deviceInfo.label == label]
    if device_labels:
        return device_labels[0]
    else:
        # error message reconstructed; original literal lost in extraction
        raise salt.exceptions.VMwareObjectNotFoundError(
            "Device with label '{}' was not found".format(label))
Returns the device with the given label, raises error if the device is not found. devices list of vim.vm.device.VirtualDevice objects key Unique key of device
10,074
def get_urlpatterns(self): return [ path( , search_view_factory(view_class=self.search_view, form_class=self.search_form), name=, ), ]
Returns the URL patterns managed by the considered factory / application.
10,075
def authGenders(self, countsOnly=False, fractionsMode=False, _countsTuple=False):
    authDict = recordGenders(self)
    if _countsTuple or countsOnly or fractionsMode:
        rawList = list(authDict.values())
        countsList = []
        # gender keys restored from the docstring; original literals lost
        for k in ('Male', 'Female', 'Unknown'):
            countsList.append(rawList.count(k))
        if fractionsMode:
            tot = sum(countsList)
            for i in range(3):
                countsList.append(countsList.pop(0) / tot)
        if _countsTuple:
            return tuple(countsList)
        else:
            return {'Male': countsList[0],
                    'Female': countsList[1],
                    'Unknown': countsList[2]}
    else:
        return authDict
Creates a dict mapping `'Male'`, `'Female'` and `'Unknown'` to lists of the names of all the authors. # Parameters _countsOnly_ : `optional bool` > Default `False`, if `True` the counts (lengths of the lists) will be given instead of the lists of names _fractionsMode_ : `optional bool` > Default `False`, if `True` the fraction counts (lengths of the lists divided by the total number of authors) will be given instead of the lists of names. This supersedes _countsOnly_ # Returns `dict[str:str or int]` > The mapping of genders to author's names or counts
10,076
def _notify(self, topic, **kwargs):
    for cb in self._connects.get(topic, []):
        try:
            cb(**kwargs)
        except Exception:
            if self._debug:
                traceback.print_exc()
Invokes callbacks for an event topic. @param topic: String event name @type topic: str @param kwargs: Values associated with the event @type kwargs: dict
10,077
def image_import(self, image_name, url, image_meta, remote_host=None): image_info = [] try: image_info = self._ImageDbOperator.image_query_record(image_name) except exception.SDKObjectNotExistError: msg = ("The image record %s doensos_versionDEPLOYFailed to create repository to store image %(img)s with error: %(err)s, please make sure there are enough space on zvmsdk server and proper permission to create the repositoryimgerr//md5sums single image, rename its name to be same as image_type = if image_type == : final_image_fpath = .join([target_folder, CONF.zvm.user_root_vdev]) os.rename(import_image_fpath, final_image_fpath) elif image_type == : pass disk_size_units = self._get_disk_size_units(final_image_fpath) image_size = self._get_image_size(final_image_fpath) self._ImageDbOperator.image_add_record(image_name, image_os_version, real_md5sum, disk_size_units, image_size, image_type) LOG.info("Image %s is import successfully" % image_name) except Exception: self._pathutils.clean_temp_folder(target_folder) raise
Import the image specified in url to SDK image repository, and create a record in image db, the imported images are located in image_repository/prov_method/os_version/image_name/, for example, /opt/sdk/images/netboot/rhel7.2/90685d2b-167bimage/0100
10,078
def gzip_if_smaller(content_related, data):
    if content_related and len(data) > 512:
        gzipped = bytes(GzipPacked(data))
        return gzipped if len(gzipped) < len(data) else data
    else:
        return data
Calls bytes(request), and based on a certain threshold, optionally gzips the resulting data. If the gzipped data is smaller than the original byte array, this is returned instead. Note that this only applies to content related requests.
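A minimal sketch of the same size-threshold pattern using the standard-library gzip module in place of GzipPacked (which is specific to the original codebase):
import gzip

def gzip_if_smaller_sketch(content_related: bool, data: bytes) -> bytes:
    # Only compress payloads above the 512-byte threshold, and keep the
    # compressed form only when it is actually smaller than the original.
    if content_related and len(data) > 512:
        gzipped = gzip.compress(data)
        return gzipped if len(gzipped) < len(data) else data
    return data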
10,079
def popError(text, title="Lackey Error"):
    root = tk.Tk()
    root.withdraw()
    tkMessageBox.showerror(title, text)
Creates an error dialog with the specified text.
10,080
def Pad(self, n):
    for i in range_func(n):
        self.Place(0, N.Uint8Flags)
Pad places zeros at the current offset.
10,081
def is_nested(values):
    return (all(isinstance(item, Iterable) for item in values)
            if isinstance(values, Iterable) else False)
Check if values is composed only by iterable elements.
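Behavior sketch (hypothetical session); note that strings count as iterables, so a plain string is reported as nested:
>>> is_nested([[1], (2,)])
True
>>> is_nested([1, 2])
False
>>> is_nested('ab')
True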
10,082
def add_to_submenu(self, submenu_path, item):
    for m in self.items:
        if m.name == submenu_path[0]:
            m.add_to_submenu(submenu_path[1:], item)
            return
    raise ValueError("No submenu (%s) found" % (submenu_path[0]))
add an item to a submenu using a menu path array
10,083
def _get(self, text):
    if self.strict:
        match = self.prog.match(text)
        if match:
            cmd = match.group()
            if cmd in self:
                return cmd
    else:
        words = self.prog.findall(text)
        for word in words:
            if word in self:
                return word
Analyze the text to get the right function Parameters ---------- text : str The text that could call a function
10,084
def pathFromHere_explore(self, astr_startPath='/'):
    # default start path is an assumption; original literal lost
    self.l_lwd = []
    self.treeExplore(startPath=astr_startPath, f=self.lwd)
    return self.l_lwd
Return a list of paths from "here" in the stree, using the child explore access. :param astr_startPath: path from which to start :return: a list of paths from "here"
10,085
def post(self, request, *args, **kwargs): enterprise_customer_uuid, course_run_id, course_key, program_uuid = RouterView.get_path_variables(**kwargs) enterprise_customer = get_enterprise_customer_or_404(enterprise_customer_uuid) if course_key: context_data = get_global_context(request, enterprise_customer) try: kwargs[] = RouterView.get_course_run_id(request.user, enterprise_customer, course_key) except Http404: error_code = log_message = ( .format( course_key=course_key, course_run_id=course_run_id, enterprise_customer_uuid=enterprise_customer_uuid, error_code=error_code, userid=request.user.id, program_uuid=program_uuid, ) ) return render_page_with_error_code_message(request, context_data, error_code, log_message) return self.redirect(request, *args, **kwargs)
Run some custom POST logic for Enterprise workflows before routing the user through existing views.
10,086
def log(duration, message=None, use_last_commit_message=False): branch = git.branch issue = jira.get_issue(branch) comment = "Working on issue %s" % branch if message: comment = message elif use_last_commit_message: comment = git.get_last_commit_message() if issue: duration = jira.get_elapsed_time(issue) if duration == else duration if duration: jira.add_worklog(issue, timeSpent=duration, adjustEstimate=None, newEstimate=None, reduceBy=None, comment=comment) print "Logged %s against issue %s (%s)" % (duration, branch, comment) else: print "No time logged, less than 0m elapsed."
Log time against the current active issue
10,087
def check_lat_extents(self, ds): if not (hasattr(ds, ) or hasattr(ds, )): return Result(BaseCheck.MEDIUM, False, , []) try: lat_min = float(ds.geospatial_lat_min) lat_max = float(ds.geospatial_lat_max) except ValueError: return Result(BaseCheck.MEDIUM, False, , [ .format(ds.geospatial_lat_min, ds.geospatial_lat_max)]) lat_vars = {} for name, var in ds.variables.items(): if not hasattr(var, ): continue lat_vars[var] = 0 if var.units in _possibleyunits: lat_vars[var] += 1 if hasattr(var, ) and var.standard_name == : lat_vars[var] += 1 if hasattr(var, ) and var.axis == : lat_vars[var] += 1 lat_vars = {k: v for k, v in lat_vars.items() if v > 0} if len(lat_vars) == 0: return Result(BaseCheck.MEDIUM, False, , []) final_lats = sorted(lat_vars, key=lambda x: lat_vars[x], reverse=True) obs_mins = {var._name: np.nanmin(var) for var in final_lats if not np.isnan(var).all()} obs_maxs = {var._name: np.nanmax(var) for var in final_lats if not np.isnan(var).all()} min_pass = any((np.isclose(lat_min, min_val) for min_val in obs_mins.values())) max_pass = any((np.isclose(lat_max, max_val) for max_val in obs_maxs.values())) allpass = sum((min_pass, max_pass)) msgs = [] if not min_pass: msgs.append("Data for possible latitude variables (%s) did not match geospatial_lat_min value (%s)" % (obs_mins, lat_min)) if not max_pass: msgs.append("Data for possible latitude variables (%s) did not match geospatial_lat_max value (%s)" % (obs_maxs, lat_max)) return Result(BaseCheck.MEDIUM, (allpass, 2), , msgs)
Check that the values of geospatial_lat_min/geospatial_lat_max approximately match the data. :param netCDF4.Dataset ds: An open netCDF dataset
10,088
def human_file_size(size):
    # suffix string reconstructed from the docstring examples ('256b', '64k',
    # '1.1G'); the fallback suffix is an assumption. Original literals lost.
    suffixes = 'bkMGTPEZY'
    if size == 0:
        num_scale = 0
    else:
        num_scale = int(math.floor(math.log(size) / math.log(1000)))
    num_scale = max(num_scale, 0)
    if num_scale >= len(suffixes):
        suffix = '?'
    else:
        suffix = suffixes[num_scale]
    num_scale = int(math.pow(1000, num_scale))
    value = float(size) / num_scale
    str_value = str(value)
    if suffix == 'b':
        if '.' in str_value:
            str_value = str_value[:str_value.index('.')]
    elif str_value[2] == '.':
        str_value = str_value[:2]
    else:
        str_value = str_value[:3]
    return "{0:>3s}{1}".format(str_value, suffix)
Returns a human-friendly string representing a file size that is 2-4 characters long. For example, depending on the number of bytes given, can be one of:: 256b 64k 1.1G Parameters ---------- size : int The size of the file (in bytes) Returns ------- size : str A human-friendly representation of the size of the file
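Assuming the suffix string reconstructed above ('bkMGTPEZY'), the docstring's examples follow (hypothetical session; results are right-padded to three characters):
>>> human_file_size(256)
'256b'
>>> human_file_size(64000)
' 64k'
>>> human_file_size(1.1e9)
'1.1G'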
10,089
def update_from_env(yaml_dict, prefix=None): _config.databases.local prefix = prefix or "" def _set_env_var(path, node): env_path = "{0}{1}{2}".format( prefix.upper(), if prefix else , .join([str(key).upper() for key in path]) ) env_val = os.environ.get(env_path, None) if env_val is not None: env_dict = yamldict.load(.format(env_val)) return env_dict.val else: return None yaml_dict.traverse(_set_env_var)
Override YAML settings with values from the environment variables. - The underscore '_' delimits the hierarchy of the YAML settings, such that the value of 'config.databases.local' will be overridden by CONFIG_DATABASES_LOCAL.
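A sketch of the naming convention, using the docstring's own example (the connection string value is hypothetical):
import os
os.environ['CONFIG_DATABASES_LOCAL'] = 'sqlite:///tmp/dev.db'
# update_from_env(yaml_dict, prefix='config') would now override
# yaml_dict.databases.local with 'sqlite:///tmp/dev.db'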
10,090
def suppress_output(reverse=False):
    if reverse:
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
    else:
        # open() added: assigning the devnull path string itself would break writes
        sys.stdout = open(os.devnull, 'w')
        sys.stderr = open(os.devnull, 'w')
Suppress output
10,091
def unflag_field(self, move_x, move_y):
    field_status = self.info_map[move_y, move_x]
    if field_status == 9 or field_status == 10:
        self.info_map[move_y, move_x] = 11
Unflag or unquestion a grid by given position.
10,092
def set_routput(self, routput):
    if type(routput) != str:
        return False, "Routput must be string"
    self.r_outputs.append(routput)
    return True, "Ok"
Add routput to be used in next api call :param routput: key :return: True/False, message
10,093
def base(number, input_base=10, output_base=10, max_depth=10, string=False, recurring=True):
    if type(number) == int or type(number) == float:
        number = str(number)
    if type(number) == str:
        number = represent_as_tuple(number)
    if not check_valid(number, input_base):
        raise ValueError
    if input_base == 1:
        number = (1,) * number.count(1)
    number = expand_recurring(number, repeat=5)
    if "." in number:
        radix_point = number.index(".")
        integer_part = number[:radix_point]
        fractional_part = number[radix_point:]
        integer_part = integer_base(integer_part, input_base, output_base)
        fractional_part = fractional_base(fractional_part, input_base,
                                          output_base, max_depth)
        number = integer_part + fractional_part
        number = truncate(number)
    else:
        number = integer_base(number, input_base, output_base)
    if recurring:
        number = find_recurring(number, min_repeat=2)
    return represent_as_string(number) if string else number
Converts a number from any base to any another. Args: number(tuple|str|int): The number to convert. input_base(int): The base to convert from (defualt 10). output_base(int): The base to convert to (default 10). max_depth(int): The maximum number of fractional digits (defult 10). string(bool): If True output will be in string representation, if False output will be in tuple representation (defult False). recurring(bool): Attempt to find repeating digits in the fractional part of a number. Repeated digits will be enclosed with "[" and "]" (default True). Returns: A tuple of digits in the specified base: (int, int, int, ... , '.' , int, int, int) If the string flag is set to True, a string representation will be used instead. Raises: ValueError if a digit value is too high for the input_base. Example: >>> base((1,9,6,'.',5,1,6), 17, 20) (1, 2, 8, '.', 5, 19, 10, 7, 17, 2, 13, 13, 1, 8)
10,094
def to_description_dict(self):
    # Key names reconstructed from the AWS IoT DescribeCertificate response
    # shape; the original literals were lost in extraction.
    return {
        'certificateArn': self.arn,
        'certificateId': self.certificate_id,
        'status': self.status,
        'certificatePem': self.certificate_pem,
        'ownedBy': self.owner,
        'creationDate': self.creation_date,
        'lastModifiedDate': self.last_modified_date,
        'transferData': self.transfer_data
    }
You might need keys below in some situation - caCertificateId - previousOwnedBy
10,095
def normalize_object_slot(self, value=_nothing, prop=None, obj=None):
    if value is not _nothing and hasattr(prop, "compare_as"):
        method, nargs = getattr(prop, "compare_as_info", (False, 1))
        args = []
        if method:
            args.append(obj)
        if nargs:
            args.append(value)
        value = prop.compare_as(*args)
    return self.normalize_slot(value, prop)
This hook wraps ``normalize_slot``, and performs clean-ups which require access to the object the slot is in as well as the value.
10,096
def _supply_data(data_sink, context):
    try:
        data_sink.sink(context)
    except Exception as e:
        # '{ds}' placement in the message is an assumption; the format call
        # passes ds= but the surviving string had lost its placeholder.
        ex = ValueError("An exception occurred while "
                        "supplying data to data sink '{ds}'\n\n"
                        "{e}\n\n"
                        "{help}".format(ds=context.name, e=str(e),
                                        help=context.help()))
        raise ex, None, sys.exc_info()[2]  # Python 2 re-raise keeping traceback
Supply data to the data sink
10,097
def __from_xml(self,value): n=value.children vns=get_node_ns(value) while n: if n.type!=: n=n.next continue ns=get_node_ns(n) if (ns and vns and ns.getContent()!=vns.getContent()): n=n.next continue if n.name==: self.pobox=unicode(n.getContent(),"utf-8","replace") elif n.name in (, ): self.extadr=unicode(n.getContent(),"utf-8","replace") elif n.name==: self.street=unicode(n.getContent(),"utf-8","replace") elif n.name==: self.locality=unicode(n.getContent(),"utf-8","replace") elif n.name==: self.region=unicode(n.getContent(),"utf-8","replace") elif n.name==: self.pcode=unicode(n.getContent(),"utf-8","replace") elif n.name==: self.ctry=unicode(n.getContent(),"utf-8","replace") elif n.name in ("HOME","WORK","POSTAL","PARCEL","DOM","INTL", "PREF"): self.type.append(n.name.lower()) n=n.next if self.type==[]: self.type=["intl","postal","parcel","work"] elif "dom" in self.type and "intl" in self.type: raise ValueError("Both and specified in vcard ADR")
Initialize a `VCardAdr` object from an XML element. :Parameters: - `value`: field value as an XML node :Types: - `value`: `libxml2.xmlNode`
10,098
def load_key(self, key, key_type, key_encoding):
    # Error message texts reconstructed; original literals lost in extraction.
    if key_type not in (EncryptionKeyType.PRIVATE, EncryptionKeyType.PUBLIC):
        raise ValueError('Invalid key type "{key_type}" for cipher "{cipher}"'.format(
            key_type=key_type, cipher=self.java_name))
    if key_encoding not in (KeyEncodingType.DER, KeyEncodingType.PEM):
        raise ValueError('Invalid key encoding "{key_encoding}" for cipher "{cipher}"'.format(
            key_encoding=key_encoding, cipher=self.java_name))
    return _KEY_LOADERS[self.cipher](key, key_type, key_encoding)
Load a key from bytes. :param bytes key: Key bytes :param EncryptionKeyType key_type: Type of key :param KeyEncodingType key_encoding: Encoding used to serialize key :returns: Loaded key
10,099
def get_s3_bucket_keys(api_client, bucket_name, bucket, check_encryption, check_acls): bucket[] = [] keys = handle_truncated_response(api_client.list_objects, {: bucket_name}, []) bucket[] = len(keys[]) key_count = 0 update_status(key_count, bucket[], ) for key in keys[]: key_count += 1 key[] = key.pop() key[] = str(key[]) if check_encryption: try: k = api_client.get_object(Bucket = bucket_name, Key = key[]) key[] = k[] if in k else None key[] = k[] if in k else None except Exception as e: printException(e) continue if check_acls: try: key[] = get_s3_acls(api_client, bucket_name, bucket, key_name = key[]) except Exception as e: continue bucket[].append(key) update_status(key_count, bucket[], )
Get key-specific information (server-side encryption, acls, etc...) :param api_client: :param bucket_name: :param bucket: :param check_encryption: :param check_acls: :return: