Columns: Unnamed: 0 (int64 row index, 0 to 389k), code (string, lengths 26 to 79.6k), docstring (string, lengths 1 to 46.9k).
10,300
def cancel_inquiry(self):
    self.names_to_find = {}
    if self.is_inquiring:
        try:
            _bt.hci_send_cmd(self.sock, _bt.OGF_LINK_CTL,
                             _bt.OCF_INQUIRY_CANCEL)
        except _bt.error as e:
            self.sock.close()
            self.sock = None
            raise BluetoothError(e.args[0],
                                 "error canceling inquiry: " + e.args[1])
        self.is_inquiring = False
Call this method to cancel an inquiry in progress. inquiry_complete will still be called.
10,301
def all_my_hosts_and_services(self):
    for what in (self.hosts, self.services):
        for item in what:
            yield item
Create an iterator for all my known hosts and services :return: None
10,302
def _get_objects_with_same_attribute(self,
                                     objects: Set[Object],
                                     attribute_function: Callable[[Object], str]) -> Set[Object]:
    objects_of_attribute: Dict[str, Set[Object]] = defaultdict(set)
    for entity in objects:
        objects_of_attribute[attribute_function(entity)].add(entity)
    if not objects_of_attribute:
        return set()
    most_frequent_attribute = max(objects_of_attribute,
                                  key=lambda x: len(objects_of_attribute[x]))
    if len(objects_of_attribute[most_frequent_attribute]) <= 1:
        return set()
    return objects_of_attribute[most_frequent_attribute]
Returns the set of objects for which the attribute function returns an attribute value that is most frequent in the initial set, if the frequency is greater than 1. If not, all objects have different attribute values, and this method returns an empty set.
10,303
async def parse_update(self, bot):
    data = await self.request.json()
    update = types.Update(**data)
    return update
Read update from stream and deserialize it. :param bot: bot instance. You can get it from the Dispatcher. :return: :class:`aiogram.types.Update`
10,304
def _apply_to_array(self, yd, y, weights, off_slices, ref_slice, dim):
    ndims = len(y.shape)
    all = slice(None, None, 1)
    ref_multi_slice = [all] * ndims
    ref_multi_slice[dim] = ref_slice
    for w, s in zip(weights, off_slices):
        off_multi_slice = [all] * ndims
        off_multi_slice[dim] = s
        if abs(1 - w) < 1.E-14:
            yd[ref_multi_slice] += y[off_multi_slice]
        else:
            yd[ref_multi_slice] += w * y[off_multi_slice]
Applies the finite differences only to slices along a given axis
10,305
def _get_dvs_capability(dvs_name, dvs_capability): log.trace(%s\, dvs_name) return {: dvs_capability.dvsOperationSupported, : dvs_capability.dvPortGroupOperationSupported, : dvs_capability.dvPortOperationSupported}
Returns the dict representation of the DVS product_info.

dvs_name
    The name of the DVS

dvs_capability
    The DVS capability
10,306
def _create_identifier(rdtype, name, content): sha256 = hashlib.sha256() sha256.update((rdtype + ).encode()) sha256.update((name + ).encode()) sha256.update(content.encode()) return sha256.hexdigest()[0:7]
Creates a hashed identifier based on the fully qualified record type, name & content and returns the hash.
10,307
def dependencies(self, task, params={}, **options):
    path = "/tasks/%s/dependencies" % (task)
    return self.client.get(path, params, **options)
Returns the compact representations of all of the dependencies of a task.

Parameters
----------
task : {Id} The task to get dependencies on.
[params] : {Object} Parameters for the request
10,308
def get_rupdict(self): assert len(self.rup_array) == 1, dic = {: self.trt, : self.samples} with datastore.read(self.filename) as dstore: rupgeoms = dstore[] source_ids = dstore[][] rec = self.rup_array[0] geom = rupgeoms[rec[]:rec[]].reshape( rec[], rec[]) dic[] = geom[] dic[] = geom[] dic[] = geom[] rupclass, surclass = self.code2cls[rec[]] dic[] = rupclass.__name__ dic[] = surclass.__name__ dic[] = rec[] dic[] = rec[] dic[] = rec[] dic[] = rec[] dic[] = rec[] dic[] = rec[] dic[] = source_ids[rec[]] return dic
:returns: a dictionary with the parameters of the rupture
10,309
def _set_vcs(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u: {: 1}, u: {: 2}},), is_leaf=True, yang_name="vcs", rest_name="vcs", parent=self, choice=(u, u), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "brocade-event-handler:enumeration", : , }) self.__vcs = t if hasattr(self, ): self._set()
Setter method for vcs, mapped from YANG variable /event_handler/event_handler_list/trigger/vcs (enumeration) If this variable is read-only (config: false) in the source YANG file, then _set_vcs is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vcs() directly. YANG Description: VCS event type.
10,310
def _learnFeatureLocationPair(self, newLocation, featureLocationInput, featureLocationGrowthCandidates): potentialOverlaps = self.featureLocationConnections.computeActivity( featureLocationInput) matchingSegments = np.where(potentialOverlaps > self.learningThreshold)[0] cellsForActiveSegments = self.featureLocationConnections.mapSegmentsToCells( self.activeFeatureLocationSegments) learningActiveSegments = self.activeFeatureLocationSegments[ np.in1d(cellsForActiveSegments, newLocation)] remainingCells = np.setdiff1d(newLocation, cellsForActiveSegments) candidateSegments = self.featureLocationConnections.filterSegmentsByCell( matchingSegments, remainingCells) cellsForCandidateSegments = ( self.featureLocationConnections.mapSegmentsToCells( candidateSegments)) candidateSegments = candidateSegments[ np.in1d(cellsForCandidateSegments, remainingCells)] onePerCellFilter = np2.argmaxMulti(potentialOverlaps[candidateSegments], cellsForCandidateSegments) learningMatchingSegments = candidateSegments[onePerCellFilter] newSegmentCells = np.setdiff1d(remainingCells, cellsForCandidateSegments) for learningSegments in (learningActiveSegments, learningMatchingSegments): self._learn(self.featureLocationConnections, self.rng, learningSegments, featureLocationInput, featureLocationGrowthCandidates, potentialOverlaps, self.initialPermanence, self.sampleSize, self.permanenceIncrement, self.permanenceDecrement, self.maxSynapsesPerSegment) numNewSynapses = len(featureLocationInput) if self.sampleSize != -1: numNewSynapses = min(numNewSynapses, self.sampleSize) if self.maxSynapsesPerSegment != -1: numNewSynapses = min(numNewSynapses, self.maxSynapsesPerSegment) newSegments = self.featureLocationConnections.createSegments( newSegmentCells) self.featureLocationConnections.growSynapsesToSample( newSegments, featureLocationGrowthCandidates, numNewSynapses, self.initialPermanence, self.rng)
Grow / reinforce synapses between the location layer's dendrites and the input layer's active cells.
10,311
def _check_for_pi_nodes(self, list, inheader): list = list[:] while list: elt = list.pop() t = elt.nodeType if t == _Node.PROCESSING_INSTRUCTION_NODE: raise ParseException( + \ elt.nodeName + , inheader, elt.parentNode, self.dom) elif t == _Node.DOCUMENT_TYPE_NODE: raise ParseException(, inheader, elt.parentNode, self.dom) list += _children(elt)
Raise an exception if any of the list descendants are PI nodes.
10,312
def _validate_query(query): query = deepcopy(query) if query["q"] == BLANK_QUERY["q"]: raise ValueError("No query specified.") query["q"] = _clean_query_string(query["q"]) if query["limit"] is None: query["limit"] = SEARCH_LIMIT if query["advanced"] else NONADVANCED_LIMIT elif query["limit"] > SEARCH_LIMIT: warnings.warn( .format(query["limit"], SEARCH_LIMIT), RuntimeWarning) query["limit"] = SEARCH_LIMIT for key, val in BLANK_QUERY.items(): if query.get(key, float()) == val: query.pop(key) to_remove = [field for field in query.keys() if field not in BLANK_QUERY.keys()] [query.pop(field) for field in to_remove] return query
Validate and clean up a query to be sent to Search. Cleans the query string, removes unneeded parameters, and validates for correctness. Does not modify the original argument. Raises an Exception on invalid input. Arguments: query (dict): The query to validate. Returns: dict: The validated query.
10,313
def convert_entrez_to_uniprot(self, entrez): server = "http://www.uniprot.org/uniprot/?query=%22GENEID+{0}%22&format=xml".format(entrez) r = requests.get(server, headers={"Content-Type": "text/xml"}) if not r.ok: r.raise_for_status() sys.exit() response = r.text info = xmltodict.parse(response) try: data = info[][][][0] return data except TypeError: data = info[][][0][][0] return data
Convert Entrez Id to Uniprot Id
10,314
def getPDF(self): if hasattr(self, ): return self._qplot, self._hplot, self._tplot else: raise ValueError()
Function that gets vectors of the pdf and target at the last design evaluated. :return: tuple of q values, pdf values, target values
10,315
def read_config(cls, configparser):
    config = dict()
    section = cls.__name__
    option = "warningregex"
    if configparser.has_option(section, option):
        value = configparser.get(section, option)
    else:
        value = None
    config[option] = value
    return config
Read configuration file options.
10,316
def job_step_error(self, job_request_payload, message):
    payload = JobStepErrorPayload(job_request_payload, message)
    self.send(job_request_payload.error_command, payload)
Send message that the job step failed using payload data. :param job_request_payload: StageJobPayload|RunJobPayload|StoreJobOutputPayload payload from job with error :param message: description of the error
10,317
def new_socket():
    new_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    new_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        reuseport = socket.SO_REUSEPORT
    except AttributeError:
        pass
    else:
        try:
            new_sock.setsockopt(socket.SOL_SOCKET, reuseport, 1)
        except (OSError, socket.error) as err:
            if err.errno != errno.ENOPROTOOPT:
                raise
    return new_sock
Create a new socket with OS-specific parameters. Try to set SO_REUSEPORT for BSD-flavored systems if it's an option. Catches errors if not.
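A minimal usage sketch; the address and port below are placeholders, not from the source:

sock = new_socket()
sock.bind(("127.0.0.1", 8053))   # hypothetical local address/port
sock.listen(5)
sock.close()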
10,318
def validate_replicas(self, data): environment = data.get() if environment and environment.replicas: validate_replicas(data.get(), environment.replicas)
Validate distributed experiment
10,319
def _call(self, endpoint, data=None): data = {} if data is None else data try: data[] = self.access_token() return self._request(endpoint, data) except AccessTokenExpired: self._cached_access_token = None data[] = self.access_token() return self._request(endpoint, data)
Make an authorized API call to specified endpoint. :param str endpoint: API endpoint's relative URL, eg. `/account`. :param dict data: POST request data. :return: A dictionary or a string with response data.
10,320
def run(self): print() _build.run(self) print() docdir = os.path.join(self.build_lib, , ) self.mkpath(docdir) doc_builder = os.path.join(self.build_lib, , ) doc_source = check_call([sys.executable, doc_builder, doc_source, self.build_lib]) print()
Build the Fortran library, all python extensions and the docs.
10,321
def _read_data(path):
    data = {}
    with open(path, "r") as f_obj:
        var = ""
        for line in f_obj:
            if "<-" in line:
                if len(var):
                    key, var = _process_data_var(var)
                    data[key] = var
                var = ""
            var += " " + line.strip()
        if len(var):
            key, var = _process_data_var(var)
            data[key] = var
    return data
Read Rdump output and transform to Python dictionary.

Parameters
----------
path : str

Returns
-------
Dict
    key, values pairs from Rdump formatted data.
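A hedged usage sketch; the Rdump contents and the parsed values are illustrative only, since _process_data_var is defined elsewhere:

# contents of a hypothetical "data.R" file:
#   N <- 10
#   y <- c(1, 2, 3)
data = _read_data("data.R")
# e.g. {"N": 10, "y": [1, 2, 3]}, depending on how _process_data_var parses each assignment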
10,322
def is_handler_subclass(cls, classnames=("ViewHandler", "APIHandler")):
    if isinstance(cls, list):
        return any(is_handler_subclass(c) for c in cls)
    elif isinstance(cls, type):
        return any(c.__name__ in classnames for c in inspect.getmro(cls))
    else:
        raise TypeError(
            "Unexpected type `{}` for class `{}`".format(
                type(cls), cls
            )
        )
Determines if ``cls`` is indeed a subclass of ``classnames``
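A short usage sketch with stand-in classes; only the class names in the MRO matter to the check:

class ViewHandler(object):
    pass

class HomeHandler(ViewHandler):
    pass

is_handler_subclass(HomeHandler)          # True: "ViewHandler" appears in the MRO
is_handler_subclass([HomeHandler, dict])  # True: any matching element is enough
is_handler_subclass(dict)                 # False
is_handler_subclass("HomeHandler")        # raises TypeError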
10,323
def analyze_cluster_size_per_scan_parameter(input_file_hits, output_file_cluster_size, parameter=, max_chunk_size=10000000, overwrite_output_files=False, output_pdf=None): logging.info( + parameter + + input_file_hits) if os.path.isfile(output_file_cluster_size) and not overwrite_output_files: logging.info( + output_file_cluster_size + ) else: with tb.open_file(output_file_cluster_size, mode="w") as out_file_h5: filter_table = tb.Filters(complib=, complevel=5, fletcher32=False) parameter_goup = out_file_h5.create_group(out_file_h5.root, parameter, title=parameter) cluster_size_total = None with tb.open_file(input_file_hits, mode="r+") as in_hit_file_h5: meta_data_array = in_hit_file_h5.root.meta_data[:] scan_parameter = analysis_utils.get_scan_parameter(meta_data_array) if scan_parameter: scan_parameter_values = scan_parameter[parameter] if len(scan_parameter_values) == 1: logging.warning( + str(input_file_hits) + + str(parameter) + ) else: logging.info( + input_file_hits + + parameter + + str(len(scan_parameter_values)) + + str(np.amin(scan_parameter_values)) + + str(np.amax(scan_parameter_values))) event_numbers = analysis_utils.get_meta_data_at_scan_parameter(meta_data_array, parameter)[] parameter_ranges = np.column_stack((scan_parameter_values, analysis_utils.get_ranges_from_array(event_numbers))) hit_table = in_hit_file_h5.root.Hits analysis_utils.index_event_number(hit_table) total_hits, total_hits_2, index = 0, 0, 0 chunk_size = max_chunk_size analyze_data = AnalyzeRawData() analyze_data.create_cluster_size_hist = True analyze_data.create_cluster_tot_hist = True analyze_data.histogram.set_no_scan_parameter() progress_bar = progressbar.ProgressBar(widgets=[, progressbar.Percentage(), , progressbar.Bar(marker=, left=, right=), , progressbar.AdaptiveETA()], maxval=hit_table.shape[0], term_width=80) progress_bar.start() for parameter_index, parameter_range in enumerate(parameter_ranges): analyze_data.reset() logging.debug( + str(parameter_range[0]) + + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0))) + ) start_event_number = parameter_range[1] stop_event_number = parameter_range[2] logging.debug( + str(start_event_number) + + str(stop_event_number) + ) actual_parameter_group = out_file_h5.create_group(parameter_goup, name=parameter + + str(parameter_range[0]), title=parameter + + str(parameter_range[0])) readout_hit_len = 0 for hits, index in analysis_utils.data_aligned_at_events(hit_table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=index, chunk_size=chunk_size): total_hits += hits.shape[0] analyze_data.analyze_hits(hits) readout_hit_len += hits.shape[0] progress_bar.update(index) chunk_size = int(1.05 * readout_hit_len) if int(1.05 * readout_hit_len) < max_chunk_size else max_chunk_size if chunk_size < 50: chunk_size = 50 occupancy = analyze_data.histogram.get_occupancy() cluster_size_hist = analyze_data.clusterizer.get_cluster_size_hist() cluster_size_hist_table = out_file_h5.create_carray(actual_parameter_group, name=, title=, atom=tb.Atom.from_dtype(cluster_size_hist.dtype), shape=cluster_size_hist.shape, filters=filter_table) cluster_size_hist_table[:] = cluster_size_hist if output_pdf is not False: plotting.plot_cluster_size(hist=cluster_size_hist, title= + str(np.sum(cluster_size_hist)) + + parameter + + str(scan_parameter_values[parameter_index]), filename=output_pdf) if cluster_size_total is None: cluster_size_total = cluster_size_hist else: cluster_size_total = np.vstack([cluster_size_total, 
cluster_size_hist]) total_hits_2 += np.sum(occupancy) progress_bar.finish() if total_hits != total_hits_2: logging.warning() logging.info(, total_hits) cluster_size_total_out = out_file_h5.create_carray(out_file_h5.root, name=, title=, atom=tb.Atom.from_dtype(cluster_size_total.dtype), shape=cluster_size_total.shape, filters=filter_table) cluster_size_total_out[:] = cluster_size_total
This method takes multiple hit files and determines the cluster size for different scan parameter values.

Parameters
----------
input_file_hits: string
output_file_cluster_size: string
    The data file with the results
parameter: string
    The name of the parameter to separate the data into (e.g.: PlsrDAC)
max_chunk_size: int
    The maximum chunk size used during read; if too big a memory error occurs, if too small the analysis takes longer
overwrite_output_files: bool
    Set to true to overwrite the output file if it already exists
output_pdf: PdfPages
    PdfPages file object; if None the plot is printed to screen, if False nothing is printed
10,324
def trace_line_numbers(filename, reload_on_change=False): fullname = cache_file(filename, reload_on_change) if not fullname: return None e = file_cache[filename] if not e.line_numbers: if hasattr(coverage.coverage, ): e.line_numbers = coverage.the_coverage.analyze_morf(fullname)[1] else: cov = coverage.coverage() cov._warn_no_data = False e.line_numbers = cov.analysis(fullname)[1] pass pass return e.line_numbers
Return an array of breakpoints in filename. The list will contain an entry for each distinct line event call, so it is possible (and possibly useful) for a line number to appear more than once.
10,325
def combine_hex(data):
    output = 0x00
    for i, value in enumerate(reversed(data)):
        output |= (value << i * 8)
    return output
Combine list of integer values to one big integer
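A quick usage sketch; the list is treated as big-endian, so the first element becomes the most significant byte:

combine_hex([0x12, 0x34])        # 0x1234 == 4660
combine_hex([0x01, 0x00, 0xFF])  # 0x0100FF == 65791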
10,326
def dist_to_deg(self, distance, latitude):
    lat = latitude if latitude >= 0 else -1 * latitude
    rad2deg = 180 / pi
    earthRadius = 6378160.0
    latitudeCorrection = 0.5 * (1 + cos(lat * pi / 180))
    return (distance / (earthRadius * latitudeCorrection) * rad2deg)
distance = distance in meters, latitude = latitude in degrees. At the equator, the distance of one degree is equal in latitude and longitude. At higher latitudes, a degree of longitude is shorter in length, proportional to cos(latitude). http://en.wikipedia.org/wiki/Decimal_degrees This function is part of a distance filter where the database 'distance' is in degrees. There's no good single-valued answer to this problem. The distance/degree is quite constant N/S around the earth (latitude), but varies over a huge range E/W (longitude). Split the difference: I'm going to average the degrees latitude and degrees longitude corresponding to the given distance. At high latitudes, this will be too short N/S and too long E/W. It splits the errors between the two axes. Errors are < 25 percent for latitudes < 60 degrees N/S.
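A hedged numerical illustration, assuming a hypothetical filter instance geo_filter that exposes this method (111 km is roughly one degree at the equator):

geo_filter.dist_to_deg(111000, 0)    # ~0.997 degrees at the equator
geo_filter.dist_to_deg(111000, 60)   # ~1.33 degrees at 60 degrees latitude, after the averaged correction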
10,327
def avg_receive_rate(self): if not self._has_data or not in self.result[]: return None bps = self.result[][][] return bps / 8 / 1024 / 1024
Average receiving rate in MB/s over the entire run. This data may not exist if iperf was interrupted. If the result is not from a successful run, this property is None.
10,328
def _prepare_for_submission(self,tempfolder, inputdict): try: code = inputdict.pop(self.get_linkname()) except KeyError: raise InputValidationError("No code specified for this " "calculation") try: parameters = inputdict.pop(self.get_linkname()) except KeyError: raise InputValidationError("No parameters specified for this " "calculation") if not isinstance(parameters, ParameterData): raise InputValidationError("parameters is not of type " "ParameterData") try: structure = inputdict.pop(self.get_linkname()) except KeyError: raise InputValidationError("No structure specified for this " "calculation") if not isinstance(structure,StructureData): raise InputValidationError("structure node is not of type" "StructureData") try: settings = inputdict.pop(self.get_linkname(),None) except KeyError: pass if settings is not None: if not isinstance(parameters, ParameterData): raise InputValidationError("parameters is not of type " "ParameterData") try: kpoints = inputdict.pop(self.get_linkname(),None) except KeyError: pass if kpoints is not None: if not isinstance(kpoints, KpointsData): raise InputValidationError("kpoints is not of type KpointsData") default_atoms_getters = [ ["total_energy",""] ] atoms = structure.get_ase() atoms.write(tempfolder.get_abs_path(self._input_aseatoms)) parameters_dict = parameters.get_dict() settings_dict = settings.get_dict() if settings is not None else {} optimizer = parameters_dict.pop("optimizer",None) if optimizer is not None: if not isinstance(optimizer,dict): raise InputValidationError("optimizer key must contain a dictionary") optimizer_name = optimizer.pop("name",None) if optimizer_name is None: raise InputValidationError("Don"{}"args"{}"@functiont find a mesh of kpoints" " in the KpointsData") calc_argsstr = ", ".join( [calc_argsstr] + ["kpts=({},{},{})".format( *mesh )] ) atoms_getters = default_atoms_getters + convert_the_getters( parameters_dict.pop("atoms_getters",[]) ) calculator_getters = convert_the_getters( parameters_dict.pop("calculator_getters",[]) ) all_imports = ["import ase", , "import json", "import numpy", calculator_import_string] if optimizer is not None: all_imports.append(optimizer_import_string) try: if "PW" in calc_args[].values(): all_imports.append("from gpaw import PW") except KeyError: pass extra_imports = parameters_dict.pop("extra_imports",[]) for i in extra_imports: if isinstance(i,basestring): all_imports.append("import {}".format(i)) elif isinstance(i,(list,tuple)): if not all( [isinstance(j,basestring) for j in i] ): raise ValueError("extra import must contain strings") if len(i)==2: all_imports.append("from {} import {}".format(*i)) elif len(i)==3: all_imports.append("from {} import {} as {}".format(*i)) else: raise ValueError("format for extra imports not recognized") else: raise ValueError("format for extra imports not recognized") if self.get_withmpi(): all_imports.append( "from ase.parallel import paropen" ) all_imports_string = "\n".join(all_imports) + "\n" input_txt = "" input_txt += get_file_header() input_txt += " input_txt += "\n" input_txt += all_imports_string input_txt += "\n" pre_lines = parameters_dict.pop("pre_lines",None) if pre_lines is not None: if not isinstance(pre_lines,(list,tuple)): raise ValueError("Prelines must be a list of strings") if not all( [isinstance(_,basestring) for _ in pre_lines] ): raise ValueError("Prelines must be a list of strings") input_txt += "\n".join(pre_lines) + "\n\n" input_txt += "atoms = ase.io.read()\n".format(self._input_aseatoms) input_txt += "\n" input_txt += "calculator = 
custom_calculator({})\n".format(calc_argsstr) input_txt += "atoms.set_calculator(calculator)\n" input_txt += "\n" if optimizer is not None: input_txt += "optimizer = custom_optimizer({})\n".format(optimizer_argsstr) input_txt += "optimizer.run({})\n".format(optimizer_runargsstr) input_txt += "\n" input_txt += "results = {}\n" for getter,getter_args in atoms_getters: input_txt += "results[] = atoms.get_{}({})\n".format(getter, getter, getter_args) input_txt += "\n" for getter,getter_args in calculator_getters: input_txt += "results[] = calculator.get_{}({})\n".format(getter, getter, getter_args) input_txt += "\n" input_txt += "for k,v in results.iteritems():\n" input_txt += " if isinstance(results[k],(numpy.matrix,numpy.ndarray)):\n" input_txt += " results[k] = results[k].tolist()\n" input_txt += "\n" post_lines = parameters_dict.pop("post_lines",None) if post_lines is not None: if not isinstance(post_lines,(list,tuple)): raise ValueError("Postlines must be a list of strings") if not all( [isinstance(_,basestring) for _ in post_lines] ): raise ValueError("Postlines must be a list of strings") input_txt += "\n".join(post_lines) + "\n\n" right_open = "paropen" if self.get_withmpi() else "open" input_txt += "with {}(, ) as f:\n".format(right_open, self._OUTPUT_FILE_NAME) input_txt += " json.dump(results,f)" input_txt += "\n" if optimizer is not None: input_txt += "atoms.write()\n".format(self._output_aseatoms) input_txt += "\n" input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME) with open(input_filename,) as infile: infile.write(input_txt) local_copy_list = [] remote_copy_list = [] additional_retrieve_list = settings_dict.pop("ADDITIONAL_RETRIEVE_LIST",[]) calcinfo = CalcInfo() calcinfo.uuid = self.uuid calcinfo.local_copy_list = local_copy_list calcinfo.remote_copy_list = remote_copy_list codeinfo = CodeInfo() codeinfo.cmdline_params = [self._INPUT_FILE_NAME] codeinfo.stdout_name = self._TXT_OUTPUT_FILE_NAME codeinfo.code_uuid = code.uuid calcinfo.codes_info = [codeinfo] calcinfo.retrieve_list = [] calcinfo.retrieve_list.append(self._OUTPUT_FILE_NAME) calcinfo.retrieve_list.append(self._output_aseatoms) calcinfo.retrieve_list += additional_retrieve_list return calcinfo
This is the routine to be called when you want to create the input files and related stuff with a plugin. :param tempfolder: a aiida.common.folders.Folder subclass where the plugin should put all its files. :param inputdict: a dictionary with the input nodes, as they would be returned by get_inputdata_dict (without the Code!)
10,329
def load_average(self): with io.open(self.load_average_file, ) as f: file_columns = f.readline().strip().split() return float(file_columns[self._load_average_file_column])
Returns the current load average.
10,330
def _create_package_hierarchy(prefix=settings.TEMP_DIR, book_id=None):
    root_dir = _get_package_name(book_id=book_id, prefix=prefix)
    if os.path.exists(root_dir):
        shutil.rmtree(root_dir)
    os.mkdir(root_dir)
    original_dir = os.path.join(root_dir, "original")
    metadata_dir = os.path.join(root_dir, "metadata")
    os.mkdir(original_dir)
    os.mkdir(metadata_dir)
    return root_dir, original_dir, metadata_dir
Create hierarchy of directories, at it is required in specification. `root_dir` is root of the package generated using :attr:`settings.TEMP_DIR` and :func:`_get_package_name`. `orig_dir` is path to the directory, where the data files are stored. `metadata_dir` is path to the directory with MODS metadata. Args: book_id (str, default None): UUID of the book. prefix (str, default settings.TEMP_DIR): Where the package will be stored. Default :attr:`settings.TEMP_DIR`. Warning: If the `root_dir` exists, it is REMOVED! Returns: list of str: root_dir, orig_dir, metadata_dir
10,331
def all(self, query=None, **kwargs):
    return super(OrganizationsProxy, self).all(query=query)
Gets all organizations.
10,332
def SetPercentageView(self, percentageView):
    self.percentageView = percentageView
    self.percentageMenuItem.Check(self.percentageView)
    self.percentageViewTool.SetValue(self.percentageView)
    total = self.adapter.value(self.loader.get_root(self.viewType))
    for control in self.ProfileListControls:
        control.SetPercentage(self.percentageView, total)
    self.adapter.SetPercentage(self.percentageView, total)
Set whether to display percentage or absolute values
10,333
def get(self, variable_path: str, default: t.Optional[t.Any] = None, coerce_type: t.Optional[t.Type] = None, coercer: t.Optional[t.Callable] = None, required: bool = False, **kwargs): for p in self.parsers: try: val = p.get( variable_path, default=self.sentinel, coerce_type=coerce_type, coercer=coercer, **kwargs ) if val != self.sentinel: self.enqueue(variable_path, p, val) return val except Exception as e: if not self.silent: raise if self.suppress_logs: continue self.logger.error(.format( p.__class__.__name__, variable_path, str(e) )) self.enqueue(variable_path, value=default) if not default and required: raise exceptions.RequiredValueIsEmpty( .format(variable_path)) return default
Tries to read a ``variable_path`` from each of the passed parsers. It stops if read was successful and returns a retrieved value. If none of the parsers contain a value for the specified path it returns ``default``. :param variable_path: a path to variable in config :param default: a default value if ``variable_path`` is not present anywhere :param coerce_type: cast a result to a specified type :param coercer: perform the type casting with specified callback :param required: raise ``RequiredValueIsEmpty`` if no ``default`` and no result :param kwargs: additional options to all parsers :return: **the first successfully read** value from the list of parser instances or ``default`` :raises config.exceptions.RequiredValueIsEmpty: if nothing is read,``required`` flag is set, and there's no ``default`` specified
10,334
def pre_build(local_root, versions): log = logging.getLogger(__name__) exported_root = TempDir(True).name for sha in {r[] for r in versions.remotes}: target = os.path.join(exported_root, sha) log.debug(, sha) export(local_root, sha, target) remote = versions[Config.from_context().root_ref] with TempDir() as temp_dir: log.debug(, temp_dir) source = os.path.dirname(os.path.join(exported_root, remote[], remote[])) build(source, temp_dir, versions, remote[], True) existing = os.listdir(temp_dir) for remote in versions.remotes: root_dir = RE_INVALID_FILENAME.sub(, remote[]) while root_dir in existing: root_dir += remote[] = root_dir log.debug(, remote[], root_dir) existing.append(root_dir) for remote in list(versions.remotes): log.debug(, remote[]) source = os.path.dirname(os.path.join(exported_root, remote[], remote[])) try: config = read_config(source, remote[]) except HandledError: log.warning(, remote[]) versions.remotes.pop(versions.remotes.index(remote)) continue remote[] = config[] remote[] = config[] return exported_root
Build docs for all versions to determine root directory and master_doc names. Need to build docs to (a) avoid filename collision with files from root_ref and branch/tag names and (b) determine master_doc config values for all versions (in case master_doc changes from e.g. contents.rst to index.rst between versions). Exports all commits into a temporary directory and returns the path to avoid re-exporting during the final build. :param str local_root: Local path to git root directory. :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance. :return: Tempdir path with exported commits as subdirectories. :rtype: str
10,335
def validate_json_schema(data, schema, name="task"): try: jsonschema.validate(data, schema) except jsonschema.exceptions.ValidationError as exc: raise ScriptWorkerTaskException( "Canmalformed-payload'] )
Given data and a jsonschema, let's validate it. This happens for tasks and chain of trust artifacts. Args: data (dict): the json to validate. schema (dict): the jsonschema to validate against. name (str, optional): the name of the json, for exception messages. Defaults to "task". Raises: ScriptWorkerTaskException: on failure
10,336
def compress_flood_fill_regions(targets):
    t = RegionCoreTree()
    for (x, y), cores in iteritems(targets):
        for p in cores:
            t.add_core(x, y, p)
    return sorted(t.get_regions_and_coremasks())
Generate a reduced set of flood fill parameters. Parameters ---------- targets : {(x, y) : set([c, ...]), ...} For each used chip a set of core numbers onto which an application should be loaded. E.g., the output of :py:func:`~rig.place_and_route.util.build_application_map` when indexed by an application. Yields ------ (region, core mask) Pair of integers which represent a region of a SpiNNaker machine and a core mask of selected cores within that region for use in flood-filling an application. `region` and `core_mask` are both integer representations of bit fields that are understood by SCAMP. The pairs are yielded in an order suitable for direct use with SCAMP's flood-fill core select (FFCS) method of loading.
10,337
def getTypeStr(_type): r if isinstance(_type, CustomType): return str(_type) if hasattr(_type, ): return _type.__name__ return
r"""Gets the string representation of the given type.
10,338
def set_state_view(self, request): if not request.user.has_perm(): return HttpResponseForbidden() try: state = int(request.POST.get("state", "")) except ValueError: return HttpResponseBadRequest() try: experiment = Experiment.objects.get(name=request.POST.get("experiment")) except Experiment.DoesNotExist: return HttpResponseBadRequest() experiment.state = state if state == 0: experiment.end_date = timezone.now() else: experiment.end_date = None experiment.save() return HttpResponse()
Changes the experiment state
10,339
def make_dataset(self, dataset, raise_if_exists=False, body=None): if body is None: body = {} try: body[] = { : dataset.project_id, : dataset.dataset_id } if dataset.location is not None: body[] = dataset.location self.client.datasets().insert(projectId=dataset.project_id, body=body).execute() except http.HttpError as ex: if ex.resp.status == 409: if raise_if_exists: raise luigi.target.FileAlreadyExists() else: raise
Creates a new dataset with the default permissions. :param dataset: :type dataset: BQDataset :param raise_if_exists: whether to raise an exception if the dataset already exists. :raises luigi.target.FileAlreadyExists: if raise_if_exists=True and the dataset exists
10,340
def setup_statemachine(self): machine = QtCore.QStateMachine() group = util.QState("group", QtCore.QState.ParallelStates, machine) visibility = util.QState("visibility", group) hidden = util.QState("hidden", visibility) visible = util.QState("visible", visibility) operation = util.QState("operation", group) ready = util.QState("ready", operation) collecting = util.QState("collecting", operation) validating = util.QState("validating", operation) extracting = util.QState("extracting", operation) integrating = util.QState("integrating", operation) finished = util.QState("finished", operation) repairing = util.QState("repairing", operation) initialising = util.QState("initialising", operation) stopping = util.QState("stopping", operation) stopped = util.QState("stopped", operation) saving = util.QState("saving", operation) errored = util.QState("errored", group) clean = util.QState("clean", errored) dirty = util.QState("dirty", errored) suspended = util.QState("suspended", group) alive = util.QState("alive", suspended) acting = util.QState("acting", suspended) acted = QtCore.QHistoryState(operation) acted.setDefaultState(ready) hidden.addTransition(self.show, visible) visible.addTransition(self.hide, hidden) ready.addTransition(self.acting, acting) ready.addTransition(self.validating, validating) ready.addTransition(self.initialising, initialising) ready.addTransition(self.repairing, repairing) ready.addTransition(self.saving, saving) saving.addTransition(self.saved, ready) collecting.addTransition(self.initialised, ready) collecting.addTransition(self.stopping, stopping) validating.addTransition(self.stopping, stopping) validating.addTransition(self.finished, finished) validating.addTransition(self.extracting, extracting) extracting.addTransition(self.stopping, stopping) extracting.addTransition(self.finished, finished) extracting.addTransition(self.integrating, integrating) integrating.addTransition(self.stopping, stopping) integrating.addTransition(self.finished, finished) finished.addTransition(self.initialising, initialising) finished.addTransition(self.acting, acting) initialising.addTransition(self.collecting, collecting) stopping.addTransition(self.acted, acted) stopping.addTransition(self.finished, finished) dirty.addTransition(self.initialising, clean) clean.addTransition(self.changed, dirty) alive.addTransition(self.acting, acting) acting.addTransition(self.acted, acted) for compound, state in {machine: group, visibility: hidden, operation: ready, errored: clean, suspended: alive}.items(): compound.setInitialState(state) for state in (hidden, visible, ready, collecting, validating, extracting, integrating, finished, repairing, initialising, stopping, saving, stopped, dirty, clean, acting, alive, acted): state.entered.connect( lambda state=state: self.state_changed.emit(state.name)) machine.start() return machine
Setup and start state machine
10,341
def reduce_claims(query_claims): claims = collections.defaultdict(list) for claim, entities in query_claims.items(): for ent in entities: try: snak = ent.get() snaktype = snak.get() value = snak.get().get() except AttributeError: claims[claim] = [] try: if snaktype != : val = snaktype elif value.get(): val = value.get() elif value.get(): val = value.get() elif value.get(): val = value.get() else: val = value except AttributeError: val = value if not val or not [x for x in val if x]: raise ValueError("%s %s" % (claim, ent)) claims[claim].append(val) return dict(claims)
returns claims as reduced dict {P: [Q's or values]} P = property Q = item
10,342
def parse_bool(value):
    boolean = parse_str(value).capitalize()
    if boolean in ("True", "Yes", "On", "1"):
        return True
    elif boolean in ("False", "No", "Off", "0"):
        return False
    else:
        raise ValueError(.format(value))
Parse string to bool. :param str value: String value to parse as bool :return bool:
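A short usage sketch, assuming parse_str simply returns the input as a string:

parse_bool("yes")   # True  ("yes".capitalize() == "Yes")
parse_bool("Off")   # False
parse_bool("2")     # raises ValueError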
10,343
def render(self, message=None, css_class=, form_contents=None, status=200, title="Python OpenID Consumer Example", sreg_data=None, pape_data=None): self.send_response(status) self.pageHeader(title) if message: self.wfile.write("<div class=>" % (css_class,)) self.wfile.write(message) self.wfile.write("</div>") if sreg_data is not None: self.renderSREG(sreg_data) if pape_data is not None: self.renderPAPE(pape_data) self.pageFooter(form_contents)
Render a page.
10,344
def covertype():
    import sklearn.datasets
    data = sklearn.datasets.covtype.fetch_covtype()
    features = data.data
    labels = data.target
    features -= features.mean(0)
    features /= features.std(0)
    features = np.hstack([features, np.ones([features.shape[0], 1])])
    features = tf.cast(features, dtype=tf.float32)
    _, counts = np.unique(labels, return_counts=True)
    specific_category = np.argmax(counts)
    labels = (labels == specific_category)
    labels = tf.cast(labels, dtype=tf.int32)
    return features, labels
Builds the Covertype data set.
10,345
def t_fold_end(self, t): r column = find_column(t) indent = self.indent_stack[-1] if column < indent: rollback_lexpos(t) if column <= indent: t.lexer.pop_state() t.type = if column > indent: t.type = return t
r'\n+\ *
10,346
def targets(tgt, tgt_type=, **kwargs): roster_dir = __opts__.get(, ) raw = dict.fromkeys(os.listdir(roster_dir), ) log.debug(, len(raw), roster_dir) matched_raw = __utils__[](raw, tgt, tgt_type, ) rendered = {minion_id: _render(os.path.join(roster_dir, minion_id), **kwargs) for minion_id in matched_raw} pruned_rendered = {id_: data for id_, data in rendered.items() if data} log.debug( , len(rendered), tgt, tgt_type, len(rendered) - len(pruned_rendered)) return pruned_rendered
Return the targets from the directory of flat yaml files, checks opts for location.
10,347
def decode_body(headers: MutableMapping, body: bytes) -> dict:
    type_, encoding = parse_content_type(headers)
    decoded_body = body.decode(encoding)
    if type_ == "application/json":
        payload = json.loads(decoded_body)
    else:
        if decoded_body == "ok":
            payload = {"ok": True}
        else:
            payload = {"ok": False, "data": decoded_body}
    return payload
Decode the response body.

For 'application/json' content-type, load the body as a dictionary.

Args:
    headers: Response headers
    body: Response body

Returns:
    decoded body
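A hedged usage sketch; the header layout assumed here is whatever parse_content_type expects (a content-type value with a charset):

decode_body({"content-type": "application/json; charset=utf-8"}, b'{"ok": true}')
# -> {"ok": True}
decode_body({"content-type": "text/plain; charset=utf-8"}, b"ok")
# -> {"ok": True}
decode_body({"content-type": "text/plain; charset=utf-8"}, b"rate_limited")
# -> {"ok": False, "data": "rate_limited"}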
10,348
def getNextRecord(self, useCache=True): assert self._file is not None assert self._mode == self._FILE_READ_MODE try: line = self._reader.next() except StopIteration: if self.rewindAtEOF: if self._recordCount == 0: raise Exception("The source configured to reset at EOF but " " appears to be empty" % self._filename) self.rewind() line = self._reader.next() else: return None self._recordCount += 1 record = [] for i, f in enumerate(line): if f in self._missingValues: record.append(SENTINEL_VALUE_FOR_MISSING_DATA) else: record.append(self._adapters[i](f)) return record
Returns next available data record from the file. :returns: a data row (a list or tuple) if available; None, if no more records in the table (End of Stream - EOS); empty sequence (list or tuple) when timing out while waiting for the next record.
10,349
def download_file(pk): release_file = models.ReleaseFile.objects.get(pk=pk) logging.info("Downloading %s", release_file.url) proxies = None if settings.LOCALSHOP_HTTP_PROXY: proxies = settings.LOCALSHOP_HTTP_PROXY response = requests.get(release_file.url, stream=True, proxies=proxies) filename = os.path.basename(release_file.url) with TemporaryUploadedFile(name=filename, size=size, charset=, content_type=content_type) as temp_file: temp_file.write(response.content) temp_file.seek(0) md5_hash = md5_hash_file(temp_file) if md5_hash != release_file.md5_digest: logging.error("MD5 hash mismatch: %s (expected: %s)" % ( md5_hash, release_file.md5_digest)) return release_file.distribution.save(filename, temp_file) release_file.save() logging.info("Complete")
Download the file reference in `models.ReleaseFile` with the given pk.
10,350
def get_file_search(self, query): api_name = (all_responses, query) = self._bulk_cache_lookup(api_name, query) response_chunks = self._request_reports("query", query, ) self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
Performs advanced search on samples, matching certain binary/metadata/detection criteria. Possible queries: file size, file type, first or last submission to VT, number of positives, binary content, etc. Args: query: dictionary with search arguments Example: 'query': 'type:peexe size:90kb+ positives:5+ behaviour:"taskkill"' Returns: A dict with the VT report.
10,351
def geo(self): out = dict(zip([, , , , , ], self.raster.GetGeoTransform())) out[] = out[] + out[] * self.cols out[] = out[] + out[] * self.rows return out
General image geo information. Returns ------- dict a dictionary with keys `xmin`, `xmax`, `xres`, `rotation_x`, `ymin`, `ymax`, `yres`, `rotation_y`
10,352
def set_mapper_index(self, index, mapper):
    parent = index.parent()
    mapper.setRootIndex(parent)
    mapper.setCurrentModelIndex(index)
Set the mapper to the given index :param index: the index to set :type index: QtCore.QModelIndex :param mapper: the mapper to set :type mapper: QtGui.QDataWidgetMapper :returns: None :rtype: None :raises: None
10,353
def read_config(config):
    for line in config.splitlines():
        line = line.lstrip()
        if line and not line.startswith("#"):
            return line
    return ""
Read config file and return the first uncommented line.
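A minimal sketch, assuming lines beginning with "#" are the comments being skipped; the config contents are hypothetical:

config_text = "# comment line\nlisten_port = 8080\n"
read_config(config_text)   # "listen_port = 8080"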
10,354
def rename(self, path, raise_if_exists=False): if isinstance(path, HdfsTarget): path = path.path if raise_if_exists and self.fs.exists(path): raise RuntimeError( % path) self.fs.rename(self.path, path)
Does not change self.path. Unlike ``move_dir()``, ``rename()`` might cause nested directories. See spotify/luigi#522
10,355
def html_abstract(self): return self.format_abstract(format=, deparagraph=False, mathjax=False, smart=True)
HTML5-formatted document abstract (`str`).
10,356
def get_version(): proc = tmux_cmd() if proc.stderr: if proc.stderr[0] == : if sys.platform.startswith("openbsd"): return LooseVersion( % TMUX_MAX_VERSION) raise exc.LibTmuxException( % TMUX_MIN_VERSION ) raise exc.VersionTooLow(proc.stderr) version = proc.stdout[0].split()[1] if version == : return LooseVersion( % TMUX_MAX_VERSION) version = re.sub(r, , version) return LooseVersion(version)
Return tmux version. If tmux is built from git master, the version returned will be the latest version appended with -master, e.g. ``2.4-master``. If using OpenBSD's base system tmux, the version will have ``-openbsd`` appended to the latest version, e.g. ``2.4-openbsd``. Returns ------- :class:`distutils.version.LooseVersion` tmux version according to :func:`libtmux.common.which`'s tmux
10,357
def emitError(self, level):
    if level in [ABORT, ERROR, WARNING, VERBOSE, VERBOSE1, VERBOSE2, VERBOSE3, DEBUG]:
        return True
    return False
determine if a level should print to stderr, includes all levels but INFO and QUIET
10,358
def intersect(self, range_): self.solver.intersection_broad_tests_count += 1 if range_.is_any(): return self if self.solver.optimised: if range_ in self.been_intersected_with: return self if self.pr: self.pr.passive("intersecting %s wrt range ...", self, range_) self.solver.intersection_tests_count += 1 with self.solver.timed(self.solver.intersection_time): entries = [x for x in self.entries if x.version in range_] if not entries: return None elif len(entries) < len(self.entries): copy_ = self._copy(entries) copy_.been_intersected_with.add(range_) return copy_ else: self.been_intersected_with.add(range_) return self
Remove variants whose version fall outside of the given range.
10,359
def handle_annotations_url(self, line: str, position: int, tokens: ParseResults) -> ParseResults: keyword = tokens[] self.raise_for_redefined_annotation(line, position, keyword) url = tokens[] self.annotation_url_dict[keyword] = url if self.skip_validation: return tokens self.annotation_to_term[keyword] = self.manager.get_annotation_entry_names(url) return tokens
Handle statements like ``DEFINE ANNOTATION X AS URL "Y"``. :raises: RedefinedAnnotationError
10,360
def Suratman(L, rho, mu, sigma):
    return rho*sigma*L/(mu*mu)
r'''Calculates Suratman number, `Su`, for a fluid with the given characteristic length, density, viscosity, and surface tension. .. math:: \text{Su} = \frac{\rho\sigma L}{\mu^2} Parameters ---------- L : float Characteristic length [m] rho : float Density of fluid, [kg/m^3] mu : float Viscosity of fluid, [Pa*s] sigma : float Surface tension, [N/m] Returns ------- Su : float Suratman number [] Notes ----- Also known as Laplace number. Used in two-phase flow, especially the bubbly-slug regime. No confusion regarding the definition of this group has been observed. .. math:: \text{Su} = \frac{\text{Re}^2}{\text{We}} =\frac{\text{Inertia}\cdot \text{Surface tension} }{\text{(viscous forces)}^2} The oldest reference to this group found by the author is in 1963, from [2]_. Examples -------- >>> Suratman(1E-4, 1000., 1E-3, 1E-1) 10000.0 References ---------- .. [1] Sen, Nilava. "Suratman Number in Bubble-to-Slug Flow Pattern Transition under Microgravity." Acta Astronautica 65, no. 3-4 (August 2009): 423-28. doi:10.1016/j.actaastro.2009.02.013. .. [2] Catchpole, John P., and George. Fulford. "DIMENSIONLESS GROUPS." Industrial & Engineering Chemistry 58, no. 3 (March 1, 1966): 46-60. doi:10.1021/ie50675a012.
10,361
def get_all_metadata(
        self,
        bucket: str,
        key: str
) -> dict:
    try:
        return self.s3_client.head_object(
            Bucket=bucket,
            Key=key
        )
    except botocore.exceptions.ClientError as ex:
        if str(ex.response['Error']['Code']) == \
                str(requests.codes.not_found):
            raise BlobNotFoundError(f"Could not find s3://{bucket}/{key}") from ex
        raise BlobStoreUnknownError(ex)
Retrieves all the metadata for a given object in a given bucket. :param bucket: the bucket the object resides in. :param key: the key of the object for which metadata is being retrieved. :return: the metadata
10,362
def _get_future_tasks(self): self.alerts = {} now = std_now() for task in objectmodels[].find({: {: now}}): self.alerts[task.alert_time] = task self.log(, len(self.alerts), )
Assemble a list of future alerts
10,363
def has_next_assessment_part(self, assessment_part_id): if not self.supports_child_ordering or not self.supports_simple_child_sequencing: raise AttributeError() if in self._my_map and str(assessment_part_id) in self._my_map[]: if self._my_map[][-1] != str(assessment_part_id): return True else: return False raise errors.NotFound( + str(assessment_part_id) + )
This supports the basic simple sequence case. Can be overridden in a record for other cases.
10,364
def output_filename(output_dir, key_handle, public_id):
    parts = [output_dir, key_handle] + pyhsm.util.group(public_id, 2)
    path = os.path.join(*parts)
    if not os.path.isdir(path):
        os.makedirs(path)
    return os.path.join(path, public_id)
Return an output filename for a generated AEAD. Creates a hashed directory structure using the last three bytes of the public id to get equal usage.
10,365
def deprecate(message): warnings.simplefilter() warnings.warn(message, category=DeprecationWarning) warnings.resetwarnings()
Loudly prints warning.
10,366
def _extract_apis_from_function(logical_id, function_resource, collector):
    resource_properties = function_resource.get("Properties", {})
    serverless_function_events = resource_properties.get(SamApiProvider._FUNCTION_EVENT, {})
    SamApiProvider._extract_apis_from_events(logical_id, serverless_function_events, collector)
Fetches a list of APIs configured for this SAM Function resource. Parameters ---------- logical_id : str Logical ID of the resource function_resource : dict Contents of the function resource including its properties collector : ApiCollector Instance of the API collector that where we will save the API information
10,367
def getExtn(fimg, extn=None): if extn is None: _extn = fimg[0] for _e in fimg: if _e.data is not None: _extn = _e break else: if repr(extn).find() > 1: if isinstance(extn, tuple): _extns = list(extn) if in _extns: _extns.remove() else: _extns = extn.split() try: _extn = fimg[_extns[0], int(_extns[1])] except KeyError: _extn = None for e in fimg: hdr = e.header if ( in hdr and hdr[].lower() == _extns[0].lower() and hdr[] == int(_extns[1])): _extn = e break elif repr(extn).find() > 1: _indx = str(extn[:extn.find()]) _extn = fimg[int(_indx)] elif isinstance(extn, string_types): if extn.strip() == : _extn = None elif extn.isdigit(): _nextn = int(extn) else: _nextn = None if extn.lower() == : _nextn = 0 else: i = 0 for hdu in fimg: isimg = in hdu.header hdr = hdu.header if isimg and extn.lower() == hdr[].lower(): _nextn = i break i += 1 if _nextn < len(fimg): _extn = fimg[_nextn] else: _extn = None else: if int(extn) < len(fimg): _extn = fimg[int(extn)] else: _extn = None if _extn is None: raise KeyError( % extn) return _extn
Returns the PyFITS extension corresponding to extension specified in filename. Defaults to returning the first extension with data or the primary extension, if none have data. If a non-existent extension has been specified, it raises a `KeyError` exception.
10,368
def crosscov(x, y, axis=-1, all_lags=False, debias=True, normalize=True): if x.shape[axis] != y.shape[axis]: raise ValueError( ) if debias: x = remove_bias(x, axis) y = remove_bias(y, axis) slicing = [slice(d) for d in x.shape] slicing[axis] = slice(None, None, -1) cxy = fftconvolve(x, y[tuple(slicing)].conj(), axis=axis, mode=) N = x.shape[axis] if normalize: cxy /= N if all_lags: return cxy slicing[axis] = slice(N - 1, 2 * N - 1) return cxy[tuple(slicing)]
Returns the crosscovariance sequence between two ndarrays. This is performed by calling fftconvolve on x, y[::-1] Parameters ---------- x : ndarray y : ndarray axis : time axis all_lags : {True/False} whether to return all nonzero lags, or to clip the length of s_xy to be the length of x and y. If False, then the zero lag covariance is at index 0. Otherwise, it is found at (len(x) + len(y) - 1)/2 debias : {True/False} Always removes an estimate of the mean along the axis, unless told not to (eg X and Y are known zero-mean) Returns ------- cxy : ndarray The crosscovariance function Notes ----- cross covariance of processes x and y is defined as .. math:: C_{xy}[k]=E\{(X(n+k)-E\{X\})(Y(n)-E\{Y\})^{*}\} where X and Y are discrete, stationary (or ergodic) random processes Also note that this routine is the workhorse for all auto/cross/cov/corr functions.
10,369
def parse(cls, gvid, exception=True): if gvid == : return cls.get_class()(0) if not bool(gvid): return None if not isinstance(gvid, six.string_types): raise TypeError("Can{}{}null{}{}null{}{}sl'] except KeyError: pass return cls(**d)
Parse a string value into the geoid of this class. :param gvid: String value to parse. :param exception: If true ( default) raise an eception on parse erorrs. If False, return a 'null' geoid. :return:
10,370
def pull_requests(self): pr_numbers = re.findall(r"[pP][rR]\s?[0-9]+", self.description) pr_numbers += re.findall(re.compile("pull\s?request\s?[0-9]+", re.IGNORECASE), self.description) pr_numbers = [re.sub(,, p) for p in pr_numbers] return pr_numbers
Looks for any of the following pull request formats in the description field: pr12345, pr 2345, PR2345, PR 2345
10,371
def get_nexusvm_bindings(vlan_id, instance_id):
    LOG.debug("get_nexusvm_bindings() called")
    return _lookup_all_nexus_bindings(instance_id=instance_id,
                                      vlan_id=vlan_id)
Lists nexusvm bindings.
10,372
def path(self, value): prepval = value.replace(, ) self._path = posixpath.normpath(prepval)
Set path :param value: The value for path :type value: str :raises: None
10,373
def subcorpus(self, selector):
    subcorpus = self.__class__(self[selector],
                               index_by=self.index_by,
                               index_fields=self.indices.keys(),
                               index_features=self.features.keys())
    return subcorpus
Generates a new :class:`.Corpus` using the criteria in ``selector``. Accepts selector arguments just like :meth:`.Corpus.select`\. .. code-block:: python >>> corpus = Corpus(papers) >>> subcorpus = corpus.subcorpus(('date', 1995)) >>> subcorpus <tethne.classes.corpus.Corpus object at 0x10278ea10>
10,374
def visibility_changed(self, enable):
    super(SpyderPluginWidget, self).visibility_changed(enable)
    if enable and not self.pydocbrowser.is_server_running():
        self.pydocbrowser.initialize()
DockWidget visibility has changed
10,375
def _create_spec_config(self, table_name, spec_documents):
    _spec_table = self._resource.Table(table_name + '.spec')
    for doc in spec_documents:
        _spec_table.put_item(Item=doc)
Dynamo implementation of spec config creation Called by `create_archive_table()` in :py:class:`manager.BaseDataManager` Simply adds two rows to the spec table Parameters ---------- table_name : base table name (not including .spec suffix) spec_documents : list list of dictionary documents defining the manager spec
10,376
def _message_hostgroup_parse(self, message): splitter_count = message.count(WHostgroupBeaconMessenger.__message_groups_splitter__) if splitter_count == 0: return [], WBeaconGouverneurMessenger._message_address_parse(self, message) elif splitter_count == 1: splitter_pos = message.find(WHostgroupBeaconMessenger.__message_groups_splitter__) groups = [] group_splitter = WHostgroupBeaconMessenger.__group_splitter__ for group_name in message[(splitter_pos + 1):].split(group_splitter): groups.append(group_name.strip()) address = WBeaconGouverneurMessenger._message_address_parse(self, message[:splitter_pos]) return groups, address else: raise ValueError()
Parse given message and return list of group names and socket information. Socket information is parsed in :meth:`.WBeaconGouverneurMessenger._message_address_parse` method :param message: bytes :return: tuple of list of group names and WIPV4SocketInfo
10,377
def _cmd_line_parser(): parser = argparse.ArgumentParser() parser.add_argument(, help=( )) parser.add_argument(, action=, help=) parser.add_argument(, default=, nargs=, choices=[, ], help=) return parser
return a command line parser. It is used when generating the documentation
10,378
def init_environment(): os.environ[] = pluginpath = os.pathsep.join((os.environ.get(, ), constants.BUILTIN_PLUGIN_PATH)) os.environ[] = pluginpath
Set environment variables that are important for the pipeline. :returns: None :rtype: None :raises: None
10,379
def send_message(self, app_mxit_id, target_user_ids, message=, contains_markup=True, spool=None, spool_timeout=None, links=None, scope=): data = { : app_mxit_id, : ",".join(target_user_ids), : message, : contains_markup } if spool: data[] = spool if spool_timeout: data[] = spool_timeout if links: data[] = links return _post( token=self.oauth.get_app_token(scope), uri=, data=data )
Send a message (from a Mxit app) to a list of Mxit users
10,380
def parentLayer(self):
    if self._parentLayer is None:
        from ..agol.services import FeatureService
        self.__init()
        url = os.path.dirname(self._url)
        self._parentLayer = FeatureService(url=url,
                                           securityHandler=self._securityHandler,
                                           proxy_url=self._proxy_url,
                                           proxy_port=self._proxy_port)
    return self._parentLayer
returns information about the parent
10,381
def run(main=None, argv=None):
    flags_obj = flags.FLAGS
    absl_flags_obj = absl_flags.FLAGS
    args = argv[1:] if argv else None
    flags_passthrough = flags_obj._parse_flags(args=args)
    if absl_flags_obj["verbosity"].using_default_value:
        absl_flags_obj.verbosity = 0
    main = main or sys.modules['__main__'].main
    sys.exit(main(sys.argv[:1] + flags_passthrough))
Runs the program with an optional 'main' function and 'argv' list.
10,382
def get_events(self):
    result = []
    while self._wait(0):
        event = self._read()
        if event:
            result.append(event)
    return result
Returns a list of all joystick events that have occurred since the last call to `get_events`. The list contains events in the order that they occurred. If no events have occurred in the intervening time, the result is an empty list.
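A hedged polling sketch, where js stands in for an instance of the joystick class that defines get_events():

for event in js.get_events():
    print(event)   # handle each queued event in arrival order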
10,383
def _lei16(ins): output = _16bit_oper(ins.quad[2], ins.quad[3]) output.append() output.append() REQUIRES.add() return output
Compares & pops top 2 operands out of the stack, and checks if the 1st operand <= 2nd operand (top of the stack). Pushes 0 if False, 1 if True. 16 bit signed version
10,384
def fetch(self, key: object, default=None):
    return self._user_data.get(key, default)
Retrieves the related value from the stored user data.
10,385
def generate(str, alg):
    img = Image.new(IMAGE_MODE, IMAGE_SIZE, BACKGROUND_COLOR)
    hashcode = hash_input(str, alg)
    pixelmap = setup_pixelmap(hashcode)
    draw_image(pixelmap, img)
    return img
Generates a PIL image avatar based on the given input string. Acts as the main accessor to pagan.
10,386
def _get_user_data(self):
    url = self.session.host +  + str(self.session.id) +  + str(self.id) + 
    r = requests.get(url)
    if r.status_code == 200:
        content = r.json()
    else:
        raise Exception()
    return content
Base method for retrieving user data from a viz.
10,387
def open(self, filename, mode=, **kwargs):
    if  in mode and not self.backend.exists(filename):
        raise FileNotFound(filename)
    return self.backend.open(filename, mode, **kwargs)
Open the file and return a file-like object. :param str filename: The storage root-relative filename :param str mode: The open mode (``(r|w)b?``) :raises FileNotFound: If trying to read a file that does not exist
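A runnable illustration of the read guard described in the docstring, using an in-memory stand-in for the backend. The elided mode default and the mode test are assumed to be 'r' here, since only reads of a missing file are documented to raise FileNotFound:

import io

class FileNotFound(Exception):
    pass

class _MemoryStorage:
    """Illustrative stand-in for the real storage backend."""
    def __init__(self):
        self._files = {}
    def exists(self, filename):
        return filename in self._files
    def open(self, filename, mode='r', **kwargs):
        # Same guard as above: only read modes check for existence.
        if 'r' in mode and not self.exists(filename):
            raise FileNotFound(filename)
        return self._files.setdefault(filename, io.StringIO())

store = _MemoryStorage()
try:
    store.open('report.txt')             # read of a missing file -> FileNotFound
except FileNotFound as exc:
    print('missing:', exc)
store.open('report.txt', mode='w')       # write mode skips the existence check
print(store.exists('report.txt'))        # True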
10,388
def position(self):
    line, col = self._position(self.chunkOffset)
    return (line + 1, col)
Returns (line, col) of the current position in the stream.
10,389
def parseReaderConfig(self, confdict):
    logger.debug(, confdict)
    conf = {}
    for k, v in confdict.items():
        if not k.startswith():
            continue
        ty = v['Type']
        data = v['Data']
        vendor = None
        subtype = None
        try:
            vendor, subtype = v['Vendor'], v['Subtype']
        except KeyError:
            pass
        if ty == 1023:
            if vendor == 25882 and subtype == 37:
                tempc = struct.unpack(, data)[0]
                conf.update(temperature=tempc)
        else:
            conf[ty] = data
    return conf
Parse a reader configuration dictionary. Examples: { Type: 23, Data: b'\x00' } { Type: 1023, Vendor: 25882, Subtype: 21, Data: b'\x00' }
10,390
def count_sources(edge_iter: EdgeIterator) -> Counter:
    return Counter(u for u, _, _ in edge_iter)
Count the source nodes in an edge iterator with keys and data. :return: A counter of source nodes in the iterable
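Because the function is a pure one-liner over (source, target, data) triples, a fully runnable example is easy; the project-specific EdgeIterator alias is dropped here:

from collections import Counter

def count_sources(edge_iter):
    # Same logic as above, minus the project-specific type alias.
    return Counter(u for u, _, _ in edge_iter)

edges = [
    ('A', 'B', {}),   # (source, target, data) triples
    ('A', 'C', {}),
    ('B', 'C', {}),
]
print(count_sources(edges))   # Counter({'A': 2, 'B': 1})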
10,391
def ordered_expected_layers(self):
    registry = QgsProject.instance()
    layers = []
    count = self.list_layers_in_map_report.count()
    for i in range(count):
        layer = self.list_layers_in_map_report.item(i)
        origin = layer.data(LAYER_ORIGIN_ROLE)
        if origin == FROM_ANALYSIS[]:
            key = layer.data(LAYER_PURPOSE_KEY_OR_ID_ROLE)
            parent = layer.data(LAYER_PARENT_ANALYSIS_ROLE)
            layers.append((
                FROM_ANALYSIS[],
                key,
                parent,
                None
            ))
        else:
            layer_id = layer.data(LAYER_PURPOSE_KEY_OR_ID_ROLE)
            layer = registry.mapLayer(layer_id)
            style_document = QDomDocument()
            layer.exportNamedStyle(style_document)
            layers.append((
                FROM_CANVAS[],
                layer.name(),
                full_layer_uri(layer),
                style_document.toString()
            ))
    return layers
Get an ordered list of layers according to users input. From top to bottom in the legend: [ ('FromCanvas', layer name, full layer URI, QML), ('FromAnalysis', layer purpose, layer group, None), ... ] The full layer URI is coming from our helper. :return: An ordered list of layers following a structure. :rtype: list
10,392
def batch_predict_async(training_dir, prediction_input_file, output_dir, mode,
                        batch_size=16, shard_files=True, output_format=, cloud=False):
    import google.datalab.utils as du

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        if cloud:
            runner_results = cloud_batch_predict(training_dir, prediction_input_file,
                                                 output_dir, mode, batch_size,
                                                 shard_files, output_format)
            job = du.DataflowJob(runner_results)
        else:
            runner_results = local_batch_predict(training_dir, prediction_input_file,
                                                 output_dir, mode, batch_size,
                                                 shard_files, output_format)
            job = du.LambdaJob(lambda: runner_results.wait_until_finish(), job_id=None)

    return job
Local and cloud batch prediction. Args: training_dir: The output folder of training. prediction_input_file: csv file pattern to a file. File must be on GCS if running cloud prediction. output_dir: output location to save the results. Must be a GCS path if running cloud prediction. mode: 'evaluation' or 'prediction'. If 'evaluation', the input data must contain a target column. If 'prediction', the input data must not contain a target column. batch_size: Int. How many instances to run in memory at once. Larger values mean better performance but more memory consumed. shard_files: If False, the output files are not sharded. output_format: csv or json. Json files are json-newlined. cloud: If true, does cloud batch prediction. If False, runs batch prediction locally. Returns: A google.datalab.utils.Job object that can be used to query state from or wait.
10,393
def toc(self, depth=6, lowest_level=6):
    depth = min(max(depth, 0), 6)
    depth = 6 if depth == 0 else depth
    lowest_level = min(max(lowest_level, 1), 6)
    toc = self._root.to_dict()[]

    def traverse(curr_toc, dep, lowest_lvl, curr_depth=1):
        if curr_depth > dep:
            curr_toc.clear()
            return
        items_to_remove = []
        for item in curr_toc:
            if item[] > lowest_lvl:
                items_to_remove.append(item)
            else:
                traverse(item[], dep, lowest_lvl, curr_depth + 1)
        [curr_toc.remove(item) for item in items_to_remove]

    traverse(toc, depth, lowest_level)
    return toc
Get table of content of currently fed HTML string. :param depth: the depth of TOC :param lowest_level: the allowed lowest level of header tag :return: a list representing the TOC
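The clamping of the two arguments is easy to misread, so here it is isolated as a runnable sketch (same expressions as above):

def _clamp_toc_args(depth, lowest_level):
    # depth of 0 (or anything outside 1..6) falls back to 6;
    # lowest_level is forced into the range 1..6.
    depth = min(max(depth, 0), 6)
    depth = 6 if depth == 0 else depth
    lowest_level = min(max(lowest_level, 1), 6)
    return depth, lowest_level

print(_clamp_toc_args(0, 9))    # (6, 6)
print(_clamp_toc_args(3, 0))    # (3, 1)
print(_clamp_toc_args(42, 4))   # (6, 4)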
10,394
def initialize(name=, pool_size=10, host=, password=, port=5432, user=):
    global pool
    instance = Pool(name=name, pool_size=pool_size, host=host,
                    password=password, port=port, user=user)
    pool = instance
    return instance
Initialize a new database connection and return the pool object. Saves a reference to that instance in a module-level variable, so applications with only one database can just call this function and not worry about pool objects.
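A runnable sketch of the module-level singleton pattern the docstring describes, with a stub Pool class standing in for the real connection pool; the string defaults were elided above, so the placeholders below are assumptions:

pool = None   # module-level reference, as in the snippet above

class Pool:
    """Minimal stand-in for the real connection pool."""
    def __init__(self, **kwargs):
        self.config = kwargs

def initialize(name='default', pool_size=10, host='localhost',
               password='', port=5432, user='postgres'):
    # Only port and pool_size defaults come from the source; the rest are placeholders.
    global pool
    instance = Pool(name=name, pool_size=pool_size, host=host,
                    password=password, port=port, user=user)
    pool = instance
    return instance

db = initialize(host='db.internal', user='app')
assert db is pool                  # later callers can simply use the module-level `pool`
print(pool.config['host'])         # db.internal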
10,395
def _invoke_callbacks(self, *args, **kwargs):
    for callback in self._done_callbacks:
        _helpers.safe_invoke_callback(callback, *args, **kwargs)
Invoke all done callbacks.
10,396
def create_table(
    data,
    meta=None,
    fields=None,
    skip_header=True,
    import_fields=None,
    samples=None,
    force_types=None,
    max_rows=None,
    *args,
    **kwargs
):
    table_rows = iter(data)
    force_types = force_types or {}
    if import_fields is not None:
        import_fields = make_header(import_fields)

    if fields is None:
        header = make_header(next(table_rows))

        if samples is not None:
            sample_rows = list(islice(table_rows, 0, samples))
            table_rows = chain(sample_rows, table_rows)
        else:
            if max_rows is not None and max_rows > 0:
                sample_rows = table_rows = list(islice(table_rows, max_rows))
            else:
                sample_rows = table_rows = list(table_rows)

        detected_fields = detect_types(
            header,
            sample_rows,
            skip_indexes=[
                index
                for index, field in enumerate(header)
                if field in force_types or field not in (import_fields or header)
            ],
            *args,
            **kwargs
        )
        new_fields = [
            field_name
            for field_name in detected_fields.keys()
            if field_name not in header
        ]
        fields = OrderedDict(
            [
                (field_name, detected_fields.get(field_name, TextField))
                for field_name in header + new_fields
            ]
        )
        fields.update(force_types)
        header = list(fields.keys())
        if import_fields is None:
            import_fields = header
    else:
        if not isinstance(fields, OrderedDict):
            raise ValueError("`fields` must be an `OrderedDict`")
        if skip_header:
            raise ValueError("Invalid field names: {}".format(field_names))
        fields = OrderedDict(
            [(field_name, fields[field_name]) for field_name in import_fields]
        )

    get_row = get_items(*map(header.index, import_fields))
    table = Table(fields=fields, meta=meta)
    if max_rows is not None and max_rows > 0:
        table_rows = islice(table_rows, max_rows)
    table.extend(dict(zip(import_fields, get_row(row))) for row in table_rows)

    source = table.meta.get("source", None)
    if source is not None:
        if source.should_close:
            source.fobj.close()
        if source.should_delete and Path(source.uri).exists():
            unlink(source.uri)

    return table
Create a rows.Table object based on data rows and some configurations - `skip_header` is only used if `fields` is set - `samples` is only used if `fields` is `None`. If samples=None, all data is filled in memory - use with caution. - `force_types` is only used if `fields` is `None` - `import_fields` can be used whether `fields` is set or not; the resulting fields will follow its order - `fields` must always be in the same order as the data
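A hedged usage sketch, assuming this function is importable from the rows library alongside Table and detect_types; the field names and values are arbitrary:

# Hypothetical call against the rows library; outputs are indicative only.
data = [
    ['name', 'birth_year'],
    ['Ada Lovelace', '1815'],
    ['Alan Turing', '1912'],
]
table = create_table(data, samples=None)
print(table.fields)                  # e.g. name -> TextField, birth_year -> IntegerField
print([row.name for row in table])   # ['Ada Lovelace', 'Alan Turing']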
10,397
def file_renamed_in_data_in_editorstack(self, editorstack_id_str,
                                        original_filename, filename):
    for editorstack in self.editorstacks:
        if str(id(editorstack)) != editorstack_id_str:
            editorstack.rename_in_data(original_filename, filename)
A file was renamed in data in editorstack, this notifies others
10,398
def pkcs7_pad(buf):
    padder = cryptography.hazmat.primitives.padding.PKCS7(
        cryptography.hazmat.primitives.ciphers.
        algorithms.AES.block_size).padder()
    return padder.update(buf) + padder.finalize()
Appends PKCS7 padding to an input buffer :param bytes buf: buffer to add padding :rtype: bytes :return: buffer with PKCS7_PADDING
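The cryptography padder above does the real work; as a sanity check, PKCS7 padding for AES reduces to the following pure-Python rule (runnable, no dependencies):

def pkcs7_pad_pure(buf, block_size=16):
    # Append N bytes, each of value N, where N = block_size - len(buf) % block_size.
    # A buffer that is already block-aligned still gets one full padding block.
    pad_len = block_size - (len(buf) % block_size)
    return buf + bytes([pad_len]) * pad_len

print(pkcs7_pad_pure(b'hello'))               # b'hello' followed by 11 bytes of 0x0b
print(pkcs7_pad_pure(b'0123456789abcdef'))    # a full extra block of 16 bytes of 0x10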
10,399
def _make_names_unique(animations):
    counts = {}
    for a in animations:
        c = counts.get(a[], 0) + 1
        counts[a[]] = c
        if c > 1:
            a[] +=  + str(c - 1)
    dupes = set(k for k, v in counts.items() if v > 1)
    for a in animations:
        if a[] in dupes:
            a[] += 
Given a list of animations, some of which might have duplicate names, rename the first one to be <duplicate>_0, the second <duplicate>_1, <duplicate>_2, etc.
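A runnable reconstruction for reference: the dictionary key 'name' and the '_' separator are assumptions (the original string literals were elided), chosen to match the renaming behaviour the docstring describes:

def make_names_unique(animations):
    # 'name' is an assumed key; '_' and '_0' follow the "<duplicate>_0, _1, _2" scheme.
    counts = {}
    for a in animations:
        c = counts.get(a['name'], 0) + 1
        counts[a['name']] = c
        if c > 1:
            a['name'] += '_' + str(c - 1)
    dupes = set(k for k, v in counts.items() if v > 1)
    for a in animations:
        if a['name'] in dupes:
            a['name'] += '_0'

anims = [{'name': 'walk'}, {'name': 'walk'}, {'name': 'idle'}, {'name': 'walk'}]
make_names_unique(anims)
print([a['name'] for a in anims])   # ['walk_0', 'walk_1', 'idle', 'walk_2']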