def get_language(self, language_id):
    raw_response = requests_util.run_request(
        'get',
        self.API_BASE_URL + '/languages/%d' % language_id,
        headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Retrieves information about the language of the given id.

:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
def get_regex(self):
    regex = ''
    for flag in self.compound:
        if flag == '?' or flag == '*':
            regex += flag
        else:
            regex += '(' + '|'.join(self.flags[flag]) + ')'
    return regex
Generates and returns the compound regular expression.
def _find_controller(self, *args):
    for name in args:
        obj = self._lookup_child(name)
        if obj and iscontroller(obj):
            return obj
    return None
Returns the appropriate controller for routing a custom action.
def get_replicas(self, service_id: str) -> int:
    replicas = []
    if not self._manager:
        raise RuntimeError('Only the Swarm manager node can retrieve '
                           'replication level of the service')
    service_tasks = self._client.services.get(service_id).tasks()
    for task in service_tasks:
        if task['Status']['State'] == "running":
            replicas.append(task)
    return len(replicas)
Get the replication level of a service.

Args:
    service_id (str): docker swarm service id

Returns:
    int, replication level of the service
def pick_free_port():
    test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    test_socket.bind(('127.0.0.1', 0))
    free_port = int(test_socket.getsockname()[1])
    test_socket.close()
    return free_port
Picks a free port
def init_win32com():
    global _initialized
    if _initialized:
        return
    import win32com.client
    if win32com.client.gencache.is_readonly:
        win32com.client.gencache.is_readonly = False
        win32com.client.gencache.Rebuild()
    _initialized = True
Initialize the win32com.client cache.
def mode(self):
    mu = self.mean()
    sigma = self.std()
    ret_val = math.exp(mu - sigma**2)
    if math.isnan(ret_val):
        ret_val = float("inf")
    return ret_val
Computes the mode of a log-normal distribution built with the stats data.
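For reference, the value computed above is the standard closed form for the mode of a log-normal distribution, exp(mu - sigma**2), where mu and sigma are the log-scale mean and standard deviation. A quick sanity check with made-up parameters:

import math

mu, sigma = 0.0, 1.0  # hypothetical log-scale parameters, not taken from real stats data
mode = math.exp(mu - sigma**2)  # exp(-1) ≈ 0.368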
def GetUcsPropertyMeta(classId, key):
    if classId in _ManagedObjectMeta:
        if key in _ManagedObjectMeta[classId]:
            return _ManagedObjectMeta[classId][key]
    return None
Returns the property meta of the provided key for the given classId.
def _create_and_save_state(cls, mapreduce_spec, _app):
    state = model.MapreduceState.create_new(mapreduce_spec.mapreduce_id)
    state.mapreduce_spec = mapreduce_spec
    state.active = True
    state.active_shards = 0
    if _app:
        state.app_id = _app
    config = util.create_datastore_write_config(mapreduce_spec)
    state.put(config=config)
    return state
Save mapreduce state to datastore.

Save state to datastore so that UI can see it immediately.

Args:
    mapreduce_spec: model.MapreduceSpec.
    _app: app id if specified. None otherwise.

Returns:
    The saved Mapreduce state.
def perform_get_or_create(self, request, *args, **kwargs):
    serializer = self.get_serializer(data=request.data)
    serializer.is_valid(raise_exception=True)

    process = serializer.validated_data.get('process')
    process_input = request.data.get('input', {})

    fill_with_defaults(process_input, process.input_schema)

    checksum = get_data_checksum(process_input, process.slug, process.version)
    data_qs = Data.objects.filter(
        checksum=checksum,
        process__persistence__in=[Process.PERSISTENCE_CACHED, Process.PERSISTENCE_TEMP],
    )
    data_qs = get_objects_for_user(request.user, 'view_data', data_qs)
    if data_qs.exists():
        data = data_qs.order_by('created').last()
        serializer = self.get_serializer(data)
        return Response(serializer.data)
Perform "get_or_create" - return existing object if found.
def csv_print(classes, class_stat, digit=5, class_param=None):
    result = "Class"
    classes.sort()
    for item in classes:
        result += ',"' + str(item) + '"'
    result += "\n"
    class_stat_keys = sorted(class_stat.keys())
    if isinstance(class_param, list):
        if set(class_param) <= set(class_stat_keys):
            class_stat_keys = class_param
    if len(class_stat_keys) < 1 or len(classes) < 1:
        return ""
    for key in class_stat_keys:
        row = [rounder(class_stat[key][i], digit) for i in classes]
        result += key + "," + ",".join(row)
        result += "\n"
    return result
Return csv file data.

:param classes: classes list
:type classes: list
:param class_stat: statistic result for each class
:type class_stat: dict
:param digit: scale (the number of digits to the right of the decimal point in a number)
:type digit: int
:param class_param: class parameters list for print, Example: ["TPR", "TNR", "AUC"]
:type class_param: list
:return: csv file data as str
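A minimal usage sketch; rounder below is a stand-in for the library's own rounding helper, and the stats values are made up:

def rounder(x, digit):  # stand-in: format a number with the given scale
    return str(round(x, digit))

classes = [0, 1]
class_stat = {"TPR": {0: 0.5, 1: 0.8}, "TNR": {0: 0.9, 1: 0.6}}
print(csv_print(classes, class_stat, digit=2))
# Class,"0","1"
# TNR,0.9,0.6
# TPR,0.5,0.8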
def hashes(self):
    hashes = set()
    if self.resources is not None:
        for resource in self:
            if resource.md5 is not None:
                hashes.add('md5')
            if resource.sha1 is not None:
                hashes.add('sha-1')
            if resource.sha256 is not None:
                hashes.add('sha-256')
    return hashes
Return set of hashes used in this resource_list.
def copyidfintoidf(toidf, fromidf):
    idfobjlst = getidfobjectlist(fromidf)
    for idfobj in idfobjlst:
        toidf.copyidfobject(idfobj)
copy fromidf completely into toidf
def _maybe_wrap_exception(exception):
    if isinstance(exception, grpc.RpcError):
        return exceptions.from_grpc_error(exception)
    return exception
Wraps a gRPC exception class, if needed.
def _is_finished_dumping(directory):
    run_info = os.path.join(directory, "RunInfo.xml")
    hi_seq_checkpoint = "Basecalling_Netcopy_complete_Read%s.txt" % \
        _expected_reads(run_info)
    to_check = ["Basecalling_Netcopy_complete_SINGLEREAD.txt",
                "Basecalling_Netcopy_complete_READ2.txt",
                hi_seq_checkpoint]
    return reduce(operator.or_,
                  [os.path.exists(os.path.join(directory, f)) for f in to_check])
Determine if the sequencing directory has all files. The final checkpoint file will differ depending on whether we are a single or paired end run.
def grab_names_from_emails(email_list):
    all_staff = STAFF_LIST
    emails_names = {}
    for email in email_list:
        for person in all_staff:
            if email == person['email'] and email not in emails_names:
                emails_names[email] = person['fullName']
    for email in email_list:
        matched = False
        for assignment in emails_names:
            if email == assignment:
                matched = True
        if not matched:
            emails_names[email] = email
    return emails_names
Return a dictionary mapping email addresses to full names.

Only maps an email to a name if the email is found in the staff API/JSON; unmatched emails map to themselves.

Expects an API of the format = [ { 'email': '[email protected]', ... 'fullName': 'Frank Oo' }, ... ]
def specstring(self):
    if self.subgroup is None:
        variable = self.variable
    else:
        variable = f'{self.subgroup}.{self.variable}'
    if self.series:
        variable = f'{variable}.series'
    return variable
The string corresponding to the current values of `subgroup`, `state`, and `variable`.

>>> from hydpy.core.itemtools import ExchangeSpecification
>>> spec = ExchangeSpecification('hland_v1', 'fluxes.qt')
>>> spec.specstring
'fluxes.qt'
>>> spec.series = True
>>> spec.specstring
'fluxes.qt.series'
>>> spec.subgroup = None
>>> spec.specstring
'qt.series'
def show_diff(original, modified, prefix='', suffix='',
              prefix_unchanged=' ', suffix_unchanged='',
              prefix_removed='-', suffix_removed='',
              prefix_added='+', suffix_added=''):
    import difflib
    differ = difflib.Differ()

    result = [prefix]
    for line in differ.compare(modified.splitlines(), original.splitlines()):
        if line[0] == ' ':
            result.append(prefix_unchanged + line[2:].strip() + suffix_unchanged)
        elif line[0] == '-':
            result.append(prefix_removed + line[2:].strip() + suffix_removed)
        elif line[0] == '+':
            result.append(prefix_added + line[2:].strip() + suffix_added)
    result.append(suffix)
    return '\n'.join(result)
Return the diff view between original and modified strings.

Function checks both arguments line by line and returns a string with:
- prefix_unchanged when a line is common to both sequences
- prefix_removed when a line is unique to sequence 1
- prefix_added when a line is unique to sequence 2
and a corresponding suffix in each line.

:param original: base string
:param modified: changed string
:param prefix: prefix of the output string
:param suffix: suffix of the output string
:param prefix_unchanged: prefix of the unchanged line
:param suffix_unchanged: suffix of the unchanged line
:param prefix_removed: prefix of the removed line
:param suffix_removed: suffix of the removed line
:param prefix_added: prefix of the added line
:param suffix_added: suffix of the added line
:return: string with the comparison of the records
:rtype: string
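A small usage sketch. Note that because the code passes modified as the first sequence to compare(), '-' marks lines present only in modified and '+' lines present only in original:

print(show_diff("a\nb", "a\nc"))
# (empty prefix line)
#  a
# -c
# +b
# (empty suffix line)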
def reset_pw_confirm_view(request, uidb64=None, token=None):
    return password_reset_confirm(request,
                                  template_name="reset_confirmation.html",
                                  uidb64=uidb64, token=token,
                                  post_reset_redirect=reverse('login'))
View to confirm resetting password.
def relaxParserSetFlag(self, flags):
    ret = libxml2mod.xmlRelaxParserSetFlag(self._o, flags)
    return ret
Semi-private function used to pass information to a parser context, as a combination of xmlRelaxNGParserFlag values.
def cursor_blink_mode_changed(self, settings, key, user_data):
    for term in self.guake.notebook_manager.iter_terminals():
        term.set_property("cursor-blink-mode", settings.get_int(key))
Called when the cursor blink mode setting has been changed.
def compute_adjacency_matrix(X, method='auto', **kwargs):
    if method == 'auto':
        if X.shape[0] > 10000:
            method = 'cyflann'
        else:
            method = 'kd_tree'
    return Adjacency.init(method, **kwargs).adjacency_graph(X.astype('float'))
Compute an adjacency matrix with the given method
def libvlc_video_get_aspect_ratio(p_mi):
    f = _Cfunctions.get('libvlc_video_get_aspect_ratio', None) or \
        _Cfunction('libvlc_video_get_aspect_ratio', ((1,),), string_result,
                   ctypes.c_void_p, MediaPlayer)
    return f(p_mi)
Get current video aspect ratio.

@param p_mi: the media player.
@return: the video aspect ratio or NULL if unspecified (the result must be released with free() or L{libvlc_free}()).
def add_alt_goids(go2values, altgo2goobj):
    for goobj_key in altgo2goobj.values():
        values_curr = go2values[goobj_key.id]
        for goid_alt in goobj_key.alt_ids:
            go2values[goid_alt] = values_curr
    return go2values
Add alternate source GO IDs.
def GetSource(self, row, col, table=None):
    if table is None:
        table = self.grid.current_table

    value = self.code_array((row, col, table))
    if value is None:
        return u""
    else:
        return value
Return the source string of a cell
def stop(self):
    log.info('Stopping %s' % self)
    pids = list(self._processes)
    for pid in pids:
        self.terminate(pid)
    while self._connections:
        pid = next(iter(self._connections))
        conn = self._connections.pop(pid, None)
        if conn:
            conn.close()
    self.__loop.stop()
Stops the context. This terminates all PIDs and closes all connections.
def class_path(cls):
    if cls.__module__ == '__main__':
        path = None
    else:
        path = os.path.dirname(inspect.getfile(cls))
    if not path:
        path = os.getcwd()
    return os.path.realpath(path)
Return the path to the source file of the given class.
def _value_function(self, x_input, y_true, y_pred):
    if len(y_true.shape) == 1:
        return y_pred.argmax(1).eq(y_true).double().mean().item()
    else:
        raise NotImplementedError
Return classification accuracy of input
def as_json(self, force_object=True, name=None):
    func = streamsx.topology.runtime._json_force_object if force_object else None
    saj = self._change_schema(streamsx.topology.schema.CommonSchema.Json,
                              'as_json', name, func)._layout('AsJson')
    saj.oport.operator.sl = _SourceLocation(_source_info(), 'as_json')
    return saj
Declares a stream converting each tuple on this stream into a JSON value.

The stream is typed as a :py:const:`JSON stream <streamsx.topology.schema.CommonSchema.Json>`. Each tuple must be supported by `JSONEncoder`.

If `force_object` is `True` then each tuple that is not a `dict` will be converted to a JSON object with a single key `payload` containing the tuple. Thus each object on the stream will be a JSON object.

If `force_object` is `False` then each tuple is converted to a JSON value directly using the `json` package.

If this stream is already typed as a JSON stream then it will be returned (with no additional processing against it; `force_object` and `name` are ignored).

Args:
    force_object(bool): Force conversion of non-dicts to JSON objects.
    name(str): Name of the resulting stream. When `None` defaults to a generated name.

Returns:
    Stream: Stream containing the JSON representations of tuples on this stream.

.. versionadded:: 1.6.1
def connection(self):
    if self._connection is None:
        self._connection = self.client[self.database_name]
        if self.disable_id_injector:
            incoming = self._connection._Database__incoming_manipulators
            for manipulator in incoming:
                if isinstance(manipulator,
                              pymongo.son_manipulator.ObjectIdInjector):
                    incoming.remove(manipulator)
                    LOG.debug("Disabling %s on mongodb connection to "
                              "'%s'.", manipulator.__class__.__name__,
                              self.database_name)
                    break
        for manipulator in self.manipulators:
            self._connection.add_son_manipulator(manipulator)
        LOG.info("Connected to mongodb on %s (database=%s)",
                 self.safe_connection_string, self.database_name)
    return self._connection
Connect to and return mongodb database object.
def write_squonk_datasetmetadata(outputBase, thinOutput, valueClassMappings,
                                 datasetMetaProps, fieldMetaProps):
    meta = {}
    props = {}
    if datasetMetaProps:
        props.update(datasetMetaProps)
    if fieldMetaProps:
        meta["fieldMetaProps"] = fieldMetaProps
    if len(props) > 0:
        meta["properties"] = props
    if valueClassMappings:
        meta["valueClassMappings"] = valueClassMappings
    if thinOutput:
        meta['type'] = 'org.squonk.types.BasicObject'
    else:
        meta['type'] = 'org.squonk.types.MoleculeObject'
    s = json.dumps(meta)
    meta = open(outputBase + '.metadata', 'w')
    meta.write(s)
    meta.close()
This is a temp hack to write the minimal metadata that Squonk needs. Will need to be replaced with something that allows something more complete to be written.

:param outputBase: Base name for the file to write to
:param thinOutput: Write only new data, not structures. Result type will be BasicObject
:param valueClassMappings: A dict that describes the Java class of the value properties (used by Squonk)
:param datasetMetaProps: A dict with metadata properties that describe the dataset as a whole. The keys used for these metadata are up to the user, but common ones include source, description, created, history.
:param fieldMetaProps: A list of dicts with the additional field metadata. Each dict has a key named fieldName whose value is the name of the field being described, and a key named values whose value is a map of metadata properties. The keys used for these metadata are up to the user, but common ones include source, description, created, history.
def _report_exception(self, msg, frame_skip=2):
    msg_hash = hash(msg)
    if msg_hash in self._report_exception_cache:
        return
    self._report_exception_cache.add(msg_hash)

    error_frame = sys._getframe(0)
    while frame_skip:
        error_frame = error_frame.f_back
        frame_skip -= 1

    self._py3_wrapper.report_exception(
        msg, notify_user=False, error_frame=error_frame
    )
THIS IS PRIVATE AND UNSUPPORTED.

Logs an exception that occurs inside of a Py3 method. We only log the exception once to prevent spamming the logs and we do not notify the user.

frame_skip is used to change the place in the code that the error is reported as coming from. We want to show it as coming from the py3status module where the Py3 method was called.
def get_selected_values(self, selection):
    return [v for b, v in self._choices if b & selection]
Return a list of values for the given selection.
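An illustration of the bitmask lookup, assuming _choices holds (bit flag, value) pairs:

# hypothetical flag/value pairs as _choices might hold them
choices = [(1, 'read'), (2, 'write'), (4, 'execute')]
selection = 1 | 4
print([v for b, v in choices if b & selection])  # ['read', 'execute']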
def attention_lm_ae_extended():
    hparams = attention_lm_moe_base_long_seq()
    hparams.attention_layers = "eeee"
    hparams.attention_local = True
    hparams.attention_moe_k = 2
    hparams.attention_exp_factor = 4
    hparams.layer_preprocess_sequence = "n"
    hparams.layer_postprocess_sequence = "da"
    return hparams
Experiment with the exp_factor params.
def get_digests(self):
    digests = {}
    for registry in self.workflow.push_conf.docker_registries:
        for image in self.workflow.tag_conf.images:
            image_str = image.to_str()
            if image_str in registry.digests:
                digest = registry.digests[image_str]
                digests[image.to_str(registry=False)] = digest
    return digests
Returns a map of repositories to digests
def _add_version_to_request(self, url, headers, version):
    if self._has_capability(SERVER_REQUIRES_VERSION_HEADER):
        new_headers = headers.copy()
        new_headers['Last-Modified'] = email.utils.formatdate(version)
        return url, new_headers
    else:
        url_params = {'last_modified': email.utils.formatdate(version)}
        new_url = url + "?" + urlencode(url_params)
        return new_url, headers
Adds version to either url or headers, depending on protocol.
def create_system(self, new_machine_id=False):
    client_hostname = determine_hostname()
    machine_id = generate_machine_id(new_machine_id)

    branch_info = self.branch_info
    if not branch_info:
        return False

    remote_branch = branch_info['remote_branch']
    remote_leaf = branch_info['remote_leaf']

    data = {'machine_id': machine_id,
            'remote_branch': remote_branch,
            'remote_leaf': remote_leaf,
            'hostname': client_hostname}
    if self.config.display_name is not None:
        data['display_name'] = self.config.display_name
    data = json.dumps(data)

    post_system_url = self.api_url + '/v1/systems'
    logger.debug("POST System: %s", post_system_url)
    logger.debug(data)
    net_logger.info("POST %s", post_system_url)
    return self.session.post(post_system_url,
                             headers={'Content-Type': 'application/json'},
                             data=data)
Create the machine via the API
def add_vbar_widget(self, ref, x=1, y=1, length=10):
    if ref not in self.widgets:
        widget = widgets.VBarWidget(screen=self, ref=ref, x=x, y=y, length=length)
        self.widgets[ref] = widget
    return self.widgets[ref]
Add Vertical Bar Widget
def send(self, message, binary=False):
    if not self.is_closed:
        self.session.send_message(message, binary=binary)
Send message to the client.

`message`
    Message to send.
def remove_node(self, p_id, remove_unconnected_nodes=True):
    if self.has_node(p_id):
        for neighbor in self.incoming_neighbors(p_id):
            self._edges[neighbor].remove(p_id)

        neighbors = set()
        if remove_unconnected_nodes:
            neighbors = self.outgoing_neighbors(p_id)

        del self._edges[p_id]

        for neighbor in neighbors:
            if self.is_isolated(neighbor):
                self.remove_node(neighbor)
Removes a node from the graph.
def upgrade_cmd(argv=sys.argv[1:]):
    arguments = docopt(upgrade_cmd.__doc__, argv=argv)
    initialize_config(__mode__='fit')
    upgrade(from_version=arguments['--from'], to_version=arguments['--to'])
\
Upgrade the database to the latest version.

Usage:
  pld-upgrade [options]

Options:
  --from=<v>   Upgrade from a specific version, overriding the version stored in the database.
  --to=<v>     Upgrade to a specific version instead of the latest version.
  -h --help    Show this screen.
def _get_config_name():
    p = subprocess.Popen('git config --get user.name', shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = p.stdout.readlines()
    return _stripslashes(output[0])
Get git config user name
def first(self):
    if self._first is None:
        for target in self.targets:
            if target is not None:
                self._first = target
                break
        else:
            self._first = False
    return self._first
Returns the first module procedure embedded in the interface that has a valid instance of a CodeElement.
def create_tab(self, location=None):
    eb = self._get_or_create_editor_buffer(location)
    self.tab_pages.insert(self.active_tab_index + 1, TabPage(Window(eb)))
    self.active_tab_index += 1
Create a new tab page.
def to_ipv6(key):
    if key[-2:] != '.k':
        raise ValueError('Key does not end with .k')

    key_bytes = base32.decode(key[:-2])
    hash_one = sha512(key_bytes).digest()
    hash_two = sha512(hash_one).hexdigest()

    return ':'.join([hash_two[i:i+4] for i in range(0, 32, 4)])
Get IPv6 address from a public key.
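The derivation is a double SHA-512 over the decoded key bytes, keeping the first 32 hex characters (16 bytes) as the address. A self-contained sketch with stand-in key bytes (a real cjdns key decodes to 32 bytes and yields an address starting with fc):

from hashlib import sha512

key_bytes = bytes(range(32))  # stand-in for base32-decoded public key bytes
h = sha512(sha512(key_bytes).digest()).hexdigest()
print(':'.join(h[i:i+4] for i in range(0, 32, 4)))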
def _read_generated_broker_id(meta_properties_path):
    try:
        with open(meta_properties_path, 'r') as f:
            broker_id = _parse_meta_properties_file(f)
    except IOError:
        raise IOError(
            "Cannot open meta.properties file: {path}"
            .format(path=meta_properties_path),
        )
    except ValueError:
        raise ValueError("Broker id not valid")

    if broker_id is None:
        raise ValueError("Autogenerated broker id missing from data directory")

    return broker_id
Reads broker_id from meta.properties file.

:param string meta_properties_path: path for meta.properties file
:returns int: broker_id from meta_properties_path
def format_filter_value(self, element, value):
    format_func = self.allowed_filter.get(element)
    return format_func(value)
Calls the specific function to format value, depending on the given element.

Arguments:
    element (string): The element of the VT to be formatted.
    value (dictionary): The element value.

Returns:
    Returns a formatted value.
def async_new_device_callback(self, device):
    _LOGGING.info('New Device: %s cat: 0x%02x subcat: 0x%02x desc: %s, model: %s',
                  device.id, device.cat, device.subcat,
                  device.description, device.model)
    for state in device.states:
        device.states[state].register_updates(self.async_state_change_callback)
Log the newly discovered device and register its states for update callbacks.
def sendFinalResponse(self):
    self.requestProtocol.requestResponse["code"] = self.responseCode
    self.requestProtocol.requestResponse["content"] = self.responseContent
    self.requestProtocol.requestResponse["errors"] = self.responseErrors
    self.requestProtocol.sendFinalRequestResponse()
Send the final response and close the connection. :return: <void>
def as_pyemu_matrix(self, typ=Matrix):
    x = self.values.copy().astype(np.float)
    return typ(x=x, row_names=list(self.index),
               col_names=list(self.columns))
Create a pyemu.Matrix from the Ensemble.

Parameters
----------
typ : pyemu.Matrix or derived type
    the type of matrix to return

Returns
-------
pyemu.Matrix
def RenderValue(value, limit_lists=-1):
    if value is None:
        return None

    renderer = ApiValueRenderer.GetRendererForValueOrClass(
        value, limit_lists=limit_lists)
    return renderer.RenderValue(value)
Render given RDFValue as plain old python objects.
def merge(self, other):
    print("MERGING", self, other)
    other = self.coerce(other)
    if self.is_contradictory(other):
        raise Contradiction("Cannot merge %s and %s" % (self, other))
    elif self.value is None and other.value is not None:
        self.r, self.g, self.b = other.r, other.g, other.b
        # channels passed in r, g, b order
        self.value = RGBColor(self.r, self.g, self.b, rgb_type='sRGB')
    return self
Merges the values
async def stop(self):
    await self.node.stop(self.channel.guild.id)
    self.queue = []
    self.current = None
    self.position = 0
    self._paused = False
Stops playback from lavalink.

.. important::

    This method will clear the queue.
def get_web_element(self, element):
    from toolium.pageelements.page_element import PageElement
    if isinstance(element, WebElement):
        web_element = element
    elif isinstance(element, PageElement):
        web_element = element.web_element
    elif isinstance(element, tuple):
        web_element = self.driver_wrapper.driver.find_element(*element)
    else:
        web_element = None
    return web_element
Return the web element from a page element or its locator.

:param element: either a WebElement, PageElement or element locator as a tuple (locator_type, locator_value)
:returns: WebElement object
def pretty_memory_info():
    process = psutil.Process(os.getpid())
    return '{}MB memory usage'.format(int(process.memory_info().rss / 2**20))
Pretty format memory info.

Returns
-------
str
    Memory info.

Examples
--------
>>> pretty_memory_info()
'5MB memory usage'
def setup(self, data, view='hypergrid', schema=None, columns=None,
          rowpivots=None, columnpivots=None, aggregates=None, sort=None,
          index='', limit=-1, computedcolumns=None, settings=True,
          embed=False, dark=False, *args, **kwargs):
    self.view = validate_view(view)
    self.schema = schema or {}
    self.sort = validate_sort(sort) or []
    self.index = index
    self.limit = limit
    self.settings = settings
    self.embed = embed
    self.dark = dark
    self.rowpivots = validate_rowpivots(rowpivots) or []
    self.columnpivots = validate_columnpivots(columnpivots) or []
    self.aggregates = validate_aggregates(aggregates) or {}
    self.columns = validate_columns(columns) or []
    self.computedcolumns = validate_computedcolumns(computedcolumns) or []
    self.load(data)
Setup perspective base class.

Arguments:
    data : dataframe/list/dict
        The static or live datasource

Keyword Arguments:
    view : str or View
        what view to use. available in the enum View (default: {'hypergrid'})
    columns : list of str
        what columns to display
    rowpivots : list of str
        what names to use as rowpivots
    columnpivots : list of str
        what names to use as columnpivots
    aggregates : dict(str: str or Aggregate)
        dictionary of name to aggregate type (either string or enum Aggregate)
    index : str
        columns to use as index
    limit : int
        row limit
    computedcolumns : list of dict
        computed columns to set on the perspective viewer
    settings : bool
        display settings
    embed : bool
        embedded mode
    dark : bool
        use dark theme
async def apply(self, sender: str, recipient: str, mailbox: str,
                append_msg: AppendMessage) -> Tuple[Optional[str], AppendMessage]:
    ...
Run the filter and return the mailbox where it should be appended, or None to discard, and the message to be appended, which is usually the same as ``append_msg``.

Args:
    sender: The envelope sender of the message.
    recipient: The envelope recipient of the message.
    mailbox: The intended mailbox to append the message.
    append_msg: The message to be appended.

Raises:
    :exc:`~pymap.exceptions.AppendFailure`
def get_info(self):
    info_response = self.send_command("show info")
    if not info_response:
        return {}

    def convert_camel_case(string):
        return all_cap_re.sub(
            r'\1_\2', first_cap_re.sub(r'\1_\2', string)
        ).lower()

    return dict(
        (convert_camel_case(label), value)
        for label, value in [
            line.split(": ") for line in info_response.split("\n")
        ]
    )
Parses the output of a "show info" HAProxy command and returns a simple dictionary of the results.
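The nested substitutions above are the usual two-regex CamelCase-to-snake_case idiom; a self-contained sketch with the module-level patterns the method presumably relies on:

import re

first_cap_re = re.compile(r'(.)([A-Z][a-z]+)')  # e.g. 'CurrConns' -> 'Curr_Conns'
all_cap_re = re.compile(r'([a-z0-9])([A-Z])')   # e.g. 'maxSock' -> 'max_Sock'

def convert_camel_case(string):
    return all_cap_re.sub(r'\1_\2', first_cap_re.sub(r'\1_\2', string)).lower()

print(convert_camel_case('CurrConns'))  # curr_conns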
def egress_subnets(rid=None, unit=None):
    def _to_range(addr):
        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
            addr += '/32'
        elif ':' in addr and '/' not in addr:
            addr += '/128'
        return addr

    settings = relation_get(rid=rid, unit=unit)
    if 'egress-subnets' in settings:
        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
    if 'ingress-address' in settings:
        return [_to_range(settings['ingress-address'])]
    if 'private-address' in settings:
        return [_to_range(settings['private-address'])]
    return []
Retrieve the egress-subnets from a relation.

This function is to be used on the providing side of the relation, and provides the ranges of addresses that client connections may come from. The result is uninteresting on the consuming side of a relation (unit == local_unit()).

Returns a stable list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']

If egress-subnets is not available, falls back to using the published ingress-address, or finally private-address.

:param rid: string relation id
:param unit: string unit name
:side effect: calls relation_get
:return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
def _calibration_program(qc: QuantumComputer, tomo_experiment: TomographyExperiment,
                         setting: ExperimentSetting) -> Program:
    calibr_prog = Program()

    readout_povm_instruction = [i for i in tomo_experiment.program.out().split('\n')
                                if 'PRAGMA READOUT-POVM' in i]
    calibr_prog += readout_povm_instruction
    kraus_instructions = [i for i in tomo_experiment.program.out().split('\n')
                          if 'PRAGMA ADD-KRAUS' in i]
    calibr_prog += kraus_instructions

    for q, op in setting.out_operator.operations_as_set():
        calibr_prog += _one_q_pauli_prep(label=op, index=0, qubit=q)
    for q, op in setting.out_operator.operations_as_set():
        calibr_prog += _local_pauli_eig_meas(op, q)

    return calibr_prog
Program required for calibration in a tomography-like experiment.

:param qc: A QuantumComputer on which to run the calibration.
:param tomo_experiment: A suite of tomographic observables.
:param setting: The particular tomographic observable to measure.
:return: Program performing the calibration.
def _call_api(self, url, method='GET', params=None, data=None):
    req = self.session.request(
        method=method,
        url=url,
        params=params,
        headers=self.header,
        data=data,
        verify=not self.insecure,
    )

    output = None
    try:
        output = req.json()
    except Exception as err:
        LOG.debug(req.text)
        raise Exception('Error while decoding JSON: {0}'.format(err))

    if req.status_code != 200:
        LOG.error(output)
        if 'error_code' in output:
            raise APIError(output['error'])

    return output
Method used to call the API. It returns the raw JSON returned by the API or raises an exception if something goes wrong.

:arg url: the URL to call
:kwarg method: the HTTP method to use when calling the specified URL, can be GET, POST, DELETE, UPDATE... Defaults to GET
:kwarg params: the params to specify to a GET request
:kwarg data: the data to send to a POST request
def _load_client_cert_chain(keychain, *paths):
    certificates = []
    identities = []

    paths = (path for path in paths if path)

    try:
        for file_path in paths:
            new_identities, new_certs = _load_items_from_file(keychain, file_path)
            identities.extend(new_identities)
            certificates.extend(new_certs)

        if not identities:
            new_identity = Security.SecIdentityRef()
            status = Security.SecIdentityCreateWithCertificate(
                keychain, certificates[0], ctypes.byref(new_identity)
            )
            _assert_no_error(status)
            identities.append(new_identity)
            CoreFoundation.CFRelease(certificates.pop(0))

        trust_chain = CoreFoundation.CFArrayCreateMutable(
            CoreFoundation.kCFAllocatorDefault,
            0,
            ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
        )
        for item in itertools.chain(identities, certificates):
            CoreFoundation.CFArrayAppendValue(trust_chain, item)

        return trust_chain
    finally:
        for obj in itertools.chain(identities, certificates):
            CoreFoundation.CFRelease(obj)
Load certificates and maybe keys from a number of files. Has the end goal of returning a CFArray containing one SecIdentityRef, and then zero or more SecCertificateRef objects, suitable for use as a client certificate trust chain.
def expand(template_str, dictionary, **kwargs):
    t = Template(template_str, **kwargs)
    return t.expand(dictionary)
Free function that expands a template string with a data dictionary. This is useful for cases where you don't care about saving the result of compilation (similar to re.match('.*', s) vs DOT_STAR.match(s)).
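A hedged usage sketch; the exact placeholder syntax depends on this library's Template class (jsontemplate-style braces assumed here):

print(expand('Hello {name}', {'name': 'World'}))  # Hello World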
def _ReadN(self, n):
    ret = ""
    while True:
        chunk = self._read_file.read(n - len(ret))
        ret += chunk

        if len(ret) == n or not chunk:
            return ret
Reads n characters from the input stream, or until EOF.

This is equivalent to the current CPython implementation of read(n), but not guaranteed by the docs.

Args:
    n: int

Returns:
    string
def wrap_constant(self, val):
    from .queries import QueryBuilder

    if isinstance(val, (Term, QueryBuilder, Interval)):
        return val
    if val is None:
        return NullValue()
    if isinstance(val, list):
        return Array(*val)
    if isinstance(val, tuple):
        return Tuple(*val)

    _ValueWrapper = getattr(self, '_wrapper_cls', ValueWrapper)
    return _ValueWrapper(val)
Used for wrapping raw inputs such as numbers in Criterions and Operator. For example, the expression F('abc')+1 stores the integer part in a ValueWrapper object.

:param val: Any value.
:return: Raw string, number, or decimal values will be returned in a ValueWrapper. Fields and other parts of the querybuilder will be returned as inputted.
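A hedged illustration using the pypika-style API the docstring references:

from pypika import Field

expr = Field('abc') + 1  # the raw 1 is wrapped in a ValueWrapper internally
print(expr)  # renders roughly as "abc"+1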
async def remove_items(self, *items):
    items = [i.id for i in (await self.process(items)) if i in self.items]
    if not items:
        return
    await self.connector.delete(
        'Playlists/{Id}/Items'.format(Id=self.id),
        EntryIds=','.join(items),
        remote=False
    )
Remove items from the playlist.

|coro|

Parameters
----------
items : array_like
    list of items to remove (or their ids)

See Also
--------
add_items
def get(self, source_id=None, profile_id=None, profile_reference=None,
        filter_id=None, filter_reference=None):
    query_params = {}
    query_params["source_id"] = _validate_source_id(source_id)
    if profile_id:
        query_params["profile_id"] = _validate_profile_id(profile_id)
    if profile_reference:
        query_params["profile_reference"] = _validate_profile_reference(profile_reference)
    if filter_id:
        query_params["filter_id"] = _validate_filter_id(filter_id)
    if filter_reference:
        query_params["filter_reference"] = _validate_filter_reference(filter_reference)
    response = self.client.get('profile/revealing', query_params)
    return response
Retrieve the interpretability information.

Args:
    source_id: <string> source id
    profile_id: <string> profile id
    profile_reference: <string> profile reference
    filter_id: <string> filter id
    filter_reference: <string> filter reference

Returns:
    interpretability information
def join(self, source, op='LEFT JOIN', on=''):
    if isinstance(source, SQLConstructor):
        (sql, params, _) = source.compile()
        self.join_params.extend(params)
        jsrc = '( {0} )'.format(sql)
        if source.table_alias:
            jsrc += ' AS ' + source.table_alias
            on = on.format(r=source.table_alias)
    else:
        jsrc = source
        on = on.format(r=source)
    constraint = 'ON {0}'.format(on) if on else ''
    self.join_source = ' '.join([self.join_source, op, jsrc, constraint])
Join `source`.

>>> sc = SQLConstructor('main', ['c1', 'c2'])
>>> sc.join('sub', 'JOIN', 'main.id = sub.id')
>>> (sql, params, keys) = sc.compile()
>>> sql
'SELECT c1, c2 FROM main JOIN sub ON main.id = sub.id'

It is possible to pass another `SQLConstructor` as a source.

>>> sc = SQLConstructor('main', ['c1', 'c2'])
>>> sc.add_or_matches('{0} = {1}', 'c1', [111])
>>> subsc = SQLConstructor('sub', ['d1', 'd2'])
>>> subsc.add_or_matches('{0} = {1}', 'd1', ['abc'])
>>> sc.join(subsc, 'JOIN', 'main.id = sub.id')
>>> sc.add_column('d1')
>>> (sql, params, keys) = sc.compile()
>>> print(sql)  # doctest: +NORMALIZE_WHITESPACE
SELECT c1, c2, d1 FROM main
JOIN ( SELECT d1, d2 FROM sub WHERE (d1 = ?) )
ON main.id = sub.id
WHERE (c1 = ?)

`params` is set appropriately to include parameters for the joined source:

>>> params
['abc', 111]

Note that `subsc.compile` is called when `sc.join(subsc, ...)` is called. Therefore, calling `subsc.add_<predicate>` afterwards does not affect `sc`.

:type source: str or SQLConstructor
:arg source: table
:type op: str
:arg op: operation (e.g., 'JOIN')
:type on: str
:arg on: on clause. `source` ("right" source) can be referred to using the `{r}` formatting field.
def sasml(self) -> 'SASml':
    if not self._loaded_macros:
        self._loadmacros()
        self._loaded_macros = True
    return SASml(self)
This method creates a SASml object which you can use to run various analytics. See the sasml.py module.

:return: sasml object
def _validate_state(state, valid_states):
    if state in State:
        return state.name
    elif state in valid_states:
        return state
    else:
        raise Invalid('Invalid state')
Validate a state string
def background_color(self):
    if self._has_real():
        return self._data.real_background_color
    return self._data.background_color
Background color.
def discover_base_dir(start_dir):
    if is_base_dir(start_dir):
        return start_dir
    pcl = start_dir.split('/')
    found_base_dir = None
    for i in range(1, len(pcl)+1):
        d2c = '/'.join(pcl[:-i])
        if d2c == '':
            d2c = '/'
        if is_base_dir(d2c):
            found_base_dir = d2c
            break
    return found_base_dir
Return start_dir or the parent dir that has the s2 marker. Starting from the specified directory, and going up the parent chain, check each directory to see if it's a base_dir (contains the "marker" directory *s2*) and return it. Otherwise, return the start_dir.
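For instance, assuming is_base_dir checks for the *s2* marker directory mentioned in the docstring:

import os

def is_base_dir(d):  # stand-in for the module's marker check
    return os.path.isdir(os.path.join(d, 's2'))

# with /home/me/proj/s2 present on disk:
# discover_base_dir('/home/me/proj/src/pkg') -> '/home/me/proj'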
def get_attrs(self, *names):
    attrs = [getattr(self, name) for name in names]
    return attrs
Get multiple attributes by name from this object.
def venue_healthcheck(self):
    url = urljoin(self.base_url, 'venues/TESTEX/heartbeat')
    return self.session.get(url).json()['ok']
Check A Venue Is Up. https://starfighter.readme.io/docs/venue-healthcheck
def set_data_location(apps, schema_editor):
    Data = apps.get_model('flow', 'Data')
    DataLocation = apps.get_model('flow', 'DataLocation')
    for data in Data.objects.all():
        if os.path.isdir(os.path.join(settings.FLOW_EXECUTOR['DATA_DIR'], str(data.id))):
            with transaction.atomic():
                data_location = DataLocation.objects.create(id=data.id, subpath=str(data.id))
                data_location.data.add(data)

    if DataLocation.objects.exists():
        max_id = DataLocation.objects.order_by('id').last().id
        with connection.cursor() as cursor:
            cursor.execute(
                "ALTER SEQUENCE flow_datalocation_id_seq RESTART WITH {};".format(max_id + 1)
            )
Create DataLocation for each Data.
def state_not_literal(self, value):
    value = negate = chr(value)
    while value == negate:
        value = choice(self.literals)
    yield value
Parse a not-literal node: yield a random literal other than the negated character.
def function_exists(self, fun):
    res = fun in self._rule_functions
    self.say('function exists:' + str(fun) + ':' + str(res), verbosity=10)
    return res
Check whether the given rule function exists.
def write_bits(self, *args):
    if len(args) > 8:
        raise ValueError("Can only write 8 bits at a time")

    self._output_buffer.append(chr(
        reduce(lambda x, y: xor(x, args[y] << y), xrange(len(args)), 0)))

    return self
Write multiple bits in a single byte field. The bits will be written in little-endian order, but should be supplied in big endian order. Will raise ValueError when more than 8 arguments are supplied.

write_bits(True, False) => 0x02
def _parse_current_member(self, previous_rank, values):
    rank, name, vocation, level, joined, status = values
    rank = previous_rank[1] if rank == " " else rank
    title = None
    previous_rank[1] = rank

    m = title_regex.match(name)
    if m:
        name = m.group(1)
        title = m.group(2)
    self.members.append(GuildMember(name, rank, title, int(level), vocation,
                                    joined=joined, online=status == "online"))
Parses the column texts of a member row and appends the result to the members list.

Parameters
----------
previous_rank: :class:`dict`[int, str]
    The last rank present in the rows.
values: tuple[:class:`str`]
    A list of row contents.
def ReadPermission(self, permission_link, options=None):
    if options is None:
        options = {}

    path = base.GetPathFromLink(permission_link)
    permission_id = base.GetResourceIdOrFullNameFromLink(permission_link)
    return self.Read(path, 'permissions', permission_id, None, options)
Reads a permission.

:param str permission_link: The link to the permission.
:param dict options: The request options for the request.
:return: The read permission.
:rtype: dict
def find_root(self):
    cmd = self
    while cmd.parent:
        cmd = cmd.parent
    return cmd
Traverse parent refs to top.
def print_command(command: List[str], fname: str):
    with open(fname, "w", encoding="utf-8") as out:
        print(" \\\n".join(command), file=out)
Format and print command to file.

:param command: Command in args list form.
:param fname: File name to write out.
def module_import(module_path):
    try:
        module = __import__(module_path)

        components = module_path.split('.')
        for component in components[1:]:
            module = getattr(module, component)

        return module
    except ImportError:
        raise BadModulePathError(
            'Unable to find module "%s".' % (module_path,))
Imports the module indicated by module_path.

Args:
    module_path: string representing a module path such as 'app.config' or 'app.extras.my_module'

Returns:
    the module matching the name of the last component, i.e. for 'app.extras.my_module' it returns a reference to my_module

Raises:
    BadModulePathError if the module is not found
def _set_pseudotime(self):
    self.pseudotime = self.distances_dpt[self.iroot].copy()
    self.pseudotime /= np.max(self.pseudotime[self.pseudotime < np.inf])
Set pseudotime with respect to root point.
def _max_lengths():
    max_header_length = max([len(x.byte_match) + x.offset for x in magic_header_array])
    max_footer_length = max([len(x.byte_match) + abs(x.offset) for x in magic_footer_array])
    return max_header_length, max_footer_length
The length of the largest magic string + its offset
def _clean_dict(target_dict, whitelist=None):
    assert isinstance(target_dict, dict)
    return {
        ustr(k).strip(): ustr(v).strip()
        for k, v in target_dict.items()
        if v not in (None, Ellipsis, [], (), "")
        and (not whitelist or k in whitelist)
    }
Convenience function that removes a dict's entries whose values are empty (None, Ellipsis, [], (), or "") and, when a whitelist is given, whose keys are not whitelisted.
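A short sketch of the effect, with ustr stubbed as plain str:

ustr = str  # stand-in for the module's unicode helper
print(_clean_dict({'a': ' 1 ', 'b': None, 'c': ''}))  # {'a': '1'}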
def map_version(self, requirement, local_version):
    if isinstance(self._versions_map, dict):
        version = self._versions_map.get(requirement, {}).get(
            local_version, local_version)
    else:
        version = self._versions_map(requirement, local_version)
    return version
Maps a local version name to one recognised by the Requirement class.

Parameters
----------
requirement : str
    Name of the requirement
local_version : str
    version string
def sample_correlations(self):
    C = np.corrcoef(self.X.T)
    corr_matrix = ExpMatrix(genes=self.samples, samples=self.samples, X=C)
    return corr_matrix
Returns an `ExpMatrix` containing all pairwise sample correlations.

Returns
-------
`ExpMatrix`
    The sample correlation matrix.
def assign(self, key, value):
    key_split = key.split('.')
    cur_dict = self
    for k in key_split[:-1]:
        try:
            cur_dict = cur_dict[k]
        except KeyError:
            cur_dict[k] = self.__class__()
            cur_dict = cur_dict[k]
    cur_dict[key_split[-1]] = value
an alternative method for assigning values to nested DotDict instances. It accepts keys in the form of X.Y.Z. If any nested DotDict instances don't yet exist, they will be created.
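A usage sketch, assuming DotDict is a dict subclass exposing this assign method:

d = DotDict()
d.assign('a.b.c', 1)
# d now behaves like {'a': {'b': {'c': 1}}}, with nested DotDict instances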
def publish_event(self, data, suffix=''):
    try:
        event_type = data.pop('event_type')
    except KeyError:
        return {'result': 'error', 'message': 'Missing event_type in JSON data'}

    return self.publish_event_from_dict(event_type, data)
AJAX handler to allow client-side code to publish a server-side event
def encode_quorum(self, rw):
    if rw in QUORUM_TO_PB:
        return QUORUM_TO_PB[rw]
    elif type(rw) is int and rw >= 0:
        return rw
    else:
        return None
Converts a symbolic quorum value into its on-the-wire equivalent.

:param rw: the quorum
:type rw: string, integer
:rtype: integer
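An illustration with a hypothetical symbolic-quorum mapping (Riak-style magic values):

QUORUM_TO_PB = {'one': 4294967294, 'quorum': 4294967293,
                'all': 4294967292, 'default': 4294967291}
# encode_quorum('quorum') -> 4294967293
# encode_quorum(3)        -> 3
# encode_quorum(-1)       -> None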
def _crossProduct(self, ls):
    p = ls[0]
    ds = []
    if len(ls) == 1:
        for i in self._parameters[p]:
            dp = dict()
            dp[p] = i
            ds.append(dp)
    else:
        ps = self._crossProduct(ls[1:])
        for i in self._parameters[p]:
            for d in ps:
                dp = d.copy()
                dp[p] = i
                ds.append(dp)
    return ds
Internal method to generate the cross product of all parameter values, creating the parameter space for the experiment.

:param ls: an array of parameter names
:returns: list of dicts
def _filter_attribute(mcs, attribute_name, attribute_value):
    if attribute_name == '__module__':
        return True
    elif hasattr(attribute_value, '_trace_disable'):
        return True
    return False
decides whether the given attribute should be excluded from tracing or not
def parse_requirements(
    filename, finder=None, comes_from=None, options=None,
    session=None, constraint=False, wheel_cache=None, use_pep517=None
):
    if session is None:
        raise TypeError(
            "parse_requirements() missing 1 required keyword argument: "
            "'session'"
        )

    _, content = get_file_content(
        filename, comes_from=comes_from, session=session
    )

    lines_enum = preprocess(content, options)

    for line_number, line in lines_enum:
        req_iter = process_line(line, filename, line_number, finder,
                                comes_from, options, session, wheel_cache,
                                use_pep517=use_pep517, constraint=constraint)
        for req in req_iter:
            yield req
Parse a requirements file and yield InstallRequirement instances.

:param filename: Path or url of requirements file.
:param finder: Instance of pip.index.PackageFinder.
:param comes_from: Origin description of requirements.
:param options: cli options.
:param session: Instance of pip.download.PipSession.
:param constraint: If true, parsing a constraint file rather than requirements file.
:param wheel_cache: Instance of pip.wheel.WheelCache
:param use_pep517: Value of the --use-pep517 option.
def find_and_fire_hook(event_name, instance, user_override=None):
    try:
        from django.contrib.auth import get_user_model
        User = get_user_model()
    except ImportError:
        from django.contrib.auth.models import User

    from rest_hooks.models import HOOK_EVENTS

    if event_name not in HOOK_EVENTS.keys():
        raise Exception(
            '"{}" does not exist in `settings.HOOK_EVENTS`.'.format(event_name)
        )

    filters = {'event': event_name}

    if user_override is not False:
        if user_override:
            filters['user'] = user_override
        elif hasattr(instance, 'user'):
            filters['user'] = instance.user
        elif isinstance(instance, User):
            filters['user'] = instance
        else:
            raise Exception(
                '{} has no `user` property. REST Hooks needs this.'.format(repr(instance))
            )

    HookModel = get_hook_model()
    hooks = HookModel.objects.filter(**filters)
    for hook in hooks:
        hook.deliver_hook(instance)
Look up the Hooks that apply to this event and deliver them.
def filtering(queryset, query_dict):
    for key, value in query_dict.items():
        assert hasattr(queryset, key), "Parameter 'query_dict' contains" \
            " non-existent attribute."
        if isinstance(value, list):
            queryset = getattr(queryset, key)(*value)
        elif isinstance(value, dict):
            queryset = getattr(queryset, key)(**value)
        else:
            queryset = getattr(queryset, key)(value)
    return queryset
Function to apply the pre-search conditions to the queryset to narrow down the queryset's size.

:param queryset: Django Queryset: queryset of all objects
:param query_dict: dict: contains select_related, filter and other customized filter functions
:return: queryset: result after applying the pre-search condition dict
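A sketch with a hypothetical Article model; keys are applied in insertion order:

query_dict = {
    'select_related': ['author'],       # list -> positional args
    'filter': {'status': 'published'},  # dict -> keyword args
}
qs = filtering(Article.objects.all(), query_dict)
# Article.objects.all().select_related('author').filter(status='published')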
def redo(self, channel, image):
    self._image = None
    info = channel.extdata._header_info
    self.set_header(info, image)
This is called when image changes.
def check_picture(file_name, mediainfo_path=None):
    D = call_MediaInfo(file_name, mediainfo_path)
    if (("Image" not in D) or
            ("Width" not in D["Image"]) or
            ("Height" not in D["Image"])):
        raise MediaInfoError("Could not determine all picture parameters")
    return D
Scans the given file with MediaInfo and returns the picture information if all the required parameters were found.
def _parse_to_recoverable_signature(sig):
    assert isinstance(sig, bytes)
    assert len(sig) == 65

    rec_sig = ffi.new("secp256k1_ecdsa_recoverable_signature *")
    recid = ord(sig[64:65])
    parsable_sig = lib.secp256k1_ecdsa_recoverable_signature_parse_compact(
        ctx, rec_sig, sig, recid
    )
    if not parsable_sig:
        raise InvalidSignatureError()

    return rec_sig
Parses a 65-byte compact signature into a recoverable signature struct.
def available_composite_ids(self, available_datasets=None):
    if available_datasets is None:
        available_datasets = self.available_dataset_ids(composites=False)
    else:
        if not all(isinstance(ds_id, DatasetID) for ds_id in available_datasets):
            raise ValueError(
                "'available_datasets' must all be DatasetID objects")

    all_comps = self.all_composite_ids()
    comps, mods = self.cpl.load_compositors(self.attrs['sensor'])
    dep_tree = DependencyTree(self.readers, comps, mods)
    dep_tree.find_dependencies(set(available_datasets + all_comps))
    available_comps = set(x.name for x in dep_tree.trunk())
    return sorted(available_comps & set(all_comps))
Get names of compositors that can be generated from the available datasets.

Returns: sorted list of available compositor names