Dataset columns:
Unnamed: 0 — int64, values 0 to 389k
code — string, 26 to 79.6k characters
docstring — string, 1 to 46.9k characters
381,700
def handle_truncated_response(callback, params, entities):
    results = {}
    for entity in entities:
        results[entity] = []
    while True:
        try:
            marker_found = False
            response = callback(**params)
            for entity in entities:
                if entity in response:
                    results[entity] = results[entity] + response[entity]
            # NOTE: the marker-key literals were stripped during extraction;
            # 'NextToken', 'Marker' and 'PaginationToken' are the usual AWS
            # pagination keys and are a best-effort reconstruction.
            for marker_name in ['NextToken', 'Marker', 'PaginationToken']:
                if marker_name in response and response[marker_name]:
                    params[marker_name] = response[marker_name]
                    marker_found = True
            if not marker_found:
                break
        except Exception as e:
            if is_throttled(e):
                time.sleep(1)
            else:
                raise e
    return results
Handle truncated responses :param callback: :param params: :param entities: :return:
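A hedged usage sketch for the pagination helper above; the boto3 client, the request parameters, and the entity name are illustrative assumptions, not part of this dataset row, and the function is assumed to be importable from the same module as its is_throttled helper:

import boto3

ec2 = boto3.client("ec2")  # hypothetical client, purely for illustration
# gather every reservation across the paginated DescribeInstances responses
reservations = handle_truncated_response(
    ec2.describe_instances, {"MaxResults": 100}, ["Reservations"]
)["Reservations"]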
381,701
def parse_cctop_full(infile): parser = etree.XMLParser(ns_clean=True) with open(infile, ) as f: tree = etree.fromstring(f.read(), parser) all_info = [] if tree.find() is not None: for r in tree.find().findall(): region_start = int(r.attrib[]) region_end = int(r.attrib[]) region = r.attrib[] for i in range(region_start, region_end + 1): all_info.append((i, region)) return all_info
Parse a CCTOP XML results file and return a list of the consensus TM domains in the format:: [(1, inside_outside_or_tm), (2, inside_outside_or_tm), ...] Where the first value of a tuple is the sequence residue number, and the second is the predicted location with the values 'I' (inside), 'O' (outside), or 'M' (membrane). Args: infile (str): Path to CCTOP XML file Returns: list: List of tuples in the format described above
381,702
def unsubscribe(self, tag, match_type=None): if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt[], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt)
Un-subscribe to events matching the passed tag.
381,703
def _get_files(extension, path):
    files = []
    for file in os.listdir(path):
        if file.endswith(extension):
            files.append(os.path.join(path, file))
    return sorted(files)
Returns a sorted list of all of the files having the same extension under the same directory :param extension: the extension of the data files such as 'gdm' :param path: path to the folder containing the files :return: sorted list of files
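A tiny usage sketch for the helper above; the extension and folder path are made-up examples:

# hypothetical call: collect all '.gdm' data files in a folder, sorted by name
data_files = _get_files(".gdm", "/path/to/data")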
381,704
def serialize_to_string( root_processor, value, indent=None ): if not _is_valid_root_processor(root_processor): raise InvalidRootProcessor() state = _ProcessorState() state.push_location(root_processor.element_path) root = root_processor.serialize(value, state) state.pop_location() serialized_value = ET.tostring(root, encoding=) if indent: serialized_value = minidom.parseString(serialized_value).toprettyxml( indent=indent, encoding= ) return serialized_value.decode()
Serialize the value to an XML string using the root processor. :return: The serialized XML string. See also :func:`declxml.serialize_to_file`
381,705
def write_to_file(self): with open(, ) as output: output.write( + + + + ) previous_commits = 0 for week in self.sorted_weeks: if str(self.commits[week]) != previous_commits: week_formatted = datetime.datetime.utcfromtimestamp( week ).strftime() output.write(week_formatted + + str(self.commits[week]) + ) previous_commits = str(self.commits[week])
Writes the weeks with associated commits to file.
381,706
def infer_namespace(ac):
    namespaces = infer_namespaces(ac)
    if not namespaces:
        return None
    if len(namespaces) > 1:
        raise BioutilsError("Multiple namespaces possible for {}".format(ac))
    return namespaces[0]
Infer the single namespace of the given accession This function is convenience wrapper around infer_namespaces(). Returns: * None if no namespaces are inferred * The (single) namespace if only one namespace is inferred * Raises an exception if more than one namespace is inferred >>> infer_namespace("ENST00000530893.6") 'ensembl' >>> infer_namespace("NM_01234.5") 'refseq' >>> infer_namespace("A2BC19") 'uniprot' N.B. The following test is disabled because Python 2 and Python 3 handle doctest exceptions differently. :-( X>>> infer_namespace("P12345") Traceback (most recent call last): ... bioutils.exceptions.BioutilsError: Multiple namespaces possible for P12345 >>> infer_namespace("BOGUS99") is None True
381,707
def get_id(self):
    if self._id is None:
        self._id = self.inspect(refresh=False)["Id"]
    return self._id
get unique identifier of this container :return: str
381,708
def update_contents(self, contents, mime_type): import hashlib import time new_size = len(contents) self.mime_type = mime_type if mime_type == : self.contents = contents.encode() else: self.contents = contents old_hash = self.hash self.hash = hashlib.md5(self.contents).hexdigest() if self.size and (old_hash != self.hash): self.modified = int(time.time()) self.size = new_size
Update the contents and set the hash and modification time
381,709
def gaussian_filter(self, sigma=2, order=0):
    from scipy.ndimage.filters import gaussian_filter
    return self.map(lambda v: gaussian_filter(v, sigma, order),
                    value_shape=self.value_shape)
Spatially smooth images with a gaussian filter. Filtering will be applied to every image in the collection. Parameters ---------- sigma : scalar or sequence of scalars, default = 2 Size of the filter size as standard deviation in pixels. A sequence is interpreted as the standard deviation for each axis. A single scalar is applied equally to all axes. order : choice of 0 / 1 / 2 / 3 or sequence from same set, optional, default = 0 Order of the gaussian kernel, 0 is a gaussian, higher numbers correspond to derivatives of a gaussian.
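A minimal sketch of the underlying smoothing step, applied to a single image array with scipy directly rather than through the collection's map method, to show scalar versus per-axis sigma; the array contents are made up:

import numpy as np
from scipy.ndimage import gaussian_filter

img = np.random.rand(64, 64)
smoothed_iso = gaussian_filter(img, sigma=2)         # same smoothing on every axis
smoothed_aniso = gaussian_filter(img, sigma=(1, 4))  # per-axis standard deviations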
381,710
def tokens_required(scopes=, new=False): def decorator(view_func): @wraps(view_func, assigned=available_attrs(view_func)) def _wrapped_view(request, *args, **kwargs): token = _check_callback(request) if token and new: tokens = Token.objects.filter(pk=token.pk) logger.debug("Returning new token.") return view_func(request, tokens, *args, **kwargs) if not new: if not request.user.is_authenticated: logger.debug( "Session {0} is not logged in. Redirecting to login.".format(request.session.session_key[:5])) from django.contrib.auth.views import redirect_to_login return redirect_to_login(request.get_full_path()) tokens = Token.objects.filter(user__pk=request.user.pk).require_scopes(scopes).require_valid() if tokens.exists(): logger.debug("Retrieved {0} tokens for {1} session {2}".format(tokens.count(), request.user, request.session.session_key[:5])) return view_func(request, tokens, *args, **kwargs) logger.debug("No tokens identified for {0} session {1}. Redirecting to SSO.".format(request.user, request.session.session_key[:5])) from esi.views import sso_redirect return sso_redirect(request, scopes=scopes) return _wrapped_view return decorator
Decorator for views to request an ESI Token. Accepts required scopes as a space-delimited string or list of strings of scope names. Can require a new token to be retrieved by SSO. Returns a QueryDict of Tokens.
381,711
def _fetch_access_token(self, url, data): logger.info() res = self._http.post( url=url, data=data ) try: res.raise_for_status() except requests.RequestException as reqe: raise WeChatClientException( errcode=None, errmsg=None, client=self, request=reqe.request, response=reqe.response ) result = res.json() if in result and result[] != 0: raise WeChatClientException( result[], result[], client=self, request=res.request, response=res ) expires_in = 7200 if in result: expires_in = result[] self.session.set( , result[], expires_in ) self.expires_at = int(time.time()) + expires_in return result
The real fetch access token
381,712
def xtqx(self):
    if self.__xtqx is None:
        self.log("xtqx")
        self.__xtqx = self.jco.T * (self.obscov ** -1) * self.jco
        self.log("xtqx")
    return self.__xtqx
get the normal matrix attribute. Create the attribute if it has not yet been created Returns ------- xtqx : pyemu.Matrix
381,713
def positions(self, reverse=False):
    def Posgen(reverse):
        if reverse:
            lastrootsib = self.last_sibling_position(self.root)
            current = self.last_decendant(lastrootsib)
            while current is not None:
                yield current
                current = self.prev_position(current)
        else:
            current = self.root
            while current is not None:
                yield current
                current = self.next_position(current)
    return Posgen(reverse)
returns a generator that walks the positions of this tree in DFO
381,714
def add_init_files(path, zip_handler): paths = path.split() paths = paths[:len(paths) - 1] for sub_path in paths: for root, dirs, files in os.walk(sub_path): for file_to_zip in [x for x in files if in x]: filename = os.path.join(root, file_to_zip) zip_con = filename.replace(, ) if zip_con in zip_handler.namelist(): continue add_file(filename, zip_handler, False)
adds init files to the included folder :param path: str
381,715
def create_volume(self, availability_zone, size=None, snapshot_id=None):
    params = {"AvailabilityZone": availability_zone}
    if ((snapshot_id is None and size is None) or
            (snapshot_id is not None and size is not None)):
        raise ValueError("Please provide either size or snapshot_id")
    if size is not None:
        params["Size"] = str(size)
    if snapshot_id is not None:
        params["SnapshotId"] = snapshot_id
    query = self.query_factory(
        action="CreateVolume", creds=self.creds, endpoint=self.endpoint,
        other_params=params)
    d = query.submit()
    return d.addCallback(self.parser.create_volume)
Create a new volume.
381,716
def parent(self) -> Optional["CtsReference"]:
    if self.start.depth == 1 and (self.end is None or self.end.depth <= 1):
        return None
    else:
        if self.start.depth > 1 and (self.end is None or self.end.depth == 0):
            return CtsReference("{0}{1}".format(
                ".".join(self.start.list[:-1]),
                self.start.subreference or ""
            ))
        elif self.start.depth > 1 and self.end is not None and self.end.depth > 1:
            _start = self.start.list[0:-1]
            _end = self.end.list[0:-1]
            if _start == _end and \
                    self.start.subreference is None and \
                    self.end.subreference is None:
                return CtsReference(".".join(_start))
            else:
                return CtsReference("{0}{1}-{2}{3}".format(
                    ".".join(_start),
                    self.start.subreference or "",
                    ".".join(_end),
                    self.end.subreference or ""
                ))
Parent of the actual URN, for example, 1.1 for 1.1.1 :rtype: CtsReference
381,717
def create_table( self, parent, table_id, table, initial_splits=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): if "create_table" not in self._inner_api_calls: self._inner_api_calls[ "create_table" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_table, default_retry=self._method_configs["CreateTable"].retry, default_timeout=self._method_configs["CreateTable"].timeout, client_info=self._client_info, ) request = bigtable_table_admin_pb2.CreateTableRequest( parent=parent, table_id=table_id, table=table, initial_splits=initial_splits ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("parent", parent)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["create_table"]( request, retry=retry, timeout=timeout, metadata=metadata )
Creates a new table in the specified instance. The table can be created with a full set of initial column families, specified in the request. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') >>> >>> # TODO: Initialize `table_id`: >>> table_id = '' >>> >>> # TODO: Initialize `table`: >>> table = {} >>> >>> response = client.create_table(parent, table_id, table) Args: parent (str): The unique name of the instance in which to create the table. Values are of the form ``projects/<project>/instances/<instance>``. table_id (str): The name by which the new table should be referred to within the parent instance, e.g., ``foobar`` rather than ``<parent>/tables/foobar``. table (Union[dict, ~google.cloud.bigtable_admin_v2.types.Table]): The Table to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Table` initial_splits (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Split]]): The optional list of row keys that will be used to initially split the table into several tablets (tablets are similar to HBase regions). Given two split keys, ``s1`` and ``s2``, three tablets will be created, spanning the key ranges: ``[, s1), [s1, s2), [s2, )``. Example: - Row keys := ``["a", "apple", "custom", "customer_1", "customer_2",`` ``"other", "zz"]`` - initial\_split\_keys := ``["apple", "customer_1", "customer_2", "other"]`` - Key assignment: - Tablet 1 ``[, apple) => {"a"}.`` - Tablet 2 ``[apple, customer_1) => {"apple", "custom"}.`` - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - Tablet 5 ``[other, ) => {"other", "zz"}.`` If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Split` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
381,718
def save(self, record_key, record_data, overwrite=True, secret_key=): labunittests1473719695.2165067.json title = % self.__class__.__name__ input_fields = { : record_key, : secret_key } for key, value in input_fields.items(): if value: object_title = % (title, key, str(value)) self.fields.validate(value, % key, object_title) if not isinstance(record_data, bytes): raise ValueError( % title) file_path = os.path.join(self.collection_folder, record_key) file_path = self.fields.validate(file_path, ) file_root, file_name = os.path.split(file_path) self.fields.validate(file_name, ) while file_root != self.collection_folder: file_root, path_node = os.path.split(file_root) self.fields.validate(path_node, ) from os import path, makedirs if not overwrite: if path.exists(file_path): raise Exception( % (title, record_key)) file_root, file_node = path.split(file_path) if file_root: if not path.exists(file_root): makedirs(file_root) if secret_key: from labpack.encryption import cryptolab record_data, secret_key = cryptolab.encrypt(record_data, secret_key) with open(file_path, ) as f: f.write(record_data) f.close() import re if re.search(, file_name): from os import utime file_time = 1 utime(file_path, times=(file_time, file_time)) return record_key
a method to create a record in the collection folder :param record_key: string with name to assign to record (see NOTES below) :param record_data: byte data for record body :param overwrite: [optional] boolean to overwrite records with same name :param secret_key: [optional] string with key to encrypt data :return: string with name of record NOTE: record_key may only contain alphanumeric, /, _, . or - characters and may not begin with the . or / character. NOTE: using one or more / characters splits the key into separate segments. these segments will appear as a sub directories inside the record collection and each segment is used as a separate index for that record when using the list method eg. lab/unittests/1473719695.2165067.json is indexed: [ 'lab', 'unittests', '1473719695.2165067', '.json' ]
381,719
def free_memory(cls, exclude=None):
    annotations_in_memory = Annotation.__ANNOTATIONS_IN_MEMORY__
    exclude = () if exclude is None else exclude
    for annotation_cls in list(annotations_in_memory.keys()):
        if issubclass(annotation_cls, exclude):
            continue
        if issubclass(annotation_cls, cls):
            del annotations_in_memory[annotation_cls]
Free global annotation memory.
381,720
def load_generated_checkers(cls, args):
    for gen in cls._get_generator_plugins():
        checkers = gen.get_checkers(args)
        cls.checkers.update(checkers)
Load checker classes from generator plugins
381,721
def logistic_map(x, steps, r=4):
    for _ in range(steps):
        x = r * x * (1 - x)
        yield x
r""" Generates a time series of the logistic map. Characteristics and Background: The logistic map is among the simplest examples for a time series that can exhibit chaotic behavior depending on the parameter r. For r between 2 and 3, the series quickly becomes static. At r=3 the first bifurcation point is reached after which the series starts to oscillate. Beginning with r = 3.6 it shows chaotic behavior with a few islands of stability until perfect chaos is achieved at r = 4. Calculating the Lyapunov exponent: To calculate the "true" Lyapunov exponent of the logistic map, we first have to make a few observations for maps in general that are repeated applications of a function to a starting value. If we have two starting values that differ by some infinitesimal :math:`delta_0` then according to the definition of the lyapunov exponent we will have an exponential divergence: .. math:: |\delta_n| = |\delta_0| e^{\lambda n} We can now write that: .. math:: e^{\lambda n} = \lim_{\delta_0 -> 0} |\frac{\delta_n}{\delta_0}| This is the definition of the derivative :math:`\frac{dx_n}{dx_0}` of a point :math:`x_n` in the time series with respect to the starting point :math:`x_0` (or rather the absolute value of that derivative). Now we can use the fact that due to the definition of our map as repetitive application of some f we have: .. math:: f^{n\prime}(x) = f(f(f(...f(x_0)...))) = f'(x_n-1) \cdot f'(x_n-2) \cdot ... \cdot f'(x_0) with .. math:: e^{\lambda n} = |f^{n\prime}(x)| we now have .. math:: e^{\lambda n} &= |f'(x_n-1) \cdot f'(x_n-2) \cdot ... \cdot f'(x_0)| \\ \Leftrightarrow \\ \lambda n &= \ln |f'(x_n-1) \cdot f'(x_n-2) \cdot ... \cdot f'(x_0)| \\ \Leftrightarrow \\ \lambda &= \frac{1}{n} \ln |f'(x_n-1) \cdot f'(x_n-2) \cdot ... \cdot f'(x_0)| \\ &= \frac{1}{n} \sum_{k=0}^{n-1} \ln |f'(x_k)| With this sum we can now calculate the lyapunov exponent for any map. For the logistic map we simply have to calculate :math:`f'(x)` and as we have .. math:: f(x) = r x (1-x) = rx - rx² we now get .. math:: f'(x) = r - 2 rx References: .. [lm_1] https://en.wikipedia.org/wiki/Tent_map .. [lm_2] https://blog.abhranil.net/2015/05/15/lyapunov-exponent-of-the-logistic-map-mathematica-code/ Args: x (float): starting point steps (int): number of steps for which the generator should run Kwargs: r (int): parameter r that controls the behavior of the map Returns: generator object: the generator that creates the time series
381,722
def main(): if len(sys.argv) < 3: usage() rgname = sys.argv[1] vmss = sys.argv[2] try: with open() as config_file: config_data = json.load(config_file) except FileNotFoundError: sys.exit("Error: Expecting azurermconfig.json in current folder") tenant_id = config_data[] app_id = config_data[] app_secret = config_data[] sub_id = config_data[] access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) provider = resource_type = metric_definitions = azurerm.list_metric_defs_for_resource(access_token, sub_id, rgname, provider, resource_type, vmss) print(json.dumps(metric_definitions, sort_keys=False, indent=2, separators=(, ))) metrics = azurerm.get_metrics_for_resource(access_token, sub_id, rgname, provider, resource_type, vmss) print(json.dumps(metrics, sort_keys=False, indent=2, separators=(, )))
Main routine.
381,723
def run(juttle, deployment_name, program_name=None, persist=False, token_manager=None, app_url=defaults.APP_URL): headers = token_manager.get_access_token_headers() data_url = get_juttle_data_url(deployment_name, app_url=app_url, token_manager=token_manager) websocket = __wss_connect(data_url, token_manager) data = websocket.recv() channel_id_obj = json.loads(data) if is_debug_enabled(): debug(, json.dumps(channel_id_obj)) channel_id = channel_id_obj[] juttle_job = { : channel_id, : program_name, : juttle } response = requests.post( % data_url, data=json.dumps(juttle_job), headers=headers) if response.status_code != 200: yield { "error": True, "context": response.json() } return job_info = response.json() yield job_info job_id = job_info[][] if is_debug_enabled(): debug(, json.dumps(job_info)) for data in connect_job(job_id, deployment_name, token_manager=token_manager, app_url=app_url, persist=persist, websocket=websocket, data_url=data_url): yield data
run a juttle program through the juttle streaming API and return the various events that are part of running a Juttle program which include: * Initial job status details including information to associate multiple flowgraphs with their individual outputs (sinks): { "status": "ok", "job": { "channel_id": "56bde5f0", "_start_time": "2015-10-03T06:59:49.233Z", "alias": "jut-tools program 1443855588", "_ms_begin": 1443855589233, "user": "0fbbd98d-cf33-4582-8ca1-15a3d3fee510", "timeout": 5, "id": "b973bce6" }, "now": "2015-10-03T06:59:49.230Z", "stats": ... "sinks": [ { "location": { "start": { "column": 17, "line": 1, "offset": 16 }, "end": { "column": 24, "line": 1, "offset": 23 }, "filename": "main" }, "name": "table", "channel": "sink237", "options": { "_jut_time_bounds": [] } }, ... as many sinks as there are flowgrpahs in your program ] } * Each set of points returned along with the indication of which sink they belong to: { "points": [ array of points ], "sink": sink_id } * Error event indicating where in your program the error occurred { "error": true, payload with "info" and "context" explaining exact error } * Warning event indicating where in your program the error occurred { "warning": true, payload with "info" and "context" explaining exact warning } * ... juttle: juttle program to execute deployment_name: the deployment name to execute the program on persist: if set to True then we won't wait for response data and will disconnect from the websocket leaving the program running in the background if it is uses a background output (http://docs.jut.io/juttle-guide/#background_outputs) and therefore becomes a persistent job. token_manager: auth.TokenManager object app_url: optional argument used primarily for internal Jut testing
381,724
def RdatasetsBM(database, host=rbiomart_host):
    biomaRt = importr("biomaRt")
    ensemblMart = biomaRt.useMart(database, host=host)
    print(biomaRt.listDatasets(ensemblMart))
Lists BioMart datasets through a RPY2 connection. :param database: a database listed in RdatabasesBM() :param host: address of the host server, default='www.ensembl.org' :returns: nothing
381,725
def ls(self): tree = self.ls_tree() return [t.get() for t in tree if t.get()]
Return a list of *all* files & dirs in the repo. Think of this as a recursive `ls` command from the root of the repo.
381,726
def get_lonlatalt(self, utc_time):
    (pos_x, pos_y, pos_z), (vel_x, vel_y, vel_z) = self.get_position(
        utc_time, normalize=True)
    lon = ((np.arctan2(pos_y * XKMPER, pos_x * XKMPER) - astronomy.gmst(utc_time))
           % (2 * np.pi))
    lon = np.where(lon > np.pi, lon - np.pi * 2, lon)
    lon = np.where(lon <= -np.pi, lon + np.pi * 2, lon)
    r = np.sqrt(pos_x ** 2 + pos_y ** 2)
    lat = np.arctan2(pos_z, r)
    e2 = F * (2 - F)
    while True:
        lat2 = lat
        c = 1 / (np.sqrt(1 - e2 * (np.sin(lat2) ** 2)))
        lat = np.arctan2(pos_z + c * e2 * np.sin(lat2), r)
        if np.all(abs(lat - lat2) < 1e-10):
            break
    alt = r / np.cos(lat) - c
    alt *= A
    return np.rad2deg(lon), np.rad2deg(lat), alt
Calculate sublon, sublat and altitude of satellite. http://celestrak.com/columns/v02n03/
381,727
def visible_object_groups(self):
    return (i for (i, l) in enumerate(self.layers)
            if l.visible and isinstance(l, TiledObjectGroup))
Return iterator of object group indexes that are set 'visible' :rtype: Iterator
381,728
def list_joined_groups(self, user_alias=None): xml = self.api.xml(API_GROUP_LIST_JOINED_GROUPS % (user_alias or self.api.user_alias)) xml_results = xml.xpath() results = [] for item in xml_results: try: icon = item.xpath()[0] link = item.xpath()[0] url = link.get() name = link.text alias = url.rstrip().rsplit(, 1)[1] user_count = int(item.xpath()[0][1:-1]) results.append({ : icon, : alias, : url, : name, : user_count, }) except Exception as e: self.api.logger.exception( % e) return build_list_result(results, xml)
List of groups the user has joined. :param user_alias: username, defaults to the current user :return: a single page of results
381,729
def log(self, string):
    self.log_data.append(string)
    if self.log_function is None:
        print(string)
    else:
        self.log_function(string)
appends input string to log file and sends it to log function (self.log_function) Returns:
381,730
def get_name(obj, setting_name=): nickname = obj.get_nickname() romanized_first_name = obj.get_romanized_first_name() romanized_last_name = obj.get_romanized_last_name() non_romanized_first_name = obj.get_non_romanized_first_name() non_romanized_last_name = obj.get_non_romanized_last_name() non_translated_title = obj.get_title() non_translated_gender = obj.get_gender() if non_translated_title: title = gettext(non_translated_title) else: title = non_translated_title if non_translated_gender: gender = gettext(non_translated_gender) else: gender = non_translated_gender format_string = u.format(get_format(setting_name)) format_kwargs = {} if in format_string: format_kwargs.update({: nickname}) if in format_string: format_kwargs.update({: nickname.upper()}) if in format_string: format_kwargs.update({: romanized_first_name}) if in format_string: format_kwargs.update({: romanized_first_name.upper()}) if in format_string: format_kwargs.update({: romanized_last_name}) if in format_string: format_kwargs.update({: romanized_last_name.upper()}) if in format_string: format_kwargs.update({: non_romanized_first_name}) if in format_string: format_kwargs.update({: non_romanized_first_name.upper()}) if in format_string: format_kwargs.update({: non_romanized_last_name}) if in format_string: format_kwargs.update({: non_romanized_last_name.upper()}) if in format_string: format_kwargs.update({: title}) if in format_string: format_kwargs.update({: title.upper()}) if in format_string: format_kwargs.update({: gender}) if in format_string: format_kwargs.update({: gender.upper()}) return format_string.format(**format_kwargs)
Returns the correct order of the name according to the current language.
381,731
def serialize_dict(self, attr, dict_type, **kwargs): serialization_ctxt = kwargs.get("serialization_ctxt", {}) serialized = {} for key, value in attr.items(): try: serialized[self.serialize_unicode(key)] = self.serialize_data( value, dict_type, **kwargs) except ValueError: serialized[self.serialize_unicode(key)] = None if in serialization_ctxt: xml_desc = serialization_ctxt[] xml_name = xml_desc[] final_result = _create_xml_node( xml_name, xml_desc.get(, None), xml_desc.get(, None) ) for key, value in serialized.items(): ET.SubElement(final_result, key).text = value return final_result return serialized
Serialize a dictionary of objects. :param dict attr: Object to be serialized. :param str dict_type: Type of object in the dictionary. :param bool required: Whether the objects in the dictionary must not be None or empty. :rtype: dict
381,732
def _build_layers(self, inputs, num_outputs, options):
    hiddens = options.get("fcnet_hiddens")
    activation = get_activation_fn(options.get("fcnet_activation"))
    with tf.name_scope("fc_net"):
        i = 1
        last_layer = inputs
        for size in hiddens:
            label = "fc{}".format(i)
            last_layer = slim.fully_connected(
                last_layer, size,
                weights_initializer=normc_initializer(1.0),
                activation_fn=activation,
                scope=label)
            i += 1
        label = "fc_out"
        output = slim.fully_connected(
            last_layer, num_outputs,
            weights_initializer=normc_initializer(0.01),
            activation_fn=None,
            scope=label)
        return output, last_layer
Process the flattened inputs. Note that dict inputs will be flattened into a vector. To define a model that processes the components separately, use _build_layers_v2().
381,733
def isID(self, elem, attr):
    if elem is None:
        elem__o = None
    else:
        elem__o = elem._o
    if attr is None:
        attr__o = None
    else:
        attr__o = attr._o
    ret = libxml2mod.xmlIsID(self._o, elem__o, attr__o)
    return ret
Determine whether an attribute is of type ID. In case we have DTD(s) then this is done if DTD loading has been requested. In the case of HTML documents parsed with the HTML parser, then ID detection is done systematically.
381,734
def _get_encoder_data_shapes(self, bucket_key: int, batch_size: int) -> List[mx.io.DataDesc]:
    return [mx.io.DataDesc(name=C.SOURCE_NAME,
                           shape=(batch_size,) + self.input_size,
                           layout=C.BATCH_MAJOR_IMAGE)]
Returns data shapes of the encoder module. :param bucket_key: Maximum input length. :param batch_size: Batch size. :return: List of data descriptions.
381,735
def check_auth(email, password):
    try:
        user = User.get(User.email == email)
    except User.DoesNotExist:
        return False
    return password == user.password
Check if a username/password combination is valid.
381,736
def _pip_search(stdout, stderr): result = {} lines = to_text_string(stdout).split() while in lines: lines.remove() for line in lines: if in line: parts = line.split() name = parts[0].strip() description = parts[1].strip() result[name] = description return result
Callback for pip search.
381,737
def parse_request(self): self.command = None self.request_version = version = self.default_request_version self.close_connection = 1 requestline = str(self.raw_requestline, ) requestline = requestline.rstrip() self.requestline = requestline words = requestline.split() if len(words) == 3: command, path, version = words if version[:5] != : self.send_error(400, "Bad request version (%r)" % version) return False try: base_version_number = version.split(, 1)[1] version_number = base_version_number.split(".") if len(version_number) != 2: raise ValueError version_number = int(version_number[0]), int(version_number[1]) except (ValueError, IndexError): self.send_error(400, "Bad request version (%r)" % version) return False if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1": self.close_connection = 0 if version_number >= (2, 0): self.send_error(505, "Invalid HTTP Version (%s)" % base_version_number) return False elif len(words) == 2: command, path = words self.close_connection = 1 if command != : self.send_error(400, "Bad HTTP/0.9 request type (%r)" % command) return False elif not words: return False else: self.send_error(400, "Bad request syntax (%r)" % requestline) return False self.command, self.path, self.request_version = command, path, version try: self.headers = http_client.parse_headers(self.rfile, _class=self.MessageClass) except http_client.LineTooLong: self.send_error(400, "Line too long") return False conntype = self.headers.get(, "") if conntype.lower() == : self.close_connection = 1 elif (conntype.lower() == and self.protocol_version >= "HTTP/1.1"): self.close_connection = 0 expect = self.headers.get(, "") if (expect.lower() == "100-continue" and self.protocol_version >= "HTTP/1.1" and self.request_version >= "HTTP/1.1"): if not self.handle_expect_100(): return False return True
Parse a request (internal). The request should be stored in self.raw_requestline; the results are in self.command, self.path, self.request_version and self.headers. Return True for success, False for failure; on failure, an error is sent back.
381,738
def get_lsf_status(): status_count = {: 0, : 0, : 0, : 0, : 0, : 0} try: subproc = subprocess.Popen([], stdout=subprocess.PIPE, stderr=subprocess.PIPE) subproc.stderr.close() output = subproc.stdout.readlines() except OSError: return status_count for line in output[1:]: line = line.strip().split() if len(line) < 5: continue status_count[] += 1 for k in status_count: if line[2] == k: status_count[k] += 1 return status_count
Count and print the number of jobs in various LSF states
381,739
def SVGdocument():
    "Create default SVG document"
    import xml.dom.minidom
    implementation = xml.dom.minidom.getDOMImplementation()
    doctype = implementation.createDocumentType(
        "svg", "-//W3C//DTD SVG 1.1//EN",
        "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"
    )
    document = implementation.createDocument(None, "svg", doctype)
    # the attribute literals were stripped in extraction; the standard SVG
    # namespace declaration is the most likely original value
    document.documentElement.setAttribute(
        'xmlns', 'http://www.w3.org/2000/svg')
    return document
Create default SVG document
381,740
def dateof(tag_name, tags): for tag in tags: if tag[] == tag_name: commit = read_url(tag[][]) return parse_timestamp(commit[][][]) return None
Given a list of tags, returns the datetime of the tag with the given name; Otherwise None.
381,741
def register_parser(self, type, parser, **meta):
    # the stripped dict key is assumed to be 'parser', matching the method name
    try:
        self.registered_formats[type]['parser'] = parser
    except KeyError:
        self.registered_formats[type] = {'parser': parser}
    if meta:
        self.register_meta(type, **meta)
Registers a parser of a format. :param type: The unique name of the format :param parser: The method to parse data as the format :param meta: The extra information associated with the format
381,742
def home_mode_status(self, **kwargs): api = self._api_info[] payload = dict({ : api[], : , : api[], : self._sid }, **kwargs) response = self._get_json_with_retry(api[], payload) return response[][]
Returns the status of Home Mode
381,743
def all(cls, client, **kwargs): max_date = kwargs[] if in kwargs else None max_fetches = \ kwargs[] if in kwargs else None url = params = {} data = client.get(url, params=params) results = data["results"] if is_max_date_gt(max_date, results[-1][][0:10]): return results if max_fetches == 1: return results fetches = 1 while data["next"]: fetches = fetches + 1 data = client.get(data["next"]) results.extend(data["results"]) if is_max_date_gt(max_date, results[-1][][0:10]): return results if max_fetches and (fetches >= max_fetches): return results return results
fetch all option positions
381,744
def create_object(module_name: str, class_name: str, args: Iterable = (),
                  kwargs: Dict[str, Any] = _EMPTY_DICT):
    return get_attribute(module_name, class_name)(*args, **kwargs)
Create an object instance of the given class from the given module. Args and kwargs are passed to the constructor. This mimics the following code: .. code-block:: python from module import class return class(*args, **kwargs) :param module_name: module name :param class_name: class name :param args: args to be passed to the object constructor :param kwargs: kwargs to be passed to the object constructor :return: created object instance
381,745
def pivot(self, binned=True):
    if binned:
        wave = self.binwave
    else:
        wave = self.wave
    countmulwave = self(wave) * wave
    countdivwave = self(wave) / wave
    num = self.trapezoidIntegration(wave, countmulwave)
    den = self.trapezoidIntegration(wave, countdivwave)
    if num == 0.0 or den == 0.0:
        return 0.0
    return math.sqrt(num / den)
Calculate :ref:`pivot wavelength <pysynphot-formula-pivwv>` of the observation. .. note:: This is the calculation performed when ETC invokes ``calcphot``. Parameters ---------- binned : bool Use binned dataset for calculations. Otherwise, use native dataset. Returns ------- ans : float Pivot wavelength.
381,746
def main(): parser = argparse.ArgumentParser() parser.add_argument(, help=) parser.add_argument(, help=) parser.add_argument(, help=) parser.add_argument(, action=, default=[ , , ], help=) args = parser.parse_args() reversion(args)
Given an input whl file and target version, create a copy of the whl with that version. This is accomplished via string replacement in files matching a list of globs. Pass the optional `--glob` argument to add additional globs: ie `--glob='thing-to-match*.txt'`.
381,747
async def main(): client = Client(BMAS_ENDPOINT) response = await client(bma.node.summary) print(response) salt = getpass.getpass("Enter your passphrase (salt): ") password = getpass.getpass("Enter your password: ") key = SigningKey.from_credentials(salt, password) pubkey_from = key.pubkey pubkey_to = input("Enter recipient pubkey: ") current_block = await client(bma.blockchain.current) response = await client(bma.tx.sources, pubkey_from) if len(response[]) == 0: print("no sources found for account %s" % pubkey_to) exit(1) source = response[][0] transaction = get_transaction_document(current_block, source, pubkey_from, pubkey_to) transaction.sign([key]) response = await client(bma.tx.process, transaction.signed_raw()) if response.status == 200: print(await response.text()) else: print("Error while publishing transaction: {0}".format(await response.text())) await client.close()
Main code
381,748
def activate_status_output_overall_status(self, **kwargs): config = ET.Element("config") activate_status = ET.Element("activate_status") config = activate_status output = ET.SubElement(activate_status, "output") overall_status = ET.SubElement(output, "overall-status") overall_status.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
381,749
def parse_time(timestring): timestring = str(timestring).strip() for regex, pattern in TIME_FORMATS: if regex.match(timestring): found = regex.search(timestring).groupdict() dt = datetime.utcnow().strptime(found[], pattern) dt = datetime.combine(date.today(), dt.time()) if in found and found[] is not None: dt = dt.replace(microsecond=int(found[][1:])) if in found and found[] is not None: dt = dt.replace(tzinfo=Timezone(found.get(, ))) return dt raise ParseError()
Attepmts to parse an ISO8601 formatted ``timestring``. Returns a ``datetime.datetime`` object.
381,750
def get_list(self, url=None, callback=None, limit=100, **data): url = url or str(self) data = dict(((k, v) for k, v in data.items() if v)) all_data = [] if limit: data[] = min(limit, 100) while url: response = self.http.get(url, params=data, auth=self.auth) response.raise_for_status() result = response.json() n = m = len(result) if callback: result = callback(result) m = len(result) all_data.extend(result) if limit and len(all_data) > limit: all_data = all_data[:limit] break elif m == n: data = None next = response.links.get(, {}) url = next.get() else: break return all_data
Get a list of this github component :param url: full url :param Comp: a :class:`.Component` class :param callback: Optional callback :param limit: Optional number of items to retrieve :param data: additional query data :return: a list of ``Comp`` objects with data
381,751
def _handle_successor(self, job, successor, successors): state = successor all_successor_states = successors addr = job.addr pw = None job.successor_status[state] = "" new_state = state.copy() suc_jumpkind = state.history.jumpkind suc_exit_stmt_idx = state.scratch.exit_stmt_idx suc_exit_ins_addr = state.scratch.exit_ins_addr if suc_jumpkind in {, , , , , , }: job.successor_status[state] = "Skipped" return [ ] call_target = job.extra_info[] if suc_jumpkind == "Ijk_FakeRet" and call_target is not None: if self.project.is_hooked(call_target): sim_proc = self.project._sim_procedures[call_target] if sim_proc.NO_RET: return [ ] try: target_addr = state.solver.eval_one(state.ip) except (SimValueError, SimSolverModeError): target_addr = None if suc_jumpkind == "Ijk_Ret": target_addr = job.call_stack.current_return_target if target_addr is not None: new_state.ip = new_state.solver.BVV(target_addr, new_state.arch.bits) if target_addr is None: return [ ] if state.thumb: target_addr |= 1 if self._address_whitelist is not None: if target_addr not in self._address_whitelist: l.debug("Successor % return [ ] if self._base_graph is not None: for src_, dst_ in self._base_graph.edges(): if src_.addr == addr and dst_.addr == target_addr: break else: l.debug("Edge (% return [ ] if suc_jumpkind.startswith("Ijk_Sys"): syscall_proc = self.project.simos.syscall(new_state) if syscall_proc is not None: target_addr = syscall_proc.addr self._pre_handle_successor_state(job.extra_info, suc_jumpkind, target_addr) if suc_jumpkind == "Ijk_FakeRet": if target_addr == job.extra_info[]: l.debug("... skipping a fake return exit that has the same target with its call exit.") job.successor_status[state] = "Skipped" return [ ] if job.extra_info[]: l.debug(s calling doesn\) job.successor_status[state] = "Skipped - non-returning function 0x%x" % job.extra_info[] return [ ] if (suc_jumpkind == and self._call_depth is not None and len(job.call_stack) <= 1 ): l.debug() job.successor_status[state] = "Skipped - reaching the end of the starting function" return [ ] new_call_stack = self._create_new_call_stack(addr, all_successor_states, job, target_addr, suc_jumpkind) new_call_stack_suffix = new_call_stack.stack_suffix(self._context_sensitivity_level) new_tpl = self._generate_block_id(new_call_stack_suffix, target_addr, suc_jumpkind.startswith()) self._reset_state_mode(new_state, ) pw = CFGJob(target_addr, new_state, self._context_sensitivity_level, src_block_id=job.block_id, src_exit_stmt_idx=suc_exit_stmt_idx, src_ins_addr=suc_exit_ins_addr, call_stack=new_call_stack, jumpkind=suc_jumpkind, ) st = new_state self._reset_state_mode(st, ) pw = None pe = PendingJob(job.func_addr, job.extra_info[], st, job.block_id, suc_exit_stmt_idx, suc_exit_ins_addr, new_call_stack ) self._pending_jobs[new_tpl] = pe self._register_analysis_job(pe.caller_func_addr, pe) job.successor_status[state] = "Pended" elif self._traced_addrs[new_call_stack_suffix][target_addr] >= 1 and suc_jumpkind == "Ijk_Ret": pass else: job.successor_status[state] = "Appended" if job.extra_info[] and job.extra_info[] in self._non_returning_functions: job.extra_info[] = True if not pw: return [ ] if self._base_graph is not None: if next((en for en in self.jobs if en.block_id == pw.block_id), None): self._job_info_queue = [entry for entry in self._job_info_queue if entry.job.block_id != pw.block_id] self._register_analysis_job(pw.func_addr, pw) return [ pw ]
Returns a new CFGJob instance for further analysis, or None if there is no immediate state to perform the analysis on. :param CFGJob job: The current job.
381,752
def getKwAsDict(self, kw):
    self.getKw(kw)
    return self.str2dict(self.confstr)
return keyword configuration as a dict Usage: rdict = getKwAsDict(kw)
381,753
def get_order_detail(self, code):
    if code is None or is_str(code) is False:
        error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
        return RET_ERROR, error_str
    query_processor = self._get_sync_query_processor(
        OrderDetail.pack_req, OrderDetail.unpack_rsp)
    kargs = {
        "code": code,
        "conn_id": self.get_sync_conn_id()
    }
    ret_code, msg, order_detail = query_processor(**kargs)
    if ret_code == RET_ERROR:
        return ret_code, msg
    return RET_OK, order_detail
Query the order book detail available with A-share Level 2 permission. :param code: stock code, e.g. 'HK.02318' :return: (ret, data). When ret == RET_OK, data is a dict with the fields below; when ret != RET_OK, data is an error string. {'code': stock code, 'Ask': [order_num, [order_volume1, order_volume2]], 'Bid': [order_num, [order_volume1, order_volume2]]} 'Ask' is the sell side and 'Bid' the buy side. order_num is the number of queued orders and order_volume is the volume of each order; at most the volumes of the first 50 orders are returned.
381,754
def calculate_rate(phone_number, address_country_code=None, address_exception=None): if not phone_number: raise ValueError() if not isinstance(phone_number, str_cls): raise ValueError() phone_number = phone_number.strip() phone_number = re.sub(, , phone_number) if not phone_number or phone_number[0] != : raise ValueError() phone_number = phone_number[1:] if not phone_number: raise ValueError() country_code = _lookup_country_code(phone_number) if not country_code: raise ValueError() if country_code in CALLING_CODE_EXCEPTIONS: for info in CALLING_CODE_EXCEPTIONS[country_code]: if not re.match(info[], phone_number): continue mapped_country = info[] mapped_name = info[] if not info[]: if address_country_code is None: raise UndefinitiveError() if address_country_code != mapped_country: continue if address_exception != info[]: continue rate = rates.BY_COUNTRY[mapped_country][][mapped_name] return (rate, mapped_country, mapped_name) if country_code not in rates.BY_COUNTRY: return (Decimal(), country_code, None) return (rates.BY_COUNTRY[country_code][], country_code, None)
Calculates the VAT rate based on a telephone number :param phone_number: The string phone number, in international format with leading + :param address_country_code: The user's country_code, as detected from billing_address or declared_residence. This prevents an UndefinitiveError from being raised. :param address_exception: The user's exception name, as detected from billing_address or declared_residence. This prevents an UndefinitiveError from being raised. :raises: ValueError - error with phone number provided UndefinitiveError - when no address_country_code and address_exception are provided and the phone number area code matching isn't specific enough :return: A tuple of (Decimal percentage rate, country code, exception name [or None])
381,755
def _Rforce(self, R, z, phi=0, t=0):
    if not self.isNonAxi and phi is None:
        phi = 0.
    r, theta, phi = bovy_coords.cyl_to_spher(R, z, phi)
    dr_dR = nu.divide(R, r)
    dtheta_dR = nu.divide(z, r ** 2)
    dphi_dR = 0
    return self._computeforceArray(dr_dR, dtheta_dR, dphi_dR, R, z, phi)
NAME: _Rforce PURPOSE: evaluate the radial force at (R,z, phi) INPUT: R - Cylindrical Galactocentric radius z - vertical height phi - azimuth t - time OUTPUT: radial force at (R,z, phi) HISTORY: 2016-06-06 - Written - Aladdin
381,756
def _set_attributes_on_managed_object(self, managed_object, attributes): for attribute_name, attribute_value in six.iteritems(attributes): object_type = managed_object._object_type if self._attribute_policy.is_attribute_applicable_to_object_type( attribute_name, object_type): self._set_attribute_on_managed_object( managed_object, (attribute_name, attribute_value) ) else: name = object_type.name raise exceptions.InvalidField( "Cannot set {0} attribute on {1} object.".format( attribute_name, .join([x.capitalize() for x in name.split()]) ) )
Given a kmip.pie object and a dictionary of attributes, attempt to set the attribute values on the object.
381,757
def do_capture(parser, token): bits = token.split_contents() t_as = t_silent = var = silent = False num_bits = len(bits) if len(bits) > 4: raise TemplateSyntaxError(" node supports parameters.") elif num_bits == 4: t_name, t_as, var, t_silent = bits silent = True elif num_bits == 3: t_name, t_as, var = bits elif num_bits == 2: t_name, t_silent = bits silent = True else: var = silent = False if t_silent != or t_as != : raise TemplateSyntaxError(" node expects or syntax.") nodelist = parser.parse((,)) parser.delete_first_token() return CaptureNode(nodelist, var, silent)
Capture the contents of a tag output. Usage: .. code-block:: html+django {% capture %}..{% endcapture %} # output in {{ capture }} {% capture silent %}..{% endcapture %} # output in {{ capture }} only {% capture as varname %}..{% endcapture %} # output in {{ varname }} {% capture as varname silent %}..{% endcapture %} # output in {{ varname }} only For example: .. code-block:: html+django {# Allow templates to override the page title/description #} <meta name="description" content="{% capture as meta_description %}{% block meta-description %}{% endblock %}{% endcapture %}" /> <title>{% capture as meta_title %}{% block meta-title %}Untitled{% endblock %}{% endcapture %}</title> {# copy the values to the Social Media meta tags #} <meta property="og:description" content="{% block og-description %}{{ meta_description }}{% endblock %}" /> <meta name="twitter:title" content="{% block twitter-title %}{{ meta_title }}{% endblock %}" />
381,758
def parameters_to_segments(origins, vectors, parameters):
    origins = np.asanyarray(origins, dtype=np.float64)
    vectors = np.asanyarray(vectors, dtype=np.float64)
    parameters = np.asanyarray(parameters, dtype=np.float64)
    segments = np.hstack((origins + vectors * parameters[:, :1],
                          origins + vectors * parameters[:, 1:]))
    return segments.reshape((-1, 2, origins.shape[1]))
Convert a parametric line segment representation to a two point line segment representation Parameters ------------ origins : (n, 3) float Line origin point vectors : (n, 3) float Unit line directions parameters : (n, 2) float Start and end distance pairs for each line Returns -------------- segments : (n, 2, 3) float Line segments defined by start and end points
381,759
def _remote_connection(server, opts, argparser_): global CONN if opts.timeout is not None: if opts.timeout < 0 or opts.timeout > 300: argparser_.error( % opts.timeout) if opts.mock_server: CONN = FakedWBEMConnection( default_namespace=opts.namespace, timeout=opts.timeout, stats_enabled=opts.statistics) try: build_mock_repository(CONN, opts.mock_server, opts.verbose) except ValueError as ve: argparser_.error( % ve) return CONN if server[0] == : url = server elif re.match(r"^https{0,1}://", server) is not None: url = server elif re.match(r"^[a-zA-Z0-9]+://", server) is not None: argparser_.error( ) else: url = % (, server) creds = None if opts.key_file is not None and opts.cert_file is None: argparser_.error() if opts.user is not None and opts.password is None: opts.password = _getpass.getpass( % opts.user) if opts.user is not None or opts.password is not None: creds = (opts.user, opts.password) x509_dict = None if opts.cert_file is not None: x509_dict = {"cert_file": opts.cert_file} if opts.key_file is not None: x509_dict.update({: opts.key_file}) CONN = WBEMConnection(url, creds, default_namespace=opts.namespace, no_verification=opts.no_verify_cert, x509=x509_dict, ca_certs=opts.ca_certs, timeout=opts.timeout, stats_enabled=opts.statistics) CONN.debug = True return CONN
Initiate a remote connection, via PyWBEM. Arguments for the request are part of the command line arguments and include user name, password, namespace, etc.
381,760
def load_vocab(self, vocab_name, **kwargs): log.setLevel(kwargs.get("log_level", self.log_level)) vocab = self.get_vocab(vocab_name , **kwargs) if vocab[] in self.loaded: if self.loaded_times.get(vocab[], datetime.datetime(2001,1,1)).timestamp() \ < vocab[]: self.drop_file(vocab[], **kwargs) else: return conn = kwargs.get("conn", self.conn) conn.load_data(graph=getattr(__NSM__.kdr, vocab[]).clean_uri, data=vocab[], datatype=vocab[].split(".")[-1], log_level=logging.WARNING) self.__update_time__(vocab[], **kwargs) log.warning("\n\tvocab: loaded \n\tconn: ", vocab[], conn) self.loaded.append(vocab[])
loads a vocabulary into the defintion triplestore args: vocab_name: the prefix, uri or filename of a vocabulary
381,761
def average_precision(truth, recommend):
    if len(truth) == 0:
        if len(recommend) == 0:
            return 1.
        return 0.
    tp = accum = 0.
    for n in range(recommend.size):
        if recommend[n] in truth:
            tp += 1.
            accum += (tp / (n + 1.))
    return accum / truth.size
Average Precision (AP). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: float: AP.
381,762
def CaptureFrameLocals(self, frame):
    variables = {n: self.CaptureNamedVariable(n, v, 1, self.default_capture_limits)
                 for n, v in six.viewitems(frame.f_locals)}
    nargs = frame.f_code.co_argcount
    if frame.f_code.co_flags & inspect.CO_VARARGS:
        nargs += 1
    if frame.f_code.co_flags & inspect.CO_VARKEYWORDS:
        nargs += 1
    frame_arguments = []
    for argname in frame.f_code.co_varnames[:nargs]:
        if argname in variables:
            frame_arguments.append(variables.pop(argname))
    return (frame_arguments, list(six.viewvalues(variables)))
Captures local variables and arguments of the specified frame. Args: frame: frame to capture locals and arguments. Returns: (arguments, locals) tuple.
381,763
def __json_strnum_to_bignum(json_object): for key in (, , , , , , ): if (key in json_object and isinstance(json_object[key], six.text_type)): try: json_object[key] = int(json_object[key]) except ValueError: pass return json_object
Converts json string numerals to native python bignums.
381,764
def example_lab_to_ipt(): print("=== Simple Example: XYZ->IPT ===") xyz = XYZColor(0.5, 0.5, 0.5, illuminant=) print(xyz) ipt = convert_color(xyz, IPTColor) print(ipt) print("=== End Example ===\n")
This function shows a simple conversion of an XYZ color to an IPT color.
381,765
def render_tile(cells, ti, tj, render, params, metadata, layout, summary): image_size = params["cell_size"] * params["n_tile"] tile = Image.new("RGB", (image_size, image_size), (255,255,255)) keys = cells.keys() for i,key in enumerate(keys): print("cell", i+1, "/", len(keys), end=) cell_image = render(cells[key], params, metadata, layout, summary) ci = key[0] % params["n_tile"] cj = key[1] % params["n_tile"] xmin = ci*params["cell_size"] ymin = cj*params["cell_size"] xmax = (ci+1)*params["cell_size"] ymax = (cj+1)*params["cell_size"] if params.get("scale_density", False): density = len(cells[key]["gi"]) scale = math.log(density)/(math.log(summary["max_density"]) or 1) owidth = xmax - xmin width = int(round(owidth * scale)) if(width < 1): width = 1 offsetL = int(round((owidth - width)/2)) offsetR = owidth - width - offsetL box = [xmin + offsetL, ymin + offsetL, xmax - offsetR, ymax - offsetR] resample = params.get("scale_type", Image.NEAREST) cell_image = cell_image.resize(size=(width,width), resample=resample) else: box = [xmin, ymin, xmax, ymax] tile.paste(cell_image, box) print("\n") return tile
Render each cell in the tile and stitch it into a single image
381,766
def get_bins_by_query(self, bin_query): if self._catalog_session is not None: return self._catalog_session.get_catalogs_by_query(bin_query) query_terms = dict(bin_query._query_terms) collection = JSONClientValidated(, collection=, runtime=self._runtime) result = collection.find(query_terms).sort(, DESCENDING) return objects.BinList(result, runtime=self._runtime)
Gets a list of ``Bins`` matching the given bin query. arg: bin_query (osid.resource.BinQuery): the bin query return: (osid.resource.BinList) - the returned ``BinList`` raise: NullArgument - ``bin_query`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - a ``bin_query`` is not of this service *compliance: mandatory -- This method must be implemented.*
381,767
def flattennd(d, levels=0, key_as_tuple=True, delim=, list_of_dicts=None): if levels < 0: raise ValueError() new_d = {} flattened = flatten(d, True, delim, list_of_dicts=list_of_dicts) if levels == 0: return flattened for key, value in flattened.items(): if key_as_tuple: new_key = key[: - (levels)] else: new_key = delim.join([str(k) for k in key[:-(levels)]]) new_levels = key[-(levels):] if new_key not in new_d: new_d[new_key] = {new_levels: value} else: if new_levels in new_d[new_key]: raise ValueError( "key clash for: {0}; {1}".format(new_key, new_levels)) new_d[new_key][new_levels] = value for nkey, nvalue in new_d.items(): new_d[nkey] = unflatten( nvalue, list_of_dicts=list_of_dicts, deepcopy=False) return new_d
get nested dict as {key:dict,...}, where key is tuple/string of all-n levels of nested keys Parameters ---------- d : dict levels : int the number of levels to leave unflattened key_as_tuple : bool whether keys are list of nested keys or delimited string of nested keys delim : str if key_as_tuple=False, delimiter for keys list_of_dicts: str or None if not None, flatten lists of dicts using this prefix Examples -------- >>> from pprint import pprint >>> d = {1:{2:{3:{'b':'B','c':'C'},4:'D'}}} >>> pprint(flattennd(d,0)) {(1, 2, 3, 'b'): 'B', (1, 2, 3, 'c'): 'C', (1, 2, 4): 'D'} >>> pprint(flattennd(d,1)) {(1, 2): {4: 'D'}, (1, 2, 3): {'b': 'B', 'c': 'C'}} >>> pprint(flattennd(d,2)) {(1,): {2: {4: 'D'}}, (1, 2): {3: {'b': 'B', 'c': 'C'}}} >>> pprint(flattennd(d,3)) {(): {1: {2: {4: 'D'}}}, (1,): {2: {3: {'b': 'B', 'c': 'C'}}}} >>> pprint(flattennd(d,4)) {(): {1: {2: {3: {'b': 'B', 'c': 'C'}, 4: 'D'}}}} >>> pprint(flattennd(d,5)) {(): {1: {2: {3: {'b': 'B', 'c': 'C'}, 4: 'D'}}}} >>> pprint(flattennd(d,1,key_as_tuple=False,delim='.')) {'1.2': {4: 'D'}, '1.2.3': {'b': 'B', 'c': 'C'}} >>> test_dict = {"a":[{"b":[{"c":1, "d": 2}, {"e":3, "f": 4}]}, {"b":[{"c":5, "d": 6}, {"e":7, "f": 8}]}]} >>> pprint(flattennd(test_dict, list_of_dicts="__list__", levels=2)) {('a', '__list__0', 'b'): [{'c': 1, 'd': 2}, {'e': 3, 'f': 4}], ('a', '__list__1', 'b'): [{'c': 5, 'd': 6}, {'e': 7, 'f': 8}]} >>> pprint(flattennd(test_dict, list_of_dicts="__list__", levels=3)) {('a', '__list__0'): {'b': [{'c': 1, 'd': 2}, {'e': 3, 'f': 4}]}, ('a', '__list__1'): {'b': [{'c': 5, 'd': 6}, {'e': 7, 'f': 8}]}}
381,768
def display_notes(self): if self.annot is not None: short_xml_file = short_strings(basename(self.annot.xml_file)) self.idx_annotations.setText(short_xml_file) if self.parent.overview.scene is None: self.parent.overview.update() if not self.annot.raters: self.new_rater() self.idx_rater.setText(self.annot.current_rater) self.display_eventtype() self.update_annotations() self.display_stats() self.epoch_length = self.annot.epoch_length
Display information about scores and raters.
381,769
def set_primary_key_auto(self, table):
    pk = self.get_primary_key(table)
    if not pk:
        unique_col = self.get_unique_column(table)
        if unique_col:
            # An existing column already has unique values: promote it
            self.set_primary_key(table, unique_col)
        else:
            # No naturally unique column: add a new primary key column
            unique_col = self.add_column(table, primary_key=True)
        return unique_col
    else:
        return pk
Analyze a table and set a primary key. Determine the primary key by identifying a column with unique values or creating a new column. :param table: Table to alter :return: Primary Key column
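A self-contained sketch of the same decision logic against sqlite3 may help illustrate it; the PRAGMA columns and index statement are standard SQLite, but the function name and the fallback column name are assumptions.

import sqlite3

def ensure_primary_key(conn, table, candidate):
    cols = conn.execute("PRAGMA table_info({})".format(table)).fetchall()
    pk_cols = [row[1] for row in cols if row[5]]      # row[5] is the pk flag
    if pk_cols:
        return pk_cols[0]
    total, = conn.execute("SELECT COUNT(*) FROM {}".format(table)).fetchone()
    distinct, = conn.execute(
        "SELECT COUNT(DISTINCT {}) FROM {}".format(candidate, table)).fetchone()
    if total == distinct:
        # candidate column is unique: enforce it with a unique index
        conn.execute("CREATE UNIQUE INDEX idx_{0}_{1} ON {0}({1})".format(
            table, candidate))
        return candidate
    # otherwise fall back to adding a fresh surrogate key column
    conn.execute("ALTER TABLE {} ADD COLUMN surrogate_id INTEGER".format(table))
    return "surrogate_id"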
381,770
def copy_folder_content(src, dst):
    for file in os.listdir(src):
        file_path = os.path.join(src, file)
        dst_file_path = os.path.join(dst, file)
        if os.path.isdir(file_path):
            shutil.copytree(file_path, dst_file_path)
        else:
            shutil.copyfile(file_path, dst_file_path)
Copy all content in src directory to dst directory. The src and dst must exist.
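A quick usage sketch; both directories must already exist, which is the main practical difference from shutil.copytree.

import os, tempfile

src = tempfile.mkdtemp()
dst = tempfile.mkdtemp()
with open(os.path.join(src, 'a.txt'), 'w') as fh:
    fh.write('hello')
os.mkdir(os.path.join(src, 'sub'))

copy_folder_content(src, dst)
print(sorted(os.listdir(dst)))   # ['a.txt', 'sub']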
381,771
def cdsthreads(self):
    # Spawn one daemon worker thread per CPU to process samples from the queue
    for i in range(self.cpus):
        threads = Thread(target=self.cds, args=())
        threads.setDaemon(True)
        threads.start()
    for sample in self.metadata.samples:
        sample[self.analysistype].corepresence = dict()
        self.cdsqueue.put(sample)
    self.cdsqueue.join()
Determines which core genes from a pre-calculated database are present in each strain
381,772
def get_email_context(self, activation_key):
    # The key names and scheme literals were lost in extraction and are
    # reconstructed from context.
    scheme = 'https' if self.request.is_secure() else 'http'
    return {
        'scheme': scheme,
        'activation_key': activation_key,
        'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
        'site': get_current_site(self.request),
    }
Build the template context used for the activation email.
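A hedged sketch of how such a context is typically consumed when sending the activation email; the template paths are assumptions, not values taken from the snippet above.

from django.template.loader import render_to_string

def build_activation_email(context):
    subject = render_to_string('registration/activation_email_subject.txt',
                               context).strip()
    body = render_to_string('registration/activation_email.txt', context)
    return subject, body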
381,773
def get_middle_point(self):
    num_rows, num_cols = self.lons.shape
    mid_row = num_rows // 2
    depth = 0
    if num_rows & 1 == 1:
        # odd number of rows: the middle row exists
        mid_col = num_cols // 2
        if num_cols & 1 == 1:
            # odd number of columns as well: the exact middle node exists
            depth = self.depths[mid_row, mid_col]
            return Point(self.lons[mid_row, mid_col],
                         self.lats[mid_row, mid_col],
                         depth)
        else:
            # even number of columns: take the two nodes around the middle
            # of the middle row
            lon1, lon2 = self.lons[mid_row, mid_col - 1: mid_col + 1]
            lat1, lat2 = self.lats[mid_row, mid_col - 1: mid_col + 1]
            depth1 = self.depths[mid_row, mid_col - 1]
            depth2 = self.depths[mid_row, mid_col]
    else:
        # even number of rows: take the middle points of the two middle
        # one-row submeshes and average them
        submesh1 = self[mid_row - 1: mid_row]
        submesh2 = self[mid_row: mid_row + 1]
        p1, p2 = submesh1.get_middle_point(), submesh2.get_middle_point()
        lon1, lat1, depth1 = p1.longitude, p1.latitude, p1.depth
        lon2, lat2, depth2 = p2.longitude, p2.latitude, p2.depth
    depth = (depth1 + depth2) / 2.0
    lon, lat = geo_utils.get_middle_point(lon1, lat1, lon2, lat2)
    return Point(lon, lat, depth)
Return the middle point of the mesh. :returns: An instance of :class:`~openquake.hazardlib.geo.point.Point`. The middle point is taken from the middle row and a middle column of the mesh if there is an odd number of both. Otherwise it is the geometric mean point of the two or four middle points.
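A toy worked example of the two-point case: depths are averaged and the midpoint of the two coordinates is taken (here with a plain arithmetic mean instead of the geodesic helper, which is only a reasonable approximation for nearby points away from the antimeridian).

lon1, lat1, depth1 = 21.0, 38.0, 5.0
lon2, lat2, depth2 = 21.2, 38.1, 7.0

lon, lat = (lon1 + lon2) / 2.0, (lat1 + lat2) / 2.0
depth = (depth1 + depth2) / 2.0
print(lon, lat, depth)   # 21.1 38.05 6.0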
381,774
def get_suppressions(relative_filepaths, root, messages):
    paths_to_ignore = set()
    lines_to_ignore = defaultdict(set)
    messages_to_ignore = defaultdict(lambda: defaultdict(set))

    # First, walk the files looking for "noqa"-style suppressions
    for filepath in relative_filepaths:
        abspath = os.path.join(root, filepath)
        try:
            file_contents = encoding.read_py_file(abspath).split('\n')
        except encoding.CouldNotHandleEncoding as err:
            # warning message reconstructed; the original literal was lost
            warnings.warn('{0}: {1}'.format(err.path, err.cause), ImportWarning)
            continue
        ignore_file, ignore_lines = get_noqa_suppressions(file_contents)
        if ignore_file:
            paths_to_ignore.add(filepath)
        lines_to_ignore[filepath] |= ignore_lines

    # Second, pick up pylint's informational messages about its own inline
    # suppressions and translate them to the other tools' codes
    pylint_ignore_files, pylint_ignore_messages = _parse_pylint_informational(messages)
    paths_to_ignore |= pylint_ignore_files
    for filepath, line in pylint_ignore_messages.items():
        for line_number, codes in line.items():
            for code in codes:
                messages_to_ignore[filepath][line_number].add(('pylint', code))
                if code in _PYLINT_EQUIVALENTS:
                    for equivalent in _PYLINT_EQUIVALENTS[code]:
                        messages_to_ignore[filepath][line_number].add(equivalent)

    return paths_to_ignore, lines_to_ignore, messages_to_ignore
Given every message which was emitted by the tools, and the list of files to inspect, create a list of files to ignore, and a map of filepath -> line-number -> codes to ignore
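A toy illustration of the noqa half of this machinery (not the tool's real implementation): collect the line numbers that carry a "# noqa" marker.

import re

def noqa_lines(source):
    return {i for i, line in enumerate(source.splitlines(), start=1)
            if re.search(r'#\s*noqa\b', line, re.IGNORECASE)}

print(noqa_lines("x = 1\ny = undefined  # noqa\n"))   # {2}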
381,775
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    # NOTE: every string literal in this function was lost in extraction;
    # the values below are reconstructions from context and may not match
    # the library's exact query template or log messages.
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter

    response_jsons_list = []
    response_jsons = []

    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)

    # Subdivide the bounding box so no single Overpass request exceeds
    # max_query_area_size
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()

    for poly in geometry:
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]{filters}' \
                         '({lat_min:.8f},{lng_max:.8f},{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                         timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass API '
        'in {:,} request(s) and {:,.2f} seconds'.format(
            len(geometry), time.time()-start_time))

    # Flatten the per-request element lists into a single list
    for json in response_jsons_list:
        try:
            response_jsons.extend(json['elements'])
        except KeyError:
            pass

    # De-duplicate nodes and ways that appear in more than one sub-query
    start_time = time.time()
    record_count = len(response_jsons)
    if record_count == 0:
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
        if record_count - len(response_jsons) > 0:
            log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
                record_count - len(response_jsons), time.time() - start_time))

    return {'elements': response_jsons}
Download OSM ways and nodes within a bounding box from the Overpass API. Parameters ---------- lat_min : float southern latitude of bounding box lng_min : float eastern longitude of bounding box lat_max : float northern latitude of bounding box lng_max : float western longitude of bounding box network_type : string Specify the network type where value of 'walk' includes roadways where pedestrians are allowed and pedestrian pathways and 'drive' includes driveable roadways. timeout : int the timeout interval for requests and to pass to Overpass API memory : int server memory allocation size for the query, in bytes. If none, server will use its default allocation size max_query_area_size : float max area for any part of the geometry, in the units the geometry is in: any polygon bigger will get divided up for multiple queries to Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in area, if units are meters)) custom_osm_filter : string, optional specify custom arguments for the way["highway"] query to OSM. Must follow Overpass API schema. For example to request highway ways that are service roads use: '["highway"="service"]' Returns ------- response_json : dict
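A hypothetical call for a small bounding box (coordinates are illustrative only); the returned dict mirrors the Overpass 'elements' structure.

data = osm_net_download(lat_min=37.79, lng_min=-122.26,
                        lat_max=37.81, lng_max=-122.28,
                        network_type='walk', timeout=180)
nodes = [e for e in data['elements'] if e['type'] == 'node']
ways = [e for e in data['elements'] if e['type'] == 'way']
print(len(nodes), len(ways))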
381,776
def compute_before_after(self):
    if not self.parsed:
        self._parse()
    list_from = self.input_lines
    list_to = []
    last = 0
    for e in self.docs_list:
        # element keys reconstructed: 'location' is a (start, end) line
        # range and 'docs' holds the proposed docstring
        start, end = e['location']
        if start <= 0:
            start, end = -start, -end
            list_to.extend(list_from[last:start + 1])
        else:
            list_to.extend(list_from[last:start])
        docs = e['docs'].get_raw_docs()
        list_docs = [l + '\n' for l in docs.splitlines()]
        list_to.extend(list_docs)
        last = end + 1
    if last < len(list_from):
        list_to.extend(list_from[last:])
    return list_from, list_to
Compute the list of lines before and after the proposed docstring changes. :return: tuple of before,after where each is a list of lines of python code.
381,777
def _get_node_column(cls, node, column_name):
    if not hasattr(node, 'columns'):
        node.set('columns', {})

    if column_name in node.columns:
        column = node.columns[column_name]
    else:
        # attribute names reconstructed from context
        column = {'name': column_name, 'description': ''}
        node.columns[column_name] = column

    return column
Given a ParsedNode, add some fields that might be missing. Return a reference to the dict that refers to the given column, creating it if it doesn't yet exist.
381,778
def get_active_project_path(self):
    active_project_path = None
    if self.current_active_project:
        active_project_path = self.current_active_project.root_path
    return active_project_path
Get path of the active project
381,779
def child_allocation(self):
    total = Decimal(0)
    if self.classes:
        for child in self.classes:
            total += child.child_allocation
    else:
        total = self.allocation
    return total
The sum of all child asset classes' allocations
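Minimal sketch of the same recursive sum with plain classes; the field names mirror the snippet above, but the class name is an assumption.

from decimal import Decimal

class AssetClass:
    def __init__(self, allocation=Decimal(0), classes=None):
        self.allocation = allocation
        self.classes = classes or []

    @property
    def child_allocation(self):
        if self.classes:
            return sum((c.child_allocation for c in self.classes), Decimal(0))
        return self.allocation

root = AssetClass(classes=[AssetClass(Decimal('60')), AssetClass(Decimal('40'))])
print(root.child_allocation)   # 100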
381,780
def get_index_by_alias(self, alias):
    try:
        info = self.es.indices.get_alias(name=alias)
        return next(iter(info.keys()))
    except elasticsearch.exceptions.NotFoundError:
        return alias
Get index name for given alias. If there is no alias assume it's an index. :param alias: alias name
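A standalone variant of the same lookup for a local Elasticsearch instance (the URL and alias name are illustrative):

import elasticsearch
from elasticsearch import Elasticsearch

def index_for(es, alias):
    try:
        return next(iter(es.indices.get_alias(name=alias)))
    except elasticsearch.exceptions.NotFoundError:
        # no alias by that name: assume it is already an index name
        return alias

# es = Elasticsearch('http://localhost:9200')
# print(index_for(es, 'items'))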
381,781
def strings(self, otherchar=None):
    # The outer loop was lost in extraction; this reconstruction assumes the
    # candidate strings come from the equivalent FSM (the method name
    # `to_fsm()` is an assumption).
    for string in self.to_fsm().strings():
        string = [
            otherchar if char == fsm.anything_else else char
            for char in string
        ]
        yield "".join(string)
Each time next() is called on this iterator, a new string is returned which the present lego piece can match. StopIteration is raised once all such strings have been returned, although a regex with a * in it may match infinitely many strings.
381,782
def get_element_name(parent, ns):
    # The XPath fragment and tag name were lost in extraction; 'SHORT-NAME'
    # is an assumption based on the "short name" docstring.
    name = parent.find('./' + ns + 'SHORT-NAME')
    if name is not None and name.text is not None:
        return name.text
    return ""
Get element short name.
381,783
def get_config_groups(self, groups_conf, groups_pillar_name):
    # Most of this function's body was lost in extraction; only the logging
    # call and the return survived.  Minimal sketch: start from the groups
    # defined in the config (merging in pillar data is omitted here).
    ret_groups = dict(groups_conf) if groups_conf else {}
    log.debug('Got the groups: %s', ret_groups)
    return ret_groups
get info from groups in config, and from the named pillar todo: add specification for the minion to use to recover pillar
381,784
def _clean(self, rmConnetions=True, lockNonExternal=True):
    if self._interfaces:
        for i in self._interfaces:
            i._clean(rmConnetions=rmConnetions,
                     lockNonExternal=lockNonExternal)
    else:
        self._sigInside = self._sig
        del self._sig

    if lockNonExternal and not self._isExtern:
        self._isAccessible = False
Remove all signals from this interface (used after unit is synthesized and its parent is connecting its interface to this unit)
381,785
def getMetricDetails(self, metricLabel):
    try:
        metricIndex = self.__metricLabels.index(metricLabel)
    except ValueError:
        # list.index raises ValueError (not IndexError) when the label is
        # missing, so catch that and return None
        return None
    return self.__metrics[metricIndex].getMetric()
Gets detailed info about a given metric, in addition to its value. This may include any statistics or auxiliary data that are computed for a given metric. :param metricLabel: (string) label of the given metric (see :class:`~nupic.frameworks.opf.metrics.MetricSpec`) :returns: (dict) of metric information, as returned by :meth:`nupic.frameworks.opf.metrics.MetricsIface.getMetric`.
381,786
def _parse_group(self, group_name, group):
    if type(group) == dict:
        # dict keys reconstructed from context ('hosts', 'vars', 'groups',
        # 'hostvars')
        hostnames_in_group = set()
        for hostname in group.get('hosts', []):
            self._get_host(hostname)['groups'].add(group_name)
            hostnames_in_group.add(hostname)
        for var_key, var_val in group.get('vars', {}).items():
            for hostname in hostnames_in_group:
                self._get_host(hostname)['hostvars'][var_key] = var_val
    elif type(group) == list:
        for hostname in group:
            self._get_host(hostname)['groups'].add(group_name)
    else:
        self.log.warning("Invalid element found in dynamic inventory output: {0}".format(type(group)))
Parse a group definition from a dynamic inventory. These are top-level elements which are not '_meta(data)'.
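The two group shapes this parser accepts, as they would appear in dynamic-inventory JSON (the data is illustrative):

group_as_dict = {
    "hosts": ["web1.example.com", "web2.example.com"],
    "vars": {"http_port": 80},
}
group_as_list = ["db1.example.com", "db2.example.com"]   # bare hostname list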
381,787
def frets_to_NoteContainer(self, fingering):
    res = []
    for (string, fret) in enumerate(fingering):
        if fret is not None:
            res.append(self.get_Note(string, fret))
    return NoteContainer(res)
Convert a list such as returned by find_fret to a NoteContainer.
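Hypothetical usage with mingus' standard guitar tuning; the tuning-lookup call is an assumption, and None in the fingering means the string is not played.

from mingus.extra.tunings import get_tuning

tuning = get_tuning('guitar', 'Standard')
chord = tuning.frets_to_NoteContainer([0, 2, 2, 1, 0, 0])   # open E major shape
print(chord)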
381,788
def _assemble_active_form(self, stmt):
    act_agent = Agent(stmt.agent.name, db_refs=stmt.agent.db_refs)
    act_agent.activity = ActivityCondition(stmt.activity, True)
    activates = stmt.is_active
    relation = get_causal_edge(stmt, activates)
    self._add_nodes_edges(stmt.agent, act_agent, relation, stmt.evidence)
Example: p(HGNC:ELK1, pmod(Ph)) => act(p(HGNC:ELK1), ma(tscript))
381,789
def connectQ2Q(self, fromAddress, toAddress, protocolName, protocolFactory,
               usePrivateCertificate=None, fakeFromDomain=None,
               chooser=None):
    if chooser is None:
        chooser = lambda x: x and [x[0]]

    def onSecureConnection(protocol):
        if fakeFromDomain:
            connectFromAddress = Q2QAddress(fakeFromDomain,
                                            toAddress.resource)
        else:
            connectFromAddress = fromAddress
        return protocol.connect(connectFromAddress, toAddress,
                                protocolName, protocolFactory, chooser)

    def onSecureConnectionFailure(reason):
        protocolFactory.clientConnectionFailed(None, reason)
        return reason

    # `port` is not defined in this snippet; it presumably refers to a
    # module-level default Q2Q port number in the original source.
    return self.getSecureConnection(
        fromAddress, toAddress, port, usePrivateCertificate).addCallback(
        onSecureConnection).addErrback(onSecureConnectionFailure)
Connect a named protocol factory from a resource@domain to a resource@domain.

This is analogous to something like connectTCP, in that it creates a connection-oriented transport for each connection, except instead of specifying your credentials with an application-level (username, password) and your endpoint with a framework-level (host, port), you specify both at once, in the form of your ID (user@my-domain), their ID (user@their-domain) and the desired protocol. This provides several useful features:

- All connections are automatically authenticated via SSL certificates, although not authorized for any particular activities, based on their transport interface rather than having to have protocol logic to authenticate.
- User-meaningful protocol nicknames are attached to implementations of protocol logic, rather than arbitrary numbering.
- Endpoints can specify a variety of transport mechanisms transparently to the application: for example, you might be connecting to an authorized user-agent on the user's server or to the user directly using a NAT-circumvention handshake. All the application has to know is that it wants to establish a TCP-like connection.

XXX Really, really should return an IConnector implementor for symmetry with other connection-oriented transport APIs, but currently does not.

The 'resource' parameters are so named (rather than beginning with 'user', for example) because they are sometimes used to refer to abstract entities or roles, such as 'payments', or groups of users (communities), but generally the convention is to document them as individual users for simplicity's sake.

The parameters are described as if Alice <[email protected]> were trying to connect to Bob <[email protected]> to transfer a file over HTTP.

@param fromAddress: The address of the connecting user: in this case, Q2QAddress("divmod.com", "alice")
@param toAddress: The address of the user connected to: in this case, Q2QAddress("notdivmod.com", "bob")
@param protocolName: The name of the protocol, by convention observing similar names to http://www.iana.org/assignments/port-numbers when appropriate. In this case, 'http'.
@param protocolFactory: An implementation of L{twisted.internet.interfaces.IProtocolFactory}
@param usePrivateCertificate: Use a different private certificate for initiating the 'secure' call. Mostly for testing different invalid certificate attacks.
@param fakeFromDomain: This domain name will be used for an argument to the 'connect' command, but NOT as an argument to the SECURE command. This is to test a particular kind of invalid cert attack.
@param chooser: a function taking a list of connection-describing objects and returning another list. Those items in the remaining list will be attempted as connections and buildProtocol called on the client factory. May return a Deferred.
@default chooser: C{lambda x: x and [x[0]]}
@return:
381,790
def get(self): return tuple([(x.name(), x.get()) for x in self._generators])
Retrieve the most recent value generated
381,791
def report(self, event, metadata=None, block=None):
    if self._sender.is_terminated:
        self._notify(logging.ERROR, consts.LOG_MSG_REPORT_AFTER_TERMINATION)
        return False

    if isinstance(event, (dict,) + py2to3.basestring):
        formatted_event = self._format_event(event, metadata)
        should_block = block if block is not None else self.is_blocking
        return self._sender.enqueue_event(formatted_event, should_block)
    else:
        error_message = (consts.LOG_MSG_BAD_EVENT % (type(event), event))
        self._notify(logging.ERROR, error_message)
        return False
Reports an event to Alooma by formatting it properly and placing it in the buffer to be sent by the Sender instance :param event: A dict / string representing an event :param metadata: (Optional) A dict with extra metadata to be attached to the event :param block: (Optional) If True, the function will block the thread until the event buffer has space for the event. If False, reported events are discarded if the queue is full. Defaults to None, which uses the global `block` parameter given in the `init`. :return: True if the event was successfully enqueued, else False
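A small usage sketch; `reporter` stands for an already-constructed instance of this SDK class, and the event and metadata keys are illustrative.

import logging

def send_login_event(reporter, user_id):
    # block=False drops the event instead of waiting when the buffer is full
    event = {'user_id': user_id, 'action': 'login'}
    ok = reporter.report(event, metadata={'source': 'web'}, block=False)
    if not ok:
        logging.warning('event was not enqueued')
    return ok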
381,792
def network_security_group_delete(name, resource_group, **kwargs):
    result = False
    # The __utils__ keys and client name were lost in extraction; the
    # conventional salt azurearm values are used here.
    netconn = __utils__['azurearm.get_client']('network', **kwargs)
    try:
        secgroup = netconn.network_security_groups.delete(
            resource_group_name=resource_group,
            network_security_group_name=name
        )
        secgroup.wait()
        result = True
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)

    return result
.. versionadded:: 2019.2.0 Delete a network security group within a resource group. :param name: The name of the network security group to delete. :param resource_group: The resource group name assigned to the network security group. CLI Example: .. code-block:: bash salt-call azurearm_network.network_security_group_delete testnsg testgroup
381,793
def _delete(self, state=None):
    mutation_val = data_v2_pb2.Mutation.DeleteFromRow()
    mutation_pb = data_v2_pb2.Mutation(delete_from_row=mutation_val)
    self._get_mutations(state).append(mutation_pb)
Helper for :meth:`delete` Adds a delete mutation (for the entire row) to the accumulated mutations. ``state`` is unused by :class:`DirectRow` but is used by subclasses. :type state: bool :param state: (Optional) The state that is passed along to :meth:`_get_mutations`.
381,794
def p(self, value, event):
    assert isinstance(value, bool)
    ptrue = self.cpt[event_values(event, self.parents)]
    return if_(value, ptrue, 1 - ptrue)
Return the conditional probability P(X=value | parents=parent_values), where parent_values are the values of parents in event. (event must assign each parent a value.) >>> bn = BayesNode('X', 'Burglary', {T: 0.2, F: 0.625}) >>> bn.p(False, {'Burglary': False, 'Earthquake': True}) 0.375
381,795
def configure(self, options, conf):
    super(LeakDetectorPlugin, self).configure(options, conf)
    if options.leak_detector_level:
        self.reporting_level = int(options.leak_detector_level)
    self.report_delta = options.leak_detector_report_delta
    self.patch_mock = options.leak_detector_patch_mock
    self.ignore_patterns = options.leak_detector_ignore_patterns
    self.save_traceback = options.leak_detector_save_traceback
    # The attribute name checked here was lost in extraction;
    # 'multiprocess_workers' (nose's multiprocess plugin option) is an
    # assumption.
    self.multiprocessing_enabled = bool(
        getattr(options, 'multiprocess_workers', False))
Configure plugin.
381,796
def lock(self):
    self.update()
    self.execute_operations(False)
    self._lock = True

    return self
Prepare the installer for locking only.
381,797
def downgrade(): op.drop_column(, ) op.drop_column(, ) op.drop_column(, ) op.drop_column(, ) op.drop_column(, ) op.drop_column(, )
Downgrade database.
381,798
def solve_gamlasso(self, lam):
    # Compute the POSE reweighting for each edge in the trail decomposition
    weights = lam / (1 + self.gamma * np.abs(self.beta[self.trails[::2]]
                                             - self.beta[self.trails[1::2]]))
    # The argument originally passed to solve_gfl was lost in extraction;
    # passing the edge weights is an assumption.
    s = self.solve_gfl(weights)
    self.steps.append(s)
    return self.beta
Solves the Graph-fused gamma lasso via POSE (Taddy, 2013)
381,799
def compute_pixels(orb, sgeom, times, rpy=(0.0, 0.0, 0.0)):
    if isinstance(orb, (list, tuple)):
        tle1, tle2 = orb
        orb = Orbital("mysatellite", line1=tle1, line2=tle2)

    # get position and velocity for each observation time
    pos, vel = orb.get_position(times, normalize=False)

    # pointing vectors for each pixel, rotated by roll/pitch/yaw
    vectors = sgeom.vectors(pos, vel, *rpy)

    # intersect the viewing rays with the WGS84 ellipsoid
    centre = -pos
    a__ = 6378.137        # semi-major axis, km
    b__ = 6356.752314245  # semi-minor axis, km
    radius = np.array([[1 / a__, 1 / a__, 1 / b__]]).T
    shape = vectors.shape

    xr_ = vectors.reshape([3, -1]) * radius
    cr_ = centre.reshape([3, -1]) * radius
    ldotc = np.einsum("ij,ij->j", xr_, cr_)
    lsq = np.einsum("ij,ij->j", xr_, xr_)
    csq = np.einsum("ij,ij->j", cr_, cr_)

    d1_ = (ldotc - np.sqrt(ldotc ** 2 - csq * lsq + lsq)) / lsq

    return vectors * d1_.reshape(shape[1:]) - centre
Compute cartesian coordinates of the pixels in instrument scan.