Dataset columns: docstring (string, lengths 52–499), function (string, lengths 67–35.2k), __index_level_0__ (int64, values 52.6k–1.16M).
Initialize the AppFuture.

Args:
    KWargs:
        - tid (int) : Task id; any unique identifier. Currently an int.
        - stdout (str) : Stdout file of the app. Default: None
        - stderr (str) : Stderr file of the app. Default: None

def __init__(self, tid=None, stdout=None, stderr=None):
    self._tid = tid
    super().__init__()
    self.parent = None
    self._update_lock = threading.Lock()
    self._outputs = []
    self._stdout = stdout
    self._stderr = stderr
261,131
Callback from executor future to update the parent.

Args:
    - parent_fu (Future): Future returned by the executor along with callback

Returns:
    - None

Updates the super() with the result() or exception()

def parent_callback(self, parent_fu):
    if parent_fu.done() is True:
        e = parent_fu._exception
        if e:
            super().set_exception(e)
        else:
            super().set_result(self.file_obj)
    return
261,136
Construct the DataFuture object. If the file_obj is a string, convert it to a File.

Args:
    - fut (AppFuture) : AppFuture that this DataFuture will track
    - file_obj (string/File obj) : Something representing file(s)

Kwargs:
    - tid (task_id) : Task id that this DataFuture tracks

def __init__(self, fut, file_obj, tid=None):
    super().__init__()
    self._tid = tid
    if isinstance(file_obj, str):
        self.file_obj = File(file_obj)
    elif isinstance(file_obj, File):
        self.file_obj = file_obj
    else:
        raise ValueError("DataFuture must be initialized with a str or File")
    self.parent = fut
    self._exception = None

    if fut is None:
        logger.debug("Setting result to filepath since no future was passed")
        self.set_result(self.file_obj)
    else:
        if isinstance(fut, Future):
            self.parent.add_done_callback(self.parent_callback)
        else:
            raise NotFutureError("DataFuture can be created only with a FunctionFuture or None")

    logger.debug("Creating DataFuture with parent: %s", self.parent)
    logger.debug("Filepath: %s", self.filepath)
261,137
Handle the call to a Bash app.

Args:
    - Arbitrary

Kwargs:
    - Arbitrary

Returns:
    If outputs=[...] was a kwarg then:
        App_fut, [Data_Futures...]
    else:
        App_fut

def __call__(self, *args, **kwargs):
    # Update kwargs in the app definition with ones passed in at calltime
    self.kwargs.update(kwargs)

    if self.data_flow_kernel is None:
        dfk = DataFlowKernelLoader.dfk()
    else:
        dfk = self.data_flow_kernel

    app_fut = dfk.submit(wrap_error(remote_side_bash_executor), self.func, *args,
                         executors=self.executors,
                         fn_hash=self.func_hash,
                         cache=self.cache,
                         **self.kwargs)

    out_futs = [DataFuture(app_fut, o, tid=app_fut.tid)
                for o in kwargs.get('outputs', [])]
    app_fut._outputs = out_futs

    return app_fut
261,143
Get the status of a list of jobs identified by the job identifiers returned from the submit request.

Args:
    - job_ids (list) : A list of job identifiers

Returns:
    - A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',
      'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.

Raises:
    - ExecutionProviderException or its subclasses

def status(self, job_ids):
    statuses = []
    for job_id in job_ids:
        instance = self.client.instances().get(instance=job_id, project=self.project_id, zone=self.zone).execute()
        self.resources[job_id]['status'] = translate_table[instance['status']]
        statuses.append(translate_table[instance['status']])
    return statuses
261,146
Cancel the resources identified by the job_ids provided by the user.

Args:
    - job_ids (list): A list of job identifiers

Returns:
    - A list of booleans, one per job_id, indicating whether the cancellation succeeded.

Raises:
    - ExecutionProviderException or its subclasses

def cancel(self, job_ids):
    statuses = []
    for job_id in job_ids:
        try:
            self.delete_instance(job_id)
            statuses.append(True)
            self.provisioned_blocks -= 1
        except Exception:
            statuses.append(False)
    return statuses
261,147
Construct a File object from a url string.

Args:
    - url (string) : url string of the file e.g.
        - 'input.txt'
        - 'file:///scratch/proj101/input.txt'
        - 'globus://go#ep1/~/data/input.txt'
        - 'globus://ddb59aef-6d04-11e5-ba46-22000b92c6ec/home/johndoe/data/input.txt'

def __init__(self, url: str):
    self.url = url
    parsed_url = urlparse(self.url)
    self.scheme = parsed_url.scheme if parsed_url.scheme else 'file'
    self.netloc = parsed_url.netloc
    self.path = parsed_url.path
    self.filename = os.path.basename(self.path)
261,157
Return the resolved filepath on the side from which it is called.

The appropriate filepath is returned both when called from within an app
running remotely and from regular Python on the client side.

Args:
    - self

Returns:
    - filepath (string)

def filepath(self):
    if hasattr(self, 'local_path'):
        return self.local_path

    if self.scheme in ['ftp', 'http', 'https', 'globus']:
        return self.filename
    elif self.scheme in ['file']:
        return self.path
    else:
        raise Exception('Cannot return filepath for unknown scheme {}'.format(self.scheme))
261,159
If the source file's dirpath is the same as dest_dir, a copy is not necessary and nothing is done. Otherwise a copy is made.

Args:
    - source (string) : Path to the source file
    - dest_dir (string) : Path to the directory to which the file is to be copied

Returns:
    - destination_path (string) : Absolute path of the destination file

Raises:
    - FileCopyException : If the file copy failed.

def push_file(self, source, dest_dir):
    local_dest = dest_dir + '/' + os.path.basename(source)

    # Only attempt to copy if the target dir and source dir are different
    if os.path.dirname(source) != dest_dir:
        try:
            shutil.copyfile(source, local_dest)
            os.chmod(local_dest, 0o777)
        except OSError as e:
            raise FileCopyException(e, self.hostname)

    return local_dest
261,165
Synchronously execute a commandline string on the shell.

Args:
    - cmd (string) : Commandline string to execute
    - walltime (int) : walltime in seconds

Kwargs:
    - envs (dict) : Dictionary of env variables

Returns:
    - retcode : Return code from the execution, -1 on fail
    - stdout : stdout string
    - stderr : stderr string

Raises:
    None.

def execute_wait(self, cmd, walltime=2, envs={}):
    # Execute the command
    stdin, stdout, stderr = self.ssh_client.exec_command(
        self.prepend_envs(cmd, envs), bufsize=-1, timeout=walltime
    )
    # Block on exit status from the command
    exit_status = stdout.channel.recv_exit_status()
    return exit_status, stdout.read().decode("utf-8"), stderr.read().decode("utf-8")
261,193
Execute asynchronously, without waiting for the exit code.

Args:
    - cmd (string): Commandline string to be executed on the remote side
    - walltime (int): timeout to exec_command

KWargs:
    - envs (dict): A dictionary of env variables

Returns:
    - None, stdout (readable stream), stderr (readable stream)

Raises:
    - ChannelExecFailed (reason)

def execute_no_wait(self, cmd, walltime=2, envs={}):
    # Execute the command
    stdin, stdout, stderr = self.ssh_client.exec_command(
        self.prepend_envs(cmd, envs), bufsize=-1, timeout=walltime
    )
    return None, stdout, stderr
261,194
Transport a local file to a directory on a remote machine.

Args:
    - local_source (string): Path
    - remote_dir (string): Remote path

Returns:
    - str: Path to copied file on remote machine

Raises:
    - BadScriptPath : if script path on the remote side is bad
    - BadPermsScriptPath : You do not have perms to make the channel script dir
    - FileCopyException : FileCopy failed.

def push_file(self, local_source, remote_dir):
    remote_dest = remote_dir + '/' + os.path.basename(local_source)

    try:
        self.makedirs(remote_dir, exist_ok=True)
    except IOError as e:
        logger.exception("Pushing {0} to {1} failed".format(local_source, remote_dir))
        if e.errno == 2:
            raise BadScriptPath(e, self.hostname)
        elif e.errno == 13:
            raise BadPermsScriptPath(e, self.hostname)
        else:
            logger.exception("File push failed due to SFTP client failure")
            raise FileCopyException(e, self.hostname)

    try:
        self.sftp_client.put(local_source, remote_dest, confirm=True)
        # Set perm because some systems require the script to be executable
        self.sftp_client.chmod(remote_dest, 0o777)
    except Exception as e:
        logger.exception("File push from local source {} to remote destination {} failed".format(
            local_source, remote_dest))
        raise FileCopyException(e, self.hostname)

    return remote_dest
261,195
Transport a file on the remote side to a local directory.

Args:
    - remote_source (string): remote_source
    - local_dir (string): Local directory to copy to

Returns:
    - str: Local path to file

Raises:
    - FileExists : Name collision at local directory.
    - FileCopyException : FileCopy failed.

def pull_file(self, remote_source, local_dir):
    local_dest = local_dir + '/' + os.path.basename(remote_source)

    try:
        os.makedirs(local_dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            # NOTE: the original logged an undefined name (script_dir); local_dir is what was created
            logger.exception("Failed to create local_dir: {0}".format(local_dir))
            raise BadScriptPath(e, self.hostname)

    # Easier to check this than to waste time trying to pull file and
    # realize there's a problem.
    if os.path.exists(local_dest):
        logger.exception("Remote file copy will overwrite a local file: {0}".format(local_dest))
        raise FileExists(None, self.hostname, filename=local_dest)

    try:
        self.sftp_client.get(remote_source, local_dest)
    except Exception as e:
        logger.exception("File pull failed")
        raise FileCopyException(e, self.hostname)

    return local_dest
261,196
Initialize the flowcontrol object. We start the timer thread here.

Args:
    - dfk (DataFlowKernel) : DFK object to track parsl progress

KWargs:
    - threshold (int) : Tasks after which the callback is triggered
    - interval (int) : seconds after which timer expires

def __init__(self, dfk, *args, threshold=20, interval=5):
    self.dfk = dfk
    self.threshold = threshold
    self.interval = interval
    self.cb_args = args
    self.strategy = Strategy(dfk)
    self.callback = self.strategy.strategize
    self._handle = None
    self._event_count = 0
    self._event_buffer = []
    self._wake_up_time = time.time() + 1
    self._kill_event = threading.Event()
    self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))
    self._thread.daemon = True
    self._thread.start()
261,204
Internal. This is the function that the thread will execute.
It waits on an event so that the thread can make a quick exit when close() is called.

Args:
    - kill_event (threading.Event) : Event to wait on

def _wake_up_timer(self, kill_event):
    while True:
        prev = self._wake_up_time

        # Waiting for the event returns True only when the event
        # is set, usually by the parent thread
        time_to_die = kill_event.wait(float(max(prev - time.time(), 0)))

        if time_to_die:
            return

        if prev == self._wake_up_time:
            self.make_callback(kind='timer')
        else:
            print("Sleeping a bit more")
261,205
Initialize the timer object. We start the timer thread here.

Args:
    - callback (callable) : Callback invoked each time the timer expires

KWargs:
    - interval (int) : seconds after which timer expires

def __init__(self, callback, *args, interval=5):
    self.interval = interval
    self.cb_args = args
    self.callback = callback
    self._wake_up_time = time.time() + 1
    self._kill_event = threading.Event()
    self._thread = threading.Thread(target=self._wake_up_timer, args=(self._kill_event,))
    self._thread.daemon = True
    self._thread.start()
261,208
Submit a job.

Args:
    - cmd_string :(String) - Name of the container to initiate
    - blocksize :(float) - Number of replicas
    - tasks_per_node (int) : command invocations to be launched per node

Kwargs:
    - job_name (String): Name for job, must be unique

Returns:
    - None: At capacity, cannot provision more
    - job_id: (string) Identifier for the job

def submit(self, cmd_string, blocksize, tasks_per_node, job_name="parsl"):
    if not self.resources:
        cur_timestamp = str(time.time() * 1000).split(".")[0]
        job_name = "{0}-{1}".format(job_name, cur_timestamp)

        if not self.deployment_name:
            deployment_name = '{}-deployment'.format(job_name)
        else:
            deployment_name = '{}-{}-deployment'.format(self.deployment_name, cur_timestamp)

        formatted_cmd = template_string.format(command=cmd_string, worker_init=self.worker_init)

        self.deployment_obj = self._create_deployment_object(job_name,
                                                             self.image,
                                                             deployment_name,
                                                             cmd_string=formatted_cmd,
                                                             replicas=self.init_blocks,
                                                             volumes=self.persistent_volumes)

        logger.debug("Deployment name :{}".format(deployment_name))
        self._create_deployment(self.deployment_obj)
        self.resources[deployment_name] = {'status': 'RUNNING', 'pods': self.init_blocks}

    return deployment_name
261,222
Create a kubernetes deployment for the job.

Args:
    - job_name (string) : Name of the job and deployment
    - job_image (string) : Docker image to launch

KWargs:
    - port (integer) : Container port
    - replicas : Number of replica containers to maintain

Returns:
    - The deployment object to launch

def _create_deployment_object(self, job_name, job_image,
                              deployment_name, port=80,
                              replicas=1,
                              cmd_string=None,
                              engine_json_file='~/.ipython/profile_default/security/ipcontroller-engine.json',
                              engine_dir='.',
                              volumes=[]):
    # sorry, quick hack that doesn't pass this stuff through to test it works.
    # TODO it also doesn't only add what is set :(
    security_context = None
    if self.user_id and self.group_id:
        security_context = client.V1SecurityContext(run_as_group=self.group_id,
                                                    run_as_user=self.user_id,
                                                    run_as_non_root=self.run_as_non_root)

    # Create the environment variables and command to initiate IPP
    environment_vars = client.V1EnvVar(name="TEST", value="SOME DATA")

    launch_args = ["-c", "{0}; /app/deploy.sh;".format(cmd_string)]

    volume_mounts = []
    # Create mount paths for the volumes
    for volume in volumes:
        volume_mounts.append(client.V1VolumeMount(mount_path=volume[1],
                                                  name=volume[0]))

    # Configure the Pod template container
    container = None
    if security_context:
        container = client.V1Container(
            name=job_name,
            image=job_image,
            ports=[client.V1ContainerPort(container_port=port)],
            volume_mounts=volume_mounts,
            command=['/bin/bash'],
            args=launch_args,
            env=[environment_vars],
            security_context=security_context)
    else:
        container = client.V1Container(
            name=job_name,
            image=job_image,
            ports=[client.V1ContainerPort(container_port=port)],
            volume_mounts=volume_mounts,
            command=['/bin/bash'],
            args=launch_args,
            env=[environment_vars])

    # Create a secret to enable pulling images from secure repositories
    secret = None
    if self.secret:
        secret = client.V1LocalObjectReference(name=self.secret)

    # Create list of volumes from (pvc, mount) tuples
    volume_defs = []
    for volume in volumes:
        volume_defs.append(client.V1Volume(name=volume[0],
                                           persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                                               claim_name=volume[0])))

    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": job_name}),
        spec=client.V1PodSpec(containers=[container],
                              image_pull_secrets=[secret],
                              volumes=volume_defs))

    # Create the specification of deployment
    spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas,
                                                  template=template)

    # Instantiate the deployment object
    deployment = client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=deployment_name),
        spec=spec)

    return deployment
261,223
Create a custom scheme.

Args:
    colors (list of str): List of hex values for styling data
    bins (int, optional): Number of bins to style by. If not given, the
        number of colors will be used.
    bin_method (str, optional): Classification method. One of the values
        in :obj:`BinMethod`. Defaults to `quantiles`, which only works
        with quantitative data.

def custom(colors, bins=None, bin_method=BinMethod.quantiles):
    return {
        'colors': colors,
        'bins': bins if bins is not None else len(colors),
        'bin_method': bin_method,
    }
261,982
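A minimal usage sketch of custom() as defined above; the hex colors are arbitrary and BinMethod.quantiles is assumed to resolve to the string 'quantiles':

scheme = custom(['#f3e79b', '#00718b'])  # hypothetical colors
# -> {'colors': ['#f3e79b', '#00718b'], 'bins': 2, 'bin_method': 'quantiles'}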
Delete a table in user's CARTO account.

Args:
    table_name (str): Name of table to delete

Returns:
    bool: `True` if table is removed

def delete(self, table_name):
    dataset = Dataset(self, table_name)
    deleted = dataset.delete()
    if deleted:
        return deleted

    # NOTE: the original message string was lost in extraction; this text is a reconstruction
    raise CartoException('The table `{}` does not exist'.format(table_name))
261,991
Return the bounds of all data layers involved in a cartoframes map.

Args:
    layers (list): List of cartoframes layers. See `cartoframes.layers` for all types.

Returns:
    dict: Dictionary of northern, southern, eastern, and western bounds of the
    superset of data layers. Keys are `north`, `south`, `east`, and `west`.
    Units are in WGS84.

def _get_bounds(self, layers):
    extent_query = ('SELECT ST_EXTENT(the_geom) AS the_geom '
                    'FROM ({query}) AS t{idx}\n')
    union_query = 'UNION ALL\n'.join(
        [extent_query.format(query=layer.orig_query, idx=idx)
         for idx, layer in enumerate(layers)
         if not layer.is_basemap])

    extent = self.sql_client.send(
        utils.minify_sql((
            'SELECT',
            '    ST_XMIN(ext) AS west,',
            '    ST_YMIN(ext) AS south,',
            '    ST_XMAX(ext) AS east,',
            '    ST_YMAX(ext) AS north',
            'FROM (',
            '    SELECT ST_Extent(the_geom) AS ext',
            '    FROM ({union_query}) AS _wrap1',
            ') AS _wrap2',
        )).format(union_query=union_query),
        do_post=False)

    return extent['rows'][0]
262,003
Saves current user credentials to user directory.

Args:
    config_loc (str, optional): Location where credentials are to be stored.
        If no argument is provided, they are saved in the default location.

Example:

    from cartoframes import Credentials
    creds = Credentials(username='eschbacher', key='abcdefg')
    creds.save()  # save to default location

def save(self, config_loc=None):
    if not os.path.exists(_USER_CONFIG_DIR):
        os.makedirs(_USER_CONFIG_DIR)
    with open(_DEFAULT_PATH, 'w') as f:
        json.dump({'key': self._key, 'base_url': self._base_url,
                   'username': self._username}, f)
262,069
Orthogonal projection object; creates a projection object that can be used in a Camera.

Args:
    origin (str): 'center' or 'corner'
    coords (str): 'relative' or 'absolute'

Returns:
    OrthoProjection instance

def __init__(self, origin='center', coords='relative', **kwargs):
    self._origin = origin
    self._coords = coords
    super(OrthoProjection, self).__init__(**kwargs)
262,088
Returns a camera object.

Args:
    projection (obj): the projection type for the camera. It can be an
        instance of either OrthoProjection or PerspectiveProjection.
    orientation0 (tuple): the camera's initial orientation vector.

Returns:
    Camera instance

def __init__(self, projection=None, orientation0=(0, 0, -1), **kwargs):
    kwargs['orientation0'] = orientation0
    super(Camera, self).__init__(**kwargs)
    self.projection = PerspectiveProjection() if not projection else projection
    self.reset_uniforms()
262,098
Experimental anaglyph drawing function for a VR system with red/blue glasses, used in the Sirota lab.
Draws a virtual scene in red and blue from the subject's (head tracker's) perspective in the active scene.

Note:
    assumes the shader uses playerPos, like ratcave's default shader

Args:
    cube_fbo: texture frameBuffer object.
    vr_scene: virtual scene object
    active_scene: active scene object
    eye_poses: the eye positions

Returns:

def draw_vr_anaglyph(cube_fbo, vr_scene, active_scene, eye_poses=(.035, -.035)):
    color_masks = [(True, False, False, True), (False, True, True, True)]
    cam = vr_scene.camera
    orig_cam_position = cam.position.xyz

    for color_mask, eye_pos in zip(color_masks, eye_poses):
        gl.glColorMask(*color_mask)
        cam.position.xyz = cam.model_matrix.dot([eye_pos, 0., 0., 1.])[:3]  # inter_eye_distance / 2.
        cam.uniforms['playerPos'] = cam.position.xyz

        with cube_fbo as fbo:
            vr_scene.draw360_to_texture(fbo.texture)

    cam.position.xyz = orig_cam_position
    active_scene.draw()
262,124
Reads the shader programs, given the vert and frag filenames.

Arguments:
    - vert (str): The filename of the vertex shader program (ex: 'vertshader.vert')
    - frag (str): The filename of the fragment shader program (ex: 'fragshader.frag')

Returns:
    - shader (Shader): The Shader using these files.

def from_file(cls, vert, frag, **kwargs):
    vert_program = open(vert).read()
    frag_program = open(frag).read()
    return cls(vert=vert_program, frag=frag_program, **kwargs)
262,205
XYZ Position, Scale and XYZEuler Rotation Class.

Args:
    position: (x, y, z) translation values.
    rotation: (x, y, z) rotation values
    scale (float): uniform scale factor. 1 = no scaling.

def __init__(self, position=(0., 0., 0.), rotation=(0., 0., 0.), scale=1., orientation0=(1., 0., 0.), **kwargs):
    super(Physical, self).__init__(**kwargs)
    self.orientation0 = np.array(orientation0, dtype=np.float32)
    self.rotation = coordinates.RotationEulerDegrees(*rotation)
    self.position = coordinates.Translation(*position)

    if hasattr(scale, '__iter__'):
        if 0 in scale:
            raise ValueError("Scale can not be set to 0")
        self.scale = coordinates.Scale(*scale)
    else:
        # use == rather than identity comparison with a literal
        if scale == 0:
            raise ValueError("Scale can not be set to 0")
        self.scale = coordinates.Scale(scale)

    self._model_matrix = np.identity(4, dtype=np.float32)
    self._normal_matrix = np.identity(4, dtype=np.float32)
    self._view_matrix = np.identity(4, dtype=np.float32)
262,210
r"""Read Bradley--Cracknell k-points path from data file Args: bravais (str): Lattice code including orientation e.g. 'trig_p_c' Returns: dict: kpoint path and special point locations, formatted as e.g.:: {'kpoints': {'\Gamma': [0., 0., 0.], 'X': [0., 0.5, 0.], ...}, 'path': [['\Gamma', 'X', ..., 'P'], ['H', 'N', ...]]}
def _get_bradcrack_data(bravais): r json_file = pkg_resources.resource_filename(__name__, 'bradcrack.json') with open(json_file, 'r') as f: bradcrack_data = load_json(f) return bradcrack_data[bravais]
263,875
Return the lattice crystal system.

Hexagonal cells are differentiated into rhombohedral and hexagonal lattices.

Args:
    number (int): The international space group number.

Returns:
    str: The lattice crystal system.

def get_lattice_type(number):
    f = lambda i, j: i <= number <= j
    cs = {'triclinic': (1, 2), 'monoclinic': (3, 15),
          'orthorhombic': (16, 74), 'tetragonal': (75, 142),
          'trigonal': (143, 167), 'hexagonal': (168, 194),
          'cubic': (195, 230)}

    crystal_system = None
    for k, v in cs.items():
        if f(*v):
            crystal_system = k
            break

    if number in [146, 148, 155, 160, 161, 166, 167]:
        return "rhombohedral"
    elif crystal_system == "trigonal":
        return "hexagonal"
    else:
        return crystal_system
263,895
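For example, tracing get_lattice_type() as defined above:

get_lattice_type(166)  # -> 'rhombohedral' (166 is in the explicit rhombohedral list)
get_lattice_type(150)  # -> 'hexagonal' (in the trigonal range but not rhombohedral)
get_lattice_type(225)  # -> 'cubic' (195 <= 225 <= 230)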
Get a default set of labels (1), (2), (3)... for a k-point path.

Repeated points will be identified and the labels re-used.

Args:
    kpt_list (list): Nested list representing k-point path segments, e.g.::

        [[[0., 0., 0.], [0., 0., 0.5], [0., 0.5, 0.5]],
         [[0.5, 0.5, 0.], [0., 0., 0.]]]

Returns:
    list: Corresponding nested list of labels, e.g.::

        [['(1)', '(2)', '(3)'], ['(4)', '(1)']]

def _auto_kpath_labels(kpt_list):
    # Build dict of labels
    label_i = 1
    kpt_labels = {}
    for kpt in chain(*kpt_list):
        if tuple(kpt) in kpt_labels:
            continue
        else:
            kpt_labels.update({tuple(kpt): '({})'.format(label_i)})
            label_i += 1

    # Read out into nested lists
    kpath_labels = [[kpt_labels[tuple(kpt)] for kpt in segment]
                    for segment in kpt_list]

    return kpath_labels
263,916
Log data about the direct and indirect band gaps.

Args:
    bs (:obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):
        The band structure.

def _log_band_gap_information(bs):
    bg_data = bs.get_band_gap()
    if not bg_data['direct']:
        logging.info('Indirect band gap: {:.3f} eV'.format(bg_data['energy']))

    direct_data = bs.get_direct_band_gap_dict()
    if bs.is_spin_polarized:
        direct_bg = min((spin_data['value'] for spin_data in direct_data.values()))
        logging.info('Direct band gap: {:.3f} eV'.format(direct_bg))

        for spin, spin_data in direct_data.items():
            direct_kindex = spin_data['kpoint_index']
            direct_kpoint = bs.kpoints[direct_kindex].frac_coords
            direct_kpoint = kpt_str.format(k=direct_kpoint)
            eq_kpoints = bs.get_equivalent_kpoints(direct_kindex)
            k_indices = ', '.join(map(str, eq_kpoints))

            # add 1 to band indices to be consistent with VASP band numbers.
            b_indices = ', '.join([str(i + 1) for i in spin_data['band_indices']])

            logging.info('  {}:'.format(spin.name.capitalize()))
            logging.info('    k-point: {}'.format(direct_kpoint))
            logging.info('    k-point indices: {}'.format(k_indices))
            logging.info('    Band indices: {}'.format(b_indices))
    else:
        direct_bg = direct_data[Spin.up]['value']
        logging.info('Direct band gap: {:.3f} eV'.format(direct_bg))

        direct_kindex = direct_data[Spin.up]['kpoint_index']
        direct_kpoint = kpt_str.format(k=bs.kpoints[direct_kindex].frac_coords)
        k_indices = ', '.join(map(str, bs.get_equivalent_kpoints(direct_kindex)))
        b_indices = ', '.join([str(i + 1) for i in direct_data[Spin.up]['band_indices']])

        logging.info('  k-point: {}'.format(direct_kpoint))
        logging.info('  k-point indices: {}'.format(k_indices))
        logging.info('  Band indices: {}'.format(b_indices))
263,923
Log data about the valence band maximum or conduction band minimum.

Args:
    bs (:obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):
        The band structure.
    edge_data (dict): The :obj:`dict` from ``bs.get_vbm()`` or ``bs.get_cbm()``

def _log_band_edge_information(bs, edge_data):
    if bs.is_spin_polarized:
        spins = edge_data['band_index'].keys()
        b_indices = [', '.join([str(i + 1) for i in edge_data['band_index'][spin]])
                     + '({})'.format(spin.name.capitalize()) for spin in spins]
        b_indices = ', '.join(b_indices)
    else:
        b_indices = ', '.join([str(i + 1) for i in edge_data['band_index'][Spin.up]])

    kpoint = edge_data['kpoint']
    kpoint_str = kpt_str.format(k=kpoint.frac_coords)
    k_indices = ', '.join(map(str, edge_data['kpoint_index']))

    if kpoint.label:
        k_loc = kpoint.label
    else:
        branch = bs.get_branch(edge_data['kpoint_index'][0])[0]
        k_loc = 'between {}'.format(branch['name'])

    logging.info('  Energy: {:.3f} eV'.format(edge_data['energy']))
    logging.info('  k-point: {}'.format(kpoint_str))
    logging.info('  k-point location: {}'.format(k_loc))
    logging.info('  k-point indices: {}'.format(k_indices))
    logging.info('  Band indices: {}'.format(b_indices))
263,924
Write the band structure data files to disk.

Args:
    vr (`Vasprun`): Pymatgen `Vasprun` object.
    bs (`BandStructureSymmLine`): Calculated band structure.
    prefix (`str`, optional): Prefix for data file.
    directory (`str`, optional): Directory in which to save the data.

Returns:
    The filename of the written data file.

def save_data_files(vr, bs, prefix=None, directory=None):
    filename = '{}_band.dat'.format(prefix) if prefix else 'band.dat'
    directory = directory if directory else '.'
    filename = os.path.join(directory, filename)

    if bs.is_metal():
        zero = vr.efermi
    else:
        zero = bs.get_vbm()['energy']

    with open(filename, 'w') as f:
        header = '#k-distance eigenvalue[eV]\n'
        f.write(header)

        # write the spin up eigenvalues
        for band in bs.bands[Spin.up]:
            for d, e in zip(bs.distance, band):
                f.write('{:.8f} {:.8f}\n'.format(d, e - zero))
            f.write('\n')

        # calculation is spin polarised, write spin down bands at end of file
        if bs.is_spin_polarized:
            for band in bs.bands[Spin.down]:
                for d, e in zip(bs.distance, band):
                    f.write('{:.8f} {:.8f}\n'.format(d, e - zero))
                f.write('\n')

    return filename
263,935
Parse the element and orbital argument strings.

The presence of an element without any orbitals means that we want to plot
all of its orbitals.

Args:
    string (`str`): The selected elements and orbitals in the form: `"Sn.s.p,O"`.

Returns:
    A list of tuples specifying which elements/orbitals to plot. The output
    for the above example would be: `[('Sn', ('s', 'p')), 'O']`

def _el_orb_tuple(string):
    el_orbs = []
    for split in string.split(','):
        splits = split.split('.')
        el = splits[0]
        if len(splits) == 1:
            el_orbs.append(el)
        else:
            el_orbs.append((el, tuple(splits[1:])))
    return el_orbs
263,936
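A worked example of _el_orb_tuple() above, matching its docstring:

_el_orb_tuple('Sn.s.p,O')  # -> [('Sn', ('s', 'p')), 'O']
_el_orb_tuple('Fe.d')      # -> [('Fe', ('d',))]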
Write the phonon band structure data files to disk.

Args:
    bs (:obj:`~pymatgen.phonon.bandstructure.PhononBandStructureSymmLine`):
        The phonon band structure.
    prefix (:obj:`str`, optional): Prefix for data file.
    directory (:obj:`str`, optional): Directory in which to save the data.

Returns:
    str: The filename of the written data file.

def save_data_files(bs, prefix=None, directory=None):
    filename = 'phonon_band.dat'
    filename = '{}_phonon_band.dat'.format(prefix) if prefix else filename
    directory = directory if directory else '.'
    filename = os.path.join(directory, filename)

    with open(filename, 'w') as f:
        header = '#k-distance frequency[THz]\n'
        f.write(header)

        for band in bs.bands:
            for d, e in zip(bs.distance, band):
                f.write('{:.8f} {:.8f}\n'.format(d, e))
            f.write('\n')

    return filename
263,939
Parse the atom string.

Args:
    atoms_string (str): The atoms to plot, in the form ``"C.1.2.3,"``.

Returns:
    dict: The atomic indices over which to sum the DOS. Formatted as::

        {Element: [atom_indices]}

    Indices are zero indexed for each atomic species. If an element symbol
    is included with an empty list, then all sites for that species are
    considered.

def _atoms(atoms_string):
    atoms = {}
    for split in atoms_string.split(','):
        sites = split.split('.')
        el = sites.pop(0)
        sites = list(map(int, sites))
        atoms[el] = np.array(sites) - 1
    return atoms
263,948
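A worked example of _atoms() above; the input string uses one-based site numbers while the returned indices are zero-based:

_atoms('C.1.2.3,O.4')
# -> {'C': array([0, 1, 2]), 'O': array([3])}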
Scrape types from a blob of text and return node tuples.

Args:
    text (str): Text to scrape.
    ptype (str): Optional ptype to scrape. If present, only scrape rules
        which match the provided type.

Returns:
    (str, str): Yield tuples of type, valu strings.

def scrape(text, ptype=None):
    for ruletype, rule, info in scrape_types:
        if ptype and ptype != ruletype:
            continue

        regx = regexes.get(ruletype)
        for valu in regx.findall(text):
            yield (ruletype, valu)
264,040
Use msgpack to serialize a compatible python object.

Args:
    item (obj): The object to serialize

Notes:
    String objects are encoded using utf8 encoding. In order to handle
    potentially malformed input, ``unicode_errors='surrogatepass'`` is set
    to allow encoding bad input strings.

Returns:
    bytes: The serialized bytes in msgpack format.

def en(item):
    if pakr is None:  # pragma: no cover
        return msgpack.packb(item, use_bin_type=True, unicode_errors='surrogatepass')
    try:
        return pakr.pack(item)
    except Exception:
        pakr.reset()
        raise
264,041
Use msgpack to de-serialize a python object.

Args:
    byts (bytes): The bytes to de-serialize

Notes:
    String objects are decoded using utf8 encoding. In order to handle
    potentially malformed input, ``unicode_errors='surrogatepass'`` is set
    to allow decoding bad input strings.

Returns:
    obj: The de-serialized object

def un(byts):
    # This uses a subset of unpacker_kwargs
    return msgpack.loads(byts, use_list=False, raw=False, unicode_errors='surrogatepass')
264,042
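A quick round-trip through en()/un() as defined above; note that because un() passes use_list=False, msgpack arrays come back as tuples:

byts = en({'foo': 'bar', 'baz': (1, 2, 3)})
assert un(byts) == {'foo': 'bar', 'baz': (1, 2, 3)}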
Generator which unpacks a file object of msgpacked content.

Args:
    fd: File object to consume data from.

Notes:
    String objects are decoded using utf8 encoding. In order to handle
    potentially malformed input, ``unicode_errors='surrogatepass'`` is set
    to allow decoding bad input strings.

Yields:
    Objects from a msgpack stream.

def iterfd(fd):
    unpk = msgpack.Unpacker(fd, **unpacker_kwargs)
    for mesg in unpk:
        yield mesg
264,043
Generator which yields msgpack objects from a file path.

Args:
    path: File path to open and consume data from.
    since (int): Optional index offset; objects at or before this offset are skipped.

Notes:
    String objects are decoded using utf8 encoding. In order to handle
    potentially malformed input, ``unicode_errors='surrogatepass'`` is set
    to allow decoding bad input strings.

Yields:
    Objects from a msgpack stream.

def iterfile(path, since=-1):
    with io.open(path, 'rb') as fd:
        unpk = msgpack.Unpacker(fd, **unpacker_kwargs)
        for i, mesg in enumerate(unpk):
            if i <= since:
                continue
            yield mesg
264,044
Dump an object to a file by path.

Args:
    item (object): The object to serialize.
    path (str): The file path to save.

Returns:
    None

def dumpfile(item, path):
    with io.open(path, 'wb') as fd:
        fd.write(en(item))
264,045
Feed bytes to the unpacker and return completed objects.

Args:
    byts (bytes): Bytes to unpack.

Notes:
    It is intended that this function is called multiple times with bytes
    from some sort of a stream, as it will unpack and return objects as
    they are available.

Returns:
    list: List of tuples containing the item size and the unpacked item.

def feed(self, byts):
    self.unpk.feed(byts)

    retn = []

    while True:
        try:
            item = self.unpk.unpack()
            tell = self.unpk.tell()
            retn.append((tell - self.size, item))
            self.size = tell
        except msgpack.exceptions.OutOfData:
            break

    return retn
264,046
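A sketch of streaming use of feed() above; the containing class (called Unpk here, holding a msgpack.Unpacker as self.unpk and a running self.size counter) is assumed from context:

unpk = Unpk()                     # hypothetical container class for feed()
byts = en('foo') + en('bar')      # two complete 4-byte msgpack items
assert unpk.feed(byts[:2]) == []  # partial data: nothing complete yet
unpk.feed(byts[2:])               # -> [(4, 'foo'), (4, 'bar')] as (size, item) pairs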
Execute a function in an executor thread.

Args:
    func: The function to execute.
    *args: Positional args passed to the function.
    **kwargs: Keyword args passed to the function.

async def executor(func, *args, **kwargs):

    def syncfunc():
        return func(*args, **kwargs)

    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, syncfunc)
264,077
Add the structured data from items to the CryoTank.

Args:
    items (list): A list of objects to store in the CryoTank.
    seqn (iden, offs): An iden / offset pair to record.

Returns:
    int: The ending offset of the items or seqn.

async def puts(self, items, seqn=None):
    size = 0
    for chunk in s_common.chunks(items, 1000):
        metrics = self._items.save(chunk)
        self._metrics.add(metrics)
        await self.fire('cryotank:puts', numrecords=len(chunk))
        size += len(chunk)
        await asyncio.sleep(0)

    if seqn is not None:
        iden, offs = seqn
        self.setOffset(iden, offs + size)

    return size
264,174
Yield metrics rows starting at offset.

Args:
    offs (int): The index offset.
    size (int): The maximum number of records to yield.

Yields:
    ((int, dict)): An index offset, info tuple for metrics.

async def metrics(self, offs, size=None):
    for i, (indx, item) in enumerate(self._metrics.iter(offs)):
        if size is not None and i >= size:
            return
        yield indx, item
264,175
Yield a number of items from the CryoTank starting at a given offset.

Args:
    offs (int): The index of the desired datum (starts at 0)
    size (int): The max number of items to yield.

Yields:
    ((index, object)): Index and item values.

async def slice(self, offs, size=None, iden=None):
    if iden is not None:
        self.setOffset(iden, offs)

    for i, (indx, item) in enumerate(self._items.iter(offs)):
        if size is not None and i >= size:
            return
        yield indx, item
264,176
Yield a number of raw items from the CryoTank starting at a given offset.

Args:
    offs (int): The index of the desired datum (starts at 0)
    size (int): The max number of items to yield.

Yields:
    ((indx, bytes)): Index and msgpacked bytes.

async def rows(self, offs, size=None, iden=None):
    if iden is not None:
        self.setOffset(iden, offs)

    for i, (indx, byts) in enumerate(self._items.rows(offs)):
        if size is not None and i >= size:
            return
        yield indx, byts
264,177
Generate a new CryoTank with a given name or get a reference to an existing CryoTank.

Args:
    name (str): Name of the CryoTank.

Returns:
    CryoTank: A CryoTank instance.

async def init(self, name, conf=None):
    tank = self.tanks.get(name)
    if tank is not None:
        return tank

    iden = s_common.guid()

    logger.info('Creating new tank: %s', name)

    path = s_common.genpath(self.dirn, 'tanks', iden)

    tank = await CryoTank.anit(path, conf)

    node = await self.names.open((name,))
    await node.set((iden, conf))

    self.tanks.put(name, tank)

    return tank
264,187
Return a date string for an epoch-millis timestamp.

Args:
    tick (int): The timestamp in milliseconds since the epoch.

Returns:
    (str): A date time string

def repr(tick, pack=False):
    if tick == 0x7fffffffffffffff:
        return '?'

    dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(milliseconds=tick)
    millis = dt.microsecond / 1000

    if pack:
        return '%d%.2d%.2d%.2d%.2d%.2d%.3d' % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, millis)

    return '%d/%.2d/%.2d %.2d:%.2d:%.2d.%.3d' % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, millis)
264,311
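For instance, with repr() as defined above:

repr(0)                   # -> '1970/01/01 00:00:00.000'
repr(0, pack=True)        # -> '19700101000000000'
repr(0x7fffffffffffffff)  # -> '?' (sentinel meaning "no time")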
Encrypt the given bytes and return an envelope dict in msgpack form.

Args:
    byts (bytes): The message to be encrypted.
    asscd (bytes): Extra data that needs to be authenticated (but not encrypted).

Returns:
    bytes: The encrypted message. This is a msgpacked dictionary containing
    the IV, ciphertext, and associated data.

def enc(self, byts, asscd=None):
    iv = os.urandom(16)
    encryptor = AESGCM(self.ekey)
    byts = encryptor.encrypt(iv, byts, asscd)
    envl = {'iv': iv, 'data': byts, 'asscd': asscd}
    return s_msgpack.en(envl)
264,328
Decode an envelope dict and decrypt the given bytes.

Args:
    byts (bytes): Bytes to decrypt.

Returns:
    bytes: Decrypted message.

def dec(self, byts):
    envl = s_msgpack.un(byts)
    iv = envl.get('iv', b'')
    asscd = envl.get('asscd', b'')
    data = envl.get('data', b'')

    decryptor = AESGCM(self.ekey)

    try:
        data = decryptor.decrypt(iv, data, asscd)
    except Exception:
        logger.exception('Error decrypting data')
        return None
    return data
264,329
Wrap a message with a sequence number and encrypt it.

Args:
    mesg: The mesg to encrypt.

Returns:
    bytes: The encrypted message.

def encrypt(self, mesg):
    seqn = next(self._tx_sn)
    rv = self._tx_tinh.enc(s_msgpack.en((seqn, mesg)))
    return rv
264,331
Decrypt a message, validating that its sequence number is as we expect.

Args:
    ciphertext (bytes): The message to decrypt and verify.

Returns:
    mesg: A mesg.

Raises:
    s_exc.CryptoErr: If the message decryption fails or the sequence number was unexpected.

def decrypt(self, ciphertext):
    plaintext = self._rx_tinh.dec(ciphertext)
    if plaintext is None:
        logger.error('Message decryption failure')
        raise s_exc.CryptoErr(mesg='Message decryption failure')

    seqn = next(self._rx_sn)

    sn, mesg = s_msgpack.un(plaintext)
    if sn != seqn:
        logger.error('Message out of sequence: got %d expected %d', sn, seqn)
        raise s_exc.CryptoErr(mesg='Message out of sequence', expected=seqn, got=sn)

    return mesg
264,332
Pack a set of major/minor/patch integers into a single integer for storage.

Args:
    major (int): Major version level integer.
    minor (int): Minor version level integer.
    patch (int): Patch version level integer.

Returns:
    int: System normalized integer value to represent a software version.

def packVersion(major, minor=0, patch=0):
    ret = patch & mask20
    ret = ret | (minor & mask20) << 20
    ret = ret | (major & mask20) << 20 * 2
    return ret
264,335
Unpack a system normalized integer representing a software version into its component parts.

Args:
    ver (int): System normalized integer value to unpack into a tuple.

Returns:
    (int, int, int): A tuple containing the major, minor and patch values shifted out of the integer.

def unpackVersion(ver):
    major = (ver >> 20 * 2) & mask20
    minor = (ver >> 20) & mask20
    patch = ver & mask20
    return major, minor, patch
264,336
Join a string of parts together with a . separator.

Args:
    *vsnparts: Version parts to join; each part is str()'d and lowercased.

Returns:
    str: The dot-separated version string.

def fmtVersion(*vsnparts):
    if len(vsnparts) < 1:
        raise s_exc.BadTypeValu(valu=repr(vsnparts), name='fmtVersion',
                                mesg='Not enough version parts to form a version string with.',)
    ret = '.'.join([str(part).lower() for part in vsnparts])
    return ret
264,337
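A round-trip through the three version helpers above, assuming mask20 = 2**20 - 1 so each field gets 20 bits:

ver = packVersion(2, 1, 3)  # major=2, minor=1, patch=3
assert unpackVersion(ver) == (2, 1, 3)
assert fmtVersion(*unpackVersion(ver)) == '2.1.3'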
Set a name in the SlabDict.

Args:
    name (str): The key name.
    valu (obj): A msgpack compatible value.

Returns:
    None

def set(self, name, valu):
    byts = s_msgpack.en(valu)
    lkey = self.pref + name.encode('utf8')
    self.slab.put(lkey, byts, db=self.db)
    self.info[name] = valu
264,353
Pop a name from the SlabDict.

Args:
    name (str): The name to remove.
    defval (obj): The default value to return if the name is not present.

Returns:
    object: The object stored in the SlabDict, or defval if the object was not present.

def pop(self, name, defval=None):
    valu = self.info.pop(name, defval)
    lkey = self.pref + name.encode('utf8')
    self.slab.pop(lkey, db=self.db)
    return valu
264,354
Initialize a new (min,max) tuple interval from values.

Args:
    *vals ([int,...]): A list of values (or Nones)

Returns:
    ((int,int)): A (min,max) interval tuple or None

def fold(*vals):
    vals = [v for v in vals if v is not None]
    if not vals:
        return None
    return min(vals), max(vals)
264,426
Determine if two interval tuples have overlap.

Args:
    iv0 ((int,int)): An interval tuple
    iv1 ((int,int)): An interval tuple

Returns:
    (bool): True if the intervals overlap, otherwise False

def overlap(ival0, ival1):
    min0, max0 = ival0
    min1, max1 = ival1
    return max(0, min(max0, max1) - max(min0, min1)) > 0
264,427
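Quick examples of fold() and overlap() above; note that intervals which merely touch do not count as overlapping:

fold(5, None, 2, 9)        # -> (2, 9)
fold(None, None)           # -> None
overlap((0, 10), (5, 15))  # -> True
overlap((0, 5), (5, 10))   # -> False (zero-width intersection)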
Parse an interval time string and return a (min,max) tuple.

Args:
    text (str): A time interval string

Returns:
    ((int,int)): A (min, max) tuple of epoch-millis timestamps.

def parsetime(text):
    mins, maxs = text.split('-', 1)
    minv = s_time.parse(mins)
    maxv = s_time.parse(maxs, base=minv)
    return minv, maxv
264,428
Get a proxy to a cortex backed by a temporary directory.

Args:
    mods (list): A list of modules which are loaded into the cortex.

Notes:
    The cortex and temporary directory are torn down on exit.
    This should only be called from synchronous code.

Returns:
    Proxy to the cortex.

async def getTempCortex(mods=None):
    with s_common.getTempDir() as dirn:
        async with await Cortex.anit(dirn) as core:
            if mods:
                for mod in mods:
                    await core.loadCoreModule(mod)
            async with core.getLocalProxy() as prox:
                yield prox
264,451
Async init the view.

Args:
    core (Cortex): The cortex that owns the view.
    node (HiveNode): The hive node containing the view info.

async def __anit__(self, core, node):
    await s_base.Base.__anit__(self)
    self.core = core
    self.node = node
    self.iden = node.name()
    self.borked = None

    self.info = await node.dict()
    self.info.setdefault('owner', 'root')
    self.info.setdefault('layers', ())

    self.layers = []

    for iden in self.info.get('layers'):
        layr = core.layers.get(iden)

        if layr is None:
            self.borked = iden
            logger.warning('view %r has missing layer %r' % (self.iden, iden))
            continue

        if not self.layers and layr.readonly:
            self.borked = iden
            raise s_exc.ReadOnlyLayer(mesg=f'First layer {iden} must not be read-only')

        self.layers.append(layr)
264,452
Delete a cron job.

Args:
    iden (bytes): The iden of the cron job to be deleted.

async def delCronJob(self, iden):
    cron = self.cell.agenda.appts.get(iden)
    if cron is None:
        raise s_exc.NoSuchIden()

    self._trig_auth_check(cron.useriden)
    await self.cell.agenda.delete(iden)
264,467
Change an existing cron job's query.

Args:
    iden (bytes): The iden of the cron job to be changed.
    query (str): The new query for the cron job.

async def updateCronJob(self, iden, query):
    cron = self.cell.agenda.appts.get(iden)
    if cron is None:
        raise s_exc.NoSuchIden()

    self._trig_auth_check(cron.useriden)
    await self.cell.agenda.mod(iden, query)
264,468
Add a tag to a node specified by iden.

Args:
    iden (str): A hex encoded node BUID.
    tag (str): A tag string.
    valu (tuple): A time interval tuple or (None, None).

async def addNodeTag(self, iden, tag, valu=(None, None)):
    buid = s_common.uhex(iden)

    parts = tag.split('.')
    self._reqUserAllowed('tag:add', *parts)

    async with await self.cell.snap(user=self.user) as snap:
        with s_provenance.claim('coreapi', meth='tag:add', user=snap.user.iden):
            node = await snap.getNodeByBuid(buid)
            if node is None:
                raise s_exc.NoSuchIden(iden=iden)

            await node.addTag(tag, valu=valu)
            return node.pack()
264,470
Add a list of packed nodes to the cortex.

Args:
    nodes (list): [ ( (form, valu), {'props':{}, 'tags':{}}), ... ]

Yields:
    (tuple): Packed node tuples ((form,valu), {'props': {}, 'tags':{}})

async def addNodes(self, nodes):
    # First check that the user may add each form
    done = {}
    for node in nodes:
        formname = node[0][0]
        if done.get(formname):
            continue

        self._reqUserAllowed('node:add', formname)
        done[formname] = True

    async with await self.cell.snap(user=self.user) as snap:
        with s_provenance.claim('coreapi', meth='node:add', user=snap.user.iden):
            snap.strict = False

            async for node in snap.addNodes(nodes):
                if node is not None:
                    node = node.pack()
                yield node
264,473
Count the number of nodes which result from a storm query.

Args:
    text (str): Storm query text.
    opts (dict): Storm query options.

Returns:
    (int): The number of nodes resulting from the query.

async def count(self, text, opts=None):
    i = 0
    async for _ in self.cell.eval(text, opts=opts, user=self.user):
        i += 1
    return i
264,475
Return the provenance stack associated with the given iden.

Args:
    iden (str): the iden from a splice

Note:
    the iden appears on each splice entry as the 'prov' property

async def getProvStack(self, iden: str):
    return self.cell.provstor.getProvStack(s_common.uhex(iden))
264,480
Register a callback for tag addition.

Args:
    name (str): The name of the tag or tag glob.
    func (function): The callback func(node, tagname, tagval).

def onTagAdd(self, name, func):
    # TODO allow name wild cards
    if '*' in name:
        self.ontagaddglobs.add(name, func)
    else:
        self.ontagadds[name].append(func)
264,492
Unregister a callback for tag addition.

Args:
    name (str): The name of the tag or tag glob.
    func (function): The callback func(node, tagname, tagval).

def offTagAdd(self, name, func):
    if '*' in name:
        self.ontagaddglobs.rem(name, func)
        return

    cblist = self.ontagadds.get(name)
    if cblist is None:
        return
    try:
        cblist.remove(func)
    except ValueError:
        pass
264,493
Register a callback for tag deletion.

Args:
    name (str): The name of the tag or tag glob.
    func (function): The callback func(node, tagname, tagval).

def onTagDel(self, name, func):
    if '*' in name:
        self.ontagdelglobs.add(name, func)
    else:
        self.ontagdels[name].append(func)
264,494
Unregister a callback for tag deletion.

Args:
    name (str): The name of the tag or tag glob.
    func (function): The callback func(node, tagname, tagval).

def offTagDel(self, name, func):
    if '*' in name:
        self.ontagdelglobs.rem(name, func)
        return

    cblist = self.ontagdels.get(name)
    if cblist is None:
        return
    try:
        cblist.remove(func)
    except ValueError:
        pass
264,495
Execute a runt lift function.

Args:
    full (str): Property to lift by.
    valu: The value to lift by (or None).
    cmpr: The comparison operator (or None).

Returns:
    bytes, list: Yields bytes, list tuples where the list contains a series of
    key/value pairs which are used to construct a Node object.

async def runRuntLift(self, full, valu=None, cmpr=None):
    func = self._runtLiftFuncs.get(full)
    if func is None:
        raise s_exc.NoSuchLift(mesg='No runt lift implemented for requested property.',
                               full=full, valu=valu, cmpr=cmpr)

    async for buid, rows in func(full, valu, cmpr):
        yield buid, rows
264,496
Quickly add/modify a list of nodes from node definition tuples.
This API is the simplest/fastest way to add nodes, set node props,
and add tags to nodes remotely.

Args:
    nodedefs (list): A list of node definition tuples. See below.

A node definition tuple is defined as:

    ( (form, valu), {'props':{}, 'tags':{}} )

The "props" or "tags" keys may be omitted.

async def addNodes(self, nodedefs):
    async with await self.snap() as snap:
        snap.strict = False
        async for node in snap.addNodes(nodedefs):
            yield node
264,540
Add data using a feed/parser function.

Args:
    name (str): The name of the feed record format.
    items (list): A list of items to ingest.
    seqn ((str,int)): An (iden, offs) tuple for this feed chunk.

Returns:
    (int): The next expected offset (or None if seqn is None).

async def addFeedData(self, name, items, seqn=None):
    async with await self.snap() as snap:
        snap.strict = False
        return await snap.addFeedData(name, items, seqn=seqn)
264,541
Return a transaction object for the default view.

Args:
    user: The user for the snap (defaults to root).
    view: The view to snap (defaults to the default view).

Returns:
    (synapse.lib.snap.Snap)

NOTE: This must be used in a with block.

async def snap(self, user=None, view=None):
    if view is None:
        view = self.view

    if user is None:
        user = self.auth.getUserByName('root')

    snap = await view.snap(user)

    return snap
264,543
Load a single cortex module with the given ctor and conf.

Args:
    ctor (str): The python module class path
    conf (dict): Config dictionary for the module

async def loadCoreModule(self, ctor, conf=None):
    if conf is None:
        conf = {}

    modu = self._loadCoreModule(ctor, conf=conf)

    try:
        await s_coro.ornot(modu.preCoreModule)
    except asyncio.CancelledError:  # pragma: no cover
        raise
    except Exception:
        logger.exception(f'module preCoreModule failed: {ctor}')
        self.modules.pop(ctor, None)
        return

    mdefs = modu.getModelDefs()
    self.model.addDataModels(mdefs)

    cmds = modu.getStormCmds()
    [self.addStormCmd(c) for c in cmds]

    try:
        await s_coro.ornot(modu.initCoreModule)
    except asyncio.CancelledError:  # pragma: no cover
        raise
    except Exception:
        logger.exception(f'module initCoreModule failed: {ctor}')
        self.modules.pop(ctor, None)
        return

    await self.fire('core:module:load', module=ctor)

    return modu
264,544
Get the normalized property value based on the Cortex data model.

Args:
    prop (str): The property to normalize.
    valu: The value to normalize.

Returns:
    (tuple): A two item tuple, containing the normed value and the info dictionary.

Raises:
    s_exc.NoSuchProp: If the prop does not exist.
    s_exc.BadTypeValu: If the value fails to normalize.

async def getPropNorm(self, prop, valu):
    pobj = self.model.prop(prop)
    if pobj is None:
        raise s_exc.NoSuchProp(mesg=f'The property {prop} does not exist.', prop=prop)

    norm, info = pobj.type.norm(valu)
    return norm, info
264,549
Get the normalized type value based on the Cortex data model.

Args:
    name (str): The type to normalize.
    valu: The value to normalize.

Returns:
    (tuple): A two item tuple, containing the normed value and the info dictionary.

Raises:
    s_exc.NoSuchType: If the type does not exist.
    s_exc.BadTypeValu: If the value fails to normalize.

async def getTypeNorm(self, name, valu):
    tobj = self.model.type(name)
    if tobj is None:
        raise s_exc.NoSuchType(mesg=f'The type {name} does not exist.', name=name)

    norm, info = tobj.norm(valu)
    return norm, info
264,550
Call a remote method by name.

Args:
    methname (str): The name of the remote method.
    *args: Arguments to the method call.
    **kwargs: Keyword arguments to the method call.

Most use cases will likely use the proxy methods directly; the following
two are effectively the same:

    valu = proxy.getFooBar(x, y)
    valu = proxy.call('getFooBar', x, y)

async def call(self, methname, *args, **kwargs):
    todo = (methname, args, kwargs)
    return await self.task(todo)
264,595
Get a dictionary of special annotations for a Telepath Proxy.

Args:
    item: Item to inspect.

Notes:
    This sets a ``_syn_sharinfo_*`` attribute on the item and the item's
    class, so this data is only computed once.

Returns:
    dict: A dictionary of methods requiring special handling by the proxy.

def getShareInfo(item):
    key = f'_syn_sharinfo_{item.__class__.__module__}_{item.__class__.__qualname__}'
    info = getattr(item, key, None)
    if info is not None:
        return info

    meths = {}
    info = {'meths': meths}

    for name in dir(item):
        if name.startswith('_'):
            continue

        attr = getattr(item, name, None)
        if not callable(attr):
            continue

        # We know we can cleanly unwrap these functions
        # for asyncgenerator inspection.
        wrapped = getattr(attr, '__syn_wrapped__', None)
        if wrapped in unwraps:
            real = inspect.unwrap(attr)
            if inspect.isasyncgenfunction(real):
                meths[name] = {'genr': True}
                continue

        if inspect.isasyncgenfunction(attr):
            meths[name] = {'genr': True}

    try:
        setattr(item, key, info)
    except Exception as e:  # pragma: no cover
        logger.exception(f'Failed to set magic on {item}')

    try:
        setattr(item.__class__, key, info)
    except Exception as e:  # pragma: no cover
        logger.exception(f'Failed to set magic on {item.__class__}')

    return info
264,625
Schedule a coroutine to run on the global loop and return its result.

Args:
    coro (coroutine): The coroutine instance.

Notes:
    This API is thread safe and should only be called by non-loop threads.

def sync(coro, timeout=None):
    loop = initloop()
    return asyncio.run_coroutine_threadsafe(coro, loop).result(timeout)
264,677
Distribute an existing event tuple.

Args:
    mesg ((str,dict)): An event tuple.

Example:

    await base.dist( ('foo',{'bar':'baz'}) )

async def dist(self, mesg):
    if self.isfini:
        return ()

    ret = []
    for func in self._syn_funcs.get(mesg[0], ()):
        try:
            ret.append(await s_coro.ornot(func, mesg))
        except asyncio.CancelledError:
            raise
        except Exception:
            logger.exception('base %s error with mesg %s', self, mesg)

    for func in self._syn_links:
        try:
            ret.append(await func(mesg))
        except asyncio.CancelledError:
            raise
        except Exception:
            logger.exception('base %s error with mesg %s', self, mesg)

    return ret
264,708
A context manager which can be used to add a callback and remove it when
using a ``with`` statement.

Args:
    evnt (str): An event name
    func (function): A callback function to receive event tufo

def onWith(self, evnt, func):
    self.on(evnt, func)

    # Allow exceptions to propagate during the context manager
    # but ensure we cleanup our temporary callback
    try:
        yield self
    finally:
        self.off(evnt, func)
264,711
Add a Base (or sub-class) to the BaseRef by name.

Args:
    name (str): The name/iden of the Base
    base (Base): The Base instance

Returns:
    (None)

def put(self, name, base):

    async def fini():
        if self.base_by_name.get(name) is base:
            self.base_by_name.pop(name, None)

    # Remove myself from BaseRef when I fini
    base.onfini(fini)
    self.base_by_name[name] = base
264,724
Atomically get/gen a Base and incref. (requires ctor during BaseRef init)

Args:
    name (str): The name/iden of the Base instance.

async def gen(self, name):
    if self.ctor is None:
        raise s_exc.NoSuchCtor(name=name, mesg='BaseRef.gen() requires ctor')

    base = self.base_by_name.get(name)
    if base is None:
        base = await self.ctor(name)
        self.put(name, base)
    else:
        base.incref()

    return base
264,725
Get a 16 byte guid value.

By default, this is a random guid value.

Args:
    valu: Object used to construct the guid valu from. This must be able
        to be msgpack'd.

Returns:
    str: 32 character, lowercase ascii string.

def guid(valu=None):
    if valu is None:
        return binascii.hexlify(os.urandom(16)).decode('utf8')
    # Generate a "stable" guid from the given item
    byts = s_msgpack.en(valu)
    return hashlib.md5(byts).hexdigest()
264,726
A binary GUID like sequence of 32 bytes.

Args:
    valu (object): Optional, if provided, the hash of the msgpack encoded
        form of the object is returned. This can be used to create stable buids.

Notes:
    By default, this returns a random 32 byte value.

Returns:
    bytes: A 32 byte value.

def buid(valu=None):
    if valu is None:
        return os.urandom(32)

    byts = s_msgpack.en(valu)
    return hashlib.sha256(byts).digest()
264,727
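The random/stable split in guid() and buid() above:

assert guid() != guid()                              # random 16-byte hex by default
assert guid(('foo', 'bar')) == guid(('foo', 'bar'))  # stable for msgpack-equal values
assert len(buid(('foo', 'bar'))) == 32               # sha256 digest of the msgpack'd value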
Ensure (or coerce) a value into being an integer or None.

Args:
    x (obj): An object to intify

Returns:
    (int): The int value (or None)

def intify(x):
    if isinstance(x, int):
        return x
    try:
        return int(x, 0)
    except (TypeError, ValueError):
        return None
264,728
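Because intify() above parses strings with int(x, 0), the base is auto-detected from the prefix:

intify(3)       # -> 3 (already an int)
intify('10')    # -> 10
intify('0x10')  # -> 16
intify('asdf')  # -> None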
Create or open (for read/write) a file path join.

Args:
    *paths: A list of paths to join together to make the file.

Notes:
    If the file already exists, the fd returned is opened in ``r+b`` mode.
    Otherwise, the fd is opened in ``w+b`` mode.

Returns:
    io.BufferedRandom: A file-object which can be read/written too.

def genfile(*paths):
    path = genpath(*paths)
    gendir(os.path.dirname(path))
    if not os.path.isfile(path):
        return io.open(path, 'w+b')
    return io.open(path, 'r+b')
264,734
List the (optionally glob filtered) full paths from a dir.

Args:
    *paths ([str,...]): A list of path elements
    glob (str): An optional fnmatch glob str

def listdir(*paths, glob=None):
    path = genpath(*paths)

    names = os.listdir(path)
    if glob is not None:
        names = fnmatch.filter(names, glob)

    retn = [os.path.join(path, name) for name in names]
    return retn
264,737
Get an err tufo from an exception.

Args:
    e (Exception): An Exception (or Exception subclass).

Notes:
    This can be called outside of the context of an exception handler,
    however details such as file, line, function name and source may be missing.

Returns:
    ((str, dict)):

def getexcfo(e):
    tb = sys.exc_info()[2]
    tbinfo = traceback.extract_tb(tb)

    path, line, name, src = '', '', '', None
    if tbinfo:
        # the original unpacked into a misspelled name (sorc), leaving src unset
        path, line, name, src = tbinfo[-1]

    retd = {
        'msg': str(e),
        'file': path,
        'line': line,
        'name': name,
        'src': src
    }

    if isinstance(e, s_exc.SynErr):
        retd['syn:err'] = e.errinfo

    return (e.__class__.__name__, retd)
264,745
Divide an iterable into chunks.

Args:
    item: Item to slice
    size (int): Maximum chunk size.

Notes:
    This supports Generator objects and objects which support calling
    the __getitem__() method with a slice object.

Yields:
    Slices of the item containing up to "size" number of items.

def chunks(item, size):
    # use islice if it's a generator
    if isinstance(item, types.GeneratorType):
        while True:
            chunk = tuple(itertools.islice(item, size))
            if not chunk:
                return
            yield chunk

    # The sequence item is empty, yield an empty slice from it.
    # This will also catch mapping objects since a slice should
    # be an unhashable type for a mapping and the __getitem__
    # method would not be present on a set object
    if not item:
        yield item[0:0]
        return

    # otherwise, use normal slicing
    off = 0
    while True:
        chunk = item[off:off + size]
        if not chunk:
            return
        yield chunk
        off += size
264,747
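chunks() above yields slices for sequences but tuples for generators:

list(chunks([1, 2, 3, 4, 5], 2))        # -> [[1, 2], [3, 4], [5]]
list(chunks((i for i in range(5)), 2))  # -> [(0, 1), (2, 3), (4,)]
list(chunks('', 2))                     # -> [''] (empty sequence yields one empty slice)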
Configure synapse logging.

Args:
    mlogger (logging.Logger): Reference to a logging.Logger()
    defval (str): Default log level

Notes:
    This calls logging.basicConfig and should only be called once per process.

Returns:
    None

def setlogging(mlogger, defval=None):
    log_level = os.getenv('SYN_LOG_LEVEL', defval)
    if log_level:  # pragma: no cover
        log_level = log_level.upper()
        if log_level not in s_const.LOG_LEVEL_CHOICES:
            raise ValueError('Invalid log level provided: {}'.format(log_level))
        logging.basicConfig(level=log_level, format=s_const.LOG_FORMAT)
        mlogger.info('log level set to %s', log_level)
264,752
Fire the onset() handlers for this property.

Args:
    node (synapse.lib.node.Node): The node whose property was set.
    oldv (obj): The previous value of the property.

async def wasSet(self, node, oldv):
    for func in self.onsets:
        try:
            await s_coro.ornot(func, node, oldv)
        except asyncio.CancelledError:
            raise
        except Exception:
            logger.exception('onset() error for %s' % (self.full,))
264,758
Get a list of storage operations to delete this property from the buid.

Args:
    buid (bytes): The node buid.

Returns:
    (tuple): The storage operations

def getDelOps(self, buid):
    return (
        ('prop:del', (buid, self.form.name, self.name, self.storinfo)),
    )
264,762
Bind and listen on the given URL with possible SSL.

Args:
    url (str): A listen URL, e.g. tcp://host:port, ssl://host:port, or unix://path.
    **opts: Options merged into the parsed URL info.

async def listen(self, url, **opts):
    info = s_urlhelp.chopurl(url, **opts)
    info.update(opts)

    scheme = info.get('scheme')

    if scheme == 'unix':
        path = info.get('path')
        try:
            server = await s_link.unixlisten(path, self._onLinkInit)
        except Exception as e:
            if 'path too long' in str(e):
                logger.error(f'unix:// exceeds OS supported UNIX socket path length: {path}')
            raise
    else:
        host = info.get('host')
        port = info.get('port')

        sslctx = None
        if scheme == 'ssl':
            sslctx = self.certdir.getServerSSLContext(hostname=host)

        server = await s_link.listen(host, port, self._onLinkInit, ssl=sslctx)

    self.listenservers.append(server)
    ret = server.sockets[0].getsockname()

    if self.addr is None:
        self.addr = ret

    return ret
264,785
Share an object via the telepath protocol.

Args:
    name (str): Name of the shared object
    item (object): The object to share over telepath.

def share(self, name, item):
    try:
        if isinstance(item, s_telepath.Aware):
            item.onTeleShare(self, name)

        self.shared[name] = item

    except Exception:
        logger.exception(f'onTeleShare() error for: {name}')
264,786