Columns: docstring (string, lengths 52-499), function (string, lengths 67-35.2k), __index_level_0__ (int64, values 52.6k-1.16M)
Compute the error and the gradient. This is the function optimized by :obj:`scipy.optimize.minimize`. Args: x (`array-like`): Flattened coordinate array of shape (`m` * `n`,). Returns: `tuple`: containing: - Error (`float`) - Gradient (`np.array`) of shape (`m` * `n`,), i.e. the raveled [`m`, `n`] gradient
def _error_and_gradient(self, x): coords = x.reshape((self.m, self.n)) d = squareform(pdist(coords)) diff = self.D - d error = self._error(diff) gradient = self._gradient(diff, d, coords) return error, gradient.ravel()
1,030,279
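A minimal usage sketch (not from the source): a function returning an (error, gradient) pair like the one above plugs into scipy.optimize.minimize via jac=True; the mds instance and its m/n attributes are assumptions for illustration.
import numpy as np
from scipy.optimize import minimize

# mds is a hypothetical instance exposing _error_and_gradient, m and n
x0 = np.random.default_rng(0).normal(size=mds.m * mds.n)  # flat starting coordinates
result = minimize(mds._error_and_gradient, x0, jac=True, method='L-BFGS-B')
coords = result.x.reshape((mds.m, mds.n))  # final embedding, one row per sample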
Construct a Projection from the output of an optimization. Args: result (:py:class:`scipy.optimize.OptimizeResult`): Object returned by :py:func:`scipy.optimize.minimize`. n (`int`): Number of dimensions. m (`int`): Number of samples. index (`list-like`): Names of samples. (Optional). Returns: :py:class:`pymds.Projection`
def from_optimize_result(cls, result, n, m, index=None): coords = pd.DataFrame(result.x.reshape((m, n)), index=index) projection = cls(coords) projection.stress = result.fun return projection
1,030,282
wtime_to_minutes Convert a standard wallclock time string to minutes. Args: - time_string (str): time in HH:MM:SS format Returns: (int): minutes, with one minute of padding added; seconds are discarded
def wtime_to_minutes(time_string): hours, mins, seconds = time_string.split(':') return int(hours) * 60 + int(mins) + 1
1,031,860
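For instance, wtime_to_minutes('02:30:15') returns 151: 2*60 + 30, plus the one minute of padding, with the 15 seconds discarded.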
Initialize the Kubernetes execution provider class. Args: - config (dict): Dictionary with all the config options. Kwargs: - channel (channel object): default=None. A channel object.
def __init__(self, config, channel=None): self.channel = channel if not _kubernetes_enabled: raise OptionalModuleMissing(['kubernetes'], "Kubernetes provider requires kubernetes module and config.") self.kube_client = client.ExtensionsV1beta1Api() self.config = config self.sitename = self.config['site'] self.namespace = self.config['execution']['namespace'] self.image = self.config['execution']['image'] self.init_blocks = self.config["execution"]["block"]["initBlocks"] self.min_blocks = self.config["execution"]["block"]["minBlocks"] self.max_blocks = self.config["execution"]["block"]["maxBlocks"] self.user_id = None self.group_id = None self.run_as_non_root = None if 'security' in self.config['execution']: self.user_id = self.config["execution"]['security']["user_id"] self.group_id = self.config["execution"]['security']["group_id"] self.run_as_non_root = self.config["execution"]['security']["run_as_non_root"] self.secret = None if 'secret' in self.config['execution']: self.secret = self.config['execution']['secret'] # Dictionary that keeps track of jobs, keyed on job_id self.resources = {}
1,032,037
Submit a job Args: - cmd_string (String) - Command string to execute in the container - blocksize (float) - Number of replicas Kwargs: - job_name (String): Name for job, must be unique Returns: - None: At capacity, cannot provision more - job_id: (string) Identifier for the job
def submit(self, cmd_string, blocksize, job_name="parsl.auto"): if not self.resources: job_name = "{0}-{1}".format(job_name, time.time()).split(".")[0] self.deployment_name = '{}-{}-deployment'.format(job_name, str(time.time()).split('.')[0]) formatted_cmd = template_string.format(command=cmd_string, overrides=self.config["execution"]["block"]["options"].get("overrides", '')) logger.debug("Creating replicas: %s", self.init_blocks) self.deployment_obj = self._create_deployment_object(job_name, self.image, self.deployment_name, cmd_string=formatted_cmd, replicas=self.init_blocks) logger.debug("Deployment name :{}".format(self.deployment_name)) self._create_deployment(self.deployment_obj) self.resources[self.deployment_name] = {'status': 'RUNNING', 'pods': self.init_blocks} return self.deployment_name
1,032,038
Cancels the jobs specified by a list of job ids. Args: job_ids : [<job_id> ...] Returns: [True/False...] : If the cancel operation fails, the entire list will be False.
def cancel(self, job_ids): for job in job_ids: logger.debug("Terminating job/proc_id : {0}".format(job)) # For the Kubernetes provider, job_ids are the deployment names self._delete_deployment(job) self.resources[job]['status'] = 'CANCELLED' rets = [True for i in job_ids] return rets
1,032,039
Create a kubernetes deployment for the job. Args: - job_name (string) : Name of the job and deployment - job_image (string) : Docker image to launch KWargs: - port (integer) : Container port - replicas : Number of replica containers to maintain Returns: - deployment: The deployment object to launch
def _create_deployment_object(self, job_name, job_image, deployment_name, port=80, replicas=1, cmd_string=None, engine_json_file='~/.ipython/profile_default/security/ipcontroller-engine.json', engine_dir='.'): # sorry, quick hack that doesn't pass this stuff through to test it works. # TODO it also doesn't only add what is set :( security_context = None if 'security' in self.config['execution']: security_context = client.V1SecurityContext(run_as_group=self.group_id, run_as_user=self.user_id, run_as_non_root=self.run_as_non_root) # Create the environment variables and command to initiate IPP environment_vars = client.V1EnvVar(name="TEST", value="SOME DATA") launch_args = ["-c", "{0}; /app/deploy.sh;".format(cmd_string)] logger.debug("Launch args: %s", launch_args) # Configure Pod template container container = None if security_context: container = client.V1Container( name=job_name, image=job_image, ports=[client.V1ContainerPort(container_port=port)], command=['/bin/bash'], args=launch_args, env=[environment_vars], security_context=security_context) else: container = client.V1Container( name=job_name, image=job_image, ports=[client.V1ContainerPort(container_port=port)], command=['/bin/bash'], args=launch_args, env=[environment_vars]) # Create a secret to enable pulling images from secure repositories secret = None if self.secret: secret = client.V1LocalObjectReference(name=self.secret) # Create and configure a spec section template = client.V1PodTemplateSpec( metadata=client.V1ObjectMeta(labels={"app": job_name}), spec=client.V1PodSpec(containers=[container], image_pull_secrets=[secret])) # Create the specification of deployment spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas, template=template) # Instantiate the deployment object deployment = client.ExtensionsV1beta1Deployment( api_version="extensions/v1beta1", kind="Deployment", metadata=client.V1ObjectMeta(name=deployment_name), spec=spec) return deployment
1,032,040
Get the status of a list of jobs identified by their ids. Args: - job_ids (List of ids) : List of identifiers for the jobs Returns: - List of status codes.
def status(self, job_ids): logger.debug("Checking status of : {0}".format(job_ids)) for job_id in self.resources: poll_code = self.resources[job_id]['proc'].poll() if self.resources[job_id]['status'] in ['COMPLETED', 'FAILED']: continue if poll_code is None: self.resources[job_id]['status'] = 'RUNNING' elif poll_code == 0 and self.resources[job_id]['status'] != 'RUNNING': self.resources[job_id]['status'] = 'COMPLETED' elif poll_code < 0 and self.resources[job_id]['status'] != 'RUNNING': self.resources[job_id]['status'] = 'FAILED' return [self.resources[jid]['status'] for jid in job_ids]
1,032,140
Cancels the jobs specified by a list of job ids. Args: job_ids : [<job_id> ...] Returns: [True/False...] : If the cancel operation fails, the entire list will be False.
def cancel(self, job_ids): for job in job_ids: logger.debug("Terminating job/proc_id : {0}".format(job)) # Here we are assuming that for local, the job_ids are the process id's proc = self.resources[job]['proc'] os.killpg(os.getpgid(proc.pid), signal.SIGTERM) self.resources[job]['status'] = 'CANCELLED' rets = [True for i in job_ids] return rets
1,032,143
Asynchronously execute a commandline string on the shell, without waiting for completion. Args: - cmd (string) : Commandline string to execute - walltime (int) : walltime in seconds, this is not really used now. Returns: - pid : Process id of the spawned process, or None on failure - proc : The subprocess.Popen object, or None on failure Raises: None.
def execute_no_wait(self, cmd, walltime, envs={}): current_env = copy.deepcopy(self._envs) current_env.update(envs) try: proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.userhome, env=current_env, shell=True, preexec_fn=os.setpgrp ) pid = proc.pid except Exception as e: logger.warning("Execution of command [%s] failed due to \n %s", cmd, e) return None, None return pid, proc
1,032,415
Filter given settings to keep only key names available in ``DEFAULT_SETTINGS``. Args: settings (dict): Loaded settings. Returns: dict: Settings object filtered.
def clean(self, settings): return {k: v for k, v in settings.items() if k in DEFAULT_SETTINGS}
1,032,736
Set every given settings as object attributes. Args: settings (dict): Dictionary of settings.
def set_settings(self, settings): for k, v in settings.items(): setattr(self, k, v)
1,032,737
Update object attributes from given settings. Args: settings (dict): Dictionary of elements to update settings. Returns: dict: Dictionary of all current saved settings.
def update(self, settings): settings = self.clean(settings) # Update internal dict self._settings.update(settings) # Push every setting items as class object attributes self.set_settings(settings) return self._settings
1,032,738
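A hedged sketch of how clean(), set_settings() and update() fit together; the DEFAULT_SETTINGS content and the manager instance are hypothetical:
DEFAULT_SETTINGS = {'SOURCES_PATH': None, 'TARGET_PATH': None}  # hypothetical defaults
incoming = {'SOURCES_PATH': 'scss', 'UNKNOWN_KEY': 1}
current = manager.update(incoming)      # clean() silently drops 'UNKNOWN_KEY'
assert manager.SOURCES_PATH == 'scss'   # set_settings() pushed it as an attribute
assert 'UNKNOWN_KEY' not in current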
Initialize DEBUG and DEBUGALL. Allows other modules to set DEBUG and DEBUGALL, so their call to dprint or dprintx generate output. Args: deb1 (bool): value of DEBUG to set deb2 (bool): optional - value of DEBUGALL to set, defaults to False.
def init(deb1, deb2=False): global DEBUG # pylint: disable=global-statement global DEBUGALL # pylint: disable=global-statement DEBUG = deb1 DEBUGALL = deb2
1,032,845
Print text if DEBUGALL is set, optionally with PrettyPrint. Args: passeditem (str): item to print special (bool): determines if item prints with PrettyPrint or regular print.
def dprintx(passeditem, special=False): if DEBUGALL: if special: from pprint import pprint pprint(passeditem) else: print("%s%s%s" % (C_TI, passeditem, C_NORM))
1,032,846
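Usage sketch for the two debug helpers above, assuming the module is imported as debg:
debg.init(True, deb2=True)                    # enable DEBUG and DEBUGALL
debg.dprintx('plain line')                    # printed because DEBUGALL is set
debg.dprintx({'key': 'value'}, special=True)  # routed through pprint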
Get details for instances that match the qry_string. Execute a query against the AWS EC2 client object, that is based on the contents of qry_string. Args: qry_string (str): the query to be used against the aws ec2 client. Returns: qry_results (dict): raw information returned from AWS.
def get_inst_info(qry_string): qry_prefix = "EC2C.describe_instances(" qry_real = qry_prefix + qry_string + ")" qry_results = eval(qry_real) # pylint: disable=eval-used return qry_results
1,033,196
Get Image_Name for each instance in i_info. Args: i_info (dict): information on instances and details. Returns: i_info (dict): i_info is returned with the aminame added for each instance.
def get_all_aminames(i_info): for i in i_info: try: # pylint: disable=maybe-no-member i_info[i]['aminame'] = EC2R.Image(i_info[i]['ami']).name except AttributeError: i_info[i]['aminame'] = "Unknown" return i_info
1,033,197
Get Image_Name for the image_id specified. Args: inst_img_id (str): image_id to get name value from. Returns: aminame (str): name of the image.
def get_one_aminame(inst_img_id): try: aminame = EC2R.Image(inst_img_id).name except AttributeError: aminame = "Unknown" return aminame
1,033,198
Start or Stop the Specified Instance. Args: inst_id (str): instance-id to perform command against cmdtodo (str): command to perform (start or stop) Returns: response (dict): response returned from AWS after performing specified action.
def startstop(inst_id, cmdtodo): tar_inst = EC2R.Instance(inst_id) thecmd = getattr(tar_inst, cmdtodo) response = thecmd() return response
1,033,199
Export ranking to a file. Args: template_file_name (str): path to the template (mustache template) output_file_name (str): path where the ranking file is created sort (str): field to sort the users data: extra data made available to the template (optional) limit (int): maximum number of users to export; 0 means no limit
def export(self, template_file_name, output_file_name, sort="public", data=None, limit=0): exportedData = {} exportedUsers = self.getSortedUsers() template = self.__getTemplate(template_file_name) position = 1 if not limit: exportedData["users"] = exportedUsers else: exportedData["users"] = exportedUsers[:limit] for u in exportedData["users"]: u["position"] = position u["comma"] = position < len(exportedData["users"]) position += 1 exportedData["extraData"] = data renderer = Renderer() output = renderer.render(template, exportedData) with open(output_file_name, "w") as text_file: text_file.write(output)
1,033,216
Get backend engine from given name. Args: name (string): Backend engine name. Raises: boussole.exceptions.SettingsBackendError: If given backend name does not match any available engine. Returns: object: Instance of selected backend engine.
def get_backend_engine(self, name, **kwargs): if name not in self._engines: msg = "Given settings backend is unknown: {}" raise SettingsBackendError(msg.format(name)) return self._engines[name](**kwargs)
1,033,418
Validate that given paths are not the same. Args: *args (string): Paths to validate. Raises: boussole.exceptions.SettingsInvalidError: If there is more than one occurrence of the same path. Returns: bool: ``True`` if paths are validated.
def valid_paths(self, *args): for i, path in enumerate(args, start=0): cp = list(args) current = cp.pop(i) if current in cp: raise SettingsInvalidError("Multiple occurrences found for " "path: {}".format(current)) return True
1,033,419
Commit project structure and configuration file Args: sourcedir (string): Source directory path. targetdir (string): Compiled files target directory path. abs_config (string): Configuration file absolute path. abs_sourcedir (string): ``sourcedir`` expanded as absolute path. abs_targetdir (string): ``targetdir`` expanded as absolute path.
def commit(self, sourcedir, targetdir, abs_config, abs_sourcedir, abs_targetdir): config_path, config_filename = os.path.split(abs_config) if not os.path.exists(config_path): os.makedirs(config_path) if not os.path.exists(abs_sourcedir): os.makedirs(abs_sourcedir) if not os.path.exists(abs_targetdir): os.makedirs(abs_targetdir) # Dump settings file self.backend_engine.dump({ 'SOURCES_PATH': sourcedir, 'TARGET_PATH': targetdir, "LIBRARY_PATHS": [], "OUTPUT_STYLES": "nested", "SOURCE_COMMENTS": False, "EXCLUDES": [] }, abs_config, indent=4)
1,033,421
Initialize app logger to configure its level/handler/formatter/etc. Todo: * A means to raise click.Abort or sys.exit when CRITICAL is used; Args: level (str): Level name (``debug``, ``info``, etc.). Keyword Arguments: printout (bool): If False, logs will never be output. Returns: logging.Logger: Application logger.
def init_logger(level, printout=True): root_logger = logging.getLogger("boussole") root_logger.setLevel(level) # Redirect outputs to the void space, mostly for usage within unittests if not printout: from io import StringIO dummystream = StringIO() handler = logging.StreamHandler(dummystream) # Standard output with colored messages else: handler = logging.StreamHandler() handler.setFormatter( colorlog.ColoredFormatter( '%(asctime)s - %(log_color)s%(message)s', datefmt="%H:%M:%S" ) ) root_logger.addHandler(handler) return root_logger
1,033,441
Apply compile on all dependencies. Args: sourcepath (string): Sass source path to compile to its destination using project settings. Keyword Arguments: include_self (bool): If ``True`` the given sourcepath is added to the items to compile, else only its dependencies are compiled.
def compile_dependencies(self, sourcepath, include_self=False): items = self.inspector.parents(sourcepath) # Also add the current event related path if include_self: items.add(sourcepath) return filter(None, [self.compile_source(item) for item in items])
1,033,535
Called when a file or a directory is moved or renamed. Many editors don't change a file directly; instead they write a transitional file like ``*.part``, then move it to the final filename. Args: event: Watchdog event, either ``watchdog.events.DirMovedEvent`` or ``watchdog.events.FileMovedEvent``.
def on_moved(self, event): if not self._event_error: # We are only interested for final file, not transitional file # from editors (like *.part) pathtools_options = { 'included_patterns': self.patterns, 'excluded_patterns': self.ignore_patterns, 'case_sensitive': self.case_sensitive, } # Apply pathtool matching on destination since Watchdog only # automatically apply it on source if match_path(event.dest_path, **pathtools_options): self.logger.info(u"Change detected from a move on: %s", event.dest_path) self.compile_dependencies(event.dest_path)
1,033,536
Called when a file or directory is modified. Args: event: Watchdog event, ``watchdog.events.DirModifiedEvent`` or ``watchdog.events.FileModifiedEvent``.
def on_modified(self, event): if not self._event_error: self.logger.info(u"Change detected from an edit on: %s", event.src_path) self.compile_dependencies(event.src_path)
1,033,538
Called when a file or directory is deleted. Todo: May be bugged with inspector and sass compiler since the file does not exist anymore. Args: event: Watchdog event, ``watchdog.events.DirDeletedEvent`` or ``watchdog.events.FileDeletedEvent``.
def on_deleted(self, event): if not self._event_error: self.logger.info(u"Change detected from deletion of: %s", event.src_path) # Never try to compile the deleted source self.compile_dependencies(event.src_path, include_self=False)
1,033,539
Convert a traceback (i.e. as returned by `tracebacks()`) into an alignment (i.e. as returned by `align`). Arguments: tb: A traceback. a: the sequence defining the rows in the traceback matrix. b: the sequence defining the columns in the traceback matrix. Returns: An iterable of (index, index) tuples where either (but not both) element can be `None`.
def _traceback_to_alignment(tb, a, b): # We subtract 1 from the indices here because we're translating from the # alignment matrix space (which has one extra row and column) to the space # of the input sequences. for idx, direction in tb: if direction == Direction.DIAG: yield (idx[0] - 1, idx[1] - 1) elif direction == Direction.UP: yield (idx[0] - 1, None) elif direction == Direction.LEFT: yield (None, idx[1] - 1)
1,033,629
Dump settings content to filepath. Args: content (str): Settings content. filepath (str): Settings file location.
def dump(self, content, filepath, indent=4): with open(filepath, 'w') as fp: json.dump(content, fp, indent=indent)
1,033,645
Parse opened settings content using JSON parser. Args: filepath (str): Settings file location, depends on the backend. content (str): Settings content from opened file, depends on the backend. Raises: boussole.exceptions.SettingsBackendError: If parser can not decode a valid JSON object. Returns: dict: Dictionary containing parsed setting elements.
def parse(self, filepath, content): try: parsed = json.loads(content) except ValueError: msg = "No JSON object could be decoded from file: {}" raise SettingsBackendError(msg.format(filepath)) return parsed
1,033,646
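Round-trip sketch for the JSON backend above; backend is a hypothetical instance, and open() is the reader defined later in this section:
backend.dump({'SOURCES_PATH': 'scss'}, '/tmp/settings.json')
content = backend.open('/tmp/settings.json')
settings = backend.parse('/tmp/settings.json', content)
assert settings['SOURCES_PATH'] == 'scss'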
Open a SCSS file (sourcepath) and find all files involved through imports. This will fill the internal buffers ``_CHILDREN_MAP`` and ``_PARENTS_MAP``. Args: sourcepath (str): Source file path to start searching for imports. Keyword Arguments: library_paths (list): List of directory paths for libraries to resolve paths if resolving fails on the base source path. Default to None.
def look_source(self, sourcepath, library_paths=None): # Don't inspect a source again if it has already been inspected as a # child of a previous source if sourcepath not in self._CHILDREN_MAP: with io.open(sourcepath, 'r', encoding='utf-8') as fp: found_paths = self.parse(fp.read()) children = self.resolve(sourcepath, found_paths, library_paths=library_paths) # Those files that are imported by the sourcepath self._CHILDREN_MAP[sourcepath] = children # Those files that import the sourcepath for p in children: self._PARENTS_MAP[p].add(sourcepath) # Start recursive finding through each resolved path that has not # been collected yet for path in children: if path not in self._CHILDREN_MAP: self.look_source(path, library_paths=library_paths) return
1,033,686
Recursively find all children that are imported from the given source path. Args: sourcepath (str): Source file path to search for. Keyword Arguments: recursive (bool): Switch to enable recursive finding (if True). Default to True. Returns: set: Set of found children paths.
def children(self, sourcepath, recursive=True): return self._get_recursive_dependancies( self._CHILDREN_MAP, sourcepath, recursive=recursive )
1,033,689
Recursively find all parents that import the given source path. Args: sourcepath (str): Source file path to search for. Keyword Arguments: recursive (bool): Switch to enable recursive finding (if True). Default to True. Returns: set: Set of found parent paths.
def parents(self, sourcepath, recursive=True): return self._get_recursive_dependancies( self._PARENTS_MAP, sourcepath, recursive=recursive )
1,033,690
Initialize a spor repository in `path` if one doesn't already exist. Args: path: Path to any file or directory within the repository. spor_dir: The name of the directory containing spor data. Returns: A `Repository` instance. Raises: ValueError: A repository already exists at `path`.
def initialize_repository(path, spor_dir='.spor'): path = pathlib.Path(path) spor_path = path / spor_dir if spor_path.exists(): raise ValueError('spor directory already exists: {}'.format(spor_path)) spor_path.mkdir() return Repository(path, spor_dir)
1,033,726
Open an existing repository. Args: path: Path to any file or directory within the repository. spor_dir: The name of the directory containing spor data. Returns: A `Repository` instance. Raises: ValueError: No repository is found.
def open_repository(path, spor_dir='.spor'): root = _find_root_dir(path, spor_dir) return Repository(root, spor_dir)
1,033,727
Get an Anchor by ID. Args: anchor_id: The ID of the anchor to retrieve. Returns: An anchor instance. Raises: KeyError: The anchor can not be found.
def __getitem__(self, anchor_id): file_path = self._anchor_path(anchor_id) try: with file_path.open(mode='rt') as handle: return load_anchor(handle, self.root) except OSError: raise KeyError('No anchor with id {}'.format(anchor_id))
1,033,731
Update an anchor. This will update an existing anchor if it exists, or it will create new storage if not. Args: anchor_id: The ID of the anchor to update. anchor: The anchor to store.
def __setitem__(self, anchor_id, anchor): with self._anchor_path(anchor_id).open(mode='wt') as f: save_anchor(f, anchor, self.root)
1,033,732
Remove an anchor from storage. Args: anchor_id: The ID of the anchor to remove. Raises: KeyError: There is no anchor with that ID.
def __delitem__(self, anchor_id): try: self._anchor_path(anchor_id).unlink() except OSError: raise KeyError('No anchor with id {}'.format(anchor_id))
1,033,733
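Taken together, the three methods above give the repository a dict-like anchor store. A hedged usage sketch (anchor_id and anchor are hypothetical values):
repo = open_repository('.')   # helper defined earlier in this section
repo[anchor_id] = anchor      # __setitem__ creates or updates storage
loaded = repo[anchor_id]      # __getitem__ raises KeyError if missing
del repo[anchor_id]           # __delitem__ raises KeyError if missing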
Patch a path to expand home directory and make absolute path. Args: settings (dict): Current settings. name (str): Setting name. value (str): Path to patch. Returns: str: Patched path to an absolute path.
def _patch_expand_path(self, settings, name, value): if os.path.isabs(value): return os.path.normpath(value) # Expand home directory if any value = os.path.expanduser(value) # If the path is not yet an absolute directory, make it so from base # directory if not empty if not os.path.isabs(value) and self.projectdir: value = os.path.join(self.projectdir, value) return os.path.normpath(value)
1,034,146
Apply ``SettingsPostProcessor._patch_expand_path`` to each element in list. Args: settings (dict): Current settings. name (str): Setting name. value (list): List of paths to patch. Returns: list: Patched path list to an absolute path.
def _patch_expand_paths(self, settings, name, value): return [self._patch_expand_path(settings, name, item) for item in value]
1,034,147
Validate that a path exists. Args: settings (dict): Current settings. name (str): Setting name. value (str): Path to validate. Raises: boussole.exceptions.SettingsInvalidError: If path does not exist. Returns: str: Validated path.
def _validate_path(self, settings, name, value): if not os.path.exists(value): raise SettingsInvalidError("Path from setting '{name}' does not " "exist: {value}".format( name=name, value=value )) return value
1,034,148
Apply ``SettingsPostProcessor._validate_path`` to each element in list. Args: settings (dict): Current settings. name (str): Setting name. value (list): List of paths to validate. Raises: boussole.exceptions.SettingsInvalidError: If any path does not exist. Returns: list: Validated paths.
def _validate_paths(self, settings, name, value): return [self._validate_path(settings, name, item) for item in value]
1,034,149
Validate a required setting (value can not be empty) Args: settings (dict): Current settings. name (str): Setting name. value (str): Required value to validate. Raises: boussole.exceptions.SettingsInvalidError: If value is empty. Returns: str: Validated value.
def _validate_required(self, settings, name, value): if not value: raise SettingsInvalidError(("Required value from setting '{name}' " "must not be " "empty.").format(name=name)) return value
1,034,150
Looks for a configuration file in 3 locations: - the current directory - the user config directory (~/.config/scriptabit) - the version installed with the package (using the setuptools resource API) Args: basename (str): The base filename. Returns: str: The full path to the configuration file, or None if no file was found.
def get_config_file(basename): locations = [ os.path.join(os.curdir, basename), os.path.join( os.path.expanduser("~"), ".config", "scriptabit", basename), resource_filename( Requirement.parse("scriptabit"), os.path.join('scriptabit', basename)) ] for location in locations: if os.path.isfile(location): return location
1,034,325
Copies the default configuration file into the user config directory. Args: basename (str): The base filename. clobber (bool): If True, the default will be written even if a user config already exists. dst_dir (str): The destination directory.
def copy_default_config_to_user_directory( basename, clobber=False, dst_dir='~/.config/scriptabit'): dst_dir = os.path.expanduser(dst_dir) dst = os.path.join(dst_dir, basename) src = resource_filename( Requirement.parse("scriptabit"), os.path.join('scriptabit', basename)) if not os.path.exists(dst_dir): os.makedirs(dst_dir) if clobber or not os.path.isfile(dst): shutil.copy(src, dst)
1,034,326
Export ranking to a file. Args: template_file_name (str): path to the template (mustache template) output_file_name (str): path where the ranking file is created sort (str): field to sort the users data: extra data made available to the template (optional) limit (int): maximum number of users to export; 0 means no limit
def export(self, template_file_name, output_file_name, sort="public", data=None, limit=0): exportedData = {} exportedUsers = self.__exportUsers(sort, limit) exportedData["users"] = exportedUsers exportedData["extraData"] = data with open(template_file_name) as template_file: template_raw = template_file.read() template = parse(template_raw) renderer = Renderer() output = renderer.render(template, exportedData) with open(output_file_name, "w") as text_file: text_file.write(output)
1,034,498
Encode bytes/strings to base64. Args: - ``byte_str``: The string or bytes to base64 encode. Returns: - byte_str encoded as base64.
def enbase64(byte_str): # Python 3: base64.b64encode() expects type byte if isinstance(byte_str, str) and not PYTHON2: byte_str = bytes(byte_str, 'utf-8') return base64.b64encode(byte_str)
1,034,514
Decode base64 encoded bytes/strings. Args: - ``byte_str``: The string or bytes to base64 decode. Returns: - decoded string as type str for python2 and type byte for python3.
def debase64(byte_str): # Python 3: base64.b64decode() expects type byte if isinstance(byte_str, str) and not PYTHON2: byte_str = bytes(byte_str, 'utf-8') return base64.b64decode(byte_str)
1,034,515
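The two helpers above round-trip cleanly on Python 3:
encoded = enbase64('hello')   # b'aGVsbG8='
decoded = debase64(encoded)   # b'hello'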
Given a password, hash, and salt, this function verifies that the password matches the hash/salt. Args: - ``password``: The password to perform the check on. - ``password_hash``: The stored hash to compare against. - ``salt``: The salt used when the stored hash was generated. Returns: - ``bool``
def check_password_hash(password, password_hash, salt, N=1 << 14, r=8, p=1, buflen=64): candidate_hash = generate_password_hash(password, salt, N, r, p, buflen) return safe_str_cmp(password_hash, candidate_hash)
1,034,517
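A hedged verification sketch; stored_hash and stored_salt are assumed to come from an earlier generate_password_hash call at registration time:
is_valid = check_password_hash('s3cret', stored_hash, stored_salt)  # True only on a match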
Unquote given rule. Args: content (str): An import rule. Raises: InvalidImportRule: Raised if the rule is badly quoted (quote opened but not closed, or closed but not opened). Returns: string: The given rule unquoted.
def strip_quotes(self, content): error_msg = "Following rule is badly quoted: {}" if (content.startswith('"') and content.endswith('"')) or \ (content.startswith("'") and content.endswith("'")): return content[1:-1] # Quote starting but not ended elif (content.startswith('"') and not content.endswith('"')) or \ (content.startswith("'") and not content.endswith("'")): raise InvalidImportRule(error_msg.format(content)) # Quote ending but not started elif (not content.startswith('"') and content.endswith('"')) or \ (not content.startswith("'") and content.endswith("'")): raise InvalidImportRule(error_msg.format(content)) return content
1,034,757
Flatten import rules returned from the regex. Because an import rule can contain multiple items in the same rule (called a multiline import rule), the regex ``REGEX_IMPORT_RULE`` returns a list of unquoted items for each rule. Args: declarations (list): List of (protocole, paths) tuples returned by the regex. Returns: list: Flat list of unquoted rule paths.
def flatten_rules(self, declarations): rules = [] for protocole, paths in declarations: # If there is a protocole (like 'url'), drop it if protocole: continue # Unquote and possibly split multiple rules in the same declaration rules.extend([self.strip_quotes(v.strip()) for v in paths.split(',')]) return list(filter(self.filter_rules, rules))
1,034,758
Parse a stylesheet document with a regex (``REGEX_IMPORT_RULE``) to extract all import rules and return them. Args: content (str): A SCSS source. Returns: list: Found paths in import rules.
def parse(self, content): # Remove all comments before searching for import rules, so commented # broken import rules are not caught declarations = self.REGEX_IMPORT_RULE.findall( self.remove_comments(content) ) return self.flatten_rules(declarations)
1,034,759
Check if file is a Sass partial source (see `Sass partials Reference`_). Args: filepath (str): A file path. Can be absolute, relative or just a filename. Returns: bool: True if file is a partial source, else False.
def is_partial(self, filepath): path, filename = os.path.split(filepath) return filename.startswith('_')
1,034,809
Change final filename extension. Args: filepath (str): A file path (relative or absolute). new_extension (str): New extension name (without leading dot) to apply. Returns: str: Filepath with new extension.
def change_extension(self, filepath, new_extension): filename, ext = os.path.splitext(filepath) return '.'.join([filename, new_extension])
1,034,812
Return destination path from given source file path. Destination is always a file with extension ``.css``. Args: filepath (str): A file path, always relative to the sources directory. If not relative, ``targetdir`` won't be joined. targetdir (str): Optional target directory joined at the beginning of the destination path. Returns: str: Destination filepath.
def get_destination(self, filepath, targetdir=None): dst = self.change_extension(filepath, 'css') if targetdir: dst = os.path.join(targetdir, dst) return dst
1,034,813
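Worked example for the two path helpers above; processor stands for any hypothetical instance exposing them:
processor.change_extension('components/app.scss', 'css')            # 'components/app.css'
processor.get_destination('components/app.scss', targetdir='/css')  # '/css/components/app.css'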
Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser.
def cmd_list(options): (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str))
1,034,869
Start or Stop the specified instance. Finds instances that match args and the instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the return information is displayed. Args: options (object): contains args and data from parser.
def cmd_startstop(options): statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM))
1,034,870
Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. The target instance is determined, the required connection information is retrieved (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. Args: options (object): contains args and data from parser
def cmd_ssh(options): import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True)
1,034,871
Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name.
def cmd_ssh_user(tar_aminame, inst_name): if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username
1,034,872
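Quick checks for the username lookup above (the debg module must be initialized first):
cmd_ssh_user('ubuntu/images/hvm-ssd/ubuntu-xenial-16.04', 'web-1')  # -> 'ubuntu'
cmd_ssh_user('my-custom-image', 'web-2')                            # -> 'ec2-user' fallback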
Generate dictionary of results from query. Decodes the large dict returned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details.
def process_results(qry_results): i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info
1,034,874
Create query from the args specified and command chosen. Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. param_str (str): the title to display before the list.
def qry_create(options): qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str)
1,034,876
Display a list of all instances and their details. Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates whether the list should be displayed with numbers before each instance. This is used when called from user_picklist.
def list_instances(i_info, param_str, numbered=False): print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True)
1,034,878
Display list of instances matching args and ask user to select target. Instance list displayed and user asked to enter the number corresponding to the desired target instance, or '0' to abort. Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance.
def user_picklist(i_info, command): valid_entry = False awsc.get_all_aminames(i_info) list_instances(i_info, "", True) msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})" " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI, command, len(i_info))) while not valid_entry: entry_raw = obtain_input(msg_txt) try: entry_int = int(entry_raw) except ValueError: entry_int = 999 (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command) return tar_idx
1,034,881
Prepare compliant, normalized metadata from inputs. Args: kwargs: key-value pairs for metadata fields. Raises: InvalidDatalakeMetadata if required fields are missing and cannot be inferred.
def __init__(self, *args, **kwargs): # we want to own all of our bits so we can normalize them without # altering the caller's data unexpectedly. So deepcopy. args = deepcopy(args) kwargs = deepcopy(kwargs) super(Metadata, self).__init__(*args, **kwargs) self._ensure_id() self._ensure_version() self._validate() self._normalize_dates() self._validate_interval()
1,034,932
Write given content to destination path. It will create the needed directory structure first if it contains directories that do not already exist. Args: content (str): Content to write to target file. destination (str): Destination path for target file. Returns: str: Path where target file has been written.
def write_content(self, content, destination): directory = os.path.dirname(destination) if directory and not os.path.exists(directory): os.makedirs(directory) with io.open(destination, 'w', encoding='utf-8') as f: f.write(content) return destination
1,035,160
Check and return the final filepath to settings. Args: path (str): Directory path where to search for settings file. filename (str): Filename to use to search for settings file. Raises: boussole.exceptions.SettingsBackendError: If determined filepath does not exist or is a directory. Returns: string: Settings file path, joining given path and filename.
def check_filepath(self, path, filename): settings_path = os.path.join(path, filename) if not os.path.exists(settings_path) or \ not os.path.isfile(settings_path): msg = "Unable to find settings file: {}" raise SettingsBackendError(msg.format(settings_path)) return settings_path
1,035,230
Open settings backend to return its content. Args: filepath (str): Settings file location, depends on the backend. Returns: string: File content.
def open(self, filepath): with io.open(filepath, 'r', encoding='utf-8') as fp: content = fp.read() return content
1,035,231
Initializes a new instance of the :see AsarArchive class. Args: filename (str): The path to the *.asar file to read/write from/to. asarfile (File): An open *.asar file object. files (dict): Dictionary of files contained in the archive (the header that was read from the file). baseoffset (int): Base offset, indicates where in the file the header ends.
def __init__(self, filename, asarfile, files, baseoffset): self.filename = filename self.asarfile = asarfile self.files = files self.baseoffset = baseoffset
1,035,470
Extracts the contents of the archive to the specified directory. Args: destination (str): Path to an empty directory to extract the files to.
def extract(self, destination): if os.path.exists(destination): raise OSError(20, 'Destination exists', destination) self.__extract_directory( '.', self.files['files'], destination )
1,035,471
Extracts a single directory to the specified directory on disk. Args: path (str): Relative (to the root of the archive) path of the directory to extract. files (dict): A dictionary of files from a *.asar file header. destination (str): The path to extract the files to.
def __extract_directory(self, path, files, destination): # assures the destination directory exists destination_path = os.path.join(destination, path) if not os.path.exists(destination_path): os.makedirs(destination_path) for name, contents in files.items(): item_path = os.path.join(path, name) # objects that have a 'files' member are directories, # recurse into them if 'files' in contents: self.__extract_directory( item_path, contents['files'], destination ) continue self.__extract_file(item_path, contents, destination)
1,035,472
Extracts the specified file to the specified destination. Args: path (str): Relative (to the root of the archive) path of the file to extract. fileinfo (dict): Dictionary containing the offset and size of the file (Extracted from the header). destination (str): Directory to extract the archive to.
def __extract_file(self, path, fileinfo, destination): if 'offset' not in fileinfo: self.__copy_extracted(path, destination) return self.asarfile.seek( self.__absolute_offset(fileinfo['offset']) ) # the size is a plain byte count, not an offset, so it must not be # translated through __absolute_offset # TODO: read in chunks, ain't going to read multiple GB's in memory contents = self.asarfile.read( int(fileinfo['size']) ) destination_path = os.path.join(destination, path) with open(destination_path, 'wb') as fp: fp.write(contents) LOGGER.debug('Extracted %s to %s', path, destination_path)
1,035,473
Copies a file that was already extracted to the destination directory. Args: path (str): Relative (to the root of the archive) path of the file to copy. destination (str): Directory to extract the archive to.
def __copy_extracted(self, path, destination): unpacked_dir = self.filename + '.unpacked' if not os.path.isdir(unpacked_dir): LOGGER.warning( 'Failed to copy extracted file %s, no extracted dir', path ) return source_path = os.path.join(unpacked_dir, path) if not os.path.exists(source_path): LOGGER.warning( 'Failed to copy extracted file %s, does not exist', path ) return destination_path = os.path.join(destination, path) shutil.copyfile(source_path, destination_path)
1,035,474
Opens a *.asar file and constructs a new :see AsarArchive instance. Args: filename (str): Path to the *.asar file to open for reading. Returns (AsarArchive): An instance of the :see AsarArchive class or None if reading failed.
def open(cls, filename): asarfile = open(filename, 'rb') # uses google's pickle format, which prefixes each field # with its total length, the first field is a 32-bit unsigned # integer, thus 4 bytes, we know that, so we skip it asarfile.seek(4) header_size = struct.unpack('I', asarfile.read(4)) if len(header_size) <= 0: raise IndexError() # subtract 8 bytes from the header size, again because google's # pickle format uses some padding here header_size = header_size[0] - 8 # read the actual header, which is a json string, again skip 8 # bytes because of pickle padding asarfile.seek(asarfile.tell() + 8) header = asarfile.read(header_size).decode('utf-8') files = json.loads(header) return cls(filename, asarfile, files, asarfile.tell())
1,035,476
Check that at least one candidate exists in a directory. Args: basepath (str): Directory path where to search for candidates. candidates (list): List of candidate file paths. Returns: list: List of existing candidates.
def check_candidate_exists(self, basepath, candidates): checked = [] for item in candidates: abspath = os.path.join(basepath, item) if os.path.exists(abspath): checked.append(abspath) return checked
1,035,694
Dump settings content to filepath. Args: content (str): Settings content. filepath (str): Settings file location.
def dump(self, content, filepath, indent=4): with open(filepath, 'w') as fp: pyaml.dump(content, dst=fp, indent=indent)
1,035,696
Parse opened settings content using YAML parser. Args: filepath (str): Settings file location, depends on the backend. content (str): Settings content from opened file, depends on the backend. Raises: boussole.exceptions.SettingsBackendError: If parser can not decode a valid YAML object. Returns: dict: Dictionary containing parsed setting elements.
def parse(self, filepath, content): try: # safe_load avoids arbitrary object construction from untrusted files parsed = yaml.safe_load(content) except yaml.YAMLError as exc: msg = "No YAML object could be decoded from file: {}\n{}" raise SettingsBackendError(msg.format(filepath, exc)) return parsed
1,035,697
Get the header lines of a vcf file Args: source(iterable): A vcf file Returns: head (HeaderParser): A headerparser object
def get_vcf_header(source): head = HeaderParser() #Parse the header lines for line in source: line = line.rstrip() if line.startswith('#'): if line.startswith('##'): logger.debug("Found metadata line {0}".format(line)) head.parse_meta_data(line) else: logger.debug("Found header line {0}".format(line)) head.parse_header_line(line) else: break return head
1,035,730
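Usage sketch for the header parser above (the VCF path is hypothetical):
with open('sample.vcf') as vcf:
    head = get_vcf_header(vcf)
# The loop reads up to and including the first non-header line, so re-open
# or buffer the file if every variant line is needed afterwards.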
Constructor. Args: xml (str/file, default None): XML to be parsed. May be file-like object. resort (bool, default True): Sort the output alphabetically?
def __init__(self, xml=None, resort=True): self.leader = None self.oai_marc = False self.controlfields = OrderedDict() self.datafields = OrderedDict() self.valid_i_chars = set(list(" 0123456789*")) # resort output XML alphabetically self.resorted = tools.resorted if resort else lambda x: x # handle file-like objects if hasattr(xml, "read"): xml = xml.read() # it is always possible to create blank object and add values into it # piece by piece using .add_ctl_field()/.add_data_field() methods. if xml is not None: self._original_xml = xml self._parse_string(xml)
1,035,738
Parse MARC XML document to dicts, which are contained in self.controlfields and self.datafields. Args: xml (str or HTMLElement): input data Also detect if this is OAI MARC format or not (see self.oai_marc).
def _parse_string(self, xml): if not isinstance(xml, HTMLElement): xml = dhtmlparser.parseString(str(xml)) # check if there are any records record = xml.find("record") if not record: raise ValueError("There is no <record> in your MARC XML document!") record = record[0] self.oai_marc = len(record.find("oai_marc")) > 0 # leader is separate only in marc21 if not self.oai_marc: leader = record.find("leader") if len(leader) >= 1: self.leader = leader[0].getContent() # parse body in respect of OAI MARC format possibility if self.oai_marc: self._parse_control_fields(record.find("fixfield"), "id") self._parse_data_fields(record.find("varfield"), "id", "label") else: self._parse_control_fields(record.find("controlfield"), "tag") self._parse_data_fields(record.find("datafield"), "tag", "code") # for backward compatibility of MARC XML with OAI if self.oai_marc and "LDR" in self.controlfields: self.leader = self.controlfields["LDR"]
1,035,739
Parse control fields. Args: fields (list): list of HTMLElements tag_id (str): name of the parameter which holds the field name; this is normally "tag", but "id" in case of oai_marc.
def _parse_control_fields(self, fields, tag_id="tag"): for field in fields: params = field.params # skip tags without parameters if tag_id not in params: continue self.controlfields[params[tag_id]] = field.getContent().strip()
1,035,740
Parse data fields. Args: fields (list): list of HTMLElements tag_id (str): name of the parameter which holds the field name; normally "tag", but "id" in case of oai_marc sub_id (str): name of the parameter which holds the subfield name; normally "code", but "label" in case of oai_marc
def _parse_data_fields(self, fields, tag_id="tag", sub_id="code"): for field in fields: params = field.params if tag_id not in params: continue # take care of iX/indX (indicator) parameters field_repr = OrderedDict([ [self.i1_name, params.get(self.i1_name, " ")], [self.i2_name, params.get(self.i2_name, " ")], ]) # process all subfields for subfield in field.find("subfield"): if sub_id not in subfield.params: continue content = MARCSubrecord( val=subfield.getContent().strip(), i1=field_repr[self.i1_name], i2=field_repr[self.i2_name], other_subfields=field_repr ) # add or append content to list of other contents code = subfield.params[sub_id] if code in field_repr: field_repr[code].append(content) else: field_repr[code] = [content] tag = params[tag_id] if tag in self.datafields: self.datafields[tag].append(field_repr) else: self.datafields[tag] = [field_repr]
1,035,741
This method is used mainly internally, but it can be handy if you work with raw MARC XML objects and are not using getters. Args: num (int): Which indicator you need (1/2). is_oai (bool/None): If None, :attr:`.oai_marc` is used. Returns: str: current name of ``i1``/``ind1`` parameter based on :attr:`oai_marc` property.
def get_i_name(self, num, is_oai=None): if num not in (1, 2): raise ValueError("`num` parameter has to be 1 or 2!") if is_oai is None: is_oai = self.oai_marc i_name = "ind" if not is_oai else "i" return i_name + str(num)
1,035,744
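Illustration of the indicator naming above; record is a hypothetical parsed instance:
record.get_i_name(1)               # 'ind1' for MARC XML, 'i1' for OAI MARC
record.get_i_name(2, is_oai=True)  # 'i2', regardless of the parsed dialect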
Method wrapper over :attr:`.controlfields` dictionary. Args: controlfield (str): Name of the controlfield. alt (object, default None): Alternative value of the `controlfield` when `controlfield` couldn't be found. Returns: str: record from given `controlfield`
def get_ctl_field(self, controlfield, alt=None): if not alt: return self.controlfields[controlfield] return self.controlfields.get(controlfield, alt)
1,035,745
report scores and give a winner |methcoro| Args: winner: :class:Participant instance scores_csv: Comma separated set/game scores with player 1 score first (e.g. "1-3,3-0,3-2") Raises: ValueError: scores_csv has a wrong format APIException
async def report_winner(self, winner: Participant, scores_csv: str): await self._report(scores_csv, winner._id)
1,035,890
add a file as an attachment |methcoro| Warning: |unstable| Args: file_path: path to the file you want to add description: *optional* description for your attachment Returns: Attachment: Raises: ValueError: file_path must not be None APIException
async def attach_file(self, file_path: str, description: str = None) -> Attachment: with open(file_path, 'rb') as f: return await self._attach(f.read(), description)
1,035,894
add an url as an attachment |methcoro| Args: url: url you want to add description: *optional* description for your attachment Returns: Attachment: Raises: ValueError: url must not be None APIException
async def attach_url(self, url: str, description: str = None) -> Attachment: return await self._attach(url=url, description=description)
1,035,895
destroy a match attachment |methcoro| Args: a: the attachment you want to destroy Raises: APIException
async def destroy_attachment(self, a: Attachment): await self.connection('DELETE', 'tournaments/{}/matches/{}/attachments/{}'.format(self._tournament_id, self._id, a._id)) if a in self.attachments: self.attachments.remove(a)
1,035,896
Find the aggregated subobjects of an object. These are the public attributes. Args: class_: The class whose subobjects to return. Yields: Tuples (name, type, required) describing subobjects.
def class_subobjects( class_: Type) -> Generator[Tuple[str, Type, bool], None, None]: argspec = inspect.getfullargspec(class_.__init__) defaults = argspec.defaults if argspec.defaults else [] num_optional = len(defaults) first_optional = len(argspec.args) - num_optional for i, attr_name in enumerate(argspec.args): if attr_name == 'self': continue if attr_name == 'yatiml_extra': continue attr_type = argspec.annotations.get(attr_name, Any) yield attr_name, attr_type, i < first_optional
1,035,905
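A small self-contained sketch of class_subobjects, assuming the function and its imports are in scope; it yields (name, type, required) triples:
class Point:
    def __init__(self, x: int, y: int, label: str = '') -> None:
        self.x, self.y, self.label = x, y, label

list(class_subobjects(Point))
# -> [('x', int, True), ('y', int, True), ('label', str, False)]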
__init__ Defines attributes for Personator Object. Args: custID (str): ID for Melissa Data account
def __init__(self, custID): self.custID = custID self.addr1 = None self.addr2 = None self.city = None self.postal = None self.province = None self.country = None self.name = None self.phone = None self.recordID = None
1,036,042
parse_results Parses the MelissaData response. Args: data (dict): Contains MelissaData response Returns: list of result codes from the response, or -1 for an invalid address.
def parse_results(self, data): results = [] if len(data["Records"]) < 1: return -1 codes = data["Records"][0]["Results"] for code in codes.split(","): results.append(str(code)) self.addr1 = data["Records"][0]["AddressLine1"] self.addr2 = data["Records"][0]["AddressLine2"] self.city = data["Records"][0]["City"] self.name = data["Records"][0]["NameFull"] self.phone = data["Records"][0]["PhoneNumber"] self.province = data["Records"][0]["State"] self.postal = data["Records"][0]["PostalCode"] self.recordID = data["Records"][0]["RecordID"] return results
1,036,044
Executes the given function with the given models. Args: models: models to execute func: name of the function to execute Returns:
def creating_schema_and_index(self, models, func): waiting_models = [] self.base_thread.do_with_submit(func, models, waiting_models, threads=self.threads) if waiting_models: print("WAITING MODELS ARE CHECKING...") self.creating_schema_and_index(waiting_models, func)
1,036,281
Creates search schemas. Args: model: model to execute waiting_models: if Riak can't return a response immediately, the model is queued. After the first execution pass, the method is run again with the waiting models to ensure that all given models are executed properly. Returns:
def create_schema(self, model, waiting_models): bucket_name = model._get_bucket_name() index_name = "%s_%s" % (settings.DEFAULT_BUCKET_TYPE, bucket_name) ins = model(fake_context) fields = self.get_schema_fields(ins._collect_index_fields()) new_schema = self.compile_schema(fields) schema = get_schema_from_solr(index_name) if not (schema == new_schema): try: client.create_search_schema(index_name, new_schema) print("+ %s (%s) search schema is created." % (model.__name__, index_name)) except Exception: print("+ %s (%s) search schema checking operation is taken to queue." % ( model.__name__, index_name)) waiting_models.append(model)
1,036,282
Creates search indexes. Args: model: model to execute waiting_models: if Riak can't return a response immediately, the model is queued. After the first execution pass, the method is run again with the waiting models to ensure that all given models are executed properly. Returns:
def create_index(self, model, waiting_models): bucket_name = model._get_bucket_name() bucket_type = client.bucket_type(settings.DEFAULT_BUCKET_TYPE) index_name = "%s_%s" % (settings.DEFAULT_BUCKET_TYPE, bucket_name) bucket = bucket_type.bucket(bucket_name) try: client.get_search_index(index_name) if not (bucket.get_property('search_index') == index_name): bucket.set_property('search_index', index_name) print("+ %s (%s) search index is created." % (model.__name__, index_name)) except RiakError: try: client.create_search_index(index_name, index_name, self.n_val) bucket.set_property('search_index', index_name) print("+ %s (%s) search index is created." % (model.__name__, index_name)) except RiakError: print("+ %s (%s) search index checking operation is taken to queue." % ( model.__name__, index_name)) waiting_models.append(model)
1,036,283
Iterate over all ``<record>`` tags in `xml`. Args: xml (str/file): Input string with XML. UTF-8 is the preferred encoding, unicode should be ok. Yields: MARCXMLRecord: For each corresponding ``<record>``.
def record_iterator(xml): # handle file-like objects if hasattr(xml, "read"): xml = xml.read() dom = None try: dom = dhtmlparser.parseString(xml) except UnicodeError: dom = dhtmlparser.parseString(xml.encode("utf-8")) for record_xml in dom.findB("record"): yield MARCXMLRecord(record_xml)
1,036,287
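Usage sketch for record_iterator above (the file path is hypothetical):
with open('marc_dump.xml') as f:
    for record in record_iterator(f):
        print(record.leader)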
Saves the model instance to Riak. Args: meta (dict): JSON serializable meta data for logging of save operation. {'lorem': 'ipsum', 'dolar': 5} index_fields (list): Tuple list for secondary indexing keys in Riak (with 'bin' or 'int'). [('lorem','bin'),('dolar','int')] :return:
def save_model(self, model, meta_data=None, index_fields=None): return self.adapter.save_model(model, meta_data, index_fields)
1,036,298
Creates a model instance with the given data. Args: data: Model data returned from DB. key: Object key Returns: pyoko.Model object.
def _make_model(self, data, key=None): if data['deleted'] and not self.adapter.want_deleted: raise ObjectDoesNotExist('Deleted object returned') model = self._model_class(self._current_context, _pass_perm_checks=self._pass_perm_checks) model.setattr('key', ub_to_str(key) if key else ub_to_str(data.get('key'))) model = model.set_data(data, from_db=True) model._initial_data = model.clean_value() return model
1,036,299
Applies query filters for excluding matching records from result set. Args: **filters: Query filters as keyword arguments. Returns: Self. Queryset object. Examples: >>> Person.objects.exclude(age=None) >>> Person.objects.filter(name__startswith='jo').exclude(age__lte=16)
def exclude(self, **filters): exclude = {'-%s' % key: value for key, value in filters.items()} return self.filter(**exclude)
1,036,302
Deletes an object if it exists in the database according to given query parameters and returns True; otherwise does nothing and returns False. Args: **kwargs: query parameters Returns (bool): True or False
def delete_if_exists(self, **kwargs): try: self.get(**kwargs).blocking_delete() return True except ObjectDoesNotExist: return False
1,036,304
Returns list of dicts (field names as keys) for given fields. Args: \*args: List of fields to be returned as dict. Returns: list of dicts for given fields. Example: >>> Person.objects.filter(age__gte=16, name__startswith='jo').values('name', 'lastname')
def values(self, *args): return [dict(zip(args, values_list)) for values_list in self.values_list(flatten=False, *args)]
1,036,309