Check login status.

Returns:
    True: Success
    False: Failed
def checkStatus(self):
    self.checkAccount()
    data = {'userid': self.user_id, 'useridx': self.useridx}
    r = self.session.post(nurls['checkStatus'], data=data)
    p = re.compile(r'<message>(?P<message>.+)</message>')
    message = p.search(r.text).group('message')
    return message == 'success'
getDiskSpace

Checks whether a file of the given size can be uploaded.

Args:
    file_path: Full path of the file you want to check.
    upload_path: Ndrive path where you want to upload the file, e.g. /Picture/.

Returns:
    True: Possible to upload a file with the given file size.
    False: Impossible to upload a file with the given file size.
def getDiskSpace(self, file_path, upload_path='', overwrite=False):
    self.checkAccount()
    file_size = os.stat(file_path).st_size
    file_name = os.path.basename(file_path)
    now = datetime.datetime.now().isoformat()
    data = {'userid': self.user_id,
            'useridx': self.useridx,
            'getlastmodified': now,
            'dstresource': upload_path + file_name,
            'overwrite': overwrite,
            'uploadsize': file_size,
            }
    r = self.session.post(nurls['getDiskSpace'], data=data)
    return self.resultManager(r.text)
PUT

Args:
    file_path: Full path of the file you want to upload.
    upload_path: Ndrive path where you want to upload the file, e.g. /Picture/.

Returns:
    True: Upload success
    False: Upload failed
def put(self, file_path, upload_path=''):
    with open(file_path, "rb") as f:  # binary mode: the payload may not be text
        c = f.read()
    file_name = os.path.basename(file_path)
    now = datetime.datetime.now().isoformat()
    url = nurls['put'] + upload_path + file_name
    headers = {'userid': self.user_id,
               'useridx': self.useridx,
               'MODIFYDATE': now,
               'Content-Type': magic.from_file(file_path, mime=True),
               'charset': 'UTF-8',
               'Origin': 'http://ndrive2.naver.com',
               }
    r = self.session.put(url=url, data=c, headers=headers)
    return self.resultManager(r.text)
DELETE

Args:
    file_path: Full Ndrive path of the file you want to delete, e.g. /Picture/flower.png.

Returns:
    True: Delete success
    False: Delete failed
def delete(self, file_path): now = datetime.datetime.now().isoformat() url = nurls['put'] + upload_path + file_name headers = {'userid': self.user_id, 'useridx': self.useridx, 'Content-Type': "application/x-www-form-urlencoded; charset=UTF-8", 'charset': 'UTF-8', 'Origin': 'http://ndrive2.naver.com', } r = self.session.delete(url = url, headers = headers) return self.resultManager(r.text)
DoMove

Args:
    dummy: ???
    orgresource: Path of the file you want to move.
    dstresource: Destination path.
    bShareFireCopy: ???

Returns:
    True: Move success
    False: Move failed
def doMove(self, orgresource, dstresource, dummy=56184, overwrite='F', bShareFireCopy='false'):
    # NOTE: the original signature had an unused 'stresource' parameter while
    # the request body referenced an undefined 'overwrite'; presumably they
    # were meant to be the same parameter.
    url = nurls['doMove']
    data = {'userid': self.user_id,
            'useridx': self.useridx,
            'dummy': dummy,
            'orgresource': orgresource,
            'dstresource': dstresource,
            'overwrite': overwrite,
            'bShareFireCopy': bShareFireCopy,
            }
    r = self.session.post(url=url, data=data)
    try:
        json.loads(r.text)
    except ValueError:
        print('[*] Success checkUpload: 0 result')
        return False
    return self.resultManager(r.text)
GetProperty

Args:
    dummy: ???
    orgresource: File path.

Returns:
    FileInfo object: Properties of the file.
    False: Failed to get the property.
def getProperty(self, orgresource, dummy=56184):
    url = nurls['getProperty']
    data = {'userid': self.user_id,
            'useridx': self.useridx,
            'dummy': dummy,
            'orgresource': orgresource,
            }
    r = self.session.post(url=url, data=data)
    j = json.loads(r.text)
    if self.resultManager(r.text):
        f = FileInfo()
        result = j['resultvalue']
        f.resourcetype = result['resourcetype']
        f.resourceno = result['resourceno']
        return f
    else:
        return False
GetVersionListCount

Args:
    orgresource: File path.

Returns:
    Integer number: Number of entries in the version list.
    False: Failed to get the count.
def getVersionListCount(self, orgresource):
    url = nurls['getVersionListCount']
    data = {'userid': self.user_id,
            'useridx': self.useridx,
            'orgresource': orgresource,
            }
    r = self.session.post(url=url, data=data)
    j = json.loads(r.text)
    if j['message'] != 'success':
        print("[*] Error getVersionListCount: " + j['message'])
        return False
    return int(j['resultvalue']['count'])
SetProperty

Args:
    orgresource: File path.
    protect: 'Y' or 'N', marks the file as important.

Returns:
    True: Property set successfully.
    False: Failed to set the property.
def setProperty(self, orgresource, protect, dummy=7046):
    url = nurls['setProperty']
    data = {'userid': self.user_id,
            'useridx': self.useridx,
            'orgresource': orgresource,
            'protect': protect,
            'dummy': dummy,
            }
    r = self.session.post(url=url, data=data)
    return self.resultManager(r.text)
GetMusicAlbumList

Args:
    tagtype: ???
    startnum: Index of the first result.
    pagingrow: Number of results per page.

Returns:
    ???
    False: Failed to get the list.
def getMusicAlbumList(self, tagtype=0, startnum=0, pagingrow=100):
    # NOTE: this posts to nurls['setProperty'], which looks like a copy-paste
    # slip; the correct endpoint key is not recoverable from the source.
    url = nurls['setProperty']
    data = {'userid': self.user_id,
            'useridx': self.useridx,
            'tagtype': tagtype,
            'startnum': startnum,
            'pagingrow': pagingrow,
            }
    r = self.session.post(url=url, data=data)
    return self.resultManager(r.text)
Normalize a sequence of values via rank and Normal c.d.f.

Args:
    x (array_like): sequence of values.

Returns:
    Gaussian-normalized values.

Example:

    .. doctest::

        >>> from scipy_sugar.stats import quantile_gaussianize
        >>> print(quantile_gaussianize([-1, 0, 2]))
        [-0.67448975  0.          0.67448975]
def quantile_gaussianize(x):
    from numpy import asarray, empty_like, isfinite
    from scipy.stats import norm, rankdata

    x = asarray(x, float).copy()
    ok = isfinite(x)
    x[ok] *= -1
    y = empty_like(x)
    y[ok] = rankdata(x[ok])
    y[ok] = norm.isf(y[ok] / (sum(ok) + 1))
    y[~ok] = x[~ok]
    return y
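Since non-finite entries are passed through untouched (y[~ok] = x[~ok]), the function can be applied directly to data containing NaNs. A small usage sketch (input values made up for illustration):

import numpy as np

values = np.array([3.0, np.nan, 1.0, 2.0])
normalized = quantile_gaussianize(values)
# Finite entries are replaced by Gaussian quantiles of their ranks;
# the NaN stays in place.
print(normalized)  # e.g. [ 0.67448975         nan -0.67448975  0.        ]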
Writes the voevent to the file object.

e.g.::

    with open('/tmp/myvoevent.xml', 'wb') as f:
        voeventparse.dump(v, f)

Args:
    voevent(:class:`Voevent`): Root node of the VOevent etree.
    file (io.IOBase): An open (binary mode) file object for writing.
    pretty_print(bool): See :func:`dumps`.
    xml_declaration(bool): See :func:`dumps`.
def dump(voevent, file, pretty_print=True, xml_declaration=True):
    file.write(dumps(voevent, pretty_print, xml_declaration))
Tests if a voevent conforms to the schema.

Args:
    voevent(:class:`Voevent`): Root node of a VOEvent etree.

Returns:
    bool: Whether the VOEvent is valid.
def valid_as_v2_0(voevent):
    _return_to_standard_xml(voevent)
    valid_bool = voevent_v2_0_schema.validate(voevent)
    _remove_root_tag_prefix(voevent)
    return valid_bool
Sets the minimal 'Who' attributes: date of authoring, AuthorIVORN.

Args:
    voevent(:class:`Voevent`): Root node of a VOEvent etree.
    date(datetime.datetime): Date of authoring. NB microseconds are
        ignored, as per the VOEvent spec.
    author_ivorn(str): Short author identifier, e.g.
        ``voevent.4pisky.org/ALARRM``. Note that the prefix ``ivo://``
        will be prepended internally.
def set_who(voevent, date=None, author_ivorn=None):
    if author_ivorn is not None:
        voevent.Who.AuthorIVORN = ''.join(('ivo://', author_ivorn))
    if date is not None:
        voevent.Who.Date = date.replace(microsecond=0).isoformat()
Add descriptions or references to the How section.

Args:
    voevent(:class:`Voevent`): Root node of a VOEvent etree.
    descriptions(str): Description string, or list of description strings.
    references(:py:class:`voeventparse.misc.Reference`): A reference
        element (or list thereof).
def add_how(voevent, descriptions=None, references=None):
    if not voevent.xpath('How'):
        etree.SubElement(voevent, 'How')
    if descriptions is not None:
        for desc in _listify(descriptions):
            etree.SubElement(voevent.How, 'Description')
            voevent.How.Description[-1] = desc
    if references is not None:
        voevent.How.extend(_listify(references))
Add citations to other voevents.

The schema mandates that the 'Citations' section must either be
entirely absent, or non-empty - hence we require this wrapper function
for its creation prior to listing the first citation.

Args:
    voevent(:class:`Voevent`): Root node of a VOEvent etree.
    event_ivorns (:class:`voeventparse.misc.EventIvorn`): List of
        EventIvorn elements to add to the citation list.
def add_citations(voevent, event_ivorns):
    if not voevent.xpath('Citations'):
        etree.SubElement(voevent, 'Citations')
    voevent.Citations.extend(_listify(event_ivorns))
Make a share url of a directory.

>>> nd.makeShareUrl('/Picture/flower.png', PASSWORD)

Args:
    full_path: The full path of the directory to get a share url for.
        Should end with '/', e.g. /folder/.
    passwd: Access password for the shared directory.

Returns:
    URL: Share url for the directory.
    False: Failed to share the directory.
def makeShareUrl(self, full_path, passwd):
    if full_path[-1] != '/':
        full_path += '/'
    data = {'_callback': 'window.__jindo_callback._347',
            'path': full_path,
            'passwd': passwd,
            'userid': self.user_id,
            'useridx': self.useridx,
            }
    s, metadata = self.GET('shareUrl', data)
    if s:
        print("URL: %s" % metadata['href'])
        return metadata['href']
    else:
        print("Error makeShareUrl: %s" % metadata)
        return False
Checks whether the given files have bit for bit solution matches on the
given variable list.

Args:
    model_path: absolute path to the model dataset
    bench_path: absolute path to the benchmark dataset
    config: the configuration of the set of analyses

Returns:
    A dictionary created by the elements object corresponding to the
    results of the bit for bit testing
def bit_for_bit(model_path, bench_path, config):
    fname = model_path.split(os.path.sep)[-1]
    # Error handling
    if not (os.path.isfile(bench_path) and os.path.isfile(model_path)):
        return elements.error("Bit for Bit",
                              "File named " + fname + " has no suitable match!")
    try:
        model_data = Dataset(model_path)
        bench_data = Dataset(bench_path)
    except (FileNotFoundError, PermissionError):
        return elements.error("Bit for Bit",
                              "File named " + fname + " could not be read!")
    if not (netcdf.has_time(model_data) and netcdf.has_time(bench_data)):
        return elements.error("Bit for Bit",
                              "File named " + fname + " could not be read!")

    # Begin bit for bit analysis
    headers = ["Max Error", "Index of Max Error", "RMS Error", "Plot"]
    stats = LIVVDict()
    for var in config["bit_for_bit_vars"]:
        if var in model_data.variables and var in bench_data.variables:
            m_vardata = model_data.variables[var][:]
            b_vardata = bench_data.variables[var][:]
            diff_data = m_vardata - b_vardata
            if diff_data.any():
                stats[var]["Max Error"] = np.amax(np.absolute(diff_data))
                stats[var]["Index of Max Error"] = str(
                    np.unravel_index(np.absolute(diff_data).argmax(), diff_data.shape))
                stats[var]["RMS Error"] = np.sqrt(
                    np.sum(np.square(diff_data).flatten()) / diff_data.size)
                pf = plot_bit_for_bit(fname, var, m_vardata, b_vardata, diff_data)
            else:
                stats[var]["Max Error"] = stats[var]["RMS Error"] = 0
                pf = stats[var]["Index of Max Error"] = "N/A"
            stats[var]["Plot"] = pf
        else:
            stats[var] = {"Max Error": "No Match", "RMS Error": "N/A", "Plot": "N/A"}
    model_data.close()
    bench_data.close()
    return elements.bit_for_bit("Bit for Bit", headers, stats)
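The two error metrics are plain numpy reductions; here is a self-contained sketch of the same max/RMS computation on toy arrays (the arrays are illustrative, not LIVVkit data):

import numpy as np

model = np.array([[1.0, 2.0], [3.0, 4.0]])
bench = np.array([[1.0, 2.5], [3.0, 3.0]])
diff = model - bench

max_error = np.amax(np.absolute(diff))                      # 1.0
max_index = np.unravel_index(np.absolute(diff).argmax(),
                             diff.shape)                    # (1, 1)
rms_error = np.sqrt(np.sum(np.square(diff)) / diff.size)    # ~0.559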
Compare the configurations of a model run and a benchmark run.

Args:
    model_config: a dictionary with the model configuration data
    bench_config: a dictionary with the benchmark configuration data
    model_bundle: a LIVVkit model bundle object
    bench_bundle: a LIVVkit model bundle object

Returns:
    A dictionary created by the elements object corresponding to the
    results of the configuration comparison
def diff_configurations(model_config, bench_config, model_bundle, bench_bundle):
    diff_dict = LIVVDict()
    model_data = model_bundle.parse_config(model_config)
    bench_data = bench_bundle.parse_config(bench_config)
    if model_data == {} and bench_data == {}:
        return elements.error("Configuration Comparison",
                              "Could not open file: " +
                              model_config.split(os.path.sep)[-1])
    model_sections = set(six.iterkeys(model_data))
    bench_sections = set(six.iterkeys(bench_data))
    all_sections = set(model_sections.union(bench_sections))
    for s in all_sections:
        model_vars = set(six.iterkeys(model_data[s])) if s in model_sections else set()
        bench_vars = set(six.iterkeys(bench_data[s])) if s in bench_sections else set()
        all_vars = set(model_vars.union(bench_vars))
        for v in all_vars:
            model_val = model_data[s][v] if s in model_sections and v in model_vars else 'NA'
            bench_val = bench_data[s][v] if s in bench_sections and v in bench_vars else 'NA'
            same = model_val == bench_val and model_val != 'NA'
            diff_dict[s][v] = (same, model_val, bench_val)
    return elements.file_diff("Configuration Comparison", diff_dict)
Add an object.

Args:
    obj: Object to be added.

Returns:
    Object: Object with id.

Raises:
    TypeError: If the added object is not a dict.
    MultipleInvalid: If the input object is invalid.
def add(self, obj):
    if not isinstance(obj, dict):
        raise TypeError("Add object should be a dict object")
    obj = self.validation(obj)
    obj["id"] = self.maxId + 1
    obj = self._cast_model(obj)
    self.model.db.append(obj)
    if not self._batch.enable.is_set():
        self.model.save_db()
    return obj
Get an object by id.

Args:
    id (int): Object id.

Returns:
    Object: Object with the specified id.
    None: If the object is not found.
def get(self, id):
    for obj in self.model.db:
        if obj["id"] == id:
            return self._cast_model(obj)
    return None
Remove an object by id.

Args:
    id (int): Id of the object to be deleted.

Returns:
    int: Number of affected rows.
def remove(self, id):
    before_len = len(self.model.db)
    self.model.db = [t for t in self.model.db if t["id"] != id]
    if not self._batch.enable.is_set():
        self.model.save_db()
    return before_len - len(self.model.db)
Update an object.

Args:
    id (int): Target object id.
    newObj (object): New object to be merged into the original object.

Returns:
    Object: Updated object.
    None: If the specified object id is not found.

Raises:
    MultipleInvalid: If the input object is invalid.
def update(self, id, newObj):
    newObj = self.validation(newObj)
    for obj in self.model.db:
        if obj["id"] != id:
            continue
        newObj.pop("id", None)
        obj.update(newObj)
        obj = self._cast_model(obj)
        if not self._batch.enable.is_set():
            self.model.save_db()
        return obj
    return None
Set an object.

Args:
    id (int): Target object id.
    newObj (object): New object to be set.

Returns:
    Object: New object.
    None: If the specified object id is not found.

Raises:
    MultipleInvalid: If the input object is invalid.
def set(self, id, newObj):
    newObj = self.validation(newObj)
    for index in xrange(0, len(self.model.db)):
        if self.model.db[index]["id"] != id:
            continue
        newObj["id"] = id
        self.model.db[index] = self._cast_model(newObj)
        if not self._batch.enable.is_set():
            self.model.save_db()
        return self.model.db[index]
    return None
Handles the parsing of options for LIVVkit's command line interface.

Args:
    args: The list of arguments, typically sys.argv[1:].
def parse_args(args=None):
    parser = argparse.ArgumentParser(
        description="Main script to run LIVVkit.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        fromfile_prefix_chars='@')

    parser.add_argument('-o', '--out-dir',
                        default=os.path.join(os.getcwd(), "vv_" + time.strftime("%Y-%m-%d")),
                        help='Location to output the LIVVkit webpages.')

    parser.add_argument('-v', '--verify',
                        nargs=2, default=None,
                        help='Specify the locations of the test and bench bundle to '
                             'compare (respectively).')

    parser.add_argument('-V', '--validate',
                        action='store', nargs='+', default=None,
                        help='Specify the location of the configuration files for '
                             'validation tests.')

    # FIXME: this just short-circuits to the validation option, and should become its own module
    parser.add_argument('-e', '--extension',
                        action='store', nargs='+', default=None,
                        dest='validate', metavar='EXTENSION',
                        help='Specify the location of the configuration files for '
                             'LIVVkit extensions.')

    parser.add_argument('-p', '--publish',
                        action='store_true',
                        help='Also produce a publication quality copy of the figure in '
                             'the output directory (eps, 600 dpi).')

    parser.add_argument('-s', '--serve',
                        nargs='?', type=int, const=8000,
                        help='Start a simple HTTP server for the output website specified '
                             'by OUT_DIR on port SERVE.')

    parser.add_argument('--version',
                        action='version',
                        version='LIVVkit {}'.format(livvkit.__version__),
                        help="Show LIVVkit's version number and exit")

    return init(parser.parse_args(args))
Get the configuration directory.

Get the configuration directories, optionally for a specific program.

Args:
    program (str): The name of the program whose configuration
        directories have to be found.
    system_wide (bool): Gets the system-wide configuration directories.

Returns:
    list: A list of all matching configuration directories found.
def get_config_dir(program='', system_wide=False):
    config_homes = []
    if system_wide:
        if os.name == 'nt':
            import winreg  # the original used winreg here without importing it
            config_homes.append(
                winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'))
        else:
            config_homes.append('/etc')
            config_homes.append('/etc/xdg')
            if os.name == 'darwin':
                config_homes.append('/Library')
    else:
        if os.name == 'nt':
            import winreg
            config_homes.append(
                winreg.ExpandEnvironmentStrings('%LOCALAPPDATA%'))
            config_homes.append(
                os.path.join(
                    winreg.ExpandEnvironmentStrings('%APPDATA%'), 'Roaming'))
        else:
            if os.getenv('XDG_CONFIG_HOME'):
                config_homes.append(os.getenv('XDG_CONFIG_HOME'))
            else:
                try:
                    from xdg import BaseDirectory
                    config_homes.append(BaseDirectory.xdg_config_home)
                except ImportError:
                    config_homes.append(os.path.expanduser('~/.config'))
            config_homes.append(os.path.expanduser('~'))
            if os.name == 'darwin':
                config_homes.append(os.path.expanduser('~/Library'))

    if program:
        def __find_homes(app, dirs):
            homes = []
            for home in dirs:
                if os.path.isdir(os.path.join(home, app)):
                    homes.append(os.path.join(home, app))
                if os.path.isdir(os.path.join(home, '.' + app)):
                    homes.append(os.path.join(home, '.' + app))
                if os.path.isdir(os.path.join(home, app + '.d')):
                    homes.append(os.path.join(home, app + '.d'))
            return homes

        app_homes = __find_homes(program, config_homes)

        # Special cases
        if program == 'vim':
            app_homes.extend(__find_homes('vimfiles', config_homes))
        elif program == 'chrome':
            app_homes.extend(__find_homes('google-chrome', config_homes))
        elif program in ['firefox', 'thunderbird']:
            app_homes.extend(
                __find_homes(program, [os.path.expanduser('~/.mozilla')]))

        return app_homes

    return config_homes
Get the configuration file for a program.

Gets the configuration file for a given program, assuming it stores it
in a standard location. See also :func:`get_config_dir()`.

Args:
    program (str): The program for which to get the configuration file.
    system_wide (bool): Whether to get the system-wide file for the program.

Returns:
    list: A list of all matching configuration files found.
def get_config_file(program, system_wide=False):
    program_config_homes = get_config_dir(program, system_wide)
    config_homes = get_config_dir(system_wide=system_wide)
    config_files = []
    for home in config_homes:
        for sub in os.listdir(home):
            if os.path.isfile(os.path.join(home, sub)):
                if sub.startswith(program):
                    config_files.append(os.path.join(home, sub))
    if not program.startswith('.'):
        config_files.extend(get_config_file('.' + program, system_wide))
    for home in program_config_homes:
        for sub in os.listdir(home):
            if os.path.isfile(os.path.join(home, sub)) and sub.startswith(program):
                config_files.append(os.path.join(home, sub))
    return config_files
Returns a dictionary representing a new section.

Sections contain a list of elements that are displayed separately from
the global elements on the page.

Args:
    title: The title of the section to be displayed
    element_list: The list of elements to display within the section

Returns:
    A dictionary with metadata specifying that it is to be rendered as
    a section containing multiple elements
def section(title, element_list):
    sect = {
        'Type': 'Section',
        'Title': title,
    }
    if isinstance(element_list, list):
        sect['Elements'] = element_list
    else:
        sect['Elements'] = [element_list]
    return sect
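A short usage sketch (the element dict here is a placeholder, not a real LIVVkit element):

table_element = {'Type': 'Table', 'Title': 'Timing', 'Data': {}}

# A single element is wrapped in a list automatically.
one = section('Performance', table_element)
assert one['Elements'] == [table_element]

# A list of elements is used as-is.
many = section('Performance', [table_element, table_element])
assert len(many['Elements']) == 2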
Render the specified template and return the output.

Args:
    tmpl_name (str): file name of the template
    request_env (dict): request environment

Returns:
    str: the rendered template
def render(self, tmpl_name, request_env):
    return super(WebApplication, self).render(tmpl_name, request_env)
Set the volume.

Sets the volume to a given percentage (integer between 0 and 100).

Args:
    percentage (int): The percentage (as a 0 to 100 integer) to set
        the volume to.

Raises:
    ValueError: if the percentage is >100 or <0.
def set_volume(percentage):
    if percentage > 100 or percentage < 0:
        raise ValueError('percentage must be an integer between 0 and 100')
    if system.get_name() == 'windows':
        # TODO: Implement volume for Windows. Looks like WinAPI is the
        # solution...
        pass
    elif system.get_name() == 'mac':
        # OS X uses 0-10 instead of a percentage
        volume_int = percentage / 10
        sp.Popen(['osascript', '-e', 'set Volume %d' % volume_int]).wait()
    else:
        # Linux/Unix
        formatted = str(percentage) + '%'
        sp.Popen(['amixer', '--quiet', 'sset', 'Master', formatted]).wait()
Increase the volume.

Increase the volume by a given percentage.

Args:
    percentage (int): The percentage (as an integer between 0 and 100)
        to increase the volume by.

Raises:
    ValueError: if the percentage is >100 or <0.
def increase_volume(percentage):
    if percentage > 100 or percentage < 0:
        raise ValueError('percentage must be an integer between 0 and 100')
    if system.get_name() == 'windows':
        # TODO: Implement volume for Windows. Looks like WinAPI is the
        # solution...
        pass
    elif system.get_name() == 'mac':
        volume_int = percentage / 10
        old_volume = get()
        new_volume = old_volume + volume_int
        if new_volume > 10:
            new_volume = 10
        set_volume(new_volume * 10)
    else:
        # Linux/Unix
        # + or - increases/decreases in amixer
        formatted = '%d%%+' % percentage
        sp.Popen(['amixer', '--quiet', 'sset', 'Master', formatted]).wait()
Read a GPTL timing file and extract some data.

Args:
    file_path: the path to the GPTL timing file
    var_list: a list of strings to look for in the file

Returns:
    A dict containing key-value pairs of the variables and the times
    associated with them
def parse_gptl(file_path, var_list):
    timing_result = dict()
    if os.path.isfile(file_path):
        with open(file_path, 'r') as f:
            for var in var_list:
                for line in f:
                    if var in line:
                        timing_result[var] = float(line.split()[4]) / int(line.split()[2])
                        break
    return timing_result
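The parser takes the first line containing the variable name and divides column 4 by column 2; a sketch with a made-up line in that shape (the actual GPTL column layout is an assumption here, not taken from the source):

line = "ice_run_step  12  100  250.0  500.0"
parts = line.split()
# parts[2] is taken as a call count, parts[4] as a total time,
# so the stored value is time per call.
per_call = float(parts[4]) / int(parts[2])  # 500.0 / 100 -> 5.0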
Search for a file in a directory, and return the first match.
If the file is not found return an empty string.

Args:
    search_dir: The root directory to search in
    file_pattern: A unix-style wildcard pattern representing the file
        to find

Returns:
    The path to the file if it was found, otherwise an empty string
def find_file(search_dir, file_pattern):
    for root, dirnames, fnames in os.walk(search_dir):
        for fname in fnames:
            if fnmatch.fnmatch(fname, file_pattern):
                return os.path.join(root, fname)
    return ""
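For example, with a hypothetical output tree:

# First *.nc file found anywhere under ./output
# (e.g. './output/run1/ice_state.nc'), or '' if none exists.
first_netcdf = find_file('./output', '*.nc')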
Write out data to a json file.

Args:
    data: A dictionary representation of the data to write out
    path: The directory to output the file in
    file_name: The name of the file to write out
def write_json(data, path, file_name):
    if os.path.exists(path) and not os.path.isdir(path):
        return
    elif not os.path.exists(path):
        mkdir_p(path)
    with open(os.path.join(path, file_name), 'w') as f:
        json_tricks.dump(data, f, indent=4, primitives=True, allow_nan=True)
Get the output of a command.

Gets a nice Unicode no-extra-whitespace string of the ``stdout`` of a
given command.

Args:
    command (str or list): A string of the command, or a list of the
        arguments (as would be used in :class:`subprocess.Popen`).

Note:
    If ``command`` is a ``str``, it will be evaluated with
    ``shell=True`` i.e. in the default shell (for example, bash).

Returns:
    str: The ``stdout`` of the command.
def get_cmd_out(command):
    if isinstance(command, list):
        result = sp.check_output(command)
    else:
        result = sp.check_output(command, shell=True)
    return result.decode('utf-8').rstrip()
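Both call styles return the same stripped string; the string form additionally goes through the shell, so pipes work:

print(get_cmd_out(['echo', 'hello']))           # 'hello'
print(get_cmd_out('echo hello | tr a-z A-Z'))   # 'HELLO'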
Check if a program is in the system ``PATH``.

Checks if a given program is in the user's ``PATH`` or not.

Args:
    program (str): The program to try to find in ``PATH``.

Returns:
    bool: Is the program in ``PATH``?
def is_in_path(program):
    if sys.version_info.major == 2:
        path = os.getenv('PATH')
        if os.name == 'nt':
            path = path.split(';')
        else:
            path = path.split(':')
    else:
        path = os.get_exec_path()
    for i in path:
        if os.path.isdir(i):
            if program in os.listdir(i):
                return True
    return False  # the original fell off the end and returned None
Check if a process is running.

Check if the given process name is running or not.

Note:
    On a Linux system, kernel threads (like ``kthreadd`` etc.) are
    excluded.

Args:
    process (str): The name of the process.

Returns:
    bool: Is the process running?
def is_running(process):
    if os.name == 'nt':
        process_list = get_cmd_out(['tasklist', '/v'])
        return process in process_list
    else:
        process_list = get_cmd_out('ps axw | awk \'{print $5}\'')
        for i in process_list.split('\n'):
            # 'COMMAND' is the column heading; [brackets] indicate
            # kernel-level processes like kthreadd, which manages
            # threads in the Linux kernel. The original condition was
            # missing these parentheses, so kernel threads slipped through.
            if not (i == 'COMMAND' or i.startswith('[')):
                if i == process:
                    return True
                elif os.path.basename(i) == process:
                    # check i without the executable path:
                    # for example, if the 'process' argument is 'sshd'
                    # and '/usr/bin/sshd' is listed in ps, return True
                    return True
        return False
Adds a program to startup.

Adds a program to user startup.

Args:
    name (str): The name of the startup entry.
    command (str): The command to run.
    system_wide (bool): Add to system-wide startup.

Note:
    ``system_wide`` requires superuser/admin privileges.
def add_item(name, command, system_wide=False):
    desktop_env = system.get_name()
    # the original left command_is_file undefined when command was not a file
    command_is_file = os.path.isfile(command)
    if command_is_file and desktop_env != 'windows':
        # Will not exit the program if there are insufficient permissions
        sp.Popen(['chmod +x %s' % command], shell=True)
    if desktop_env == 'windows':
        import winreg
        if system_wide:
            startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'),
                                       'Microsoft\\Windows\\Start Menu\\Programs\\Startup')
        else:
            startup_dir = os.path.join(get_config_dir()[0],
                                       'Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup')
        if not command_is_file:
            with open(os.path.join(startup_dir, name + '.bat'), 'w') as f:
                f.write(command)
        else:
            shutil.copy(command, startup_dir)
    elif desktop_env == 'mac':
        # the original applied '%' to the list instead of the string
        sp.Popen(['launchctl submit -l %s -- %s' % (name, command)], shell=True)
        # system-wide will be handled by running the above as root,
        # which will auto-happen if the current process is root.
    else:
        # Linux/Unix
        if desktop_env == 'unknown':
            # CLI
            if system_wide:
                login_file = '/etc/profile'
            else:
                login_file = os.path.expanduser('~/.profile')
            with open(login_file, 'a') as f:
                f.write(command)
        else:
            try:
                desktop_file_name = name + '.desktop'
                startup_file = os.path.join(
                    get_config_dir('autostart', system_wide=system_wide)[0],
                    desktop_file_name)
                # .desktop files' Terminal option uses an independent method
                # to find a terminal emulator
                desktop_str = desktopfile.construct(
                    name=name, exec_=command,
                    additional_opts={'X-GNOME-Autostart-enabled': 'true'})
                with open(startup_file, 'w') as f:
                    f.write(desktop_str)
            except IndexError:
                pass
List startup programs.

List the programs set to run at startup.

Args:
    system_wide (bool): Gets the programs that run at system-wide
        startup.

Returns:
    list: A list of dictionaries in this format:

    .. code-block:: python

        {
            'name': 'The name of the entry.',
            'command': 'The command used to run it.'
        }
def list_items(system_wide=False):
    desktop_env = system.get_name()
    result = []
    if desktop_env == 'windows':
        import winreg  # the original used winreg here without importing it
        sys_startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'),
                                       'Microsoft\\Windows\\Start Menu\\Programs\\Startup')
        user_startup_dir = os.path.join(get_config_dir()[0],
                                        'Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup')
        startup_dir = sys_startup_dir if system_wide else user_startup_dir
        for file in os.listdir(startup_dir):
            file_path = os.path.join(startup_dir, file)
            result.append({
                'name': file,
                'command': file_path
            })
    elif desktop_env == 'mac':
        # the original ran 'launchtl', a typo for 'launchctl'
        items_list = system.get_cmd_out('launchctl list | awk \'{print $3}\'')
        for item in items_list.split('\n'):
            # launchd stores each job as a .plist file (pseudo-xml)
            launchd_plist_paths = ['~/Library/LaunchAgents',
                                   '/Library/LaunchAgents',
                                   '/Library/LaunchDaemons',
                                   '/System/Library/LaunchAgents',
                                   '/System/Library/LaunchDaemons']
            for path in launchd_plist_paths:
                path = os.path.expanduser(path)  # '~' is not expanded by os.listdir
                if item + '.plist' in os.listdir(path):
                    plist_file = os.path.join(path, item + '.plist')
                    # Parse the plist
                    if sys.version_info.major == 2:
                        plist_parsed = plistlib.readPlist(plist_file)
                    else:
                        with open(plist_file, 'rb') as f:  # plistlib.load needs binary mode
                            plist_parsed = plistlib.load(f)
                    if 'Program' in plist_parsed:
                        cmd = plist_parsed['Program']
                        if 'ProgramArguments' in plist_parsed:
                            cmd += ' '.join(plist_parsed['ProgramArguments'])
                    elif 'ProgramArguments' in plist_parsed:
                        cmd = ' '.join(plist_parsed['ProgramArguments'])
                    else:
                        cmd = ''
                    result.append({
                        'name': item,
                        'command': cmd
                    })
        # system-wide will be handled by running the above as root,
        # which will auto-happen if the current process is root.
    else:
        # Linux/Unix
        # CLI
        profile = os.path.expanduser('~/.profile')
        if os.path.isfile(profile):
            with open(profile) as f:
                for line in f:
                    if system.is_in_path(line.lstrip().split(' ')[0]):
                        cmd_name = line.lstrip().split(' ')[0]
                        result.append({
                            'name': cmd_name,
                            'command': line.strip()
                        })
        # /etc/profile.d
        if system_wide:
            if os.path.isdir('/etc/profile.d'):
                for file in os.listdir('/etc/profile.d'):
                    file_path = os.path.join('/etc/profile.d', file)
                    result.append({
                        'name': file,
                        'command': 'sh %s' % file_path
                    })
        # GUI
        try:
            startup_dir = directories.get_config_dir('autostart', system_wide=system_wide)[0]
            for file in os.listdir(startup_dir):
                file_parsed = desktopfile.parse(os.path.join(startup_dir, file))
                if 'Name' in file_parsed:
                    name = file_parsed['Name']
                else:
                    name = file.replace('.desktop', '')
                if 'Exec' in file_parsed:
                    if file_parsed['Terminal']:
                        cmd = applications.terminal(exec_=file_parsed['Exec'],
                                                    return_cmd=True)
                    else:
                        cmd = file_parsed['Exec']
                else:
                    cmd = ''
                if not file_parsed.get('Hidden', False):
                    result.append({
                        'name': name,
                        'command': cmd
                    })
        except IndexError:
            pass
    return result
Removes a program from startup.

Removes a program from startup.

Args:
    name (str): The name of the program (as known to the system) to
        remove. See :func:`list_items`.
    system_wide (bool): Remove it from system-wide startup.

Note:
    ``system_wide`` requires superuser/admin privileges.
def remove_item(name, system_wide=False):
    desktop_env = system.get_name()
    if desktop_env == 'windows':
        import winreg
        if system_wide:
            startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'),
                                       'Microsoft\\Windows\\Start Menu\\Programs\\Startup')
        else:
            startup_dir = os.path.join(directories.get_config_dir()[0],
                                       'Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup')
        # the original called os.path.listdir(start_dir), which exists under neither name
        for startup_file in os.listdir(startup_dir):
            if startup_file == name or startup_file.split('.')[0] == name:
                os.remove(os.path.join(startup_dir, startup_file))
    elif desktop_env == 'mac':
        sp.Popen(['launchctl', 'remove', name])
        # system-wide will be handled by running the above as root,
        # which will auto-happen if the current process is root.
    else:
        # Linux/Unix
        if desktop_env == 'unknown':
            # CLI
            if system_wide:
                login_file = '/etc/profile'
            else:
                login_file = os.path.expanduser('~/.profile')
            with open(login_file) as f:
                login_file_contents = f.read()
            final_login_file_contents = ''
            for line in login_file_contents.split('\n'):
                if line.split(' ')[0] != name:
                    final_login_file_contents += line + '\n'
            with open(login_file, 'w') as f:
                f.write(final_login_file_contents)
        else:
            try:
                desktop_file_name = name + '.desktop'
                autostart_dir = directories.get_config_dir('autostart',
                                                           system_wide=system_wide)[0]
                startup_file = os.path.join(autostart_dir, desktop_file_name)
                if not os.path.isfile(startup_file):
                    for possible_startup_file in os.listdir(autostart_dir):
                        # join with the directory: listdir yields bare names
                        possible_path = os.path.join(autostart_dir, possible_startup_file)
                        possible_startup_file_parsed = desktopfile.parse(possible_path)
                        if possible_startup_file_parsed['Name'] == name:
                            startup_file = possible_path
                os.remove(startup_file)
            except IndexError:
                pass
Convert a BAM file to an ES file.

Args:
    bam_fn (str): File name of the BAM file.
    es_fo (file): File object of the ES file.
    allowed_delta (int): Maximal allowed coordinate difference for
        correct reads.
def bam2es(
    bam_fn,
    es_fo,
    allowed_delta,
):
    es_fo.write("# RN:   read name" + os.linesep)
    es_fo.write("# Q:    is mapped with quality" + os.linesep)
    es_fo.write("# Chr:  chr id" + os.linesep)
    es_fo.write("# D:    direction" + os.linesep)
    es_fo.write("# L:    leftmost nucleotide" + os.linesep)
    es_fo.write("# R:    rightmost nucleotide" + os.linesep)
    es_fo.write("# Cat:  category of alignment assigned by LAVEnder" + os.linesep)
    es_fo.write("#         M_i  i-th segment is correctly mapped" + os.linesep)
    es_fo.write("#         m    segment should be unmapped but it is mapped" + os.linesep)
    es_fo.write("#         w    segment is mapped to a wrong location" + os.linesep)
    es_fo.write("#         U    segment is unmapped and should be unmapped" + os.linesep)
    es_fo.write("#         u    segment is unmapped and should be mapped" + os.linesep)
    es_fo.write("# Segs: number of segments" + os.linesep)
    es_fo.write("# " + os.linesep)
    es_fo.write("# RN\tQ\tChr\tD\tL\tR\tCat\tSegs" + os.linesep)

    with pysam.AlignmentFile(bam_fn, "rb") as sam:
        references_dict = {}
        for i in range(len(sam.references)):
            references_dict[sam.references[i]] = i + 1

        for read in sam:
            rnf_read_tuple = rnftools.rnfformat.ReadTuple()
            rnf_read_tuple.destringize(read.query_name)

            left = read.reference_start + 1
            right = read.reference_end
            chrom_id = references_dict[sam.references[read.reference_id]]

            nb_of_segments = len(rnf_read_tuple.segments)
            should_be_mapped = rnf_read_tuple.segments[0].genome_id == 1

            if read.is_unmapped:
                # unmapped read: should it have been mapped?
                category = "u" if should_be_mapped else "U"
            else:
                if should_be_mapped:
                    exists_corresponding_segment = False
                    for j in range(len(rnf_read_tuple.segments)):
                        segment = rnf_read_tuple.segments[j]
                        if ((segment.left == 0 or abs(segment.left - left) <= allowed_delta)
                                and (segment.right == 0 or abs(segment.right - right) <= allowed_delta)
                                and (segment.left != 0 or segment.right == 0)
                                and (chrom_id == 0 or chrom_id == segment.chr_id)):
                            exists_corresponding_segment = True
                            segment = str(j + 1)
                            break
                    if exists_corresponding_segment:
                        # read was mapped to a correct location
                        category = "M_" + segment
                    else:
                        # read was mapped to an incorrect location
                        category = "w"
                else:
                    # read is mapped but should be unmapped
                    category = "m"

            es_fo.write(
                "\t".join(
                    map(
                        str,
                        [
                            read.query_name,  # read name
                            "unmapped" if read.is_unmapped
                            else "mapped_" + str(read.mapping_quality),  # aligned?
                            chrom_id,  # reference id
                            "R" if read.is_reverse else "F",  # direction
                            left,
                            right,
                            category,  # assigned category
                            nb_of_segments,  # count of segments
                        ]
                    )
                ) + os.linesep
            )
Convert ES to ET.

Args:
    es_fo (file): File object for the ES file.
    et_fo (file): File object for the ET file.
def es2et(
    es_fo,
    et_fo,
):
    et_fo.write("# Mapping information for read tuples" + os.linesep)
    et_fo.write("#" + os.linesep)
    et_fo.write("# RN: read name" + os.linesep)
    et_fo.write("# I:  intervals with assigned categories" + os.linesep)
    et_fo.write("#" + os.linesep)
    et_fo.write("# RN\tI" + os.linesep)

    last_rname = ""
    for line in es_fo:
        line = line.strip()
        if line == "" or line[0] == "#":
            continue

        (rname, mapped, ref, direction, left, right,
         category, nb_of_segments) = line.split("\t")
        nb_of_segments = int(nb_of_segments)

        # new read tuple
        if rname != last_rname:
            # flush statistics of the previous tuple
            if last_rname != "":
                voc = Bam._vector_of_categories(single_reads_statistics, rname, nb_of_segments)
                et_fo.write(Bam._et_line(readname=rname, vector_of_categories=voc))
                et_fo.write(os.linesep)
            # reset the per-quality statistics
            single_reads_statistics = [
                {"U": 0, "u": 0, "M": [], "m": 0, "w": 0, "T": 0, "t": 0}
                for i in range(rnftools.lavender.MAXIMAL_MAPPING_QUALITY + 1)
            ]
            last_rname = rname

        ####################
        # Unmapped segment #
        ####################
        if category == "U":
            for q in range(len(single_reads_statistics)):
                single_reads_statistics[q]["U"] += 1
        elif category == "u":
            for q in range(len(single_reads_statistics)):
                single_reads_statistics[q]["u"] += 1
        ##################
        # Mapped segment #
        ##################
        else:
            mapping_quality = int(mapped.replace("mapped_", ""))
            assert 0 <= mapping_quality <= rnftools.lavender.MAXIMAL_MAPPING_QUALITY, mapping_quality
            if category == "m":
                for q in range(mapping_quality + 1):
                    single_reads_statistics[q]["m"] += 1
                for q in range(mapping_quality + 1, rnftools.lavender.MAXIMAL_MAPPING_QUALITY + 1):
                    single_reads_statistics[q]["T"] += 1
            elif category == "w":
                for q in range(mapping_quality + 1):
                    single_reads_statistics[q]["w"] += 1
                for q in range(mapping_quality + 1, rnftools.lavender.MAXIMAL_MAPPING_QUALITY + 1):
                    single_reads_statistics[q]["t"] += 1
            else:
                assert category[0] == "M", category
                segment_id = int(category.replace("M_", ""))
                for q in range(mapping_quality + 1):
                    single_reads_statistics[q]["M"].append(segment_id)
                for q in range(mapping_quality + 1, rnftools.lavender.MAXIMAL_MAPPING_QUALITY + 1):
                    single_reads_statistics[q]["t"] += 1

    # last read tuple
    voc = Bam._vector_of_categories(single_reads_statistics, rname, nb_of_segments)
    et_fo.write(Bam._et_line(readname=rname, vector_of_categories=voc))
    et_fo.write(os.linesep)
ET to ROC conversion.

Args:
    et_fo (file): File object for the ET file.
    roc_fo (file): File object for the ROC file.

Raises:
    ValueError
def et2roc(et_fo, roc_fo):
    stats_dicts = [
        {"q": q, "M": 0, "w": 0, "m": 0, "P": 0,
         "U": 0, "u": 0, "T": 0, "t": 0, "x": 0}
        for q in range(rnftools.lavender.MAXIMAL_MAPPING_QUALITY + 1)
    ]

    for line in et_fo:
        line = line.strip()
        if line != "" and line[0] != "#":
            (read_tuple_name, tab, info_categories) = line.partition("\t")
            intervals = info_categories.split(",")
            for interval in intervals:
                category = interval[0]
                (left, colon, right) = interval[2:].partition("-")
                for q in range(int(left), int(right) + 1):
                    stats_dicts[q][category] += 1

    roc_fo.write("# Numbers of reads in several categories in dependence" + os.linesep)
    roc_fo.write("# on the applied threshold on mapping quality q" + os.linesep)
    roc_fo.write("# " + os.linesep)
    roc_fo.write("# Categories:" + os.linesep)
    roc_fo.write("#   M: Mapped correctly." + os.linesep)
    roc_fo.write("#   w: Mapped to a wrong position." + os.linesep)
    roc_fo.write("#   m: Mapped but should be unmapped." + os.linesep)
    roc_fo.write("#   P: Multimapped." + os.linesep)
    roc_fo.write("#   U: Unmapped and should be unmapped." + os.linesep)
    roc_fo.write("#   u: Unmapped but should be mapped." + os.linesep)
    roc_fo.write("#   T: Thresholded correctly." + os.linesep)
    roc_fo.write("#   t: Thresholded incorrectly." + os.linesep)
    roc_fo.write("#   x: Unknown." + os.linesep)
    roc_fo.write("#" + os.linesep)
    roc_fo.write("# q\tM\tw\tm\tP\tU\tu\tT\tt\tx\tall" + os.linesep)

    l_numbers = []
    for line in stats_dicts:
        numbers = [line["M"], line["w"], line["m"], line["P"], line["U"],
                   line["u"], line["T"], line["t"], line["x"]]
        if numbers != l_numbers:
            roc_fo.write("\t".join([str(line["q"])] + list(map(str, numbers)) +
                                   [str(sum(numbers))]) + os.linesep)
        l_numbers = numbers
Plot accuracy.

Args:
    data: Pandas dataframe in *the* format.
def plot_accuracy(data, output_dir_path='.', output_filename='accuracy.png',
                  width=10, height=8):
    output_path = os.path.join(output_dir_path, output_filename)
    max_val_data = get_epoch_max_val_acc(data)
    max_val_label = round(max_val_data['acc'].values[0], 4)
    # max_val_epoch = max_val_data['epoch'].values[0]
    max_epoch_data = data[data['epoch'] == data['epoch'].max()]
    plot = ggplot(data, aes('epoch', 'acc', color='factor(data)')) + \
        geom_line(size=1, show_legend=False) + \
        geom_vline(aes(xintercept='epoch', color='data'),
                   data=max_val_data, alpha=0.5, show_legend=False) + \
        geom_label(aes('epoch', 'acc'), data=max_val_data, label=max_val_label,
                   nudge_y=-0.02, va='top', label_size=0, show_legend=False) + \
        geom_text(aes('epoch', 'acc', label='data'), data=max_epoch_data,
                  nudge_x=2, ha='center', show_legend=False) + \
        geom_point(aes('epoch', 'acc'), data=max_val_data, show_legend=False) + \
        labs(y='Accuracy', x='Epochs') + \
        theme_bw(base_family='Arial', base_size=15) + \
        scale_color_manual(['#ef8a62', '#67a9cf', '#f7f7f7'])
    plot.save(output_path, width=width, height=height)
Create two plots: 1) loss 2) accuracy.

Args:
    data: Pandas dataframe in *the* format.
def plot(data, output_dir_path='.', width=10, height=8):
    if not isinstance(data, pd.DataFrame):
        data = pd.DataFrame(data)
    plot_accuracy(data, output_dir_path=output_dir_path, width=width, height=height)
    plot_loss(data, output_dir_path, width=width, height=height)
Returns the value inside a nested structure of data located at a
period-delimited path.

When traversing a list, as long as that list contains objects of type
dict, items in that list will have their "name" and "type" values
tested against the current key in the path.

Args:
    data (dict or list): data to traverse
    path (str): '.'-delimited string

Kwargs:
    default: value to return if the path does not exist
def dict_get_path(data, path, default=None):
    keys = path.split(".")
    for k in keys:
        if isinstance(data, list):
            found = False
            for item in data:
                name = item.get("name", item.get("type"))
                if name == k:
                    found = True
                    data = item
                    break
            if not found:
                return default
        elif isinstance(data, dict):
            if k in data:
                data = data[k]
            else:
                return default
        else:
            return default
    return data
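For example, traversing a mixed dict/list structure (the data is made up for illustration):

config = {
    "interfaces": [
        {"name": "eth0", "ipv4": {"address": ""}},
        {"name": "wlan0", "ipv4": {"address": ""}},
    ]
}

# List items are matched against their "name" (or "type") value.
dict_get_path(config, "interfaces.eth0.ipv4.address")              # -> ""
dict_get_path(config, "interfaces.eth2.ipv4.address", default="n/a")  # -> "n/a"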
Evaluates calls from call_queue and places the results in result_queue.

This worker is run in a separate process.

Args:
    call_queue: A multiprocessing.Queue of _CallItems that will be read
        and evaluated by the worker.
    result_queue: A multiprocessing.Queue of _ResultItems that will be
        written to by the worker.
    shutdown: A multiprocessing.Event that will be set as a signal to
        the worker that it should exit when call_queue is empty.
def _process_worker(call_queue, result_queue, shutdown):
    while True:
        try:
            call_item = call_queue.get(block=True, timeout=0.1)
        except queue.Empty:
            if shutdown.is_set():
                return
        else:
            try:
                r = call_item()
            except BaseException as e:
                result_queue.put(_ResultItem(call_item.work_id, exception=e))
            else:
                result_queue.put(_ResultItem(call_item.work_id, result=r))
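The _CallItem/_ResultItem types are defined elsewhere in the module; a minimal self-contained sketch of the same poll-then-check-shutdown pattern, with plain tuples standing in for them:

import multiprocessing
import queue

def worker(calls, results, shutdown):
    while True:
        try:
            work_id, fn, args = calls.get(block=True, timeout=0.1)
        except queue.Empty:
            # Only exit once the queue is drained AND shutdown was requested.
            if shutdown.is_set():
                return
        else:
            try:
                results.put((work_id, fn(*args), None))
            except BaseException as e:
                results.put((work_id, None, e))

if __name__ == '__main__':
    calls = multiprocessing.Queue()
    results = multiprocessing.Queue()
    shutdown = multiprocessing.Event()
    calls.put((1, pow, (2, 10)))
    p = multiprocessing.Process(target=worker, args=(calls, results, shutdown))
    p.start()
    print(results.get())  # (1, 1024, None)
    shutdown.set()
    p.join()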
Initializes a new ProcessPoolExecutor instance.

Args:
    max_workers: The maximum number of processes that can be used to
        execute the given calls. If None or not given then as many
        worker processes will be created as the machine has processors.
def __init__(self, max_workers=None):
    _remove_dead_thread_references()
    if max_workers is None:
        self._max_workers = multiprocessing.cpu_count()
    else:
        self._max_workers = max_workers
    # Make the call queue slightly larger than the number of processes to
    # prevent the worker processes from idling. But don't make it too big
    # because futures in the call queue cannot be cancelled.
    self._call_queue = multiprocessing.Queue(self._max_workers + EXTRA_QUEUED_CALLS)
    self._result_queue = multiprocessing.Queue()
    self._work_ids = queue.Queue()
    self._queue_management_thread = None
    self._processes = set()
    # Shutdown is a two-step process.
    self._shutdown_thread = False
    self._shutdown_process_event = multiprocessing.Event()
    self._shutdown_lock = threading.Lock()
    self._queue_count = 0
    self._pending_work_items = {}
Check if 'app' is installed (OS X).

Check if the given application is installed on this OS X system.

Args:
    app (str): The application name.

Returns:
    bool: Is the app installed or not?
def mac_app_exists(app):
    # The body of the AppleScript was lost in the source; asking for the
    # application's id is one plausible check that fails for unknown apps.
    APP_CHECK_APPLESCRIPT = 'id of application "%s"'
    with open('/tmp/app_check.AppleScript', 'w') as f:
        f.write(APP_CHECK_APPLESCRIPT % app)
    # osascript runs a script file when given a path; '-e' is for inline
    # source, so the original '-e <path>' call could never succeed.
    app_check_proc = sp.Popen(['osascript', '/tmp/app_check.AppleScript'])
    if app_check_proc.wait() != 0:
        return False
    else:
        return True
Collects the analyses cases to be run and launches processes for each
of them.

Args:
    run_type: A string representation of the run type (e.g. verification).
    module: The module corresponding to the run. Must have a run_suite
        function.
    config: The configuration for the module.
def run(run_type, module, config):
    print(" -----------------------------------------------------------------")
    print(" Beginning " + run_type.lower() + " test suite ")
    print(" -----------------------------------------------------------------")
    print("")
    summary = run_quiet(module, config)
    print(" -----------------------------------------------------------------")
    print(" " + run_type.capitalize() + " test suite complete ")
    print(" -----------------------------------------------------------------")
    print("")
    return summary
Check RNF validity of a read tuple.

Args:
    read_tuple_name (str): Read tuple name to be checked.
def validate(self, read_tuple_name):
    if reg_lrn.match(read_tuple_name) is None:
        self.report_error(
            read_tuple_name=read_tuple_name,
            error_name="wrong_read_tuple_name_structure",
            message="'{}' is not matched".format(reg_lrn),
        )
    else:
        parts = read_tuple_name.split("__")
        if reg_prefix_part.match(parts[0]) is None:
            self.report_error(
                read_tuple_name=read_tuple_name,
                error_name="wrong_prefix_part",
                message="'{}' is not matched".format(reg_prefix_part),
            )
        if reg_id_part.match(parts[1]) is None:
            self.report_error(
                read_tuple_name=read_tuple_name,
                error_name="wrong_id_part",
                message="'{}' is not matched".format(reg_id_part),
            )
        if reg_segmental_part.match(parts[2]) is None:
            self.report_error(
                read_tuple_name=read_tuple_name,
                error_name="wrong_segmental_part",
                message="'{}' is not matched".format(reg_segmental_part),
            )
        if reg_suffix_part.match(parts[3]) is None:
            self.report_error(
                read_tuple_name=read_tuple_name,
                error_name="wrong_suffix_part",
                message="'{}' is not matched".format(reg_suffix_part),
            )
        if not self.rnf_profile.check(read_tuple_name):
            self.report_error(
                read_tuple_name=read_tuple_name,
                error_name="wrong_profile",
                message="Read has a wrong profile (wrong widths). It should be: {} but it is: {}.".format(
                    self.rnf_profile,
                    rnftools.rnfformat.RnfProfile(read_tuple_name=read_tuple_name),
                ),
                warning=True,
            )
Report an error.

Args:
    read_tuple_name (str): Name of the read tuple.
    error_name (str): Name of the error.
    wrong (str): What is wrong.
    message (str): Additional message to be printed.
    warning (bool): Warning (not an error).
def report_error(self, read_tuple_name, error_name, wrong="", message="", warning=False):
    if (not self.report_only_first) or (error_name not in self.reported_errors):
        print("\t".join(["warning" if warning else "error",
                         read_tuple_name, error_name, wrong, message]))
    self.reported_errors.add(error_name)
    if warning:
        self.warning_has_been_reported = True
    else:
        self.error_has_been_reported = True
Get the coin address associated with a user id.

If the specified user id does not yet have an address for this coin,
then generate one.

Args:
    user_id (str): this user's unique identifier

Returns:
    str: Base58Check address for this account
def getaccountaddress(self, user_id=""):
    address = self.rpc.call("getaccountaddress", user_id)
    # logging uses %-style formatting, not print-style argument lists
    self.logger.debug("Your %s address is %s", self.coin, address)
    return address
Calculate the total balance in all addresses belonging to this user.

Args:
    user_id (str): this user's unique identifier
    as_decimal (bool): balance is returned as a Decimal if True
        (default) or a string if False

Returns:
    str or Decimal: this account's total coin balance
def getbalance(self, user_id="", as_decimal=True):
    balance = unicode(self.rpc.call("getbalance", user_id))
    self.logger.debug('"%s" %s balance: %s', user_id, self.coin, balance)
    if as_decimal:
        return Decimal(balance)
    else:
        return balance
List all transactions associated with this account.

Args:
    user_id (str): this user's unique identifier
    count (int): number of transactions to return (default=10)
    start_at (int): start the list at this transaction (default=0)

Returns:
    list [dict]: transactions associated with this user's account
def listtransactions(self, user_id="", count=10, start_at=0):
    txlist = self.rpc.call("listtransactions", user_id, count, start_at)
    self.logger.debug("Got transaction list for " + str(user_id))
    return txlist
Send coins from a user's account.

Args:
    user_id (str): this user's unique identifier
    dest_address (str): address which is to receive coins
    amount (str or Decimal): amount to send (eight decimal points)
    minconf (int): ensure the account has a valid balance using this
        many confirmations (default=1)

Returns:
    str: transaction ID
def sendfrom(self, user_id, dest_address, amount, minconf=1):
    amount = Decimal(amount).quantize(self.quantum, rounding=ROUND_HALF_EVEN)
    txhash = self.rpc.call("sendfrom",
                           user_id, dest_address, float(str(amount)), minconf)
    self.logger.debug("Send %s %s from %s to %s" %
                      (str(amount), self.coin, str(user_id), dest_address))
    self.logger.debug("Transaction hash: %s" % txhash)
    return txhash
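The quantize call snaps the amount onto the coin's eight-decimal grid using banker's rounding; assuming self.quantum is Decimal('0.00000001'), it behaves like this:

from decimal import Decimal, ROUND_HALF_EVEN

quantum = Decimal('0.00000001')  # eight decimal places
amount = Decimal('1.234567895').quantize(quantum, rounding=ROUND_HALF_EVEN)
print(amount)  # 1.23456790 -- the tie rounds to the even final digit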
Sign a message with the private key of an address.

Cryptographically signs a message using ECDSA. Since this requires an
address's private key, the wallet must be unlocked first.

Args:
    address (str): address used to sign the message
    message (str): plaintext message to which to apply the signature

Returns:
    str: ECDSA signature over the message
def signmessage(self, address, message):
    signature = self.rpc.call("signmessage", address, message)
    self.logger.debug("Signature: %s" % signature)
    return signature
Verifies that a message has been signed by an address.

Args:
    address (str): address claiming to have signed the message
    signature (str): ECDSA signature
    message (str): plaintext message which was signed

Returns:
    bool: True if the address signed the message, False otherwise
def verifymessage(self, address, signature, message):
    verified = self.rpc.call("verifymessage", address, signature, message)
    self.logger.debug("Signature verified: %s" % str(verified))
    return verified
Passes an arbitrary command to the coin daemon.

Args:
    command (str): command to be sent to the coin daemon
def call(self, command, *args):
    return self.rpc.call(str(command), *args)
Convert SAM to RNF-based FASTQ with respect to argparse parameters.

Args:
    args (...): Arguments parsed by argparse.
def sam2rnf(args):
    rnftools.mishmash.Source.recode_sam_reads(
        sam_fn=args.sam_fn,
        fastq_rnf_fo=args.fq_fo,
        fai_fo=args.fai_fo,
        genome_id=args.genome_id,
        number_of_read_tuples=10**9,
        simulator_name=args.simulator_name,
        allow_unmapped=args.allow_unmapped,
    )
Add another parser for a SAM2RNF-like command.

Args:
    subparsers (subparsers): argparse subparsers object to register the
        new subcommand with.
    subcommand (str): Name of the subcommand.
    help (str): Help text for the subcommand.
    description (str): Description of the subcommand.
    simulator_name (str): Name of the simulator used in comments.
def add_sam2rnf_parser(subparsers, subcommand, help, description, simulator_name=None):
    parser_sam2rnf = subparsers.add_parser(subcommand, help=help, description=description)
    parser_sam2rnf.set_defaults(func=sam2rnf)
    parser_sam2rnf.add_argument(
        '-s', '--sam',
        type=str,
        metavar='file',
        dest='sam_fn',
        required=True,
        help='Input SAM/BAM with true (expected) alignments of the reads (- for standard input).'
    )
    _add_shared_params(parser_sam2rnf, unmapped_switcher=True)
    parser_sam2rnf.add_argument(
        '-n', '--simulator-name',
        type=str,
        metavar='str',
        dest='simulator_name',
        default=simulator_name,
        help='Name of the simulator (for RNF).' if simulator_name is not None else argparse.SUPPRESS,
    )
Convert WgSim FASTQ files to RNF FASTQ files.

Args:
    rnf_fastq_fo (file): File object of the target RNF file.
    fai_fo (file): File object of the FAI index of the reference genome.
    genome_id (int): RNF genome ID.
    wgsim_fastq_1_fn (str): File name of the first WgSim FASTQ file.
    wgsim_fastq_2_fn (str): File name of the second WgSim FASTQ file.
    number_of_read_tuples (int): Expected number of read tuples (to
        estimate widths).
def recode_wgsim_reads(
    rnf_fastq_fo,
    fai_fo,
    genome_id,
    wgsim_fastq_1_fn,
    wgsim_fastq_2_fn=None,
    number_of_read_tuples=10**9,
):
    wgsim_pattern = re.compile(
        '@(.*)_([0-9]+)_([0-9]+)_([0-9]+):([0-9]+):([0-9]+)_([0-9]+):([0-9]+):([0-9]+)_([0-9a-f]+)/([12])'
    )
    fai_index = rnftools.utils.FaIdx(fai_fo)
    read_tuple_id_width = len(format(number_of_read_tuples, 'x'))
    last_read_tuple_name = None

    fq_creator = rnftools.rnfformat.FqCreator(
        fastq_fo=rnf_fastq_fo,
        read_tuple_id_width=read_tuple_id_width,
        genome_id_width=2,
        chr_id_width=fai_index.chr_id_width,
        coor_width=fai_index.coor_width,
        info_reads_in_tuple=True,
        info_simulator="wgsim",
    )

    reads_in_tuple = 2
    if wgsim_fastq_2_fn is None:
        reads_in_tuple = 1

    i = 0
    with open(wgsim_fastq_1_fn, "r+") as f_inp_1:
        if reads_in_tuple == 2:
            # todo: close file
            f_inp_2 = open(wgsim_fastq_2_fn)
        for line_a in f_inp_1:
            lines = [line_a.strip()]
            if reads_in_tuple == 2:
                lines.append(f_inp_2.readline().strip())

            if i % 4 == 0:
                segments = []
                m = wgsim_pattern.search(lines[0])
                if m is None:
                    rnftools.utils.error(
                        "Read tuple '{}' was not generated by WgSim.".format(lines[0][1:]),
                        program="RNFtools",
                        subprogram="MIShmash",
                        exception=ValueError,
                    )
                contig_name = m.group(1)
                start_1 = int(m.group(2))
                end_2 = int(m.group(3))
                errors_1 = int(m.group(4))
                substitutions_1 = int(m.group(5))
                indels_1 = int(m.group(6))
                errors_2 = int(m.group(7))
                substitutions_2 = int(m.group(8))
                indels_2 = int(m.group(9))
                read_tuple_id_w = int(m.group(10), 16)
                pair = int(m.group(11))

                chr_id = fai_index.dict_chr_ids[contig_name] if fai_index.dict_chr_ids != {} else "0"
                if start_1 < end_2:
                    direction_1 = "F"
                    direction_2 = "R"
                else:
                    direction_1 = "R"
                    direction_2 = "F"

                segment1 = rnftools.rnfformat.Segment(
                    genome_id=genome_id,
                    chr_id=chr_id,
                    direction=direction_1,
                    left=start_1,
                    right=0,
                )
                segment2 = rnftools.rnfformat.Segment(
                    genome_id=genome_id,
                    chr_id=chr_id,
                    direction=direction_2,
                    left=0,
                    right=end_2,
                )

            elif i % 4 == 1:
                bases = lines[0]
                if reads_in_tuple == 2:
                    bases2 = lines[1]

            elif i % 4 == 2:
                pass

            elif i % 4 == 3:
                qualities = lines[0]
                if reads_in_tuple == 2:
                    qualities2 = lines[1]

                if reads_in_tuple == 1:
                    fq_creator.add_read(
                        read_tuple_id=i // 4 + 1,
                        bases=bases,
                        qualities=qualities,
                        segments=[segment1, segment2],
                    )
                else:
                    fq_creator.add_read(
                        read_tuple_id=i // 4 + 1,
                        bases=bases,
                        qualities=qualities,
                        segments=[segment1],
                    )
                    fq_creator.add_read(
                        read_tuple_id=i // 4 + 1,
                        bases=bases2,
                        qualities=qualities2,
                        segments=[segment2],
                    )

            i += 1

    fq_creator.flush_read_tuple()
Calls a jsonrpc service's method and returns its return value in a
JSON string, or None if there is none.

Arguments:
jsondata -- remote method call in jsonrpc format
def call(self, jsondata):
    # (presumably decorated with @defer.inlineCallbacks, given the
    # yield/returnValue style)
    result = yield self.call_py(jsondata)
    if result is None:
        defer.returnValue(None)
    else:
        defer.returnValue(json.dumps(result))
Create an RNF representation of this read.

Args:
    rnf_profile (RnfProfile): Profile carrying the expected string
        widths (read tuple ID, genome ID, chromosome ID, coordinate).
def stringize(
    self,
    rnf_profile=RnfProfile(),
):
    # Sort segments by (genome, chromosome, leftmost coordinate, rightmost
    # coordinate, direction), packed into a single integer key.
    sorted_segments = sorted(
        self.segments,
        key=lambda x: (
            x.genome_id * (10 ** 23) +
            x.chr_id * (10 ** 21) +
            (x.left + (int(x.left == 0) * x.right - 1)) * (10 ** 11) +
            x.right * (10 ** 1) +
            int(x.direction == "F")
        )
    )
    segments_strings = [x.stringize(rnf_profile) for x in sorted_segments]
    read_tuple_name = "__".join(
        [
            self.prefix,
            format(self.read_tuple_id, 'x').zfill(rnf_profile.read_tuple_id_width),
            ",".join(segments_strings),
            self.suffix,
        ]
    )
    return read_tuple_name
Get RNF values for this read from its textual representation and save
them into this object.

Args:
    string (str): Textual representation of a read.

Raises:
    ValueError
def destringize(self, string):
    # todo: assert -- starting with (, ending with )
    # (prefix, read_tuple_id, segments_t, suffix) = (text).split("__")
    # segments = segments_t.split("),(")
    m = read_tuple_destr_pattern.match(string)
    if not m:
        smbl.messages.error(
            "'{}' is not a valid read name with respect to the RNF specification".format(string),
            program="RNFtools",
            subprogram="RNF format",
            exception=ValueError,
        )
    groups = m.groups()
    # todo: check number of groups
    self.prefix = groups[0]
    read_tuple_id = groups[1]
    self.read_tuple_id = int(read_tuple_id, 16)
    self.segments = []
    segments_str = groups[2:-1]
    for b_str in segments_str:
        if b_str is not None:
            if b_str[0] == ",":
                b_str = b_str[1:]
            b = rnftools.rnfformat.Segment()
            b.destringize(b_str)
            self.segments.append(b)
    self.suffix = groups[-1]
Construct a .desktop file and return it as a string.

Create a standards-compliant .desktop file, returning it as a string.

Args:
    name (str): The program's name.
    exec\_ (str): The command.
    terminal (bool): Determine if the program should be run in a
        terminal emulator or not. Defaults to ``False``.
    additional_opts (dict): Any additional fields.

Returns:
    str: The constructed .desktop file.
def construct(name, exec_, terminal=False, additional_opts=None):
    # avoid a mutable default; the original's None-check was dead code
    # because the default was already {}
    if additional_opts is None:
        additional_opts = {}
    desktop_file_dict = {
        'Name': name,
        'Exec': exec_,
        'Terminal': 'true' if terminal else 'false',
        'Comment': additional_opts.get('Comment', name)
    }
    desktop_file = ('[Desktop Entry]\nName={name}\nExec={exec_}\n'
                    'Terminal={terminal}\nComment={comment}\n')
    desktop_file = desktop_file.format(name=desktop_file_dict['Name'],
                                       exec_=desktop_file_dict['Exec'],
                                       terminal=desktop_file_dict['Terminal'],
                                       comment=desktop_file_dict['Comment'])
    for option in additional_opts:
        if option not in desktop_file_dict:
            desktop_file += '%s=%s\n' % (option, additional_opts[option])
    return desktop_file
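For instance, constructing an autostart-style entry (the values are illustrative):

entry = construct(name='MyApp',
                  exec_='myapp --minimized',
                  additional_opts={'X-GNOME-Autostart-enabled': 'true'})
print(entry)
# [Desktop Entry]
# Name=MyApp
# Exec=myapp --minimized
# Terminal=false
# Comment=MyApp
# X-GNOME-Autostart-enabled=true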
1,014,002
Execute a .desktop file. Executes a given .desktop file path properly. Args: desktop_file (str) : The path to the .desktop file. files (list): Any files to be launched by the .desktop. Defaults to empty list. return_cmd (bool): Return the command (as ``str``) instead of executing. Defaults to ``False``. background (bool): Run the command in the background. Defaults to ``False``. Returns: str: The command, returned only if ``return_cmd`` is set; otherwise nothing is returned.
def execute(desktop_file, files=None, return_cmd=False, background=False):
    # Attempt to manually parse and execute; parse the file once and reuse it
    desktop_file_parsed = parse(desktop_file)
    desktop_file_exec = desktop_file_parsed['Exec']

    # Strip field codes (%f, %F, %u, ...) from the Exec line
    for i in desktop_file_exec.split():
        if i.startswith('%'):
            desktop_file_exec = desktop_file_exec.replace(i, '')

    desktop_file_exec = desktop_file_exec.replace(r'%F', '')
    desktop_file_exec = desktop_file_exec.replace(r'%f', '')

    if files:
        for i in files:
            desktop_file_exec += ' ' + i

    if desktop_file_parsed['Terminal']:
        # Use eval and __import__ to bypass a circular dependency
        desktop_file_exec = eval(
            ('__import__("libdesktop").applications.terminal(exec_="%s",'
             ' keep_open_after_cmd_exec=True, return_cmd=True)') % desktop_file_exec)

    if return_cmd:
        return desktop_file_exec

    desktop_file_proc = sp.Popen([desktop_file_exec], shell=True)

    if not background:
        desktop_file_proc.wait()
1,014,003
Locate a .desktop from the standard locations. Find the path to the .desktop file of a given .desktop filename or application name. Standard locations: - ``~/.local/share/applications/`` - ``/usr/share/applications`` Args: desktop_filename_or_name (str): Either the filename of a .desktop file or the name of an application. Returns: list: A list of all matching .desktop files found.
def locate(desktop_filename_or_name):
    paths = [
        os.path.expanduser('~/.local/share/applications'),
        '/usr/share/applications']

    result = []

    for path in paths:
        for file in os.listdir(path):
            if desktop_filename_or_name in file.split(
                    '.') or desktop_filename_or_name == file:
                # Example: org.gnome.gedit
                result.append(os.path.join(path, file))
            else:
                file_parsed = parse(os.path.join(path, file))
                try:
                    if desktop_filename_or_name.lower() == file_parsed[
                            'Name'].lower():
                        result.append(os.path.join(path, file))
                    elif desktop_filename_or_name.lower() == file_parsed[
                            'Exec'].split(' ')[0]:
                        result.append(os.path.join(path, file))
                except KeyError:
                    pass

    # Keep only .desktop files; do not mutate the list while iterating over it
    result = [res for res in result if res.endswith('.desktop')]

    if not result and not desktop_filename_or_name.endswith('.desktop'):
        result.extend(locate(desktop_filename_or_name + '.desktop'))

    return result
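Hypothetical usage; the returned paths depend entirely on what is installed on the machine:

print(locate('gedit'))
# e.g. ['/usr/share/applications/org.gnome.gedit.desktop']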
1,014,004
Parse a .desktop file. Parse a .desktop file or a string with its contents into an easy-to-use dict, with standard values present even if not defined in file. Args: desktop_file_or_string (str): Either the path to a .desktop file or a string with a .desktop file as its contents. Returns: dict: A dictionary of the parsed file.
def parse(desktop_file_or_string):
    if os.path.isfile(desktop_file_or_string):
        with open(desktop_file_or_string) as f:
            desktop_file = f.read()
    else:
        desktop_file = desktop_file_or_string

    result = {}

    for line in desktop_file.split('\n'):
        if '=' in line:
            # Split on the first '=' only, so values may themselves contain '='
            key, _, value = line.partition('=')
            result[key] = value

    for key, value in result.items():
        if value == 'false':
            result[key] = False
        elif value == 'true':
            result[key] = True

    if 'Terminal' not in result:
        result['Terminal'] = False

    if 'Hidden' not in result:
        result['Hidden'] = False

    return result
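A small sketch of the parser on an in-memory string, showing the boolean coercion and the defaults:

entry = parse('[Desktop Entry]\n'
              'Name=Demo\n'
              'Exec=demo --flag=1\n'
              'Terminal=true\n')
print(entry['Exec'])      # demo --flag=1  (only the first '=' is split on)
print(entry['Terminal'])  # True           (string 'true' coerced to bool)
print(entry['Hidden'])    # False          (default filled in when absent)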
1,014,005
Naive cycle detector See help(cycle_detector) for more context. Args: seqs: Sequences to detect cycles in. f, start: Function and starting state for a finite state machine Yields: Values yielded by the sequence if it terminates; undefined once a cycle is found. Raises: CycleDetected if a cycle is found. Will always carry both a `first` and a `period` value, no matter which of the `seqs` or `f` interfaces is used.
def naive(seqs, f=None, start=None, key=lambda x: x):
    # Remember the step at which each (keyed) value was first seen;
    # O(n) memory, but recovers both the first position and the period.
    history = {}
    for step, value in enumerate(seqs[0]):
        keyed = key(value)
        yield value
        if keyed in history:
            raise CycleDetected(
                first=history[keyed],
                period=step - history[keyed])
        history[keyed] = step
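A usage sketch on an iterated function; it assumes CycleDetected exposes the first and period values it is raised with:

def iterated(f, x):
    # Infinite sequence x, f(x), f(f(x)), ...
    while True:
        yield x
        x = f(x)

try:
    for _ in naive([iterated(lambda x: (x * x + 1) % 255, 3)]):
        pass
except CycleDetected as c:
    print(c.first, c.period)  # index of the first repeated state, cycle length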
1,014,084
Gosper's cycle detector See help(cycle_detector) for more context. Args: seqs: Sequences to detect cycles in. f, start: Function and starting state for a finite state machine Yields: Values yielded by the sequence if it terminates; undefined once a cycle is found. Raises: CycleDetected if a cycle is found. Unlike Floyd's and Brent's algorithms, Gosper's can only detect the period of a cycle; it cannot compute the position of its first occurrence.
def gosper(seqs, f=None, start=None, key=lambda x: x):
    # tab[e] holds the most recent (keyed) value whose one-based position
    # is divisible by 2**e, so only O(log n) values are kept.
    tab = []
    for c, value in enumerate(seqs[0], start=1):
        yield value
        try:
            e = tab.index(key(value))
            # Recover the position at which tab[e] was written and derive
            # the period from the distance to the current position.
            raise CycleDetected(
                period=c - ((((c >> e) - 1) | 1) << e))
        except ValueError:
            try:
                # (c ^ (c - 1)).bit_length() - 1 counts the trailing zero
                # bits of c, i.e. the largest e with 2**e dividing c.
                tab[(c ^ (c - 1)).bit_length() - 1] = key(value)
            except IndexError:
                # Store the keyed value so later index(key(value)) lookups
                # compare like with like.
                tab.append(key(value))
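The same driver works here, but only the period is available (see the note above):

def iterated(f, x):
    while True:
        yield x
        x = f(x)

try:
    for _ in gosper([iterated(lambda x: (x * x + 1) % 255, 3)]):
        pass
except CycleDetected as c:
    print(c.period)  # cycle length; Gosper's cannot recover `first`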
1,014,085
Instantiate all registered vodka applications. Args: config (dict or MungeConfig): configuration object
def instantiate(config): for handle, cfg in list(config["apps"].items()): if not cfg.get("enabled", True): continue app = get_application(handle) instances[app.handle] = app(cfg)
1,014,223
Parse all of the timing files, and generate some statistics about the run. Args: file_list: A list of timing files to parse var_list: A list of variables to look for in the timing file Returns: A dict mapping each variable to a summary of the form {'mean': ..., 'max': ..., 'min': ..., 'std': ...}
def generate_timing_stats(file_list, var_list):
    timing_result = dict()
    timing_summary = dict()
    for file in file_list:
        timing_result[file] = functions.parse_gptl(file, var_list)
    for var in var_list:
        var_time = []
        for f, data in timing_result.items():
            try:
                var_time.append(data[var])
            except KeyError:
                continue
        if len(var_time):
            timing_summary[var] = {'mean': np.mean(var_time),
                                   'max': np.max(var_time),
                                   'min': np.min(var_time),
                                   'std': np.std(var_time)}
    return timing_summary
1,014,259
Generate a scaling plot. Args: timing_data: data returned from a `*_scaling` method title: the title of the plot ylabel: the y-axis label of the plot description: a description of the plot plot_file: the file to write out to Returns: an image element containing the plot file and metadata
def generate_scaling_plot(timing_data, title, ylabel, description, plot_file): proc_counts = timing_data['proc_counts'] if len(proc_counts) > 2: plt.figure(figsize=(10, 8), dpi=150) plt.title(title) plt.xlabel("Number of processors") plt.ylabel(ylabel) for case, case_color in zip(['bench', 'model'], ['#91bfdb', '#fc8d59']): case_data = timing_data[case] means = case_data['means'] mins = case_data['mins'] maxs = case_data['maxs'] plt.fill_between(proc_counts, mins, maxs, facecolor=case_color, alpha=0.5) plt.plot(proc_counts, means, 'o-', color=case_color, label=case) plt.legend(loc='best') else: plt.figure(figsize=(5, 3)) plt.axis('off') plt.text(0.4, 0.8, "ERROR:") plt.text(0.0, 0.6, "Not enough data points to draw scaling plot") plt.text(0.0, 0.44, "To generate this data rerun BATS with the") plt.text(0.0, 0.36, "performance option enabled.") if livvkit.publish: plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600) plt.savefig(plot_file) plt.close() return elements.image(title, description, os.path.basename(plot_file))
1,014,261
Generate a timing breakdown plot showing how each timed variable contributes to the total runtime. Args: timing_stats: a dictionary of the form {proc_count : {model||bench : { var : { stat : val }}}} scaling_var: the variable that accounts for the total runtime title: the title of the plot description: the description of the plot plot_file: the file to write the plot out to Returns: an image element containing the plot file and metadata
def generate_timing_breakdown_plot(timing_stats, scaling_var, title, description, plot_file):
    # noinspection PyProtectedMember
    cmap_data = colormaps._viridis_data
    n_subplots = len(six.viewkeys(timing_stats))
    fig, ax = plt.subplots(1, n_subplots+1, figsize=(3*(n_subplots+2), 5))
    for plot_num, p_count in enumerate(
            sorted(six.iterkeys(timing_stats), key=functions.sort_processor_counts)):
        case_data = timing_stats[p_count]
        all_timers = set(six.iterkeys(case_data['model'])) | set(six.iterkeys(case_data['bench']))
        all_timers = sorted(list(all_timers), reverse=True)
        cmap_stride = int(len(cmap_data)/(len(all_timers)+1))
        colors = {all_timers[i]: cmap_data[i*cmap_stride] for i in range(len(all_timers))}

        sub_ax = plt.subplot(1, n_subplots+1, plot_num+1)
        sub_ax.set_title(p_count)
        sub_ax.set_ylabel('Runtime (s)')
        for case, var_data in case_data.items():
            if case == 'bench':
                bar_num = 2
            else:
                bar_num = 1

            offset = 0
            if var_data != {}:
                # Stack every timer except the total-runtime variable, then
                # top the stack up to the total with the scaling variable
                for var in sorted(six.iterkeys(var_data), reverse=True):
                    if var != scaling_var:
                        plt.bar(bar_num, var_data[var]['mean'], 0.8, bottom=offset,
                                color=colors[var], label=(var if bar_num == 1 else '_none'))
                        offset += var_data[var]['mean']

                plt.bar(bar_num, var_data[scaling_var]['mean']-offset, 0.8, bottom=offset,
                        color=colors[scaling_var], label=(scaling_var if bar_num == 1 else '_none'))

        sub_ax.set_xticks([1.4, 2.4])
        sub_ax.set_xticklabels(('test', 'bench'))

    plt.legend(loc=6, bbox_to_anchor=(1.05, 0.5))
    plt.tight_layout()

    # An extra, invisible subplot reserves horizontal space for the legend,
    # which is anchored outside the last real set of axes
    sub_ax = plt.subplot(1, n_subplots+1, n_subplots+1)
    hid_bar = plt.bar(1, 100)
    for group in hid_bar:
        group.set_visible(False)
    sub_ax.set_visible(False)

    if livvkit.publish:
        plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600)
    plt.savefig(plot_file)
    plt.close()
    return elements.image(title, description, os.path.basename(plot_file))
1,014,264
Add a new read to the current buffer. If it is a new read tuple (detected from ID), the buffer will be flushed. Args: read_tuple_id (int): ID of the read tuple. bases (str): Sequence of bases. qualities (str): Sequence of FASTQ qualities. segments (list of rnftools.rnfformat.segment): List of segments constituting the read.
def add_read( self, read_tuple_id, bases, qualities, segments, ): assert type(bases) is str, "Wrong type of bases: '{}'".format(bases) assert type(qualities) is str, "Wrong type of qualities: '{}'".format(qualities) assert type(segments) is tuple or type(segments) is list if self.current_read_tuple_id != read_tuple_id: self.flush_read_tuple() self.current_read_tuple_id = read_tuple_id self.seqs_bases.append(bases) self.seqs_qualities.append(qualities) self.segments.extend(segments)
1,014,431
Combine several profiles into this one, keeping the maximal value of each width. Args: *rnf_profiles (rnftools.rnfformat.RnfProfile): RNF profiles to combine.
def combine(self, *rnf_profiles):
    for rnf_profile in rnf_profiles:
        self.prefix_width = max(self.prefix_width, rnf_profile.prefix_width)
        self.read_tuple_id_width = max(self.read_tuple_id_width, rnf_profile.read_tuple_id_width)
        self.genome_id_width = max(self.genome_id_width, rnf_profile.genome_id_width)
        self.chr_id_width = max(self.chr_id_width, rnf_profile.chr_id_width)
        self.coor_width = max(self.coor_width, rnf_profile.coor_width)
1,014,457
Load RNF values from a read tuple name. Args: read_tuple_name (str): Read tuple name which the values are taken from.
def load(self, read_tuple_name):
    self.prefix_width = 0
    self.read_tuple_id_width = 0
    self.genome_id_width = 0
    self.chr_id_width = 0
    self.coor_width = 0

    parts = read_tuple_name.split("__")

    self.prefix_width = len(parts[0])
    self.read_tuple_id_width = len(parts[1])
    segments = parts[2][1:-1].split("),(")
    for segment in segments:
        int_widths = list(map(len, segment.split(",")))
        self.genome_id_width = max(self.genome_id_width, int_widths[0])
        self.chr_id_width = max(self.chr_id_width, int_widths[1])
        # left and right coordinates are fields 3 and 4 of a segment,
        # matching the indices used by apply() and check()
        self.coor_width = max(self.coor_width, int_widths[3], int_widths[4])
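A sketch with an illustrative RNF read tuple name (3-character prefix, 4-digit hexadecimal tuple ID, one segment):

p = RnfProfile()
p.load("sim__000f__(01,2,F,0000100,0000200)__pair")
print(p.prefix_width, p.read_tuple_id_width)            # 3 4
print(p.genome_id_width, p.chr_id_width, p.coor_width)  # 2 1 7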
1,014,458
Apply profile on a read tuple name and update read tuple ID. Args: read_tuple_name (str): Read tuple name to be updated. read_tuple_id (int): New read tuple ID. synchronize_widths (bool): Update widths (in accordance with this profile).
def apply(self, read_tuple_name, read_tuple_id=None, synchronize_widths=True): parts = read_tuple_name.split("__") parts[0] = self._fill_right(parts[0], "-", self.prefix_width) if read_tuple_id is not None: parts[1] = "{:x}".format(read_tuple_id) parts[1] = self._fill_left(parts[1], "0", self.read_tuple_id_width) if synchronize_widths: new_segments = [] segments = parts[2][1:-1].split("),(") for segment in segments: values = segment.split(",") values[0] = values[0].zfill(self.genome_id_width) values[1] = values[1].zfill(self.chr_id_width) values[3] = values[3].zfill(self.coor_width) values[4] = values[4].zfill(self.coor_width) new_segments.append("(" + ",".join(values) + ")") parts[2] = ",".join(new_segments) return "__".join(parts)
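Continuing the sketch above: apply pads a narrower name to the loaded widths and rewrites its tuple ID:

p = RnfProfile()
p.load("sim__000f__(01,2,F,0000100,0000200)__pair")
print(p.apply("sim__1__(1,2,F,100,200)__pair", read_tuple_id=16))
# sim__0010__(01,2,F,0000100,0000200)__pair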
1,014,459
Check if the given read tuple name satisfies this profile. Args: read_tuple_name (str): Read tuple name.
def check(self, read_tuple_name): parts = read_tuple_name.split("__") if len(parts[0]) != self.prefix_width or len(parts[1]) != self.read_tuple_id_width: return False segments = parts[2][1:-1].split("),(") for segment in segments: int_widths = list(map(len, segment.split(","))) if self.genome_id_width != int_widths[0]: return False if self.chr_id_width != int_widths[1]: return False if self.coor_width != int_widths[3] or self.coor_width != int_widths[4]: return False return True
1,014,460
Set the desktop wallpaper. Sets the desktop wallpaper to an image. Args: image (str): The path to the image to be set as wallpaper.
def set_wallpaper(image):
    desktop_env = system.get_name()

    if desktop_env in ['gnome', 'unity', 'cinnamon', 'pantheon', 'mate']:
        uri = 'file://%s' % image
        SCHEMA = 'org.gnome.desktop.background'
        KEY = 'picture-uri'
        if desktop_env == 'mate':
            uri = image
            SCHEMA = 'org.mate.background'
            KEY = 'picture-filename'

        try:
            from gi.repository import Gio
            gsettings = Gio.Settings.new(SCHEMA)
            gsettings.set_string(KEY, uri)
        except ImportError:
            try:
                gsettings_proc = sp.Popen(
                    ['gsettings', 'set', SCHEMA, KEY, uri])
                gsettings_proc.communicate()
                if gsettings_proc.returncode != 0:
                    raise OSError('gsettings failed')
            except OSError:
                # MATE < 1.6
                sp.Popen(['mateconftool-2', '-t', 'string', '--set',
                          '/desktop/mate/background/picture_filename',
                          '%s' % image], stdout=sp.PIPE)

    elif desktop_env == 'gnome2':
        sp.Popen(
            ['gconftool-2', '-t', 'string', '--set',
             '/desktop/gnome/background/picture_filename', image]
        )
    elif desktop_env == 'kde':
        # This probably only works in Plasma 5+
        # Standard Plasma scripting snippet for setting the wallpaper on
        # every desktop (reconstructed; the original literal was elided)
        kde_script = dedent(
            '''\
            var allDesktops = desktops();
            for (var i = 0; i < allDesktops.length; i++) {{
                var d = allDesktops[i];
                d.wallpaperPlugin = "org.kde.image";
                d.currentConfigGroup = Array("Wallpaper", "org.kde.image", "General");
                d.writeConfig("Image", "file://{}");
            }}
            ''').format(image)

        sp.Popen(
            ['dbus-send', '--session',
             '--dest=org.kde.plasmashell',
             '--type=method_call', '/PlasmaShell',
             'org.kde.PlasmaShell.evaluateScript',
             'string:{}'.format(kde_script)]
        )
    elif desktop_env in ['kde3', 'trinity']:
        args = 'dcop kdesktop KBackgroundIface setWallpaper 0 "%s" 6' % image
        sp.Popen(args, shell=True)
    elif desktop_env == 'xfce4':
        # XFCE4's image property is not image-path but last-image (What?)
        list_of_properties = system.get_cmd_out(
            ['xfconf-query', '-R', '-l', '-c', 'xfce4-desktop', '-p',
             '/backdrop']
        )
        for i in list_of_properties.split('\n'):
            if i.endswith('last-image'):
                # The property given is a background property
                sp.Popen(
                    ['xfconf-query -c xfce4-desktop -p %s -s "%s"' %
                     (i, image)],
                    shell=True)
                sp.Popen(['xfdesktop --reload'], shell=True)
    elif desktop_env == 'razor-qt':
        desktop_conf = configparser.ConfigParser()
        # Development version
        desktop_conf_file = os.path.join(
            get_config_dir('razor')[0], 'desktop.conf')
        if os.path.isfile(desktop_conf_file):
            config_option = r'screens\1\desktops\1\wallpaper'
        else:
            desktop_conf_file = os.path.join(
                os.path.expanduser('~'), '.razor/desktop.conf')
            config_option = r'desktops\1\wallpaper'

        desktop_conf.read(os.path.join(desktop_conf_file))
        try:
            if desktop_conf.has_option('razor', config_option):
                desktop_conf.set('razor', config_option, image)
                with codecs.open(desktop_conf_file, 'w', encoding='utf-8',
                                 errors='replace') as f:
                    desktop_conf.write(f)
        except Exception:
            pass
    elif desktop_env in ['fluxbox', 'jwm', 'openbox', 'afterstep', 'i3']:
        try:
            args = ['feh', '--bg-scale', image]
            sp.Popen(args)
        except OSError:
            sys.stderr.write('Error: Failed to set wallpaper with feh!\n')
            sys.stderr.write('Please make sure that you have feh installed.\n')
    elif desktop_env == 'icewm':
        args = ['icewmbg', image]
        sp.Popen(args)
    elif desktop_env == 'blackbox':
        args = ['bsetbg', '-full', image]
        sp.Popen(args)
    elif desktop_env == 'lxde':
        args = 'pcmanfm --set-wallpaper %s --wallpaper-mode=scaled' % image
        sp.Popen(args, shell=True)
    elif desktop_env == 'lxqt':
        args = 'pcmanfm-qt --set-wallpaper %s --wallpaper-mode=scaled' % image
        sp.Popen(args, shell=True)
    elif desktop_env == 'windowmaker':
        args = 'wmsetbg -s -u %s' % image
        sp.Popen(args, shell=True)
    elif desktop_env == 'enlightenment':
        args = 'enlightenment_remote -desktop-bg-add 0 0 0 0 %s' % image
        sp.Popen(args, shell=True)
    elif desktop_env == 'awesome':
        with sp.Popen("awesome-client", stdin=sp.PIPE) as awesome_client:
            command = ('local gears = require("gears"); for s = 1,'
                       ' screen.count() do gears.wallpaper.maximized'
                       '("%s", s, true); end;') % image
            awesome_client.communicate(input=bytes(command, 'UTF-8'))
    elif desktop_env == 'windows':
        # Registry-based batch script (reconstructed; the original literal
        # was elided): set the wallpaper key, then force a refresh
        WINDOWS_SCRIPT = dedent(
            '''\
            reg add "HKEY_CURRENT_USER\\Control Panel\\Desktop" /v Wallpaper /t REG_SZ /d %s /f
            rundll32.exe user32.dll, UpdatePerUserSystemParameters
            ''') % image
        windows_script_file = os.path.join(
            tempfile.gettempdir(), 'wallscript.bat')
        with open(windows_script_file, 'w') as f:
            f.write(WINDOWS_SCRIPT)
        sp.Popen([windows_script_file], shell=True)
        # Sometimes the method above works
        # and sometimes the one below
        SPI_SETDESKWALLPAPER = 20
        ctypes.windll.user32.SystemParametersInfoA(
            SPI_SETDESKWALLPAPER, 0, image, 0)
    elif desktop_env == 'mac':
        try:
            from appscript import app, mactypes
            app('Finder').desktop_picture.set(mactypes.File(image))
        except ImportError:
            # AppleScript fallback (reconstructed; the original literal was
            # elided): ask Finder to set the desktop picture
            OSX_SCRIPT = dedent(
                '''\
                tell application "Finder"
                    set desktop picture to POSIX file "%s"
                end tell
                ''') % image
            sp.Popen(['osascript', '-e', OSX_SCRIPT])
    else:
        try:
            sp.Popen(['feh', '--bg-scale', image])
            # feh is nearly a catch-all for Linux WMs
        except OSError:
            pass
1,014,634
Add a new graph to the overlap report. Args: y (str): Value plotted on y-axis. x_label (str): Label on x-axis. y_label (str): Label on y-axis. title (str): Title of the plot. x_run ((float,float)): x-range. y_run ((int,int)): y-range. svg_size_px ((int,int)): Size of SVG image in pixels. key_position (str): GnuPlot position of the legend.
def add_graph(
        self,
        y,
        x_label=None,
        y_label="",
        title="",
        x_run=None,
        y_run=None,
        svg_size_px=None,
        key_position="bottom right",
):
    if x_run is None:
        x_run = self.default_x_run
    if y_run is None:
        y_run = self.default_y_run
    if svg_size_px is None:
        svg_size_px = self.default_svg_size_px

    # These values do not depend on the panel, so load them only once
    x_run = self._load_x_run(x_run)
    y_run = self._load_y_run(y_run)
    svg_size_px = self._load_svg_size_px(svg_size_px)

    for panel in self.panels:
        panel.add_graph(
            y=y,
            x_run=x_run,
            y_run=y_run,
            svg_size_px=svg_size_px,
            y_label=y_label,
            x_label=x_label if x_label is not None else self.default_x_label,
            title=title,
            key_position=key_position,
        )
1,014,808
Convert DwgSim FASTQ file to RNF FASTQ file. Args: dwgsim_prefix (str): DwgSim prefix of the simulation (see its commandline parameters). fastq_rnf_fo (file): File object of RNF FASTQ. fai_fo (file): File object for FAI file of the reference genome. genome_id (int): RNF genome ID to be used. estimate_unknown_values (bool): Estimate unknown values (right coordinate of each end). number_of_read_tuples (int): Estimate of number of simulated read tuples (to set width).
def recode_dwgsim_reads(
        dwgsim_prefix,
        fastq_rnf_fo,
        fai_fo,
        genome_id,
        estimate_unknown_values,
        number_of_read_tuples=10**9,
):
    dwgsim_pattern = re.compile(
        '@(.*)_([0-9]+)_([0-9]+)_([01])_([01])_([01])_([01])_([0-9]+):([0-9]+):([0-9]+)_([0-9]+):([0-9]+):([0-9]+)_(([0-9abcdef])+)'
    )

    ###
    # DWGSIM read name format
    #
    #   1) contig name (chromosome name)
    #   2) start end 1 (one-based)
    #   3) start end 2 (one-based)
    #   4) strand end 1 (0 - forward, 1 - reverse)
    #   5) strand end 2 (0 - forward, 1 - reverse)
    #   6) random read end 1 (0 - from the mutated reference, 1 - random)
    #   7) random read end 2 (0 - from the mutated reference, 1 - random)
    #   8) number of sequencing errors end 1 (color errors for colorspace)
    #   9) number of SNPs end 1
    #  10) number of indels end 1
    #  11) number of sequencing errors end 2 (color errors for colorspace)
    #  12) number of SNPs end 2
    #  13) number of indels end 2
    #  14) read number (unique within a given contig/chromosome)
    ###

    fai_index = rnftools.utils.FaIdx(fai_fo=fai_fo)
    read_tuple_id_width = len(format(number_of_read_tuples, 'x'))

    # parsing FQ file
    read_tuple_id = 0
    last_read_tuple_name = None
    old_fq = "{}.bfast.fastq".format(dwgsim_prefix)

    fq_creator = rnftools.rnfformat.FqCreator(
        fastq_fo=fastq_rnf_fo,
        read_tuple_id_width=read_tuple_id_width,
        genome_id_width=2,
        chr_id_width=fai_index.chr_id_width,
        coor_width=fai_index.coor_width,
        info_reads_in_tuple=True,
        info_simulator="dwgsim",
    )

    i = 0
    with open(old_fq) as f1:
        for line in f1:
            if i % 4 == 0:
                read_tuple_name = line[1:].strip()
                if read_tuple_name != last_read_tuple_name:
                    new_tuple = True
                    if last_read_tuple_name is not None:
                        read_tuple_id += 1
                else:
                    new_tuple = False
                last_read_tuple_name = read_tuple_name

                m = dwgsim_pattern.search(line)
                if m is None:
                    rnftools.utils.error(
                        "Read tuple '{}' was not created by DwgSim.".format(line[1:]),
                        program="RNFtools",
                        subprogram="MIShmash",
                        exception=ValueError,
                    )

                contig_name = m.group(1)
                start_1 = int(m.group(2))
                start_2 = int(m.group(3))
                direction_1 = "F" if int(m.group(4)) == 0 else "R"
                direction_2 = "F" if int(m.group(5)) == 0 else "R"
                # random_1 = bool(m.group(6))
                # random_2 = bool(m.group(7))
                # seq_err_1 = int(m.group(8))
                # snp_1 = int(m.group(9))
                # indels_1 = int(m.group(10))
                # seq_err_2 = int(m.group(11))
                # snp_2 = int(m.group(12))
                # indels_2 = int(m.group(13))
                # read_tuple_id_dwg = int(m.group(14), 16)
                chr_id = fai_index.dict_chr_ids[contig_name] if fai_index.dict_chr_ids != {} else "0"
            elif i % 4 == 1:
                bases = line.strip()
                if new_tuple:
                    segment = rnftools.rnfformat.Segment(
                        genome_id=genome_id,
                        chr_id=chr_id,
                        direction=direction_1,
                        left=start_1,
                        right=start_1 + len(bases) - 1 if estimate_unknown_values else 0,
                    )
                else:
                    segment = rnftools.rnfformat.Segment(
                        genome_id=genome_id,
                        chr_id=chr_id,
                        direction=direction_2,
                        left=start_2,
                        right=start_2 + len(bases) - 1 if estimate_unknown_values else 0,
                    )
            elif i % 4 == 2:
                pass
            elif i % 4 == 3:
                qualities = line.strip()
                fq_creator.add_read(
                    read_tuple_id=read_tuple_id,
                    bases=bases,
                    qualities=qualities,
                    segments=[segment],
                )

            i += 1

    fq_creator.flush_read_tuple()
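A minimal sketch of the 14-field name format documented in the comments above; the sample name is illustrative:

import re

dwgsim_pattern = re.compile(
    '@(.*)_([0-9]+)_([0-9]+)_([01])_([01])_([01])_([01])_([0-9]+):([0-9]+):([0-9]+)_([0-9]+):([0-9]+):([0-9]+)_(([0-9abcdef])+)'
)

m = dwgsim_pattern.search("@chr1_1000_1250_0_1_0_0_2:1:0_1:0:1_0000ff")
print(m.group(1), m.group(2), m.group(3))    # chr1 1000 1250
print("F" if int(m.group(4)) == 0 else "R")  # F -- strand of end 1
print(int(m.group(14), 16))                  # 255 -- per-contig read counter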
1,014,835
Parse a CISM output log and extract some information. Args: file_path: absolute path to the log file Returns: A table element (created via the elements module) summarizing the dycore type, processor count, and convergence statistics found in the log
def parse_log(file_path): if not os.path.isfile(file_path): return elements.error("Output Log", "Could not open file: " + file_path.split(os.sep)[-1]) headers = ["Converged Iterations", "Avg. Iterations to Converge", "Processor Count", "Dycore Type"] with open(file_path, 'r') as f: dycore_types = {"0": "Glide", "1": "Glam", "2": "Glissade", "3": "Albany_felix", "4": "BISICLES"} curr_step = 0 proc_count = 0 iter_number = 0 converged_iters = [] iters_to_converge = [] for line in f: split = line.split() if ('CISM dycore type' in line): if line.split()[-1] == '=': dycore_type = dycore_types[next(f).strip()] else: dycore_type = dycore_types[line.split()[-1]] elif ('total procs' in line): proc_count += int(line.split()[-1]) elif ('Nonlinear Solver Step' in line): curr_step = int(line.split()[4]) elif ('Compute ice velocities, time = ' in line): converged_iters.append(curr_step) curr_step = float(line.split()[-1]) elif ('"SOLVE_STATUS_CONVERGED"' in line): split = line.split() iters_to_converge.append(int(split[split.index('"SOLVE_STATUS_CONVERGED"') + 2])) elif ("Compute dH/dt" in line): iters_to_converge.append(int(iter_number)) elif len(split) > 0 and split[0].isdigit(): iter_number = split[0] if iters_to_converge == []: iters_to_converge.append(int(iter_number)) data = { "Dycore Type": dycore_type, "Processor Count": proc_count, "Converged Iterations": len(converged_iters), "Avg. Iterations to Converge": np.mean(iters_to_converge) } return elements.table("Output Log", headers, data)
1,014,905
Convert the CISM configuration file to a python dictionary Args: file_path: absolute path to the configuration file Returns: A dictionary representation of the given file
def parse_config(file_path): if not os.path.isfile(file_path): return {} parser = ConfigParser() parser.read(file_path) # Strip out inline comments for s in parser._sections: for v in six.iterkeys(parser._sections[s]): parser._sections[s][v] = parser._sections[s][v].split("#")[0].strip() return parser._sections
1,014,906
Create RNF representation of this segment. Args: rnf_profile (rnftools.rnfformat.RnfProfile): RNF profile (with widths).
def stringize( self, rnf_profile, ): coor_width = max(rnf_profile.coor_width, len(str(self.left)), len(str(self.right))) return "({},{},{},{},{})".format( str(self.genome_id).zfill(rnf_profile.genome_id_width), str(self.chr_id).zfill(rnf_profile.chr_id_width), self.direction, str(self.left).zfill(coor_width), str(self.right).zfill(coor_width) )
1,014,966
Get RNF values for this segment from its textual representation and save them into this object. Args: string (str): Textual representation of a segment.
def destringize(self, string): m = segment_destr_pattern.match(string) self.genome_id = int(m.group(1)) self.chr_id = int(m.group(2)) self.direction = m.group(3) self.left = int(m.group(4)) self.right = int(m.group(5))
1,014,967
Start configuration process for the provided handler Args: cfg (dict): config container handler (config.Handler class): config handler to use path (str): current path in the configuration progress
def configure(self, cfg, handler, path=""):
    # configure simple value attributes (str, int, etc.)
    for name, attr in handler.attributes():
        if cfg.get(name) is not None:
            continue
        if attr.expected_type not in [list, dict]:
            cfg[name] = self.set(handler, attr, name, path, cfg)
        elif attr.default is None and not hasattr(handler, "configure_%s" % name):
            self.action_required.append(("%s.%s: %s" % (path, name, attr.help_text)).strip("."))

    # configure attributes that have complex handlers defined on the
    # config Handler class (class methods with a configure_ prefix)
    for name, attr in handler.attributes():
        if cfg.get(name) is not None:
            continue

        if hasattr(handler, "configure_%s" % name):
            fn = getattr(handler, "configure_%s" % name)
            fn(self, cfg, "%s.%s" % (path, name))
            if attr.expected_type in [list, dict] and not cfg.get(name):
                try:
                    del cfg[name]
                except KeyError:
                    pass
1,015,019
Plot accuracy and loss from a pandas DataFrame. Args: data: pandas DataFrame in the format of the Keras CSV log. output_dir_path: The path to the directory where the resulting plots should end up.
def _from_keras_log_format(data, **kwargs): data_val = pd.DataFrame(data[['epoch']]) data_val['acc'] = data['val_acc'] data_val['loss'] = data['val_loss'] data_val['data'] = 'validation' data_training = pd.DataFrame(data[['acc', 'loss', 'epoch']]) data_training['data'] = 'training' result = pd.concat([data_training, data_val], sort=False) plot(result, **kwargs)
1,015,023
Plot accuracy and loss from a Keras CSV log. Args: csv_path: The path to the CSV log with the actual data. output_dir_path: The path to the directory where the resulting plots should end up.
def from_keras_log(csv_path, output_dir_path, **kwargs):
    # automatically detect the separator by using Python's CSV parser
    data = pd.read_csv(csv_path, sep=None, engine='python')
    _from_keras_log_format(data, output_dir_path=output_dir_path, **kwargs)
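Hypothetical usage; training.csv stands for a log written by keras.callbacks.CSVLogger with epoch, acc, loss, val_acc and val_loss columns:

from_keras_log('training.csv', output_dir_path='plots')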
1,015,024
Return a configuration value. Args: key_name (str): configuration key Returns: The value for the specified configuration key or, if it is not found in the config, the default value specified by the Configuration Handler class of this component
def get_config(self, key_name): if key_name in self.config: return self.config.get(key_name) return self.Configuration.default(key_name, inst=self)
1,015,048
Get resource or collection of resources. --- parameters: - name: resource in: path type: string
async def get(self, request, resource=None, **kwargs): if resource is not None and resource != '': return self.to_simple(request, resource, **kwargs) return self.to_simple(request, self.collection, many=True, **kwargs)
1,015,064
Update a resource. --- parameters: - name: resource in: path type: string
async def put(self, request, resource=None, **kwargs): if resource is None: raise RESTNotFound(reason='Resource not found') return await self.post(request, resource=resource, **kwargs)
1,015,068
Initialize preprocessor. Args: column_metadata(dict): Meta information of the column.
def __init__(self, column_metadata): self.column_metadata = column_metadata self.col_name = column_metadata['name'] self.check_data_type()
1,015,421
Check the type of the transformer and column match. Args: column_metadata(dict): Metadata of the column. Raises a ValueError if the types don't match
def check_data_type(self): metadata_type = self.column_metadata.get('type') if self.type != metadata_type and metadata_type not in self.type: raise ValueError('Types of transformer don\'t match')
1,015,422
Prepare the transformer to convert data. Args: col(pandas.DataFrame): Data to transform. Returns: None
def fit(self, col): dates = self.safe_datetime_cast(col) self.default_val = dates.groupby(dates).count().index[0].timestamp() * 1e9
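Note that groupby(...).count() is indexed by the sorted group keys, so index[0] is the earliest date seen, not the most frequent one; a small sketch:

import pandas as pd

dates = pd.Series(pd.to_datetime(['2020-05-01', '2020-01-01', '2020-05-01']))
print(dates.groupby(dates).count().index[0])  # 2020-01-01 00:00:00
print(dates.groupby(dates).count().index[0].timestamp() * 1e9)  # ns epoch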
1,015,424
Prepare the transformer to convert data and return the processed table. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame
def transform(self, col): out = pd.DataFrame() out[self.col_name] = self.safe_datetime_cast(col) out[self.col_name] = self.to_timestamp(out) return out
1,015,425
Converts data back into original format. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.DataFrame
def reverse_transform(self, col): if isinstance(col, pd.Series): col = col.to_frame() output = pd.DataFrame(index=col.index) output[self.col_name] = col.apply(self.safe_date, axis=1) return output
1,015,426
Parses string values into datetime. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.Series
def safe_datetime_cast(self, col):
    casted_dates = pd.to_datetime(col[self.col_name], format=self.date_format, errors='coerce')

    if len(casted_dates[casted_dates.isnull()]):
        # This will raise an error for badly formatted data
        # but not for out-of-bounds or missing dates.
        slice_ = casted_dates.isnull() & ~col[self.col_name].isnull()
        col[slice_][self.col_name].apply(self.strptime_format)

    return casted_dates
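With errors='coerce', malformed strings and missing values both become NaT; the re-parse above then raises only for rows that were present but malformed. A sketch:

import pandas as pd

col = pd.DataFrame({'created': ['2020-01-31', 'not-a-date', None]})
casted = pd.to_datetime(col['created'], format='%Y-%m-%d', errors='coerce')
print(casted.isnull().tolist())  # [False, True, True]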
1,015,427
Transform a datetime series into linux epoch. Args: data(pandas.DataFrame): DataFrame containing a column named as `self.col_name`. Returns: pandas.Series
def to_timestamp(self, data): result = pd.Series(index=data.index) _slice = ~data[self.col_name].isnull() result[_slice] = data[_slice][self.col_name].astype('int64') return result
1,015,428
Transform x[self.col_name] into a date string. Args: x(dict like / pandas.Series): Row containing data to cast safely. Returns: str
def safe_date(self, x): t = x[self.col_name] if np.isnan(t): return t elif np.isposinf(t): t = sys.maxsize elif np.isneginf(t): t = -sys.maxsize tmp = time.localtime(float(t) / 1e9) return time.strftime(self.date_format, tmp)
1,015,429