Dataset columns: text_prompt (string, 100 to 17.7k characters) and code_prompt (string, 7 to 9.86k characters).
<SYSTEM_TASK:> Yield installed packages <END_TASK> <USER_TASK:> Description: def get_distributions(self, show, pkg_name="", version=""): """ Yield installed packages @param show: Type of package(s) to show; active, non-active or all @type show: string: "active", "non-active", "all" @param pkg_name: PyPI project name @type pkg_name: string @param version: project's PyPI version @type version: string @returns: yields tuples of distribution and True or False depending on active state. e.g. (dist, True) """
#pylint: disable-msg=W0612
#'name' is a placeholder for the sorted list
for name, dist in self.get_alpha(show, pkg_name, version):
    ver = dist.version
    for package in self.environment[dist.project_name]:
        if ver == package.version:
            if show == "nonactive" and dist not in self.working_set:
                yield (dist, self.query_activated(dist))
            elif show == "active" and dist in self.working_set:
                yield (dist, self.query_activated(dist))
            elif show == "all":
                yield (dist, self.query_activated(dist))
<SYSTEM_TASK:> Return list of alphabetized packages <END_TASK> <USER_TASK:> Description: def get_alpha(self, show, pkg_name="", version=""): """ Return list of alphabetized packages @param pkg_name: PyPI project name @type pkg_name: string @param version: project's PyPI version @type version: string @returns: Alphabetized list of tuples. Each tuple contains a string and a pkg_resources Distribution object. The string is the project name + version. """
alpha_list = []
for dist in self.get_packages(show):
    if pkg_name and dist.project_name != pkg_name:
        #Only checking for a single package name
        pass
    elif version and dist.version != version:
        #Only checking for a single version of a package
        pass
    else:
        alpha_list.append((dist.project_name + dist.version, dist))
alpha_list.sort()
return alpha_list
<SYSTEM_TASK:> Return list of Distributions filtered by active status or all <END_TASK> <USER_TASK:> Description: def get_packages(self, show): """ Return list of Distributions filtered by active status or all @param show: Type of package(s) to show; active, non-active or all @type show: string: "active", "non-active", "all" @returns: list of pkg_resources Distribution objects """
if show == 'nonactive' or show == "all":
    all_packages = []
    for package in self.environment:
        #There may be multiple versions of same packages
        for i in range(len(self.environment[package])):
            if self.environment[package][i]:
                all_packages.append(self.environment[package][i])
    return all_packages
else:
    # Only activated packages
    return self.working_set
<SYSTEM_TASK:> Return case-sensitive package name given any-case package name <END_TASK> <USER_TASK:> Description: def case_sensitive_name(self, package_name): """ Return case-sensitive package name given any-case package name @param project_name: PyPI project name @type project_name: string """
if len(self.environment[package_name]):
    return self.environment[package_name][0].project_name
<SYSTEM_TASK:> Non-atomic cache increment operation. Not optimal but <END_TASK> <USER_TASK:> Description: def cache_incr(self, key): """ Non-atomic cache increment operation. Not optimal but consistent across different cache backends. """
cache.set(key, cache.get(key, 0) + 1, self.expire_after())
<SYSTEM_TASK:> Call all method on plugins in list, that define it, with provided <END_TASK> <USER_TASK:> Description: def call_plugins(plugins, method, *arg, **kw): """Call all method on plugins in list, that define it, with provided arguments. The first response that is not None is returned. """
for plug in plugins:
    func = getattr(plug, method, None)
    if func is None:
        continue
    #LOG.debug("call plugin %s: %s", plug.name, method)
    result = func(*arg, **kw)
    if result is not None:
        return result
return None
<SYSTEM_TASK:> Load plugins, either builtin, others, or both. <END_TASK> <USER_TASK:> Description: def load_plugins(builtin=True, others=True): """Load plugins, either builtin, others, or both. """
for entry_point in pkg_resources.iter_entry_points('yolk.plugins'):
    #LOG.debug("load plugin %s" % entry_point)
    try:
        plugin = entry_point.load()
    except KeyboardInterrupt:
        raise
    except Exception as err_msg:
        # never want a plugin load to exit yolk
        # but we can't log here because the logger is not yet
        # configured
        warn("Unable to load plugin %s: %s" % (entry_point, err_msg),
             RuntimeWarning)
        continue
    if plugin.__module__.startswith('yolk.plugins'):
        if builtin:
            yield plugin
    elif others:
        yield plugin
<SYSTEM_TASK:> Returns a Boto connection to the provided S3 bucket. <END_TASK> <USER_TASK:> Description: def s3_connect(bucket_name, s3_access_key_id, s3_secret_key): """ Returns a Boto connection to the provided S3 bucket. """
conn = connect_s3(s3_access_key_id, s3_secret_key)
try:
    return conn.get_bucket(bucket_name)
except S3ResponseError as e:
    if e.status == 403:
        raise Exception("Bad Amazon S3 credentials.")
    raise
<SYSTEM_TASK:> Lists the contents of the S3 bucket that end in .tbz and match <END_TASK> <USER_TASK:> Description: def s3_list(s3_bucket, s3_access_key_id, s3_secret_key, prefix=None): """ Lists the contents of the S3 bucket that end in .tbz and match the passed prefix, if any. """
bucket = s3_connect(s3_bucket, s3_access_key_id, s3_secret_key)
return sorted([key.name for key in bucket.list()
               if key.name.endswith(".tbz") and
               (prefix is None or key.name.startswith(prefix))])
<SYSTEM_TASK:> Downloads the file matching the provided key, in the provided bucket, <END_TASK> <USER_TASK:> Description: def s3_download(output_file_path, s3_bucket, s3_access_key_id, s3_secret_key, s3_file_key=None, prefix=None): """ Downloads the file matching the provided key, in the provided bucket, from Amazon S3. If s3_file_key is none, it downloads the last file from the provided bucket with the .tbz extension, filtering by prefix if it is provided. """
bucket = s3_connect(s3_bucket, s3_access_key_id, s3_secret_key)
if not s3_file_key:
    keys = s3_list(s3_bucket, s3_access_key_id, s3_secret_key, prefix)
    if not keys:
        raise Exception("Target S3 bucket is empty")
    s3_file_key = keys[-1]
key = Key(bucket, s3_file_key)
with open(output_file_path, "w+") as f:
    f.write(key.read())
<SYSTEM_TASK:> Uploads to Amazon S3 the contents of the provided file, keyed <END_TASK> <USER_TASK:> Description: def s3_upload(source_file_path, bucket_name, s3_access_key_id, s3_secret_key): """ Uploads to Amazon S3 the contents of the provided file, keyed with the name of the file. """
key = s3_key(bucket_name, s3_access_key_id, s3_secret_key)
file_name = source_file_path.split("/")[-1]
key.key = file_name
if key.exists():
    raise Exception("s3 key %s already exists for current period." % (file_name))
key.set_contents_from_filename(source_file_path)
<SYSTEM_TASK:> Write a line to stdout if it isn't in a blacklist <END_TASK> <USER_TASK:> Description: def write(self, inline): """ Write a line to stdout if it isn't in a blacklist Try to get the name of the calling module to see if we want to filter it. If there is no calling module, use current frame in case there's a traceback before there is any calling module """
frame = inspect.currentframe().f_back
if frame:
    mod = frame.f_globals.get('__name__')
else:
    mod = sys._getframe(0).f_globals.get('__name__')
if not mod in self.modulenames:
    self.stdout.write(inline)
<SYSTEM_TASK:> Return plugin object if CLI option is activated and method exists <END_TASK> <USER_TASK:> Description: def get_plugin(self, method): """ Return plugin object if CLI option is activated and method exists @param method: name of plugin's method we're calling @type method: string @returns: list of plugins with `method` """
all_plugins = []
for entry_point in pkg_resources.iter_entry_points('yolk.plugins'):
    plugin_obj = entry_point.load()
    plugin = plugin_obj()
    plugin.configure(self.options, None)
    if plugin.enabled:
        if not hasattr(plugin, method):
            self.logger.warn("Error: plugin has no method: %s" % method)
            plugin = None
        else:
            all_plugins.append(plugin)
return all_plugins
<SYSTEM_TASK:> Set log level according to command-line options <END_TASK> <USER_TASK:> Description: def set_log_level(self): """ Set log level according to command-line options @returns: logger object """
if self.options.debug:
    self.logger.setLevel(logging.DEBUG)
elif self.options.quiet:
    self.logger.setLevel(logging.ERROR)
else:
    self.logger.setLevel(logging.INFO)
self.logger.addHandler(logging.StreamHandler())
return self.logger
<SYSTEM_TASK:> Perform actions based on CLI options <END_TASK> <USER_TASK:> Description: def run(self): """ Perform actions based on CLI options @returns: status code """
opt_parser = setup_opt_parser() (self.options, remaining_args) = opt_parser.parse_args() logger = self.set_log_level() pkg_spec = validate_pypi_opts(opt_parser) if not pkg_spec: pkg_spec = remaining_args self.pkg_spec = pkg_spec if not self.options.pypi_search and (len(sys.argv) == 1 or\ len(remaining_args) > 2): opt_parser.print_help() return 2 #Options that depend on querying installed packages, not PyPI. #We find the proper case for package names if they are installed, #otherwise PyPI returns the correct case. if self.options.show_deps or self.options.show_all or \ self.options.show_active or self.options.show_non_active or \ (self.options.show_updates and pkg_spec): want_installed = True else: want_installed = False #show_updates may or may not have a pkg_spec if not want_installed or self.options.show_updates: self.pypi = CheeseShop(self.options.debug) #XXX: We should return 2 here if we couldn't create xmlrpc server if pkg_spec: (self.project_name, self.version, self.all_versions) = \ self.parse_pkg_ver(want_installed) if want_installed and not self.project_name: logger.error("%s is not installed." % pkg_spec[0]) return 1 #I could prefix all these with 'cmd_' and the methods also #and then iterate over the `options` dictionary keys... commands = ['show_deps', 'query_metadata_pypi', 'fetch', 'versions_available', 'show_updates', 'browse_website', 'show_download_links', 'pypi_search', 'show_pypi_changelog', 'show_pypi_releases', 'yolk_version', 'show_all', 'show_active', 'show_non_active', 'show_entry_map', 'show_entry_points'] #Run first command it finds, and only the first command, then return #XXX: Check if more than one command was set in options and give error? for action in commands: if getattr(self.options, action): return getattr(self, action)() opt_parser.print_help()
<SYSTEM_TASK:> Check installed packages for available updates on PyPI <END_TASK> <USER_TASK:> Description: def show_updates(self): """ Check installed packages for available updates on PyPI @param project_name: optional package name to check; checks every installed package if none specified @type project_name: string @returns: None """
dists = Distributions()
if self.project_name:
    #Check for a single package
    pkg_list = [self.project_name]
else:
    #Check for every installed package
    pkg_list = get_pkglist()
found = None
for pkg in pkg_list:
    for (dist, active) in dists.get_distributions("all", pkg,
            dists.get_highest_installed(pkg)):
        (project_name, versions) = \
                self.pypi.query_versions_pypi(dist.project_name)
        if versions:
            #PyPI returns them in chronological order,
            #but who knows if its guaranteed in the API?
            #Make sure we grab the highest version:
            newest = get_highest_version(versions)
            if newest != dist.version:
                #We may have newer than what PyPI knows about
                if pkg_resources.parse_version(dist.version) < \
                        pkg_resources.parse_version(newest):
                    found = True
                    print(" %s %s (%s)" % (project_name, dist.version, newest))
if not found and self.project_name:
    self.logger.info("You have the latest version installed.")
elif not found:
    self.logger.info("No newer packages found at The Cheese Shop")
return 0
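The update check above relies on pkg_resources.parse_version rather than a plain string comparison. A minimal standalone illustration of why that matters (the version strings are made up for the example; this snippet is not part of any dataset row):

from pkg_resources import parse_version

installed, newest = "1.9.0", "1.10.0"

# Naive string comparison gets this wrong: "1.9.0" sorts after "1.10.0" lexicographically.
print(installed < newest)                                 # False
# parse_version compares release segments numerically, which is what show_updates() needs.
print(parse_version(installed) < parse_version(newest))   # True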
<SYSTEM_TASK:> Show list of installed activated OR non-activated packages <END_TASK> <USER_TASK:> Description: def show_distributions(self, show): """ Show list of installed activated OR non-activated packages @param show: type of pkgs to show (all, active or nonactive) @type show: string @returns: None or 2 if error """
show_metadata = self.options.metadata #Search for any plugins with active CLI options with add_column() method plugins = self.get_plugin("add_column") #Some locations show false positive for 'development' packages: ignores = ["/UNIONFS", "/KNOPPIX.IMG"] #Check if we're in a workingenv #See http://cheeseshop.python.org/pypi/workingenv.py workingenv = os.environ.get('WORKING_ENV') if workingenv: ignores.append(workingenv) dists = Distributions() results = None for (dist, active) in dists.get_distributions(show, self.project_name, self.version): metadata = get_metadata(dist) for prefix in ignores: if dist.location.startswith(prefix): dist.location = dist.location.replace(prefix, "") #Case-insensitve search because of Windows if dist.location.lower().startswith(get_python_lib().lower()): develop = "" else: develop = dist.location if metadata: add_column_text = "" for my_plugin in plugins: #See if package is 'owned' by a package manager such as #portage, apt, rpm etc. #add_column_text += my_plugin.add_column(filename) + " " add_column_text += my_plugin.add_column(dist) + " " self.print_metadata(metadata, develop, active, add_column_text) else: print(str(dist) + " has no metadata") results = True if not results and self.project_name: if self.version: pkg_spec = "%s==%s" % (self.project_name, self.version) else: pkg_spec = "%s" % self.project_name if show == "all": self.logger.error("There are no versions of %s installed." \ % pkg_spec) else: self.logger.error("There are no %s versions of %s installed." \ % \ (show, pkg_spec)) return 2 elif show == "all" and results and self.options.fields: print("Versions with '*' are non-active.") print("Versions with '!' are deployed in development mode.")
<SYSTEM_TASK:> Show detailed PyPI ChangeLog for the last `hours` <END_TASK> <USER_TASK:> Description: def show_pypi_changelog(self): """ Show detailed PyPI ChangeLog for the last `hours` @returns: 0 = success or 1 if failed to retrieve from XML-RPC server """
hours = self.options.show_pypi_changelog
if not hours.isdigit():
    self.logger.error("Error: You must supply an integer.")
    return 1
try:
    changelog = self.pypi.changelog(int(hours))
except XMLRPCFault as err_msg:
    self.logger.error(err_msg)
    self.logger.error("ERROR: Couldn't retrieve changelog.")
    return 1
last_pkg = ''
for entry in changelog:
    pkg = entry[0]
    if pkg != last_pkg:
        print("%s %s\n\t%s" % (entry[0], entry[1], entry[3]))
        last_pkg = pkg
    else:
        print("\t%s" % entry[3])
return 0
<SYSTEM_TASK:> Show PyPI releases for the last number of `hours` <END_TASK> <USER_TASK:> Description: def show_pypi_releases(self): """ Show PyPI releases for the last number of `hours` @returns: 0 = success or 1 if failed to retrieve from XML-RPC server """
try:
    hours = int(self.options.show_pypi_releases)
except ValueError:
    self.logger.error("ERROR: You must supply an integer.")
    return 1
try:
    latest_releases = self.pypi.updated_releases(hours)
except XMLRPCFault as err_msg:
    self.logger.error(err_msg)
    self.logger.error("ERROR: Couldn't retrieve latest releases.")
    return 1
for release in latest_releases:
    print("%s %s" % (release[0], release[1]))
return 0
<SYSTEM_TASK:> Download a package <END_TASK> <USER_TASK:> Description: def fetch(self): """ Download a package @returns: 0 = success or 1 if failed download """
#Default type to download
source = True
directory = "."
if self.options.file_type == "svn":
    version = "dev"
    svn_uri = get_download_uri(self.project_name, "dev", True)
    if svn_uri:
        directory = self.project_name + "_svn"
        return self.fetch_svn(svn_uri, directory)
    else:
        self.logger.error(
            "ERROR: No subversion repository found for %s" %
            self.project_name)
        return 1
elif self.options.file_type == "source":
    source = True
elif self.options.file_type == "egg":
    source = False
uri = get_download_uri(self.project_name, self.version, source)
if uri:
    return self.fetch_uri(directory, uri)
else:
    self.logger.error("No %s URI found for package: %s " %
                      (self.options.file_type, self.project_name))
    return 1
<SYSTEM_TASK:> Use ``urllib.urlretrieve`` to download package to file in sandbox dir. <END_TASK> <USER_TASK:> Description: def fetch_uri(self, directory, uri): """ Use ``urllib.urlretrieve`` to download package to file in sandbox dir. @param directory: directory to download to @type directory: string @param uri: uri to download @type uri: string @returns: 0 = success or 1 for failed download """
filename = os.path.basename(urlparse(uri)[2])
if os.path.exists(filename):
    self.logger.error("ERROR: File exists: " + filename)
    return 1
try:
    downloaded_filename, headers = urlretrieve(uri, filename)
    self.logger.info("Downloaded ./" + filename)
except IOError as err_msg:
    self.logger.error("Error downloading package %s from URL %s"
                      % (filename, uri))
    self.logger.error(str(err_msg))
    return 1
if headers.gettype() in ["text/html"]:
    dfile = open(downloaded_filename)
    if re.search("404 Not Found", "".join(dfile.readlines())):
        dfile.close()
        self.logger.error("'404 Not Found' error")
        return 1
    dfile.close()
return 0
<SYSTEM_TASK:> Fetch subversion repository <END_TASK> <USER_TASK:> Description: def fetch_svn(self, svn_uri, directory): """ Fetch subversion repository @param svn_uri: subversion repository uri to check out @type svn_uri: string @param directory: directory to download to @type directory: string @returns: 0 = success or 1 for failed download """
if not command_successful("svn --version"):
    self.logger.error("ERROR: Do you have subversion installed?")
    return 1
if os.path.exists(directory):
    self.logger.error("ERROR: Checkout directory exists - %s" % directory)
    return 1
try:
    os.mkdir(directory)
except OSError as err_msg:
    self.logger.error("ERROR: " + str(err_msg))
    return 1
cwd = os.path.realpath(os.curdir)
os.chdir(directory)
self.logger.info("Doing subversion checkout for %s" % svn_uri)
status, output = run_command("/usr/bin/svn co %s" % svn_uri)
self.logger.info(output)
os.chdir(cwd)
self.logger.info("subversion checkout is in directory './%s'" % directory)
return 0
<SYSTEM_TASK:> Launch web browser at project's homepage <END_TASK> <USER_TASK:> Description: def browse_website(self, browser=None): """ Launch web browser at project's homepage @param browser: name of web browser to use @type browser: string @returns: 0 if homepage found, 1 if no homepage found """
if len(self.all_versions):
    metadata = self.pypi.release_data(self.project_name,
                                      self.all_versions[0])
    self.logger.debug("DEBUG: browser: %s" % browser)
    if "home_page" in metadata:
        self.logger.info("Launching browser: %s" % metadata["home_page"])
        if browser == 'konqueror':
            browser = webbrowser.Konqueror()
        else:
            browser = webbrowser.get()
        browser.open(metadata["home_page"], 2)
        return 0
self.logger.error("No homepage URL found.")
return 1
<SYSTEM_TASK:> Query PyPI for a particular version or all versions of a package <END_TASK> <USER_TASK:> Description: def versions_available(self): """ Query PyPI for a particular version or all versions of a package @returns: 0 if version(s) found or 1 if none found """
if self.version:
    spec = "%s==%s" % (self.project_name, self.version)
else:
    spec = self.project_name
if self.all_versions and self.version in self.all_versions:
    print_pkg_versions(self.project_name, [self.version])
elif not self.version and self.all_versions:
    print_pkg_versions(self.project_name, self.all_versions)
else:
    if self.version:
        self.logger.error("No package found for version %s" % self.version)
    else:
        self.logger.error("No package found for %s" % self.project_name)
    return 1
return 0
<SYSTEM_TASK:> Show entry map for a package <END_TASK> <USER_TASK:> Description: def show_entry_map(self): """ Show entry map for a package @param dist: package @param type: string @returns: 0 for success or 1 if error """
pprinter = pprint.PrettyPrinter()
try:
    entry_map = pkg_resources.get_entry_map(self.options.show_entry_map)
    if entry_map:
        pprinter.pprint(entry_map)
except pkg_resources.DistributionNotFound:
    self.logger.error("Distribution not found: %s" %
                      self.options.show_entry_map)
    return 1
return 0
<SYSTEM_TASK:> Show entry points for a module <END_TASK> <USER_TASK:> Description: def show_entry_points(self): """ Show entry points for a module @returns: 0 for success or 1 if error """
found = False
for entry_point in pkg_resources.iter_entry_points(self.options.show_entry_points):
    found = True
    try:
        plugin = entry_point.load()
        print(plugin.__module__)
        print("   %s" % entry_point)
        if plugin.__doc__:
            print(plugin.__doc__)
        print()
    except ImportError:
        pass
if not found:
    self.logger.error("No entry points found for %s" %
                      self.options.show_entry_points)
    return 1
return 0
<SYSTEM_TASK:> Return tuple with project_name and version from CLI args <END_TASK> <USER_TASK:> Description: def parse_pkg_ver(self, want_installed): """ Return tuple with project_name and version from CLI args If the user gave the wrong case for the project name, this corrects it @param want_installed: whether package we want is installed or not @type want_installed: boolean @returns: tuple(project_name, version, all_versions) """
all_versions = []
arg_str = ("").join(self.pkg_spec)
if "==" not in arg_str:
    #No version specified
    project_name = arg_str
    version = None
else:
    (project_name, version) = arg_str.split("==")
    project_name = project_name.strip()
    version = version.strip()
#Find proper case for package name
if want_installed:
    dists = Distributions()
    project_name = dists.case_sensitive_name(project_name)
else:
    (project_name, all_versions) = \
        self.pypi.query_versions_pypi(project_name)
    if not len(all_versions):
        msg = "I'm afraid we have no '%s' at " % project_name
        msg += "The Cheese Shop. A little Red Leicester, perhaps?"
        self.logger.error(msg)
        sys.exit(2)
return (project_name, version, all_versions)
<SYSTEM_TASK:> Install a backport import hook for Qt4 api <END_TASK> <USER_TASK:> Description: def install_backport_hook(api): """ Install a backport import hook for Qt4 api Parameters ---------- api : str The Qt4 api whose structure should be intercepted ('pyqt4' or 'pyside'). Example ------- >>> install_backport_hook("pyqt4") >>> import PyQt4 Loaded module AnyQt._backport as a substitute for PyQt4 """
if api == USED_API:
    raise ValueError
sys.meta_path.insert(0, ImportHookBackport(api))
<SYSTEM_TASK:> Install a deny import hook for Qt api. <END_TASK> <USER_TASK:> Description: def install_deny_hook(api): """ Install a deny import hook for Qt api. Parameters ---------- api : str The Qt api whose import should be prevented Example ------- >>> install_deny_hook("pyqt4") >>> import PyQt4 Traceback (most recent call last):... ImportError: Import of PyQt4 is denied. """
if api == USED_API:
    raise ValueError
sys.meta_path.insert(0, ImportHookDeny(api))
<SYSTEM_TASK:> Run command and return its return status code and its output <END_TASK> <USER_TASK:> Description: def run_command(cmd, env=None, max_timeout=None): """ Run command and return its return status code and its output """
arglist = cmd.split()
output = os.tmpfile()
try:
    pipe = Popen(arglist, stdout=output, stderr=STDOUT, env=env)
except Exception as errmsg:
    return 1, errmsg
# Wait only max_timeout seconds.
if max_timeout:
    start = time.time()
    while pipe.poll() is None:
        time.sleep(0.1)
        if time.time() - start > max_timeout:
            os.kill(pipe.pid, signal.SIGINT)
            pipe.wait()
            return 1, "Time exceeded"
pipe.wait()
output.seek(0)
return pipe.returncode, output.read()
<SYSTEM_TASK:> Iterate over a slack API method supporting pagination <END_TASK> <USER_TASK:> Description: async def iter( self, url: Union[str, methods], data: Optional[MutableMapping] = None, headers: Optional[MutableMapping] = None, *, limit: int = 200, iterkey: Optional[str] = None, itermode: Optional[str] = None, minimum_time: Optional[int] = None, as_json: Optional[bool] = None ) -> AsyncIterator[dict]: """ Iterate over a slack API method supporting pagination When using :class:`slack.methods` the request is made `as_json` if available Args: url: :class:`slack.methods` or url string data: JSON encodable MutableMapping headers: limit: Maximum number of results to return per call. iterkey: Key in response data to iterate over (required for url string). itermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`) minimum_time: Minimum elapsed time (in seconds) between two calls to the Slack API (default to 0). If not reached the client will sleep for the remaining time. as_json: Post JSON to the slack API Returns: Async iterator over `response_data[key]` """
itervalue = None
if not data:
    data = {}
last_request_time = None
while True:
    current_time = time.time()
    if (
        minimum_time
        and last_request_time
        and last_request_time + minimum_time > current_time
    ):
        await self.sleep(last_request_time + minimum_time - current_time)
    data, iterkey, itermode = sansio.prepare_iter_request(
        url,
        data,
        iterkey=iterkey,
        itermode=itermode,
        limit=limit,
        itervalue=itervalue,
    )
    last_request_time = time.time()
    response_data = await self.query(url, data, headers, as_json)
    itervalue = sansio.decode_iter_request(response_data)
    for item in response_data[iterkey]:
        yield item
    if not itervalue:
        break
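The coroutine above combines cursor-style pagination with optional rate limiting. Below is a minimal, self-contained sketch of the same cursor-iteration pattern against a fake paginated API; the fake_api function and its page layout are invented for illustration and are not part of the slack library or this dataset:

import asyncio
from typing import AsyncIterator

async def fake_api(cursor: str = "") -> dict:
    # Pretend each call returns two items plus a cursor pointing at the next page.
    pages = {"": (["a", "b"], "p2"), "p2": (["c", "d"], "")}
    items, next_cursor = pages[cursor]
    return {"items": items, "next_cursor": next_cursor}

async def iter_items() -> AsyncIterator[str]:
    cursor = ""
    while True:
        response = await fake_api(cursor)
        for item in response["items"]:
            yield item
        cursor = response["next_cursor"]
        if not cursor:  # an empty cursor means the last page was reached
            break

async def main():
    print([item async for item in iter_items()])  # ['a', 'b', 'c', 'd']

asyncio.run(main())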
<SYSTEM_TASK:> Connect and discard incoming RTM event if necessary. <END_TASK> <USER_TASK:> Description: async def _incoming_from_rtm( self, url: str, bot_id: str ) -> AsyncIterator[events.Event]: """ Connect and discard incoming RTM event if necessary. :param url: Websocket url :param bot_id: Bot ID :return: Incoming events """
async for data in self._rtm(url):
    event = events.Event.from_rtm(json.loads(data))
    if sansio.need_reconnect(event):
        break
    elif sansio.discard_event(event, bot_id):
        continue
    else:
        yield event
<SYSTEM_TASK:> Returns True if package manager 'owns' file <END_TASK> <USER_TASK:> Description: def package_manager_owns(self, dist): """ Returns True if package manager 'owns' file Returns False if package manager does not 'own' file There is currently no way to determine if distutils or setuptools installed a package. A future feature of setuptools will make a package manifest which can be checked. 'filename' must be the full path to file """
#Installed by distutils/setuptools or external package manager?
#If location is in site-packages dir, check for .egg-info file
if dist.location.lower() == get_python_lib().lower():
    filename = os.path.join(dist.location, dist.egg_name() + ".egg-info")
else:
    filename = dist.location
status, output = getstatusoutput("/usr/bin/acmefile -q %s" % filename)
#status == 0 (file was installed by Acme)
#status == 256 (file was not installed by Acme)
if status == 0:
    return self.name
else:
    return ""
<SYSTEM_TASK:> Returns URL of specified file type <END_TASK> <USER_TASK:> Description: def filter_url(pkg_type, url): """ Returns URL of specified file type 'source', 'egg', or 'all' """
bad_stuff = ["?modtime", "#md5="]
for junk in bad_stuff:
    if junk in url:
        url = url.split(junk)[0]
        break
#pkg_spec==dev (svn)
if url.endswith("-dev"):
    url = url.split("#egg=")[0]
if pkg_type == "all":
    return url
elif pkg_type == "source":
    valid_source_types = [".tgz", ".tar.gz", ".zip", ".tbz2", ".tar.bz2"]
    for extension in valid_source_types:
        if url.lower().endswith(extension):
            return url
elif pkg_type == "egg":
    if url.lower().endswith(".egg"):
        return url
<SYSTEM_TASK:> Get a package name list from disk cache or PyPI <END_TASK> <USER_TASK:> Description: def get_cache(self): """ Get a package name list from disk cache or PyPI """
#This is used by external programs that import `CheeseShop` and don't
#want a cache file written to ~/.pypi and query PyPI every time.
if self.no_cache:
    self.pkg_list = self.list_packages()
    return
if not os.path.exists(self.yolk_dir):
    os.mkdir(self.yolk_dir)
if os.path.exists(self.pkg_cache_file):
    self.pkg_list = self.query_cached_package_list()
else:
    self.logger.debug("DEBUG: Fetching package list cache from PyPi...")
    self.fetch_pkg_list()
<SYSTEM_TASK:> Fetch list of available versions for a package from The CheeseShop <END_TASK> <USER_TASK:> Description: def query_versions_pypi(self, package_name): """Fetch list of available versions for a package from The CheeseShop"""
if not package_name in self.pkg_list:
    self.logger.debug("Package %s not in cache, querying PyPI..."
                      % package_name)
    self.fetch_pkg_list()
#I have to set version=[] for edge cases like "Magic file extensions"
#but I'm not sure why this happens. It's included with Python or
#because it has a space in it's name?
versions = []
for pypi_pkg in self.pkg_list:
    if pypi_pkg.lower() == package_name.lower():
        if self.debug:
            self.logger.debug("DEBUG: %s" % package_name)
        versions = self.package_releases(pypi_pkg)
        package_name = pypi_pkg
        break
return (package_name, versions)
<SYSTEM_TASK:> Return list of pickled package names from PYPI <END_TASK> <USER_TASK:> Description: def query_cached_package_list(self): """Return list of pickled package names from PYPI"""
if self.debug:
    self.logger.debug("DEBUG: reading pickled cache file")
return cPickle.load(open(self.pkg_cache_file, "r"))
<SYSTEM_TASK:> Fetch and cache master list of package names from PYPI <END_TASK> <USER_TASK:> Description: def fetch_pkg_list(self): """Fetch and cache master list of package names from PYPI"""
self.logger.debug("DEBUG: Fetching package name list from PyPI") package_list = self.list_packages() cPickle.dump(package_list, open(self.pkg_cache_file, "w")) self.pkg_list = package_list
<SYSTEM_TASK:> Create an event with data coming from the RTM API. <END_TASK> <USER_TASK:> Description: def from_rtm(cls, raw_event: MutableMapping) -> "Event": """ Create an event with data coming from the RTM API. If the event type is a message a :class:`slack.events.Message` is returned. Args: raw_event: JSON decoded data from the RTM API Returns: :class:`slack.events.Event` or :class:`slack.events.Message` """
if raw_event["type"].startswith("message"):
    return Message(raw_event)
else:
    return Event(raw_event)
<SYSTEM_TASK:> Create an event with data coming from the HTTP Event API. <END_TASK> <USER_TASK:> Description: def from_http( cls, raw_body: MutableMapping, verification_token: Optional[str] = None, team_id: Optional[str] = None, ) -> "Event": """ Create an event with data coming from the HTTP Event API. If the event type is a message a :class:`slack.events.Message` is returned. Args: raw_body: Decoded body of the Event API request verification_token: Slack verification token used to verify the request came from slack team_id: Verify the event is for the correct team Returns: :class:`slack.events.Event` or :class:`slack.events.Message` Raises: :class:`slack.exceptions.FailedVerification`: when `verification_token` or `team_id` does not match the incoming event's. """
if verification_token and raw_body["token"] != verification_token:
    raise exceptions.FailedVerification(raw_body["token"], raw_body["team_id"])
if team_id and raw_body["team_id"] != team_id:
    raise exceptions.FailedVerification(raw_body["token"], raw_body["team_id"])
if raw_body["event"]["type"].startswith("message"):
    return Message(raw_body["event"], metadata=raw_body)
else:
    return Event(raw_body["event"], metadata=raw_body)
<SYSTEM_TASK:> Create a response message. <END_TASK> <USER_TASK:> Description: def response(self, in_thread: Optional[bool] = None) -> "Message": """ Create a response message. Depending on the incoming message the response can be in a thread. By default the response follow where the incoming message was posted. Args: in_thread (boolean): Overwrite the `threading` behaviour Returns: a new :class:`slack.event.Message` """
data = {"channel": self["channel"]} if in_thread: if "message" in self: data["thread_ts"] = ( self["message"].get("thread_ts") or self["message"]["ts"] ) else: data["thread_ts"] = self.get("thread_ts") or self["ts"] elif in_thread is None: if "message" in self and "thread_ts" in self["message"]: data["thread_ts"] = self["message"]["thread_ts"] elif "thread_ts" in self: data["thread_ts"] = self["thread_ts"] return Message(data)
<SYSTEM_TASK:> Serialize the message for sending to slack API <END_TASK> <USER_TASK:> Description: def serialize(self) -> dict: """ Serialize the message for sending to slack API Returns: serialized message """
data = {**self}
if "attachments" in self:
    data["attachments"] = json.dumps(self["attachments"])
return data
<SYSTEM_TASK:> Query the slack API <END_TASK> <USER_TASK:> Description: def query( # type: ignore self, url: Union[str, methods], data: Optional[MutableMapping] = None, headers: Optional[MutableMapping] = None, as_json: Optional[bool] = None, ) -> dict: """ Query the slack API When using :class:`slack.methods` the request is made `as_json` if available Args: url: :class:`slack.methods` or url string data: JSON encodable MutableMapping headers: Custom headers as_json: Post JSON to the slack API Returns: dictionary of slack API response data """
url, body, headers = sansio.prepare_request(
    url=url,
    data=data,
    headers=headers,
    global_headers=self._headers,
    token=self._token,
)
return self._make_query(url, body, headers)
<SYSTEM_TASK:> Iterate over event from the RTM API <END_TASK> <USER_TASK:> Description: def rtm( # type: ignore self, url: Optional[str] = None, bot_id: Optional[str] = None ) -> Iterator[events.Event]: """ Iterate over event from the RTM API Args: url: Websocket connection url bot_id: Connecting bot ID Returns: :class:`slack.events.Event` or :class:`slack.events.Message` """
while True:
    bot_id = bot_id or self._find_bot_id()
    url = url or self._find_rtm_url()
    for event in self._incoming_from_rtm(url, bot_id):
        yield event
    url = None
<SYSTEM_TASK:> Get configuration from a file. <END_TASK> <USER_TASK:> Description: def get_config(config_file): """Get configuration from a file."""
def load(fp):
    try:
        return yaml.safe_load(fp)
    except yaml.YAMLError as e:
        sys.stderr.write(text_type(e))
        sys.exit(1)  # TODO document exit codes

if config_file == '-':
    return load(sys.stdin)

if not os.path.exists(config_file):
    sys.stderr.write('ERROR: Must either run next to config.yaml or'
                     ' specify a config file.\n' + __doc__)
    sys.exit(2)

with open(config_file) as fp:
    return load(fp)
<SYSTEM_TASK:> Figure out what options to use based on the four places it can come from. <END_TASK> <USER_TASK:> Description: def get_options(config_options, local_options, cli_options): """ Figure out what options to use based on the four places it can come from. Order of precedence: * cli_options specified by the user at the command line * local_options specified in the config file for the metric * config_options specified in the config file at the base * DEFAULT_OPTIONS hard coded defaults """
options = DEFAULT_OPTIONS.copy()
if config_options is not None:
    options.update(config_options)
if local_options is not None:
    options.update(local_options)
if cli_options is not None:
    options.update(cli_options)
return options
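A quick illustration of the precedence order described above. The DEFAULT_OPTIONS values and option names here are stand-ins for the example, not the module's real defaults:

DEFAULT_OPTIONS = {"start_date": "2000-01-01", "output": "text"}

def get_options(config_options, local_options, cli_options):
    options = DEFAULT_OPTIONS.copy()
    for override in (config_options, local_options, cli_options):
        if override is not None:
            options.update(override)
    return options

# CLI beats local, which beats config, which beats the hard-coded defaults.
print(get_options({"output": "json"}, {"output": "csv"}, {"start_date": "2019-01-01"}))
# {'start_date': '2019-01-01', 'output': 'csv'}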
<SYSTEM_TASK:> This method is analogous to "gsutil cp gsuri localpath", but in a <END_TASK> <USER_TASK:> Description: def download_to_path(self, gsuri, localpath, binary_mode=False, tmpdir=None): """ This method is analogous to "gsutil cp gsuri localpath", but in a programatically accesible way. The only difference is that we have to make a guess about the encoding of the file to not upset downstream file operations. If you are downloading a VCF, then "False" is great. If this is a BAM file you are asking for, you should enable the "binary_mode" to make sure file doesn't get corrupted. gsuri: full GS-based URI, e.g. gs://cohorts/rocks.txt localpath: the path for the downloaded file, e.g. /mnt/cohorts/yep.txt binary_mode: (logical) if yes, the binary file operations will be used; if not, standard ascii-based ones. """
bucket_name, gs_rel_path = self.parse_uri(gsuri) # And now request the handles for bucket and the file bucket = self._client.get_bucket(bucket_name) # Just assignment, no downloading (yet) ablob = bucket.get_blob(gs_rel_path) if not ablob: raise GoogleStorageIOError( "No such file on Google Storage: '{}'".format(gs_rel_path)) # A tmp file to serve intermediate phase # should be on same filesystem as localpath tmp_fid, tmp_file_path = tempfile.mkstemp(text=(not binary_mode), dir=tmpdir) # set chunk_size to reasonable default # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2222 ablob.chunk_size = 1<<30 # Download starts in a sec.... ablob.download_to_filename(client=self._client, filename=tmp_file_path) # ... end download ends. Let's move our finished file over. # You will see that below, instead of directly writing to a file # we are instead first using a different file and then move it to # its final location. We are doing this because we don't want # corrupted/incomplete data to be around as much as possible. return os.rename(tmp_file_path, localpath)
<SYSTEM_TASK:> Returns the first and last name of the user separated by a space. <END_TASK> <USER_TASK:> Description: def full_name(self): """ Returns the first and last name of the user separated by a space. """
formatted_user = []
if self.user.first_name is not None:
    formatted_user.append(self.user.first_name)
if self.user.last_name is not None:
    formatted_user.append(self.user.last_name)
return " ".join(formatted_user)
<SYSTEM_TASK:> Decorator for functions that should automatically fall back to the Cohort-default filter_fn and <END_TASK> <USER_TASK:> Description: def use_defaults(func): """ Decorator for functions that should automatically fall back to the Cohort-default filter_fn and normalized_per_mb if not specified. """
@wraps(func)
def wrapper(row, cohort, filter_fn=None, normalized_per_mb=None, **kwargs):
    filter_fn = first_not_none_param([filter_fn, cohort.filter_fn], no_filter)
    normalized_per_mb = first_not_none_param([normalized_per_mb, cohort.normalized_per_mb], False)
    return func(row=row,
                cohort=cohort,
                filter_fn=filter_fn,
                normalized_per_mb=normalized_per_mb,
                **kwargs)
return wrapper
<SYSTEM_TASK:> Creates a function that counts variants that are filtered by the provided filterable_variant_function. <END_TASK> <USER_TASK:> Description: def count_variants_function_builder(function_name, filterable_variant_function=None): """ Creates a function that counts variants that are filtered by the provided filterable_variant_function. The filterable_variant_function is a function that takes a filterable_variant and returns True or False. Users of this builder need not worry about applying e.g. the Cohort's default `filter_fn`. That will be applied as well. """
@count_function def count(row, cohort, filter_fn, normalized_per_mb, **kwargs): def count_filter_fn(filterable_variant, **kwargs): assert filter_fn is not None, "filter_fn should never be None, but it is." return ((filterable_variant_function(filterable_variant) if filterable_variant_function is not None else True) and filter_fn(filterable_variant, **kwargs)) patient_id = row["patient_id"] return cohort.load_variants( patients=[cohort.patient_from_id(patient_id)], filter_fn=count_filter_fn, **kwargs) count.__name__ = function_name count.__doc__ = str("".join(inspect.getsourcelines(filterable_variant_function)[0])) if filterable_variant_function is not None else "" return count
<SYSTEM_TASK:> Create a function that counts effects that are filtered by the provided filterable_effect_function. <END_TASK> <USER_TASK:> Description: def count_effects_function_builder(function_name, only_nonsynonymous, filterable_effect_function=None): """ Create a function that counts effects that are filtered by the provided filterable_effect_function. The filterable_effect_function is a function that takes a filterable_effect and returns True or False. Users of this builder need not worry about applying e.g. the Cohort's default `filter_fn`. That will be applied as well. """
@count_function def count(row, cohort, filter_fn, normalized_per_mb, **kwargs): def count_filter_fn(filterable_effect, **kwargs): assert filter_fn is not None, "filter_fn should never be None, but it is." return ((filterable_effect_function(filterable_effect) if filterable_effect_function is not None else True) and filter_fn(filterable_effect, **kwargs)) # This only loads one effect per variant. patient_id = row["patient_id"] return cohort.load_effects( only_nonsynonymous=only_nonsynonymous, patients=[cohort.patient_from_id(patient_id)], filter_fn=count_filter_fn, **kwargs) count.__name__ = function_name count.__doc__ = (("only_nonsynonymous=%s\n" % only_nonsynonymous) + str("".join(inspect.getsourcelines(filterable_effect_function)[0])) if filterable_effect_function is not None else "") # Keep track of these to be able to query the returned function for these attributes count.only_nonsynonymous = only_nonsynonymous count.filterable_effect_function = filterable_effect_function return count
<SYSTEM_TASK:> Calculate the bootstrapped AUC for a given col trying to predict a pred_col. <END_TASK> <USER_TASK:> Description: def bootstrap_auc(df, col, pred_col, n_bootstrap=1000): """ Calculate the bootstrapped AUC for a given col trying to predict a pred_col. Parameters ---------- df : pandas.DataFrame col : str column to retrieve the values from pred_col : str the column we're trying to predict n_bootstrap : int the number of bootstrap samples Returns ------- list : AUCs for each sampling """
scores = np.zeros(n_bootstrap)
old_len = len(df)
df.dropna(subset=[col], inplace=True)
new_len = len(df)
if new_len < old_len:
    logger.info("Dropping NaN values in %s to go from %d to %d rows" % (col, old_len, new_len))
preds = df[pred_col].astype(int)
for i in range(n_bootstrap):
    sampled_counts, sampled_pred = resample(df[col], preds)
    if is_single_class(sampled_pred, col=pred_col):
        continue
    scores[i] = roc_auc_score(sampled_pred, sampled_counts)
return scores
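A self-contained sketch of the same bootstrap-AUC idea on synthetic data. The column names, sample sizes, and number of resamples are arbitrary, and this uses scikit-learn directly rather than the cohort helpers above:

import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score
from sklearn.utils import resample

rng = np.random.RandomState(0)
df = pd.DataFrame({
    "snv_count": np.concatenate([rng.normal(5, 2, 50), rng.normal(8, 2, 50)]),
    "is_benefit": np.repeat([0, 1], 50),
})

scores = []
for _ in range(200):
    x, y = resample(df["snv_count"], df["is_benefit"], random_state=rng)
    if y.nunique() < 2:  # skip degenerate resamples that contain a single class
        continue
    scores.append(roc_auc_score(y, x))

print("bootstrapped AUC: %.2f +/- %.2f" % (np.mean(scores), np.std(scores)))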
<SYSTEM_TASK:> Creates a new Worker and start a new Thread with it. Returns the Worker. <END_TASK> <USER_TASK:> Description: def new_worker(self, name: str): """Creates a new Worker and start a new Thread with it. Returns the Worker."""
if not self.running:
    return self.immediate_worker
worker = self._new_worker(name)
self._start_worker(worker)
return worker
<SYSTEM_TASK:> Creates a new worker pool and starts it. <END_TASK> <USER_TASK:> Description: def new_worker_pool(self, name: str, min_workers: int = 0, max_workers: int = 1, max_seconds_idle: int = DEFAULT_WORKER_POOL_MAX_SECONDS_IDLE): """ Creates a new worker pool and starts it. Returns the Worker that schedules works to the pool. """
if not self.running:
    return self.immediate_worker
worker = self._new_worker_pool(name, min_workers, max_workers, max_seconds_idle)
self._start_worker_pool(worker)
return worker
<SYSTEM_TASK:> Return this Cohort as a DataFrame, and optionally include additional columns <END_TASK> <USER_TASK:> Description: def as_dataframe(self, on=None, join_with=None, join_how=None, return_cols=False, rename_cols=False, keep_paren_contents=True, **kwargs): """ Return this Cohort as a DataFrame, and optionally include additional columns using `on`. on : str or function or list or dict, optional - A column name. - Or a function that creates a new column for comparison, e.g. count.snv_count. - Or a list of column-generating functions or column names. - Or a map of new column names to their column-generating functions or column names. If `on` is a function or functions, kwargs is passed to those functions. Otherwise kwargs is ignored. Other parameters ---------------- `return_cols`: (bool) If True, return column names generated via `on` along with the `DataFrame` as a `DataFrameHolder` tuple. `rename_cols`: (bool) If True, then return columns using "stripped" column names ("stripped" means lower-case names without punctuation other than `_`) See `utils.strip_column_names` for more details defaults to False `keep_paren_contents`: (bool) If True, then contents of column names within parens are kept. If False, contents of column names within-parens are dropped. Defaults to True ---------- Return : `DataFrame` (or `DataFrameHolder` if `return_cols` is True) """
df = self._as_dataframe_unmodified(join_with=join_with, join_how=join_how) if on is None: return DataFrameHolder.return_obj(None, df, return_cols) if type(on) == str: return DataFrameHolder.return_obj(on, df, return_cols) def apply_func(on, col, df): """ Sometimes we have functions that, by necessity, have more parameters than just `row`. We construct a function with just the `row` parameter so it can be sent to `DataFrame.apply`. We hackishly pass `cohort` (as `self`) along if the function accepts a `cohort` argument. """ on_argnames = on.__code__.co_varnames if "cohort" not in on_argnames: func = lambda row: on(row=row, **kwargs) else: func = lambda row: on(row=row, cohort=self, **kwargs) if self.show_progress: tqdm.pandas(desc=col) df[col] = df.progress_apply(func, axis=1) ## depends on tqdm on prev line else: df[col] = df.apply(func, axis=1) return DataFrameHolder(col, df) def func_name(func, num=0): return func.__name__ if not is_lambda(func) else "column_%d" % num def is_lambda(func): return func.__name__ == (lambda: None).__name__ if type(on) == FunctionType: return apply_func(on, func_name(on), df).return_self(return_cols) if len(kwargs) > 0: logger.warning("Note: kwargs used with multiple functions; passing them to all functions") if type(on) == dict: cols = [] for key, value in on.items(): if type(value) == str: df[key] = df[value] col = key elif type(value) == FunctionType: col, df = apply_func(on=value, col=key, df=df) else: raise ValueError("A value of `on`, %s, is not a str or function" % str(value)) cols.append(col) if type(on) == list: cols = [] for i, elem in enumerate(on): if type(elem) == str: col = elem elif type(elem) == FunctionType: col = func_name(elem, i) col, df = apply_func(on=elem, col=col, df=df) cols.append(col) if rename_cols: rename_dict = _strip_column_names(df.columns, keep_paren_contents=keep_paren_contents) df.rename(columns=rename_dict, inplace=True) cols = [rename_dict[col] for col in cols] return DataFrameHolder(cols, df).return_self(return_cols)
<SYSTEM_TASK:> Instead of joining a DataFrameJoiner with the Cohort in `as_dataframe`, sometimes <END_TASK> <USER_TASK:> Description: def load_dataframe(self, df_loader_name): """ Instead of joining a DataFrameJoiner with the Cohort in `as_dataframe`, sometimes we may want to just directly load a particular DataFrame. """
logger.debug("loading dataframe: {}".format(df_loader_name)) # Get the DataFrameLoader object corresponding to this name. df_loaders = [df_loader for df_loader in self.df_loaders if df_loader.name == df_loader_name] if len(df_loaders) == 0: raise ValueError("No DataFrameLoader with name %s" % df_loader_name) if len(df_loaders) > 1: raise ValueError("Multiple DataFrameLoaders with name %s" % df_loader_name) return df_loaders[0].load_dataframe()
<SYSTEM_TASK:> Load a dictionary of patient_id to varcode.VariantCollection <END_TASK> <USER_TASK:> Description: def load_variants(self, patients=None, filter_fn=None, **kwargs): """Load a dictionary of patient_id to varcode.VariantCollection Parameters ---------- patients : str, optional Filter to a subset of patients filter_fn : function Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved. Overrides default self.filter_fn. `None` passes through to self.filter_fn. Returns ------- merged_variants Dictionary of patient_id to VariantCollection """
filter_fn = first_not_none_param([filter_fn, self.filter_fn], no_filter)
filter_fn_name = self._get_function_name(filter_fn)
logger.debug("loading variants with filter_fn: {}".format(filter_fn_name))
patient_variants = {}
for patient in self.iter_patients(patients):
    variants = self._load_single_patient_variants(patient, filter_fn, **kwargs)
    if variants is not None:
        patient_variants[patient.id] = variants
return patient_variants
<SYSTEM_TASK:> Construct string representing state of filter_fn <END_TASK> <USER_TASK:> Description: def _hash_filter_fn(self, filter_fn, **kwargs): """ Construct string representing state of filter_fn Used to cache filtered variants or effects uniquely depending on filter fn values """
filter_fn_name = self._get_function_name(filter_fn, default="filter-none") logger.debug("Computing hash for filter_fn: {} with kwargs {}".format(filter_fn_name, str(dict(**kwargs)))) # hash function source code fn_source = str(dill.source.getsource(filter_fn)) pickled_fn_source = pickle.dumps(fn_source) ## encode as byte string hashed_fn_source = int(hashlib.sha1(pickled_fn_source).hexdigest(), 16) % (10 ** 11) # hash kwarg values kw_dict = dict(**kwargs) kw_hash = list() if not kw_dict: kw_hash = ["default"] else: [kw_hash.append("{}-{}".format(key, h)) for (key, h) in sorted(kw_dict.items())] # hash closure vars - for case where filter_fn is defined within closure of filter_fn closure = [] nonlocals = inspect.getclosurevars(filter_fn).nonlocals for (key, val) in nonlocals.items(): ## capture hash for any function within closure if inspect.isfunction(val): closure.append(self._hash_filter_fn(val)) closure.sort() # Sorted for file name consistency closure_str = "null" if len(closure) == 0 else "-".join(closure) # construct final string comprising hashed components hashed_fn = ".".join(["-".join([filter_fn_name, str(hashed_fn_source)]), ".".join(kw_hash), closure_str] ) return hashed_fn
<SYSTEM_TASK:> Load filtered, merged variants for a single patient, optionally using cache <END_TASK> <USER_TASK:> Description: def _load_single_patient_variants(self, patient, filter_fn, use_cache=True, **kwargs): """ Load filtered, merged variants for a single patient, optionally using cache Note that filtered variants are first merged before filtering, and each step is cached independently. Turn on debug statements for more details about cached files. Use `_load_single_patient_merged_variants` to see merged variants without filtering. """
if filter_fn is None: use_filtered_cache = False else: filter_fn_name = self._get_function_name(filter_fn) logger.debug("loading variants for patient {} with filter_fn {}".format(patient.id, filter_fn_name)) use_filtered_cache = use_cache ## confirm that we can get cache-name (else don't use filtered cache) if use_filtered_cache: logger.debug("... identifying filtered-cache file name") try: ## try to load filtered variants from cache filtered_cache_file_name = "%s-variants.%s.pkl" % (self.merge_type, self._hash_filter_fn(filter_fn, **kwargs)) except: logger.warning("... error identifying filtered-cache file name for patient {}: {}".format( patient.id, filter_fn_name)) use_filtered_cache = False else: logger.debug("... trying to load filtered variants from cache: {}".format(filtered_cache_file_name)) try: cached = self.load_from_cache(self.cache_names["variant"], patient.id, filtered_cache_file_name) if cached is not None: return cached except: logger.warning("Error loading variants from cache for patient: {}".format(patient.id)) pass ## get merged variants logger.debug("... getting merged variants for: {}".format(patient.id)) merged_variants = self._load_single_patient_merged_variants(patient, use_cache=use_cache) # Note None here is different from 0. We want to preserve None if merged_variants is None: logger.info("Variants did not exist for patient %s" % patient.id) return None logger.debug("... applying filters to variants for: {}".format(patient.id)) filtered_variants = filter_variants(variant_collection=merged_variants, patient=patient, filter_fn=filter_fn, **kwargs) if use_filtered_cache: logger.debug("... saving filtered variants to cache: {}".format(filtered_cache_file_name)) self.save_to_cache(filtered_variants, self.cache_names["variant"], patient.id, filtered_cache_file_name) return filtered_variants
<SYSTEM_TASK:> Load merged variants for a single patient, optionally using cache <END_TASK> <USER_TASK:> Description: def _load_single_patient_merged_variants(self, patient, use_cache=True): """ Load merged variants for a single patient, optionally using cache Note that merged variants are not filtered. Use `_load_single_patient_variants` to get filtered variants """
logger.debug("loading merged variants for patient {}".format(patient.id)) no_variants = False try: # get merged-variants from cache if use_cache: ## load unfiltered variants into list of collections variant_cache_file_name = "%s-variants.pkl" % (self.merge_type) merged_variants = self.load_from_cache(self.cache_names["variant"], patient.id, variant_cache_file_name) if merged_variants is not None: return merged_variants # get variant collections from file variant_collections = [] optional_maf_cols = ["t_ref_count", "t_alt_count", "n_ref_count", "n_alt_count"] if self.additional_maf_cols is not None: optional_maf_cols.extend(self.additional_maf_cols) for patient_variants in patient.variants_list: if type(patient_variants) == str: if ".vcf" in patient_variants: try: variant_collections.append(varcode.load_vcf_fast(patient_variants)) # StopIteration is thrown for empty VCFs. For an empty VCF, don't append any variants, # and don't throw an error. But do record a warning, in case the StopIteration was # thrown for another reason. except StopIteration as e: logger.warning("Empty VCF (or possibly a VCF error) for patient {}: {}".format( patient.id, str(e))) elif ".maf" in patient_variants: # See variant_stats.maf_somatic_variant_stats variant_collections.append( varcode.load_maf( patient_variants, optional_cols=optional_maf_cols, encoding="latin-1")) else: raise ValueError("Don't know how to read %s" % patient_variants) elif type(patient_variants) == VariantCollection: variant_collections.append(patient_variants) else: raise ValueError("Don't know how to read %s" % patient_variants) # merge variant-collections if len(variant_collections) == 0: no_variants = True elif len(variant_collections) == 1: # There is nothing to merge variants = variant_collections[0] merged_variants = variants else: merged_variants = self._merge_variant_collections(variant_collections, self.merge_type) except IOError: no_variants = True # Note that this is the number of variant collections and not the number of # variants. 0 variants will lead to 0 neoantigens, for example, but 0 variant # collections will lead to NaN variants and neoantigens. if no_variants: print("Variants did not exist for patient %s" % patient.id) merged_variants = None # save merged variants to file if use_cache: self.save_to_cache(merged_variants, self.cache_names["variant"], patient.id, variant_cache_file_name) return merged_variants
<SYSTEM_TASK:> Load a dataframe containing polyphen2 annotations for all variants <END_TASK> <USER_TASK:> Description: def load_polyphen_annotations(self, as_dataframe=False, filter_fn=None): """Load a dataframe containing polyphen2 annotations for all variants Parameters ---------- database_file : string, sqlite Path to the WHESS/Polyphen2 SQLite database. Can be downloaded and bunzip2"ed from http://bit.ly/208mlIU filter_fn : function Takes a FilterablePolyphen and returns a boolean. Only annotations returning True are preserved. Overrides default self.filter_fn. `None` passes through to self.filter_fn. Returns ------- annotations Dictionary of patient_id to a DataFrame that contains annotations """
filter_fn = first_not_none_param([filter_fn, self.filter_fn], no_filter)
patient_annotations = {}
for patient in self:
    annotations = self._load_single_patient_polyphen(
        patient,
        filter_fn=filter_fn)
    if annotations is not None:
        annotations["patient_id"] = patient.id
        patient_annotations[patient.id] = annotations
if as_dataframe:
    return pd.concat(patient_annotations.values())
return patient_annotations
<SYSTEM_TASK:> Load a dictionary of patient_id to varcode.EffectCollection <END_TASK> <USER_TASK:> Description: def load_effects(self, patients=None, only_nonsynonymous=False, all_effects=False, filter_fn=None, **kwargs): """Load a dictionary of patient_id to varcode.EffectCollection Note that this only loads one effect per variant. Parameters ---------- patients : str, optional Filter to a subset of patients only_nonsynonymous : bool, optional If true, load only nonsynonymous effects, default False all_effects : bool, optional If true, return all effects rather than only the top-priority effect per variant filter_fn : function Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved. Overrides default self.filter_fn. `None` passes through to self.filter_fn. Returns ------- effects Dictionary of patient_id to varcode.EffectCollection """
filter_fn = first_not_none_param([filter_fn, self.filter_fn], no_filter)
filter_fn_name = self._get_function_name(filter_fn)
logger.debug("loading effects with filter_fn {}".format(filter_fn_name))
patient_effects = {}
for patient in self.iter_patients(patients):
    effects = self._load_single_patient_effects(
        patient, only_nonsynonymous, all_effects, filter_fn, **kwargs)
    if effects is not None:
        patient_effects[patient.id] = effects
return patient_effects
<SYSTEM_TASK:> Load Kallisto transcript quantification data for a cohort <END_TASK> <USER_TASK:> Description: def load_kallisto(self): """ Load Kallisto transcript quantification data for a cohort Parameters ---------- Returns ------- kallisto_data : Pandas dataframe Pandas dataframe with Kallisto data for all patients columns include patient_id, gene_name, est_counts """
kallisto_data = pd.concat( [self._load_single_patient_kallisto(patient) for patient in self], copy=False ) if self.kallisto_ensembl_version is None: raise ValueError("Required a kallisto_ensembl_version but none was specified") ensembl_release = cached_release(self.kallisto_ensembl_version) kallisto_data["gene_name"] = \ kallisto_data["target_id"].map(lambda t: ensembl_release.gene_name_of_transcript_id(t)) # sum counts across genes kallisto_data = \ kallisto_data.groupby(["patient_id", "gene_name"])[["est_counts"]].sum().reset_index() return kallisto_data
<SYSTEM_TASK:> Load Kallisto gene quantification given a patient <END_TASK> <USER_TASK:> Description: def _load_single_patient_kallisto(self, patient): """ Load Kallisto gene quantification given a patient Parameters ---------- patient : Patient Returns ------- data: Pandas dataframe Pandas dataframe of sample's Kallisto data columns include patient_id, target_id, length, eff_length, est_counts, tpm """
data = pd.read_csv(patient.tumor_sample.kallisto_path, sep="\t") data["patient_id"] = patient.id return data
<SYSTEM_TASK:>
Load Cufflinks gene expression data for a cohort
<END_TASK>
<USER_TASK:>
Description:
def load_cufflinks(self, filter_ok=True):
        """
        Load Cufflinks gene expression data for a cohort

        Parameters
        ----------
        filter_ok : bool, optional
            If True, filter Cufflinks data to rows with FPKM_status == "OK"

        Returns
        -------
        cufflinks_data : Pandas dataframe
            Pandas dataframe with Cufflinks data for all patients
            columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi
        """
return \ pd.concat( [self._load_single_patient_cufflinks(patient, filter_ok) for patient in self], copy=False )
<SYSTEM_TASK:>
Load Cufflinks gene quantification given a patient
<END_TASK>
<USER_TASK:>
Description:
def _load_single_patient_cufflinks(self, patient, filter_ok):
        """
        Load Cufflinks gene quantification given a patient

        Parameters
        ----------
        patient : Patient
        filter_ok : bool, optional
            If True, filter Cufflinks data to rows with FPKM_status == "OK"

        Returns
        -------
        data: Pandas dataframe
            Pandas dataframe of sample's Cufflinks data
            columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi
        """
data = pd.read_csv(patient.tumor_sample.cufflinks_path, sep="\t") data["patient_id"] = patient.id if filter_ok: # Filter to OK FPKM counts data = data[data["FPKM_status"] == "OK"] return data
<SYSTEM_TASK:> Mostly replicates topiary.build_epitope_collection_from_binding_predictions <END_TASK> <USER_TASK:> Description: def get_filtered_isovar_epitopes(self, epitopes, ic50_cutoff): """ Mostly replicates topiary.build_epitope_collection_from_binding_predictions Note: topiary needs to do fancy stuff like subsequence_protein_offset + binding_prediction.offset in order to figure out whether a variant is in the peptide because it only has the variant's offset into the full protein; but isovar gives us the variant's offset into the protein subsequence (dictated by protein_sequence_length); so all we need to do is map that onto the smaller 8-11mer peptides generated by mhctools. """
mutant_binding_predictions = [] for binding_prediction in epitopes: peptide = binding_prediction.peptide peptide_offset = binding_prediction.offset isovar_row = dict(binding_prediction.source_sequence_key) is_mutant = contains_mutant_residues( peptide_start_in_protein=peptide_offset, peptide_length=len(peptide), mutation_start_in_protein=isovar_row["variant_aa_interval_start"], mutation_end_in_protein=isovar_row["variant_aa_interval_end"]) if is_mutant and binding_prediction.value <= ic50_cutoff: mutant_binding_predictions.append(binding_prediction) return EpitopeCollection(mutant_binding_predictions)
<SYSTEM_TASK:>
Plot an ROC curve for benefit and a given variable
<END_TASK>
<USER_TASK:>
Description:
def plot_roc_curve(self, on, bootstrap_samples=100, ax=None, **kwargs):
        """Plot an ROC curve for benefit and a given variable

        Parameters
        ----------
        on : str or function or list or dict
            See `cohort.load.as_dataframe`
        bootstrap_samples : int, optional
            Number of bootstrap samples to use to compute the AUC
        ax : Axes, default None
            Axes to plot on

        Returns
        -------
        (mean_auc_score, plot): (float, matplotlib plot)
            Returns the average AUC for the given predictor over `bootstrap_samples`
            and the associated ROC curve
        """
plot_col, df = self.as_dataframe(on, return_cols=True, **kwargs) df = filter_not_null(df, "benefit") df = filter_not_null(df, plot_col) df.benefit = df.benefit.astype(bool) return roc_curve_plot(df, plot_col, "benefit", bootstrap_samples, ax=ax)
<SYSTEM_TASK:>
Plot a comparison of `boolean_col` in the cohort on a given variable via
<END_TASK>
<USER_TASK:>
Description:
def plot_boolean(self,
                     on,
                     boolean_col,
                     plot_col=None,
                     boolean_label=None,
                     boolean_value_map={},
                     order=None,
                     ax=None,
                     alternative="two-sided",
                     **kwargs):
        """Plot a comparison of `boolean_col` in the cohort on a given variable via
        `on` or `plot_col`.

        If the variable (through `on` or `plot_col`) is binary, this will compare
        odds-ratios and perform a Fisher's exact test.

        If the variable is numeric, this will compare the distributions through
        a Mann-Whitney test and plot the distributions with a box-strip plot

        Parameters
        ----------
        on : str or function or list or dict
            See `cohort.load.as_dataframe`
        plot_col : str, optional
            If on has many columns, this is the one whose values we are plotting.
            If on has a single column, this is unnecessary.
            We might want many columns if, e.g. we're generating boolean_col from a
            function as well.
        boolean_col : str
            Column name of boolean column to plot or compare against.
        boolean_label : None, optional
            Label to give boolean column in the plot
        boolean_value_map : dict, optional
            Map of conversions for values in the boolean column, i.e. {True: 'High', False: 'Low'}
        order : None, optional
            Order of the labels on the x-axis
        ax : None, optional
            Axes to plot on
        alternative : str, optional
            Choose the sidedness of the mannwhitneyu or Fisher's Exact test.

        Returns
        -------
        (Test statistic, p-value): (float, float)
        """
cols, df = self.as_dataframe(on, return_cols=True, **kwargs) plot_col = self.plot_col_from_cols(cols=cols, plot_col=plot_col) df = filter_not_null(df, boolean_col) df = filter_not_null(df, plot_col) if boolean_label: df[boolean_label] = df[boolean_col] boolean_col = boolean_label condition_value = None if boolean_value_map: assert set(boolean_value_map.keys()) == set([True, False]), \ "Improper mapping of boolean column provided" df[boolean_col] = df[boolean_col].map(lambda v: boolean_value_map[v]) condition_value = boolean_value_map[True] if df[plot_col].dtype == "bool": results = fishers_exact_plot( data=df, condition1=boolean_col, condition2=plot_col, condition1_value=condition_value, alternative=alternative, order=order, ax=ax) else: results = mann_whitney_plot( data=df, condition=boolean_col, distribution=plot_col, condition_value=condition_value, alternative=alternative, order=order, ax=ax) return results
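A hedged usage sketch for plot_boolean; the column names used here ("smoker" and a neoantigen count column) are placeholders and are not provided by this module.
# Hypothetical example: compare a numeric column across a boolean column,
# relabeling True/False values for readability.
results = cohort.plot_boolean(
    on="neoantigen_count",            # placeholder column resolvable via as_dataframe
    boolean_col="smoker",             # placeholder boolean column
    boolean_label="Smoking status",
    boolean_value_map={True: "Smoker", False: "Non-smoker"},
    alternative="two-sided")
# For a numeric plot_col this returns MannWhitneyResults; for a boolean
# plot_col it returns FishersExactResults.
print(results.p_value)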
<SYSTEM_TASK:> Plot the correlation between two variables. <END_TASK> <USER_TASK:> Description: def plot_correlation(self, on, x_col=None, plot_type="jointplot", stat_func=pearsonr, show_stat_func=True, plot_kwargs={}, **kwargs): """Plot the correlation between two variables. Parameters ---------- on : list or dict of functions or strings See `cohort.load.as_dataframe` x_col : str, optional If `on` is a dict, this guarantees we have the expected ordering. plot_type : str, optional Specify "jointplot", "regplot", "boxplot", or "barplot". stat_func : function, optional. Specify which function to use for the statistical test. show_stat_func : bool, optional Whether or not to show the stat_func result in the plot itself. plot_kwargs : dict, optional kwargs to pass through to plotting functions. """
if plot_type not in ["boxplot", "barplot", "jointplot", "regplot"]: raise ValueError("Invalid plot_type %s" % plot_type) plot_cols, df = self.as_dataframe(on, return_cols=True, **kwargs) if len(plot_cols) != 2: raise ValueError("Must be comparing two columns, but there are %d columns" % len(plot_cols)) for plot_col in plot_cols: df = filter_not_null(df, plot_col) if x_col is None: x_col = plot_cols[0] y_col = plot_cols[1] else: if x_col == plot_cols[0]: y_col = plot_cols[1] else: y_col = plot_cols[0] series_x = df[x_col] series_y = df[y_col] coeff, p_value = stat_func(series_x, series_y) if plot_type == "jointplot": plot = sb.jointplot(data=df, x=x_col, y=y_col, stat_func=stat_func if show_stat_func else None, **plot_kwargs) elif plot_type == "regplot": plot = sb.regplot(data=df, x=x_col, y=y_col, **plot_kwargs) elif plot_type == "boxplot": plot = stripboxplot(data=df, x=x_col, y=y_col, **plot_kwargs) else: plot = sb.barplot(data=df, x=x_col, y=y_col, **plot_kwargs) return CorrelationResults(coeff=coeff, p_value=p_value, stat_func=stat_func, series_x=series_x, series_y=series_y, plot=plot)
<SYSTEM_TASK:> Utility function to return a list of patient ids in the Cohort <END_TASK> <USER_TASK:> Description: def _list_patient_ids(self): """ Utility function to return a list of patient ids in the Cohort """
results = [] for patient in self: results.append(patient.id) return(results)
<SYSTEM_TASK:>
Utility function to summarize provenance files for cached items used by a Cohort,
<END_TASK>
<USER_TASK:>
Description:
def summarize_provenance_per_cache(self):
        """Utility function to summarize provenance files for cached items used by a Cohort,
        for each cache_dir that exists. Only existing cache_dirs are summarized.

        This is a summary of provenance files because the function checks to see whether all
        patients' data have the same provenance within the cache dir. The function assumes
        that it will be desirable to have all patients' data generated using the same
        environment, for each cache type.

        At the moment, most PROVENANCE files contain details about packages used to
        generate the cached data file. However, this function is generic & so it summarizes
        the contents of those files irrespective of their contents.

        Returns
        ----------
        Dict containing summarized provenance for each existing cache_dir, after checking
        to see that provenance files are identical among all patients in the data frame
        for that cache_dir.

        If conflicting PROVENANCE files are discovered within a cache-dir:
         - a warning is generated, describing the conflict
         - and, a value of `None` is returned in the dictionary for that cache-dir

        See also
        -----------
        * `?cohorts.Cohort.summarize_provenance` which summarizes provenance files among
        cache_dirs.
        * `?cohorts.Cohort.summarize_dataframe` which hashes/summarizes contents of the data
        frame for this cohort.
        """
provenance_summary = {} df = self.as_dataframe() for cache in self.cache_names: cache_name = self.cache_names[cache] cache_provenance = None num_discrepant = 0 this_cache_dir = path.join(self.cache_dir, cache_name) if path.exists(this_cache_dir): for patient_id in self._list_patient_ids(): patient_cache_dir = path.join(this_cache_dir, patient_id) try: this_provenance = self.load_provenance(patient_cache_dir = patient_cache_dir) except: this_provenance = None if this_provenance: if not(cache_provenance): cache_provenance = this_provenance else: num_discrepant += compare_provenance(this_provenance, cache_provenance) if num_discrepant == 0: provenance_summary[cache_name] = cache_provenance else: provenance_summary[cache_name] = None return(provenance_summary)
<SYSTEM_TASK:>
Utility function to summarize provenance files for cached items used by a Cohort.
<END_TASK>
<USER_TASK:>
Description:
def summarize_provenance(self):
        """Utility function to summarize provenance files for cached items used by a Cohort.

        At the moment, most PROVENANCE files contain details about packages used to generate files.
        However, this function is generic & so it summarizes the contents of those files
        irrespective of their contents.

        Returns
        ----------
        Dict containing summary of provenance items, among all cache dirs used by the Cohort.

        That is, if all provenances are identical across all cache dirs, then a single set of
        provenances is returned. Otherwise, if the provenances are not identical, the provenance
        items per cache_dir are returned.

        See also
        ----------
        `?cohorts.Cohort.summarize_provenance_per_cache` which is used to summarize provenance
        for each existing cache_dir.
        """
provenance_per_cache = self.summarize_provenance_per_cache() summary_provenance = None num_discrepant = 0 for cache in provenance_per_cache: if not(summary_provenance): ## pick arbitrary provenance & call this the "summary" (for now) summary_provenance = provenance_per_cache[cache] summary_provenance_name = cache ## for each cache, check equivalence with summary_provenance num_discrepant += compare_provenance( provenance_per_cache[cache], summary_provenance, left_outer_diff = "In %s but not in %s" % (cache, summary_provenance_name), right_outer_diff = "In %s but not in %s" % (summary_provenance_name, cache) ) ## compare provenance across cached items if num_discrepant == 0: prov = summary_provenance ## report summary provenance if exists else: prov = provenance_per_cache ## otherwise, return provenance per cache return(prov)
<SYSTEM_TASK:> Utility function to summarize data source status for this Cohort, useful for confirming <END_TASK> <USER_TASK:> Description: def summarize_data_sources(self): """Utility function to summarize data source status for this Cohort, useful for confirming the state of data used for an analysis Returns ---------- Dictionary with summary of data sources Currently contains - dataframe_hash: hash of the dataframe (see `?cohorts.Cohort.summarize_dataframe`) - provenance_file_summary: summary of provenance file contents (see `?cohorts.Cohort.summarize_provenance`) """
provenance_file_summary = self.summarize_provenance() dataframe_hash = self.summarize_dataframe() results = { "provenance_file_summary": provenance_file_summary, "dataframe_hash": dataframe_hash } return(results)
<SYSTEM_TASK:>
Parse out the variant calling statistics for a given variant from a Strelka VCF
<END_TASK>
<USER_TASK:>
Description:
def strelka_somatic_variant_stats(variant, variant_metadata):
    """Parse out the variant calling statistics for a given variant from a Strelka VCF

    Parameters
    ----------
    variant : varcode.Variant
    variant_metadata : dict
        Metadata for this variant, including a "sample_info" dictionary that maps
        sample name to variant calling statistics (the sample columns in a Strelka VCF)

    Returns
    -------
    SomaticVariantStats
    """
sample_info = variant_metadata["sample_info"] # Ensure there are exactly two samples in the VCF, a tumor and normal assert len(sample_info) == 2, "More than two samples found in the somatic VCF" tumor_stats = _strelka_variant_stats(variant, sample_info["TUMOR"]) normal_stats = _strelka_variant_stats(variant, sample_info["NORMAL"]) return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
<SYSTEM_TASK:>
Parse a single sample's variant calling statistics based on Strelka VCF output
<END_TASK>
<USER_TASK:>
Description:
def _strelka_variant_stats(variant, sample_info):
    """Parse a single sample's variant calling statistics based on Strelka VCF output

    Parameters
    ----------
    variant : varcode.Variant
    sample_info : dict
        Dictionary of Strelka-specific variant calling fields

    Returns
    -------
    VariantStats
    """
if variant.is_deletion or variant.is_insertion: # ref: https://sites.google.com/site/strelkasomaticvariantcaller/home/somatic-variant-output ref_depth = int(sample_info['TAR'][0]) # number of reads supporting ref allele (non-deletion) alt_depth = int(sample_info['TIR'][0]) # number of reads supporting alt allele (deletion) depth = ref_depth + alt_depth else: # Retrieve the Tier 1 counts from Strelka ref_depth = int(sample_info[variant.ref+"U"][0]) alt_depth = int(sample_info[variant.alt+"U"][0]) depth = alt_depth + ref_depth if depth > 0: vaf = float(alt_depth) / depth else: # unclear how to define vaf if no reads support variant # up to user to interpret this (hopefully filtered out in QC settings) vaf = None return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf)
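As a small worked sketch of the tier-1 arithmetic above, using made-up sample_info values for a C>T SNV:
# Made-up Strelka FORMAT fields for a C>T substitution; tier-1 counts are the
# first element of each allele-count field ("CU", "TU", ...).
sample_info = {"AU": [0, 0], "CU": [40, 42], "GU": [0, 0], "TU": [10, 11]}
ref_depth = int(sample_info["C" + "U"][0])   # 40 reads support the reference C
alt_depth = int(sample_info["T" + "U"][0])   # 10 reads support the alternate T
depth = ref_depth + alt_depth                # 50
vaf = float(alt_depth) / depth               # 0.2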
<SYSTEM_TASK:>
Parse out the variant calling statistics for a given variant from a Mutect VCF
<END_TASK>
<USER_TASK:>
Description:
def mutect_somatic_variant_stats(variant, variant_metadata):
    """Parse out the variant calling statistics for a given variant from a Mutect VCF

    Parameters
    ----------
    variant : varcode.Variant
    variant_metadata : dict
        Metadata for this variant, including a "sample_info" dictionary that maps
        sample name to variant calling statistics (the sample columns in a Mutect VCF)

    Returns
    -------
    SomaticVariantStats
    """
sample_info = variant_metadata["sample_info"] # Ensure there are exactly two samples in the VCF, a tumor and normal assert len(sample_info) == 2, "More than two samples found in the somatic VCF" # Find the sample with the genotype field set to variant in the VCF tumor_sample_infos = [info for info in sample_info.values() if info["GT"] == "0/1"] # Ensure there is only one such sample assert len(tumor_sample_infos) == 1, "More than one tumor sample found in the VCF file" tumor_sample_info = tumor_sample_infos[0] normal_sample_info = [info for info in sample_info.values() if info["GT"] != "0/1"][0] tumor_stats = _mutect_variant_stats(variant, tumor_sample_info) normal_stats = _mutect_variant_stats(variant, normal_sample_info) return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
<SYSTEM_TASK:> Parse out the variant calling statistics for a given variant from a MAF file <END_TASK> <USER_TASK:> Description: def maf_somatic_variant_stats(variant, variant_metadata): """ Parse out the variant calling statistics for a given variant from a MAF file Assumes the MAF format described here: https://www.biostars.org/p/161298/#161777 Parameters ---------- variant : varcode.Variant variant_metadata : dict Dictionary of metadata for this variant Returns ------- SomaticVariantStats """
tumor_stats = None normal_stats = None if "t_ref_count" in variant_metadata: tumor_stats = _maf_variant_stats(variant, variant_metadata, prefix="t") if "n_ref_count" in variant_metadata: normal_stats = _maf_variant_stats(variant, variant_metadata, prefix="n") return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
<SYSTEM_TASK:>
Return True if the given variant_file is in Strelka format
<END_TASK>
<USER_TASK:>
Description:
def _vcf_is_strelka(variant_file, variant_metadata):
    """Return True if the given variant_file is in Strelka format
    """
if "strelka" in variant_file.lower(): return True elif "NORMAL" in variant_metadata["sample_info"].keys(): return True else: vcf_reader = vcf.Reader(open(variant_file, "r")) try: vcf_type = vcf_reader.metadata["content"] except KeyError: vcf_type = "" if "strelka" in vcf_type.lower(): return True return False
<SYSTEM_TASK:> Parse the variant calling stats from a variant called from multiple variant files. The stats are merged <END_TASK> <USER_TASK:> Description: def variant_stats_from_variant(variant, metadata, merge_fn=(lambda all_stats: \ max(all_stats, key=(lambda stats: stats.tumor_stats.depth)))): """Parse the variant calling stats from a variant called from multiple variant files. The stats are merged based on `merge_fn` Parameters ---------- variant : varcode.Variant metadata : dict Dictionary of variant file to variant calling metadata from that file merge_fn : function Function from list of SomaticVariantStats to single SomaticVariantStats. This is used if a variant is called by multiple callers or appears in multiple VCFs. By default, this uses the data from the caller that had a higher tumor depth. Returns ------- SomaticVariantStats """
all_stats = [] for (variant_file, variant_metadata) in metadata.items(): if _vcf_is_maf(variant_file=variant_file): stats = maf_somatic_variant_stats(variant, variant_metadata) elif _vcf_is_strelka(variant_file=variant_file, variant_metadata=variant_metadata): stats = strelka_somatic_variant_stats(variant, variant_metadata) elif _vcf_is_mutect(variant_file=variant_file, variant_metadata=variant_metadata): stats = mutect_somatic_variant_stats(variant, variant_metadata) else: raise ValueError("Cannot parse sample fields, variant file {} is from an unsupported caller.".format(variant_file)) all_stats.append(stats) return merge_fn(all_stats)
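A sketch of passing a custom merge_fn to variant_stats_from_variant, preferring the caller that reports the highest tumor allele frequency instead of the deepest tumor coverage; `variant` and `metadata` are assumed to come from an already-loaded variant collection and are not defined here.
# Hypothetical alternative merge strategy for variants reported by several callers.
def merge_by_tumor_vaf(all_stats):
    return max(all_stats,
               key=lambda stats: stats.tumor_stats.variant_allele_frequency or 0.0)

somatic_stats = variant_stats_from_variant(variant, metadata,
                                           merge_fn=merge_by_tumor_vaf)
print(somatic_stats.tumor_stats.depth,
      somatic_stats.tumor_stats.variant_allele_frequency)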
<SYSTEM_TASK:>
Load in Pageant CoverageDepth results with Ensembl loci.
<END_TASK>
<USER_TASK:>
Description:
def load_ensembl_coverage(cohort, coverage_path, min_tumor_depth, min_normal_depth=0,
                          pageant_dir_fn=None):
    """
    Load in Pageant CoverageDepth results with Ensembl loci.

    coverage_path is a path to Pageant CoverageDepth output directory, with
    one subdirectory per patient and a `cdf.csv` file inside each patient subdir.

    If min_normal_depth is 0, calculate tumor coverage. Otherwise, calculate joint
    tumor/normal coverage.

    pageant_dir_fn is a function that takes in a Patient and produces a Pageant
    dir name.

    Last tested with Pageant CoverageDepth version 1ca9ed2.
    """
# Function to grab the pageant file name using the Patient if pageant_dir_fn is None: pageant_dir_fn = lambda patient: patient.id columns_both = [ "depth1", # Normal "depth2", # Tumor "onBP1", "onBP2", "numOnLoci", "fracBPOn1", "fracBPOn2", "fracLociOn", "offBP1", "offBP2", "numOffLoci", "fracBPOff1", "fracBPOff2", "fracLociOff", ] columns_single = [ "depth", "onBP", "numOnLoci", "fracBPOn", "fracLociOn", "offBP", "numOffLoci", "fracBPOff", "fracLociOff" ] if min_normal_depth < 0: raise ValueError("min_normal_depth must be >= 0") use_tumor_only = (min_normal_depth == 0) columns = columns_single if use_tumor_only else columns_both ensembl_loci_dfs = [] for patient in cohort: patient_ensembl_loci_df = pd.read_csv( path.join(coverage_path, pageant_dir_fn(patient), "cdf.csv"), names=columns, header=1) # pylint: disable=no-member # pylint gets confused by read_csv if use_tumor_only: depth_mask = (patient_ensembl_loci_df.depth == min_tumor_depth) else: depth_mask = ( (patient_ensembl_loci_df.depth1 == min_normal_depth) & (patient_ensembl_loci_df.depth2 == min_tumor_depth)) patient_ensembl_loci_df = patient_ensembl_loci_df[depth_mask] assert len(patient_ensembl_loci_df) == 1, ( "Incorrect number of tumor={}, normal={} depth loci results: {} for patient {}".format( min_tumor_depth, min_normal_depth, len(patient_ensembl_loci_df), patient)) patient_ensembl_loci_df["patient_id"] = patient.id ensembl_loci_dfs.append(patient_ensembl_loci_df) ensembl_loci_df = pd.concat(ensembl_loci_dfs) ensembl_loci_df["MB"] = ensembl_loci_df.numOnLoci / 1000000.0 return ensembl_loci_df[["patient_id", "numOnLoci", "MB"]]
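A usage sketch for load_ensembl_coverage; the coverage path and the per-patient directory naming convention are assumptions about how the Pageant output was organized, not requirements of the function.
# Hypothetical layout: one Pageant output directory per patient, named
# "<patient_id>-coverage", each containing a cdf.csv file.
coverage_df = load_ensembl_coverage(
    cohort,
    coverage_path="/path/to/pageant/output",
    min_tumor_depth=30,
    min_normal_depth=7,
    pageant_dir_fn=lambda patient: "%s-coverage" % patient.id)
# Resulting columns: patient_id, numOnLoci, MB (megabases of loci covered
# at the requested tumor/normal depths).
print(coverage_df.head())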
<SYSTEM_TASK:> Using the size of the y axis, return a fraction of that size. <END_TASK> <USER_TASK:> Description: def vertical_percent(plot, percent=0.1): """ Using the size of the y axis, return a fraction of that size. """
plot_bottom, plot_top = plot.get_ylim() return percent * (plot_top - plot_bottom)
<SYSTEM_TASK:> Overlay a stripplot on top of a boxplot. <END_TASK> <USER_TASK:> Description: def stripboxplot(x, y, data, ax=None, significant=None, **kwargs): """ Overlay a stripplot on top of a boxplot. """
ax = sb.boxplot( x=x, y=y, data=data, ax=ax, fliersize=0, **kwargs ) plot = sb.stripplot( x=x, y=y, data=data, ax=ax, jitter=kwargs.pop("jitter", 0.05), color=kwargs.pop("color", "0.3"), **kwargs ) if data[y].min() >= 0: hide_negative_y_ticks(plot) if significant is not None: add_significance_indicator(plot=plot, significant=significant) return plot
<SYSTEM_TASK:>
Perform a Fisher's exact test to compare two binary columns
<END_TASK>
<USER_TASK:>
Description:
def fishers_exact_plot(data, condition1, condition2, ax=None,
                       condition1_value=None,
                       alternative="two-sided", **kwargs):
    """
    Perform a Fisher's exact test to compare two binary columns

    Parameters
    ----------
    data: Pandas dataframe
        Dataframe to retrieve information from

    condition1: str
        First binary column to compare (and used for test sidedness)

    condition2: str
        Second binary column to compare

    ax : Axes, default None
        Axes to plot on

    condition1_value:
        If `condition1` is not a binary column, split on =/!= to condition1_value

    alternative:
        Specify the sidedness of the test: "two-sided", "less"
        or "greater"
    """
plot = sb.barplot( x=condition1, y=condition2, ax=ax, data=data, **kwargs ) plot.set_ylabel("Percent %s" % condition2) condition1_mask = get_condition_mask(data, condition1, condition1_value) count_table = pd.crosstab(data[condition1], data[condition2]) print(count_table) oddsratio, p_value = fisher_exact(count_table, alternative=alternative) add_significance_indicator(plot=plot, significant=p_value <= 0.05) only_percentage_ticks(plot) if alternative != "two-sided": raise ValueError("We need to better understand the one-sided Fisher's Exact test") sided_str = "two-sided" print("Fisher's Exact Test: OR: {}, p-value={} ({})".format(oddsratio, p_value, sided_str)) return FishersExactResults(oddsratio=oddsratio, p_value=p_value, sided_str=sided_str, with_condition1_series=data[condition1_mask][condition2], without_condition1_series=data[~condition1_mask][condition2], plot=plot)
<SYSTEM_TASK:> Create a box plot comparing a condition and perform a <END_TASK> <USER_TASK:> Description: def mann_whitney_plot(data, condition, distribution, ax=None, condition_value=None, alternative="two-sided", skip_plot=False, **kwargs): """ Create a box plot comparing a condition and perform a Mann Whitney test to compare the distribution in condition A v B Parameters ---------- data: Pandas dataframe Dataframe to retrieve information from condition: str Column to use as the splitting criteria distribution: str Column to use as the Y-axis or distribution in the test ax : Axes, default None Axes to plot on condition_value: If `condition` is not a binary column, split on =/!= to condition_value alternative: Specify the sidedness of the Mann-Whitney test: "two-sided", "less" or "greater" skip_plot: Calculate the test statistic and p-value, but don't plot. """
condition_mask = get_condition_mask(data, condition, condition_value) U, p_value = mannwhitneyu( data[condition_mask][distribution], data[~condition_mask][distribution], alternative=alternative ) plot = None if not skip_plot: plot = stripboxplot( x=condition, y=distribution, data=data, ax=ax, significant=p_value <= 0.05, **kwargs ) sided_str = sided_str_from_alternative(alternative, condition) print("Mann-Whitney test: U={}, p-value={} ({})".format(U, p_value, sided_str)) return MannWhitneyResults(U=U, p_value=p_value, sided_str=sided_str, with_condition_series=data[condition_mask][distribution], without_condition_series=data[~condition_mask][distribution], plot=plot)
<SYSTEM_TASK:> Create a ROC curve and compute the bootstrap AUC for the given variable and outcome <END_TASK> <USER_TASK:> Description: def roc_curve_plot(data, value_column, outcome_column, bootstrap_samples=100, ax=None): """Create a ROC curve and compute the bootstrap AUC for the given variable and outcome Parameters ---------- data : Pandas dataframe Dataframe to retrieve information from value_column : str Column to retrieve the values from outcome_column : str Column to use as the outcome variable bootstrap_samples : int, optional Number of bootstrap samples to use to compute the AUC ax : Axes, default None Axes to plot on Returns ------- (mean_bootstrap_auc, roc_plot) : (float, matplotlib plot) Mean AUC for the given number of bootstrap samples and the plot """
scores = bootstrap_auc(df=data, col=value_column, pred_col=outcome_column, n_bootstrap=bootstrap_samples) mean_bootstrap_auc = scores.mean() print("{}, Bootstrap (samples = {}) AUC:{}, std={}".format( value_column, bootstrap_samples, mean_bootstrap_auc, scores.std())) outcome = data[outcome_column].astype(int) values = data[value_column] fpr, tpr, thresholds = roc_curve(outcome, values) if ax is None: ax = plt.gca() roc_plot = ax.plot(fpr, tpr, lw=1, label=value_column) ax.set_xlim([-0.05, 1.05]) ax.set_ylim([-0.05, 1.05]) ax.set_xlabel('False Positive Rate') ax.set_ylabel('True Positive Rate') ax.legend(loc=2, borderaxespad=0.) ax.set_title('{} ROC Curve (n={})'.format(value_column, len(values))) return (mean_bootstrap_auc, roc_plot)
<SYSTEM_TASK:>
Utility script applying several regexes to a string.
<END_TASK>
<USER_TASK:>
Description:
def _strip_column_name(col_name, keep_paren_contents=True):
    """
    Utility script applying several regexes to a string.
    Intended to be used by `strip_column_names`.

    This function will:
        1. replace informative punctuation components with text
        2. (optionally) remove text within parentheses
        3. replace remaining punctuation/whitespace with _
        4. strip leading/trailing punctuation/whitespace

    Parameters
    ----------
    col_name (str): input character string
    keep_paren_contents (logical):
        controls behavior of within-paren elements of text
         - if True, (the default) all text within parens retained
         - if False, text within parens will be removed from the field name

    Returns
    --------
    modified string for new field name

    Examples
    --------
    > print([_strip_column_name(col) for col in ['PD-L1','PD L1','PD L1_']])
    """
# start with input
    new_col_name = col_name

    # replace meaningful punctuation with text equivalents
    # surround each with whitespace to enforce consistent use of _
    punctuation_to_text = {
        '<=': 'le',
        '>=': 'ge',
        '=<': 'le',
        '=>': 'ge',
        '<': 'lt',
        '>': 'gt',
        '#': 'num'
    }
    for punctuation, punctuation_text in punctuation_to_text.items():
        new_col_name = new_col_name.replace(punctuation, punctuation_text)

    # remove contents within ()
    if not(keep_paren_contents):
        new_col_name = re.sub(r'\([^)]*\)', '', new_col_name)

    # replace remaining punctuation/whitespace with _
    punct_pattern = r'[\W_]+'
    punct_replacement = '_'
    new_col_name = re.sub(punct_pattern, punct_replacement, new_col_name)

    # remove leading/trailing _ if it exists (if last char was punctuation)
    new_col_name = new_col_name.strip("_")

    # TODO: check for empty string

    # return lower-case version of column name
    return new_col_name.lower()
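A few input/output pairs implied by the rules above, derived from the code; this is an illustrative sketch rather than part of the original module.
# Expected behavior of _strip_column_name on a few sample column names.
assert _strip_column_name("PD-L1") == "pd_l1"
assert _strip_column_name("PD L1 (value)") == "pd_l1_value"
assert _strip_column_name("PD L1 (value)", keep_paren_contents=False) == "pd_l1"
assert _strip_column_name("# of mutations") == "num_of_mutations"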
<SYSTEM_TASK:> Utility script for renaming pandas columns to patsy-friendly names. <END_TASK> <USER_TASK:> Description: def strip_column_names(cols, keep_paren_contents=True): """ Utility script for renaming pandas columns to patsy-friendly names. Revised names have been: - stripped of all punctuation and whitespace (converted to text or `_`) - converted to lower case Takes a list of column names, returns a dict mapping names to revised names. If there are any concerns with the conversion, this will print a warning & return original column names. Parameters ---------- cols (list): list of strings containing column names keep_paren_contents (logical): controls behavior of within-paren elements of text - if True, (the default) all text within parens retained - if False, text within parens will be removed from the field name Returns ------- dict mapping col_names -> new_col_names Example ------- > df = {'one' : pd.Series([1., 2., 3.], index=['a', 'b', 'c']), 'two' : pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']), 'PD L1 (value)': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd']), 'PD L1 (>1)': pd.Series([0., 1., 1., 0.], index=['a', 'b', 'c', 'd']), } > df = pd.DataFrame(df) > df = df.rename(columns = strip_column_names(df.columns)) ## observe, by comparison > df2 = df.rename(columns = strip_column_names(df.columns, keep_paren_contents=False)) """
# strip/replace punctuation new_cols = [ _strip_column_name(col, keep_paren_contents=keep_paren_contents) for col in cols] if len(new_cols) != len(set(new_cols)): warn_str = 'Warning: strip_column_names (if run) would introduce duplicate names.' warn_str += ' Reverting column names to the original.' warnings.warn(warn_str, Warning) print('Warning: strip_column_names would introduce duplicate names. Please fix & try again.') return dict(zip(cols, cols)) return dict(zip(cols, new_cols))
<SYSTEM_TASK:>
Given an object and a dictionary, give the object new attributes from that dictionary.
<END_TASK>
<USER_TASK:>
Description:
def set_attributes(obj, additional_data):
    """
    Given an object and a dictionary, give the object new attributes from that dictionary.

    Uses _strip_column_name to get rid of whitespace/uppercase/special characters.
    """
for key, value in additional_data.items():
        stripped_key = _strip_column_name(key)
        # Guard against clobbering an existing attribute under either the raw
        # or the stripped name, since the stripped name is what gets set below.
        if hasattr(obj, key) or hasattr(obj, stripped_key):
            raise ValueError("Key %s in additional_data already exists in this object" % key)
        setattr(obj, stripped_key, value)
<SYSTEM_TASK:> Construct a DataFrameHolder and then return either that or the DataFrame. <END_TASK> <USER_TASK:> Description: def return_obj(cols, df, return_cols=False): """Construct a DataFrameHolder and then return either that or the DataFrame."""
df_holder = DataFrameHolder(cols=cols, df=df) return df_holder.return_self(return_cols=return_cols)
<SYSTEM_TASK:>
Utility function to compare two arbitrary provenance dicts
<END_TASK>
<USER_TASK:>
Description:
def compare_provenance(
        this_provenance, other_provenance,
        left_outer_diff = "In current but not comparison",
        right_outer_diff = "In comparison but not current"):
    """Utility function to compare two arbitrary provenance dicts
    returns number of discrepancies.

    Parameters
    ----------
    this_provenance: provenance dict (to be compared to "other_provenance")
    other_provenance: comparison provenance dict

    (optional)
    left_outer_diff: description/prefix used when printing items in this_provenance but not in other_provenance
    right_outer_diff: description/prefix used when printing items in other_provenance but not in this_provenance

    Returns
    -----------
    Number of discrepancies (0: None)
    """
## if either this or other items is null, return 0 if (not this_provenance or not other_provenance): return 0 this_items = set(this_provenance.items()) other_items = set(other_provenance.items()) # Two-way diff: are any modules introduced, and are any modules lost? new_diff = this_items.difference(other_items) old_diff = other_items.difference(this_items) warn_str = "" if len(new_diff) > 0: warn_str += "%s: %s" % ( left_outer_diff, _provenance_str(new_diff)) if len(old_diff) > 0: warn_str += "%s: %s" % ( right_outer_diff, _provenance_str(old_diff)) if len(warn_str) > 0: warnings.warn(warn_str, Warning) return(len(new_diff)+len(old_diff))
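A small sketch of how compare_provenance counts discrepancies, using two made-up provenance dicts; the package names and versions are illustrative only.
# Two hypothetical provenance dicts that differ in one package version.
this_prov = {"varcode": "0.5.9", "pandas": "0.19.2"}
other_prov = {"varcode": "0.5.9", "pandas": "0.20.1"}
num_discrepant = compare_provenance(this_prov, other_prov)
# The pandas entry differs in each direction, so two discrepancies are
# counted and a warning describing both sides is emitted.
print(num_discrepant)  # 2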
<SYSTEM_TASK:> Generate a random collection of missense variants by trying random variants repeatedly. <END_TASK> <USER_TASK:> Description: def generate_random_missense_variants(num_variants=10, max_search=100000, reference="GRCh37"): """ Generate a random collection of missense variants by trying random variants repeatedly. """
variants = [] for i in range(max_search): bases = ["A", "C", "T", "G"] random_ref = choice(bases) bases.remove(random_ref) random_alt = choice(bases) random_contig = choice(["1", "2", "3", "4", "5"]) random_variant = Variant(contig=random_contig, start=randint(1, 1000000), ref=random_ref, alt=random_alt, ensembl=reference) try: effects = random_variant.effects() for effect in effects: if isinstance(effect, Substitution): variants.append(random_variant) break except: continue if len(variants) == num_variants: break return VariantCollection(variants)
<SYSTEM_TASK:> Filter variants from the Variant Collection <END_TASK> <USER_TASK:> Description: def filter_variants(variant_collection, patient, filter_fn, **kwargs): """Filter variants from the Variant Collection Parameters ---------- variant_collection : varcode.VariantCollection patient : cohorts.Patient filter_fn: function Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved. Returns ------- varcode.VariantCollection Filtered variant collection, with only the variants passing the filter """
if filter_fn: return variant_collection.clone_with_new_elements([ variant for variant in variant_collection if filter_fn(FilterableVariant( variant=variant, variant_collection=variant_collection, patient=patient, ), **kwargs) ]) else: return variant_collection
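A sketch of a filter_fn for filter_variants; it relies only on standard varcode Variant attributes (is_snv, contig) and is an illustration rather than part of the original module. `variant_collection` and `patient` are assumed to exist elsewhere.
# Hypothetical filter: keep autosomal SNVs only.
def autosomal_snv_filter(filterable_variant, **kwargs):
    variant = filterable_variant.variant
    return variant.is_snv and variant.contig not in ("X", "Y", "MT")

filtered_collection = filter_variants(variant_collection, patient,
                                      filter_fn=autosomal_snv_filter)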
<SYSTEM_TASK:>
Filter effects from the Effect Collection
<END_TASK>
<USER_TASK:>
Description:
def filter_effects(effect_collection, variant_collection, patient, filter_fn, all_effects, **kwargs):
    """Filter effects from the Effect Collection

    Parameters
    ----------
    effect_collection : varcode.EffectCollection
    variant_collection : varcode.VariantCollection
    patient : cohorts.Patient
    filter_fn : function
        Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved.
    all_effects : boolean
        Return the single, top-priority effect if False. If True, return all effects (don't filter
        to top-priority).

    Returns
    -------
    varcode.EffectCollection
        Filtered effect collection, with only the effects passing the filter
    """
def top_priority_maybe(effect_collection): """ Always (unless all_effects=True) take the top priority effect per variant so we end up with a single effect per variant. """ if all_effects: return effect_collection return EffectCollection(list(effect_collection.top_priority_effect_per_variant().values())) def apply_filter_fn(filter_fn, effect): """ Return True if filter_fn is true for the effect or its alternate_effect. If no alternate_effect, then just return True if filter_fn is True. """ applied = filter_fn(FilterableEffect( effect=effect, variant_collection=variant_collection, patient=patient), **kwargs) if hasattr(effect, "alternate_effect"): applied_alternate = filter_fn(FilterableEffect( effect=effect.alternate_effect, variant_collection=variant_collection, patient=patient), **kwargs) return applied or applied_alternate return applied if filter_fn: return top_priority_maybe(EffectCollection([ effect for effect in effect_collection if apply_filter_fn(filter_fn, effect)])) else: return top_priority_maybe(effect_collection)
<SYSTEM_TASK:> Create a string representation of this collection, showing up to <END_TASK> <USER_TASK:> Description: def to_string(self, limit=None): """ Create a string representation of this collection, showing up to `limit` items. """
header = self.short_string() if len(self) == 0: return header contents = "" element_lines = [ " -- %s" % (element,) for element in self.elements[:limit] ] contents = "\n".join(element_lines) if limit is not None and len(self.elements) > limit: contents += "\n ... and %d more" % (len(self) - limit) return "%s\n%s" % (header, contents)
<SYSTEM_TASK:> Log error failing silently on error <END_TASK> <USER_TASK:> Description: def safe_log_error(self, error: Exception, *info: str): """Log error failing silently on error"""
self.__do_safe(lambda: self.logger.error(error, *info))
<SYSTEM_TASK:> Log info failing silently on error <END_TASK> <USER_TASK:> Description: def safe_log_info(self, *info: str): """Log info failing silently on error"""
self.__do_safe(lambda: self.logger.info(*info))
<SYSTEM_TASK:> Make a client if we didn't get one. <END_TASK> <USER_TASK:> Description: def _default_client(jws_client, reactor, key, alg): """ Make a client if we didn't get one. """
if jws_client is None: pool = HTTPConnectionPool(reactor) agent = Agent(reactor, pool=pool) jws_client = JWSClient(HTTPClient(agent=agent), key, alg) return jws_client
<SYSTEM_TASK:>
Find a challenge combination that consists of a single challenge that the
<END_TASK>
<USER_TASK:>
Description:
def _find_supported_challenge(authzr, responders):
    """
    Find a challenge combination that consists of a single challenge that the
    responder can satisfy.

    :param ~acme.messages.AuthorizationResource authzr: The authorization to
        examine.

    :type responders: List[`~txacme.interfaces.IResponder`]
    :param responders: The possible responders to use.

    :raises NoSupportedChallenges: When a suitable challenge combination is not
        found.

    :rtype: Tuple[`~txacme.interfaces.IResponder`,
            `~acme.messages.ChallengeBody`]
    :return: The responder and challenge that were found.
    """
matches = [ (responder, challbs[0]) for challbs in authzr.body.resolved_combinations for responder in responders if [challb.typ for challb in challbs] == [responder.challenge_type]] if len(matches) == 0: raise NoSupportedChallenges(authzr) else: return matches[0]
<SYSTEM_TASK:>
Complete an authorization using a responder.
<END_TASK>
<USER_TASK:>
Description:
def answer_challenge(authzr, client, responders):
    """
    Complete an authorization using a responder.

    :param ~acme.messages.AuthorizationResource authzr: The authorization to
        complete.
    :param .Client client: The ACME client.

    :type responders: List[`~txacme.interfaces.IResponder`]
    :param responders: A list of responders that can be used to complete the
        challenge with.

    :return: A deferred firing when the authorization is verified.
    """
responder, challb = _find_supported_challenge(authzr, responders) response = challb.response(client.key) def _stop_responding(): return maybeDeferred( responder.stop_responding, authzr.body.identifier.value, challb.chall, response) return ( maybeDeferred( responder.start_responding, authzr.body.identifier.value, challb.chall, response) .addCallback(lambda _: client.answer_challenge(challb, response)) .addCallback(lambda _: _stop_responding) )