text_prompt: string (lengths 100 to 17.7k)
code_prompt: string (lengths 7 to 9.86k)
<SYSTEM_TASK:> Perform a 'git status' in each data repository. <END_TASK> <USER_TASK:> Description: def git_status_all_repos(cat, hard=True, origin=False, clean=True): """Perform a 'git status' in each data repository. """
log = cat.log
log.debug("gitter.git_status_all_repos()")
all_repos = cat.PATHS.get_all_repo_folders()
for repo_name in all_repos:
    log.info("Repo in: '{}'".format(repo_name))
    # Get the initial git SHA
    sha_beg = get_sha(repo_name)
    log.debug("Current SHA: '{}'".format(sha_beg))
    log.info("Fetching")
    fetch(repo_name, log=cat.log)
    git_comm = ["git", "status"]
    _call_command_in_repo(
        git_comm, repo_name, cat.log, fail=True, log_flag=True)
    sha_end = get_sha(repo_name)
    if sha_end != sha_beg:
        log.info("Updated SHA: '{}'".format(sha_end))
return
<SYSTEM_TASK:> Given a list of repositories, make sure they're all cloned. <END_TASK> <USER_TASK:> Description: def clone(repo, log, depth=1): """Given a list of repositories, make sure they're all cloned. Should be called from the subclassed `Catalog` objects, passed a list of specific repository names. Arguments --------- all_repos : list of str *Absolute* path specification of each target repository. """
kwargs = {}
if depth > 0:
    kwargs['depth'] = depth
try:
    repo_name = os.path.split(repo)[-1]
    repo_name = "https://github.com/astrocatalogs/" + repo_name + ".git"
    log.warning("Cloning '{}' (only needs to be done ".format(repo) +
                "once, may take a few minutes per repo).")
    grepo = git.Repo.clone_from(repo_name, repo, **kwargs)
except Exception:
    log.error("CLONING '{}' INTERRUPTED".format(repo))
    raise
return grepo
<SYSTEM_TASK:> Check that spectrum has legal combination of attributes. <END_TASK> <USER_TASK:> Description: def _check(self): """Check that spectrum has legal combination of attributes."""
# Run the super method
super(Spectrum, self)._check()
err_str = None
has_data = self._KEYS.DATA in self
has_wave = self._KEYS.WAVELENGTHS in self
has_flux = self._KEYS.FLUXES in self
has_filename = self._KEYS.FILENAME in self
if not has_data:
    if (not has_wave or not has_flux) and not has_filename:
        err_str = (
            "If `{}` not given".format(self._KEYS.DATA) +
            "; `{}` or `{}` needed".format(
                self._KEYS.WAVELENGTHS, self._KEYS.FLUXES))
if err_str is not None:
    raise ValueError(err_str)
return
<SYSTEM_TASK:> Check if spectrum is duplicate of another. <END_TASK> <USER_TASK:> Description: def is_duplicate_of(self, other): """Check if spectrum is duplicate of another."""
if super(Spectrum, self).is_duplicate_of(other):
    return True
row_matches = 0
for ri, row in enumerate(self.get(self._KEYS.DATA, [])):
    lambda1, flux1 = tuple(row[0:2])
    if (self._KEYS.DATA not in other or
            ri >= len(other[self._KEYS.DATA])):
        break
    lambda2, flux2 = tuple(other[self._KEYS.DATA][ri][0:2])
    minlambdalen = min(len(lambda1), len(lambda2))
    minfluxlen = min(len(flux1), len(flux2))
    if (lambda1[:minlambdalen + 1] == lambda2[:minlambdalen + 1] and
            flux1[:minfluxlen + 1] == flux2[:minfluxlen + 1] and
            float(flux1[:minfluxlen + 1]) != 0.0):
        row_matches += 1
    # Five row matches should be enough to be sure spectrum is a dupe.
    if row_matches >= 5:
        return True
    # Matches need to happen in the first 10 rows.
    if ri >= 10:
        break
return False
<SYSTEM_TASK:> Logic for sorting keys in a `Spectrum` relative to one another. <END_TASK> <USER_TASK:> Description: def sort_func(self, key): """Logic for sorting keys in a `Spectrum` relative to one another."""
if key == self._KEYS.TIME:
    return 'aaa'
if key == self._KEYS.DATA:
    return 'zzy'
if key == self._KEYS.SOURCE:
    return 'zzz'
return key
<SYSTEM_TASK:> Return a 'pretty' string representation of this `Key`. <END_TASK> <USER_TASK:> Description: def pretty(self): """Return a 'pretty' string representation of this `Key`. note: do not override the builtin `__str__` or `__repr__` methods! """
retval = ("Key(name={}, type={}, listable={}, compare={}, " "priority={}, kind_preference={}, " "replace_better={})").format( self.name, self.type, self.listable, self.compare, self.priority, self.kind_preference, self.replace_better) return retval
<SYSTEM_TASK:> Make sure given value is consistent with this `Key` specification. <END_TASK> <USER_TASK:> Description: def check(self, val): """Make sure given value is consistent with this `Key` specification. NOTE: if `type` is 'None', then `listable` also is *not* checked. """
# If there is no `type` requirement, everything is allowed
if self.type is None:
    return True

is_list = isinstance(val, list)
# If lists are not allowed, and this is a list --> false
if not self.listable and is_list:
    return False

# `is_number` already checks for either list or single value
if self.type == KEY_TYPES.NUMERIC and not is_number(val):
    return False
elif (self.type == KEY_TYPES.TIME and not is_number(val) and
      '-' not in val and '/' not in val):
    return False
elif self.type == KEY_TYPES.STRING:
    # If it's a list, check the first element
    if is_list:
        if not isinstance(val[0], basestring):
            return False
    # Otherwise, check it
    elif not isinstance(val, basestring):
        return False
elif self.type == KEY_TYPES.BOOL:
    if is_list and not isinstance(val[0], bool):
        return False
    elif not is_list and not isinstance(val, bool):
        return False

return True
<SYSTEM_TASK:> Create a standard logger object which logs to file and or stdout stream. <END_TASK> <USER_TASK:> Description: def get_logger(name=None, stream_fmt=None, file_fmt=None, date_fmt=None, stream_level=None, file_level=None, tofile=None, tostr=True): """Create a standard logger object which logs to file and or stdout stream. If a logger has already been created in this session, it is returned (unless `name` is given). Arguments --------- name : str, Handle for this logger, must be distinct for a distinct logger. stream_fmt : str or `None`, Format of log messages to stream (stdout). If `None`, default settings are used. file_fmt : str or `None`, Format of log messages to file. If `None`, default settings are used. date_fmt : str or `None` Format of time stamps to stream and/or file. If `None`, default settings are used. stream_level : int, Logging level for stream. file_level : int, Logging level for file. tofile : str or `None`, Filename to log to (turned off if `None`). tostr : bool, Log to stdout stream. Returns ------- logger : ``logging.Logger`` object, Logger object to use for logging. """
if tofile is None and not tostr:
    raise ValueError(
        "Must log to something: `tofile` or `tostr` must be `True`.")

logger = logging.getLogger(name)

# Add a custom attribute to this `logger` so that we know when an existing
# one is being returned
if hasattr(logger, '_OSC_LOGGER'):
    return logger
else:
    logger._OSC_LOGGER = True

# Set other custom parameters
logger._LOADED = _LOADED_LEVEL

# Make sure handlers don't get duplicated (ipython issue)
while len(logger.handlers) > 0:
    logger.handlers.pop()
# Prevents duplication or something something...
logger.propagate = 0

# Determine and Set Logging Levels
if file_level is None:
    file_level = _FILE_LEVEL_DEF
if stream_level is None:
    stream_level = _STREAM_LEVEL_DEF
# Logger object must be at minimum level
logger.setLevel(int(np.min([file_level, stream_level])))

if date_fmt is None:
    date_fmt = '%Y/%m/%d %H:%M:%S'

# Log to file
# -----------
if tofile is not None:
    if file_fmt is None:
        file_fmt = "%(asctime)s %(levelname)8.8s [%(filename)20.20s:"
        file_fmt += "%(funcName)-20.20s]%(indent)s%(message)s"
    fileFormatter = IndentFormatter(file_fmt, datefmt=date_fmt)
    fileHandler = logging.FileHandler(tofile, 'w')
    fileHandler.setFormatter(fileFormatter)
    fileHandler.setLevel(file_level)
    logger.addHandler(fileHandler)
    # Store output filename to `logger` object
    logger.filename = tofile

# Log To stdout
# -------------
if tostr:
    if stream_fmt is None:
        stream_fmt = "%(indent)s%(message)s"
    strFormatter = IndentFormatter(stream_fmt, datefmt=date_fmt)
    strHandler = logging.StreamHandler()
    strHandler.setFormatter(strFormatter)
    strHandler.setLevel(stream_level)
    logger.addHandler(strHandler)

return logger
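A minimal usage sketch of `get_logger` (the logger name and `example.log` filename are illustrative; the custom `IndentFormatter` is assumed to supply the `%(indent)s` field used in the default formats):

import logging

log = get_logger(name="demo", tofile="example.log", tostr=True,
                 stream_level=logging.INFO, file_level=logging.DEBUG)
log.info("shown on stdout and written to example.log")
log.debug("below the stream level, so written to example.log only")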
<SYSTEM_TASK:> Log an error message and raise an error. <END_TASK> <USER_TASK:> Description: def log_raise(log, err_str, err_type=RuntimeError): """Log an error message and raise an error. Arguments --------- log : `logging.Logger` object err_str : str Error message to be logged and raised. err_type : `Exception` object Type of error to raise. """
log.error(err_str)
# Make sure output is flushed
# (happens automatically to `StreamHandlers`, but not `FileHandlers`)
for handle in log.handlers:
    handle.flush()
# Raise given error
raise err_type(err_str)
<SYSTEM_TASK:> Parse arguments and return configuration settings. <END_TASK> <USER_TASK:> Description: def load_args(self, args, clargs): """Parse arguments and return configuration settings. """
# Parse All Arguments
args = self.parser.parse_args(args=clargs, namespace=args)

# Print the help information if no subcommand is given
# subcommand is required for operation
if args.subcommand is None:
    self.parser.print_help()
    args = None

return args
<SYSTEM_TASK:> Create `argparse` instance, and setup with appropriate parameters. <END_TASK> <USER_TASK:> Description: def _setup_argparse(self): """Create `argparse` instance, and setup with appropriate parameters. """
parser = argparse.ArgumentParser(
    prog='catalog',
    description='Parent Catalog class for astrocats.')

subparsers = parser.add_subparsers(
    description='valid subcommands', dest='subcommand')

# Data Import
# -----------
# Add the 'import' command, and related arguments
self._add_parser_arguments_import(subparsers)

# Git Subcommands
# ---------------
self._add_parser_arguments_git(subparsers)

# Analyze Catalogs
# ----------------
# Add the 'analyze' command, and related arguments
self._add_parser_arguments_analyze(subparsers)

return parser
<SYSTEM_TASK:> Create parser for 'import' subcommand, and associated arguments. <END_TASK> <USER_TASK:> Description: def _add_parser_arguments_import(self, subparsers): """Create parser for 'import' subcommand, and associated arguments. """
import_pars = subparsers.add_parser(
    "import", help="Import data.")
import_pars.add_argument(
    '--update', '-u', dest='update', default=False, action='store_true',
    help='Only update catalog using live sources.')
import_pars.add_argument(
    '--load-stubs', dest='load_stubs', default=False, action='store_true',
    help='Load stubs before running.')
import_pars.add_argument(
    '--archived', '-a', dest='archived', default=False,
    action='store_true', help='Always use task caches.')

# Control which 'tasks' are executed
# ----------------------------------
import_pars.add_argument(
    '--tasks', dest='args_task_list', nargs='*', default=None,
    help='space delimited list of tasks to perform.')
import_pars.add_argument(
    '--yes', dest='yes_task_list', nargs='+', default=None,
    help='space delimited list of tasks to turn on.')
import_pars.add_argument(
    '--no', dest='no_task_list', nargs='+', default=None,
    help='space delimited list of tasks to turn off.')
import_pars.add_argument(
    '--min-task-priority', dest='min_task_priority', default=None,
    help='minimum priority for a task to run')
import_pars.add_argument(
    '--max-task-priority', dest='max_task_priority', default=None,
    help='maximum priority for a task to run')
import_pars.add_argument(
    '--task-groups', dest='task_groups', default=None,
    help='predefined group(s) of tasks to run.')

return import_pars
<SYSTEM_TASK:> Compress the file with the given name and delete the uncompressed file. <END_TASK> <USER_TASK:> Description: def compress_gz(fname): """Compress the file with the given name and delete the uncompressed file. The compressed filename is simply the input filename with '.gz' appended. Arguments --------- fname : str Name of the file to compress and delete. Returns ------- comp_fname : str Name of the compressed file produced. Equal to `fname + '.gz'`. """
import shutil
import gzip

comp_fname = fname + '.gz'
with codecs.open(fname, 'rb') as f_in, gzip.open(
        comp_fname, 'wb') as f_out:
    shutil.copyfileobj(f_in, f_out)
os.remove(fname)
return comp_fname
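A quick usage sketch (the scratch filename is illustrative):

with open('scratch.json', 'w') as ff:  # 'scratch.json' is a made-up name
    ff.write('{"demo": true}')
print(compress_gz('scratch.json'))  # -> 'scratch.json.gz'; original removed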
<SYSTEM_TASK:> Returns the size of given type, and check its suitability for use in an <END_TASK> <USER_TASK:> Description: def IOC_TYPECHECK(t): """ Returns the size of given type, and check its suitability for use in an ioctl command number. """
result = ctypes.sizeof(t)
assert result <= _IOC_SIZEMASK, result
return result
<SYSTEM_TASK:> An ioctl with read parameters. <END_TASK> <USER_TASK:> Description: def IOR(type, nr, size): """ An ioctl with read parameters. size (ctype type or instance) Type/structure of the argument passed to ioctl's "arg" argument. """
return IOC(IOC_READ, type, nr, IOC_TYPECHECK(size))
<SYSTEM_TASK:> An ioctl with write parameters. <END_TASK> <USER_TASK:> Description: def IOW(type, nr, size): """ An ioctl with write parameters. size (ctype type or instance) Type/structure of the argument passed to ioctl's "arg" argument. """
return IOC(IOC_WRITE, type, nr, IOC_TYPECHECK(size))
<SYSTEM_TASK:> An ioctl with both read an writes parameters. <END_TASK> <USER_TASK:> Description: def IOWR(type, nr, size): """ An ioctl with both read an writes parameters. size (ctype type or instance) Type/structure of the argument passed to ioctl's "arg" argument. """
return IOC(IOC_READ | IOC_WRITE, type, nr, IOC_TYPECHECK(size))
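IOR, IOW, and IOWR all delegate to an `IOC` packer. As a hedged sketch, the conventional asm-generic/ioctl.h encoding that such a packer typically implements is shown below; the bit widths are an assumption about this module's `IOC`, not taken from it:

# Conventional Linux ioctl request layout (assumed): number (8 bits),
# type (8 bits), size (14 bits), direction (2 bits), packed low to high.
_IOC_NRBITS, _IOC_TYPEBITS, _IOC_SIZEBITS = 8, 8, 14
_IOC_NRSHIFT = 0
_IOC_TYPESHIFT = _IOC_NRSHIFT + _IOC_NRBITS
_IOC_SIZESHIFT = _IOC_TYPESHIFT + _IOC_TYPEBITS
_IOC_DIRSHIFT = _IOC_SIZESHIFT + _IOC_SIZEBITS

def IOC(dir, type, nr, size):
    # Pack direction, type, number, and argument size into one request int.
    return ((dir << _IOC_DIRSHIFT) | (type << _IOC_TYPESHIFT) |
            (nr << _IOC_NRSHIFT) | (size << _IOC_SIZESHIFT))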
<SYSTEM_TASK:> Get a path including only the trailing `num` directories. <END_TASK> <USER_TASK:> Description: def _get_last_dirs(path, num=1): """Get a path including only the trailing `num` directories. Returns ------- last_path : str """
head, tail = os.path.split(path)
last_path = str(tail)
for ii in range(num):
    head, tail = os.path.split(head)
    last_path = os.path.join(tail, last_path)

last_path = "..." + last_path
return last_path
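For example (the path is illustrative):

print(_get_last_dirs('/data/catalogs/sne/output', num=2))
# -> '...catalogs/sne/output'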
<SYSTEM_TASK:> Run the analysis routines determined from the given `args`. <END_TASK> <USER_TASK:> Description: def analyze(self, args): """Run the analysis routines determined from the given `args`. """
self.log.info("Running catalog analysis") if args.count: self.count() return
<SYSTEM_TASK:> Analyze the counts of ...things. <END_TASK> <USER_TASK:> Description: def count(self): """Analyze the counts of ...things. Returns ------- retvals : dict Dictionary of 'property-name: counts' pairs for further processing """
self.log.info("Running 'count'") retvals = {} # Numbers of 'tasks' num_tasks = self._count_tasks() retvals['num_tasks'] = num_tasks # Numbers of 'files' num_files = self._count_repo_files() retvals['num_files'] = num_files return retvals
<SYSTEM_TASK:> Count the number of tasks, both in the json and directory. <END_TASK> <USER_TASK:> Description: def _count_tasks(self): """Count the number of tasks, both in the json and directory. Returns ------- num_tasks : int The total number of all tasks included in the `tasks.json` file. """
self.log.warning("Tasks:") tasks, task_names = self.catalog._load_task_list_from_file() # Total number of all tasks num_tasks = len(tasks) # Number which are active by default num_tasks_act = len([tt for tt, vv in tasks.items() if vv.active]) # Number of python files in the tasks directory num_task_files = os.path.join(self.catalog.PATHS.tasks_dir, '*.py') num_task_files = len(glob(num_task_files)) tasks_str = "{} ({} default active) with {} task-files.".format( num_tasks, num_tasks_act, num_task_files) self.log.warning(tasks_str) return num_tasks
<SYSTEM_TASK:> Count the number of files in the data repositories. <END_TASK> <USER_TASK:> Description: def _count_repo_files(self): """Count the number of files in the data repositories. `_COUNT_FILE_TYPES` are used to determine which file types are checked explicitly. `_IGNORE_FILES` determine which files are ignored in (most) counts. Returns ------- repo_files : int Total number of (non-ignored) files in all data repositories. """
self.log.warning("Files:") num_files = 0 repos = self.catalog.PATHS.get_all_repo_folders() num_type = np.zeros(len(self._COUNT_FILE_TYPES), dtype=int) num_ign = 0 for rep in repos: # Get the last portion of the filepath for this repo last_path = _get_last_dirs(rep, 2) # Get counts for different file types n_all = self._count_files_by_type(rep, '*') n_type = np.zeros(len(self._COUNT_FILE_TYPES), dtype=int) for ii, ftype in enumerate(self._COUNT_FILE_TYPES): n_type[ii] = self._count_files_by_type(rep, '*.' + ftype) # Get the number of ignored files # (total including ignore, minus 'all') n_ign = self._count_files_by_type(rep, '*', ignore=False) n_ign -= n_all f_str = self._file_nums_str(n_all, n_type, n_ign) f_str = "{}: {}".format(last_path, f_str) self.log.warning(f_str) # Update cumulative counts num_files += n_all num_type += n_type num_ign += n_ign f_str = self._file_nums_str(num_files, num_type, num_ign) self.log.warning(f_str) return num_files
<SYSTEM_TASK:> Construct a string showing the number of different file types. <END_TASK> <USER_TASK:> Description: def _file_nums_str(self, n_all, n_type, n_ign): """Construct a string showing the number of different file types. Returns ------- f_str : str """
# 'other' is the difference between all and named
n_oth = n_all - np.sum(n_type)

f_str = "{} Files".format(n_all) + " ("
if len(n_type):
    f_str += ", ".join("{} {}".format(name, num) for name, num in
                       zip(self._COUNT_FILE_TYPES, n_type))
    f_str += ", "
f_str += "other {}; {} ignored)".format(n_oth, n_ign)
return f_str
<SYSTEM_TASK:> Count files in the given path, with the given pattern. <END_TASK> <USER_TASK:> Description: def _count_files_by_type(self, path, pattern, ignore=True): """Count files in the given path, with the given pattern. If `ignore = True`, skip files in the `_IGNORE_FILES` list. Returns ------- num_files : int """
# Get all files matching the given path and pattern
files = glob(os.path.join(path, pattern))
# Count the files
files = [ff for ff in files
         if os.path.split(ff)[-1] not in self._IGNORE_FILES or
         not ignore]
num_files = len(files)
return num_files
<SYSTEM_TASK:> Return the path that this Entry should be saved to. <END_TASK> <USER_TASK:> Description: def _get_save_path(self, bury=False): """Return the path that this Entry should be saved to."""
filename = self.get_filename(self[self._KEYS.NAME])

# Put objects that shouldn't belong in this catalog in the boneyard
if bury:
    outdir = self.catalog.get_repo_boneyard()
# Get normal repository save directory
else:
    repo_folders = self.catalog.PATHS.get_repo_output_folders()
    # If no repo folders exist, raise an error -- cannot save
    if not len(repo_folders):
        err_str = (
            "No output data repositories found. Cannot save.\n"
            "Make sure that repo names are correctly configured "
            "in the `input/repos.json` file, and either manually or "
            "automatically (using `astrocats CATALOG git-clone`) "
            "clone the appropriate data repositories.")
        self.catalog.log.error(err_str)
        raise RuntimeError(err_str)
    outdir = repo_folders[0]

return outdir, filename
<SYSTEM_TASK:> Convert the object into a plain OrderedDict. <END_TASK> <USER_TASK:> Description: def _ordered(self, odict): """Convert the object into a plain OrderedDict."""
ndict = OrderedDict()

if isinstance(odict, CatDict) or isinstance(odict, Entry):
    key = odict.sort_func
else:
    key = None

nkeys = list(sorted(odict.keys(), key=key))
for key in nkeys:
    if isinstance(odict[key], OrderedDict):
        odict[key] = self._ordered(odict[key])
    if isinstance(odict[key], list):
        if (not (odict[key] and
                 not isinstance(odict[key][0], OrderedDict))):
            nlist = []
            for item in odict[key]:
                if isinstance(item, OrderedDict):
                    nlist.append(self._ordered(item))
                else:
                    nlist.append(item)
            odict[key] = nlist
    ndict[key] = odict[key]

return ndict
<SYSTEM_TASK:> Return a unique hash associated with the listed keys. <END_TASK> <USER_TASK:> Description: def get_hash(self, keys=[]): """Return a unique hash associated with the listed keys."""
if not len(keys):
    keys = list(self.keys())
string_rep = ''
oself = self._ordered(deepcopy(self))
for key in keys:
    string_rep += json.dumps(oself.get(key, ''), sort_keys=True)
return hashlib.sha512(string_rep.encode()).hexdigest()[:16]
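A usage sketch (the `entry` variable stands in for any loaded `Entry`): the digest is deterministic for a fixed key set, so it can flag changes between saves.

h1 = entry.get_hash(keys=['name', 'sources'])  # `entry` is hypothetical
h2 = entry.get_hash(keys=['name', 'sources'])
assert h1 == h2 and len(h1) == 16  # truncated SHA-512 hex digest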
<SYSTEM_TASK:> Clean quantity value before it is added to entry. <END_TASK> <USER_TASK:> Description: def _clean_quantity(self, quantity): """Clean quantity value before it is added to entry."""
value = quantity.get(QUANTITY.VALUE, '').strip()
error = quantity.get(QUANTITY.E_VALUE, '').strip()
unit = quantity.get(QUANTITY.U_VALUE, '').strip()
kind = quantity.get(QUANTITY.KIND, '')

if isinstance(kind, list) and not isinstance(kind, string_types):
    kind = [x.strip() for x in kind]
else:
    kind = kind.strip()

if not value:
    return False

if is_number(value):
    value = '%g' % Decimal(value)
if error:
    error = '%g' % Decimal(error)

if value:
    quantity[QUANTITY.VALUE] = value
if error:
    quantity[QUANTITY.E_VALUE] = error
if unit:
    quantity[QUANTITY.U_VALUE] = unit
if kind:
    quantity[QUANTITY.KIND] = kind

return True
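The `'%g' % Decimal(value)` step is what normalizes numeric strings; a quick standalone sketch of its effect:

from decimal import Decimal

print('%g' % Decimal('0012.3400'))  # -> '12.34' (leading/trailing zeros stripped)
print('%g' % Decimal('1e-07'))      # -> '1e-07' (exponent form kept)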
<SYSTEM_TASK:> Check that a source exists and that a quantity isn't erroneous. <END_TASK> <USER_TASK:> Description: def _check_cat_dict_source(self, cat_dict_class, key_in_self, **kwargs): """Check that a source exists and that a quantity isn't erroneous."""
# Make sure that a source is given
source = kwargs.get(cat_dict_class._KEYS.SOURCE, None)
if source is None:
    raise CatDictError(
        "{}: `source` must be provided!".format(self[self._KEYS.NAME]),
        warn=True)
# Check that source is a list of integers
for x in source.split(','):
    if not is_integer(x):
        raise CatDictError(
            "{}: `source` is a comma-delimited list of "
            "integers!".format(self[self._KEYS.NAME]),
            warn=True)
# If this source/data is erroneous, skip it
if self.is_erroneous(key_in_self, source):
    self._log.info("This source is erroneous, skipping")
    return None
# If this source/data is private, skip it
if (self.catalog.args is not None and not self.catalog.args.private and
        self.is_private(key_in_self, source)):
    self._log.info("This source is private, skipping")
    return None
return source
<SYSTEM_TASK:> Add a `CatDict` to this `Entry`. <END_TASK> <USER_TASK:> Description: def _add_cat_dict(self, cat_dict_class, key_in_self, check_for_dupes=True, compare_to_existing=True, **kwargs): """Add a `CatDict` to this `Entry`. CatDict only added if initialization succeeds and it doesn't already exist within the Entry. """
# Make sure that a source is given, and is valid (not erroneous)
if cat_dict_class != Error:
    try:
        source = self._check_cat_dict_source(cat_dict_class, key_in_self,
                                             **kwargs)
    except CatDictError as err:
        if err.warn:
            self._log.info("'{}' Not adding '{}': '{}'".format(
                self[self._KEYS.NAME], key_in_self, str(err)))
        return False

    if source is None:
        return False

# Try to create a new instance of this subclass of `CatDict`
new_entry = self._init_cat_dict(cat_dict_class, key_in_self, **kwargs)
if new_entry is None:
    return False

# Compare this new entry with all previous entries to make sure it is new
if compare_to_existing and cat_dict_class != Error:
    for item in self.get(key_in_self, []):
        if new_entry.is_duplicate_of(item):
            item.append_sources_from(new_entry)
            # Return the entry in case we want to use any additional
            # tags to augment the old entry
            return new_entry

# If this is an alias, add it to the parent catalog's reverse
# dictionary linking aliases to names for fast lookup.
if key_in_self == self._KEYS.ALIAS:
    # Check if adding this alias makes us a dupe; if so, mark
    # ourselves as a dupe.
    if (check_for_dupes and 'aliases' in dir(self.catalog) and
            new_entry[QUANTITY.VALUE] in self.catalog.aliases):
        possible_dupe = self.catalog.aliases[new_entry[QUANTITY.VALUE]]
        # print(possible_dupe)
        if (possible_dupe != self[self._KEYS.NAME] and
                possible_dupe in self.catalog.entries):
            self.dupe_of.append(possible_dupe)
    if 'aliases' in dir(self.catalog):
        self.catalog.aliases[new_entry[QUANTITY.VALUE]] = self[
            self._KEYS.NAME]

self.setdefault(key_in_self, []).append(new_entry)

if (key_in_self == self._KEYS.ALIAS and check_for_dupes and
        self.dupe_of):
    self.merge_dupes()

return True
<SYSTEM_TASK:> Construct a new `Entry` instance from an input file. <END_TASK> <USER_TASK:> Description: def init_from_file(cls, catalog, name=None, path=None, clean=False, merge=True, pop_schema=True, ignore_keys=[], compare_to_existing=True, try_gzip=False, filter_on={}): """Construct a new `Entry` instance from an input file. The input file can be given explicitly by `path`, or a path will be constructed appropriately if possible. Arguments --------- catalog : `astrocats.catalog.catalog.Catalog` instance The parent catalog object of which this entry belongs. name : str or 'None' The name of this entry, e.g. `SN1987A` for a `Supernova` entry. If no `path` is given, a path is constructed by trying to find a file in one of the 'output' repositories with this `name`. note: either `name` or `path` must be provided. path : str or 'None' The absolutely path of the input file. note: either `name` or `path` must be provided. clean : bool Whether special sanitization processing should be done on the input data. This is mostly for input files from the 'internal' repositories. """
if not catalog:
    from astrocats.catalog.catalog import Catalog
    log = logging.getLogger()
    catalog = Catalog(None, log)

catalog.log.debug("init_from_file()")
if name is None and path is None:
    err = ("Either entry `name` or `path` must be specified to load "
           "entry.")
    catalog.log.error(err)
    raise ValueError(err)

# If the path is given, use that to load from
load_path = ''
if path is not None:
    load_path = path
    name = ''
# If the name is given, try to find a path for it
else:
    repo_paths = catalog.PATHS.get_repo_output_folders()
    for rep in repo_paths:
        filename = cls.get_filename(name)
        newpath = os.path.join(rep, filename + '.json')
        if os.path.isfile(newpath):
            load_path = newpath
            break

if not load_path or not os.path.isfile(load_path):
    # FIX: is this warning worthy?
    return None

# Create a new `Entry` instance
new_entry = cls(catalog, name)

# Check if .gz file
if try_gzip and not load_path.endswith('.gz'):
    try_gzip = False

# Fill it with data from json file
new_entry._load_data_from_json(
    load_path,
    clean=clean,
    merge=merge,
    pop_schema=pop_schema,
    ignore_keys=ignore_keys,
    compare_to_existing=compare_to_existing,
    gzip=try_gzip,
    filter_on=filter_on)

return new_entry
<SYSTEM_TASK:> Add an alias, optionally 'cleaning' the alias string. <END_TASK> <USER_TASK:> Description: def add_alias(self, alias, source, clean=True): """Add an alias, optionally 'cleaning' the alias string. Calls the parent `catalog` method `clean_entry_name` - to apply the same name-cleaning as is applied to entry names themselves. Returns ------- alias : str The stored version of the alias (cleaned or not). """
if clean:
    alias = self.catalog.clean_entry_name(alias)
self.add_quantity(self._KEYS.ALIAS, alias, source)
return alias
<SYSTEM_TASK:> Add an `Error` instance to this entry. <END_TASK> <USER_TASK:> Description: def add_error(self, value, **kwargs): """Add an `Error` instance to this entry."""
kwargs.update({ERROR.VALUE: value})
self._add_cat_dict(Error, self._KEYS.ERRORS, **kwargs)
return
<SYSTEM_TASK:> Add a `Photometry` instance to this entry. <END_TASK> <USER_TASK:> Description: def add_photometry(self, compare_to_existing=True, **kwargs): """Add a `Photometry` instance to this entry."""
self._add_cat_dict(
    Photometry,
    self._KEYS.PHOTOMETRY,
    compare_to_existing=compare_to_existing,
    **kwargs)
return
<SYSTEM_TASK:> Merge two entries that correspond to the same entry. <END_TASK> <USER_TASK:> Description: def merge_dupes(self): """Merge two entries that correspond to the same entry."""
for dupe in self.dupe_of:
    if dupe in self.catalog.entries:
        if self.catalog.entries[dupe]._stub:
            # merge = False to avoid infinite recursion
            self.catalog.load_entry_from_name(
                dupe, delete=True, merge=False)
        self.catalog.copy_entry_to_entry(self.catalog.entries[dupe],
                                         self)
        del self.catalog.entries[dupe]
self.dupe_of = []
<SYSTEM_TASK:> Add an `Quantity` instance to this entry. <END_TASK> <USER_TASK:> Description: def add_quantity(self, quantities, value, source, check_for_dupes=True, compare_to_existing=True, **kwargs): """Add an `Quantity` instance to this entry."""
success = True
for quantity in listify(quantities):
    kwargs.update({QUANTITY.VALUE: value, QUANTITY.SOURCE: source})
    cat_dict = self._add_cat_dict(
        Quantity,
        quantity,
        compare_to_existing=compare_to_existing,
        check_for_dupes=check_for_dupes,
        **kwargs)
    if isinstance(cat_dict, CatDict):
        self._append_additional_tags(quantity, source, cat_dict)
        success = False

return success
<SYSTEM_TASK:> Add a source that refers to the catalog itself. <END_TASK> <USER_TASK:> Description: def add_self_source(self): """Add a source that refers to the catalog itself. For now this points to the Open Supernova Catalog by default. """
return self.add_source(
    bibcode=self.catalog.OSC_BIBCODE,
    name=self.catalog.OSC_NAME,
    url=self.catalog.OSC_URL,
    secondary=True)
<SYSTEM_TASK:> Add a `Source` instance to this entry. <END_TASK> <USER_TASK:> Description: def add_source(self, allow_alias=False, **kwargs): """Add a `Source` instance to this entry."""
if not allow_alias and SOURCE.ALIAS in kwargs:
    err_str = "`{}` passed in kwargs, this shouldn't happen!".format(
        SOURCE.ALIAS)
    self._log.error(err_str)
    raise RuntimeError(err_str)

# Set alias number to be +1 of current number of sources
if SOURCE.ALIAS not in kwargs:
    kwargs[SOURCE.ALIAS] = str(self.num_sources() + 1)
source_obj = self._init_cat_dict(Source, self._KEYS.SOURCES, **kwargs)
if source_obj is None:
    return None

for item in self.get(self._KEYS.SOURCES, ''):
    if source_obj.is_duplicate_of(item):
        return item[item._KEYS.ALIAS]

self.setdefault(self._KEYS.SOURCES, []).append(source_obj)
return source_obj[source_obj._KEYS.ALIAS]
<SYSTEM_TASK:> Add a `Model` instance to this entry. <END_TASK> <USER_TASK:> Description: def add_model(self, allow_alias=False, **kwargs): """Add a `Model` instance to this entry."""
if not allow_alias and MODEL.ALIAS in kwargs:
    err_str = "`{}` passed in kwargs, this shouldn't happen!".format(
        MODEL.ALIAS)
    self._log.error(err_str)
    raise RuntimeError(err_str)

# Set alias number to be +1 of current number of models
if MODEL.ALIAS not in kwargs:
    kwargs[MODEL.ALIAS] = str(self.num_models() + 1)
model_obj = self._init_cat_dict(Model, self._KEYS.MODELS, **kwargs)
if model_obj is None:
    return None

for item in self.get(self._KEYS.MODELS, ''):
    if model_obj.is_duplicate_of(item):
        return item[item._KEYS.ALIAS]

self.setdefault(self._KEYS.MODELS, []).append(model_obj)
return model_obj[model_obj._KEYS.ALIAS]
<SYSTEM_TASK:> Add a `Spectrum` instance to this entry. <END_TASK> <USER_TASK:> Description: def add_spectrum(self, compare_to_existing=True, **kwargs): """Add a `Spectrum` instance to this entry."""
spec_key = self._KEYS.SPECTRA
# Make sure that a source is given, and is valid (not erroneous)
source = self._check_cat_dict_source(Spectrum, spec_key, **kwargs)
if source is None:
    return None

# Try to create a new instance of `Spectrum`
new_spectrum = self._init_cat_dict(Spectrum, spec_key, **kwargs)
if new_spectrum is None:
    return None

is_dupe = False
for item in self.get(spec_key, []):
    # Only the `filename` should be compared for duplicates. If a
    # duplicate is found, that means the previous `exclude` array
    # should be saved to the new object, and the old deleted
    if new_spectrum.is_duplicate_of(item):
        if SPECTRUM.EXCLUDE in new_spectrum:
            item[SPECTRUM.EXCLUDE] = new_spectrum[SPECTRUM.EXCLUDE]
        elif SPECTRUM.EXCLUDE in item:
            item.update(new_spectrum)
        is_dupe = True
        break

if not is_dupe:
    self.setdefault(spec_key, []).append(new_spectrum)
return
<SYSTEM_TASK:> Check that the entry has the required fields. <END_TASK> <USER_TASK:> Description: def check(self): """Check that the entry has the required fields."""
# Make sure there is a schema key in dict
if self._KEYS.SCHEMA not in self:
    self[self._KEYS.SCHEMA] = self.catalog.SCHEMA.URL
# Make sure there is a name key in dict
if (self._KEYS.NAME not in self or
        len(self[self._KEYS.NAME]) == 0):
    raise ValueError("Entry name is empty:\n\t{}".format(
        json.dumps(self, indent=2)))
return
<SYSTEM_TASK:> Retrieve the aliases of this object as a list of strings. <END_TASK> <USER_TASK:> Description: def get_aliases(self, includename=True): """Retrieve the aliases of this object as a list of strings. Arguments --------- includename : bool Include the 'name' parameter in the list of aliases. """
# Empty list if the key doesn't exist
alias_quanta = self.get(self._KEYS.ALIAS, [])
aliases = [aq[QUANTITY.VALUE] for aq in alias_quanta]
if includename and self[self._KEYS.NAME] not in aliases:
    aliases = [self[self._KEYS.NAME]] + aliases
return aliases
<SYSTEM_TASK:> Given an alias, find the corresponding source in this entry. <END_TASK> <USER_TASK:> Description: def get_source_by_alias(self, alias): """Given an alias, find the corresponding source in this entry. If the given alias doesn't exist (e.g. there are no sources), then a `ValueError` is raised. Arguments --------- alias : str The str-integer (e.g. '8') of the target source. Returns ------- source : `astrocats.catalog.source.Source` object The source object corresponding to the passed alias. """
for source in self.get(self._KEYS.SOURCES, []):
    if source[self._KEYS.ALIAS] == alias:
        return source
raise ValueError("Source '{}': alias '{}' not found!".format(
    self[self._KEYS.NAME], alias))
<SYSTEM_TASK:> Get a new `Entry` which contains the 'stub' of this one. <END_TASK> <USER_TASK:> Description: def get_stub(self): """Get a new `Entry` which contains the 'stub' of this one. The 'stub' is only the name and aliases. Usage: ----- To convert a normal entry into a stub (for example), overwrite the entry in place, i.e. >>> entries[name] = entries[name].get_stub() Returns ------- stub : `astrocats.catalog.entry.Entry` subclass object The type of the returned object is this instance's type. """
stub = type(self)(self.catalog, self[self._KEYS.NAME], stub=True)
if self._KEYS.ALIAS in self:
    stub[self._KEYS.ALIAS] = self[self._KEYS.ALIAS]
if self._KEYS.DISTINCT_FROM in self:
    stub[self._KEYS.DISTINCT_FROM] = self[self._KEYS.DISTINCT_FROM]
if self._KEYS.RA in self:
    stub[self._KEYS.RA] = self[self._KEYS.RA]
if self._KEYS.DEC in self:
    stub[self._KEYS.DEC] = self[self._KEYS.DEC]
if self._KEYS.DISCOVER_DATE in self:
    stub[self._KEYS.DISCOVER_DATE] = self[self._KEYS.DISCOVER_DATE]
if self._KEYS.SOURCES in self:
    stub[self._KEYS.SOURCES] = self[self._KEYS.SOURCES]
return stub
<SYSTEM_TASK:> Check if attribute has been marked as being erroneous. <END_TASK> <USER_TASK:> Description: def is_erroneous(self, field, sources): """Check if attribute has been marked as being erroneous."""
if self._KEYS.ERRORS in self:
    my_errors = self[self._KEYS.ERRORS]
    for alias in sources.split(','):
        source = self.get_source_by_alias(alias)
        bib_err_values = [
            err[ERROR.VALUE] for err in my_errors
            if err[ERROR.KIND] == SOURCE.BIBCODE and
            err[ERROR.EXTRA] == field
        ]
        if (SOURCE.BIBCODE in source and
                source[SOURCE.BIBCODE] in bib_err_values):
            return True

        name_err_values = [
            err[ERROR.VALUE] for err in my_errors
            if err[ERROR.KIND] == SOURCE.NAME and
            err[ERROR.EXTRA] == field
        ]
        if (SOURCE.NAME in source and
                source[SOURCE.NAME] in name_err_values):
            return True

return False
<SYSTEM_TASK:> Check if attribute is private. <END_TASK> <USER_TASK:> Description: def is_private(self, key, sources): """Check if attribute is private."""
# aliases are always public.
if key == ENTRY.ALIAS:
    return False
return all([
    SOURCE.PRIVATE in self.get_source_by_alias(x)
    for x in sources.split(',')
])
<SYSTEM_TASK:> Write entry to JSON file in the proper location. <END_TASK> <USER_TASK:> Description: def save(self, bury=False, final=False): """Write entry to JSON file in the proper location. Arguments --------- bury : bool final : bool If this is the 'final' save, perform additional sanitization and cleaning operations. """
outdir, filename = self._get_save_path(bury=bury)

if final:
    self.sanitize()

# FIX: use 'dump' not 'dumps'
jsonstring = json.dumps(
    {self[self._KEYS.NAME]: self._ordered(self)},
    indent='\t' if sys.version_info[0] >= 3 else 4,
    separators=(',', ':'),
    ensure_ascii=False)
if not os.path.isdir(outdir):
    raise RuntimeError("Output directory '{}' for event '{}' does "
                       "not exist.".format(outdir,
                                           self[self._KEYS.NAME]))
save_name = os.path.join(outdir, filename + '.json')
with codecs.open(save_name, 'w', encoding='utf8') as sf:
    sf.write(jsonstring)
if not os.path.exists(save_name):
    raise RuntimeError("File '{}' was not saved!".format(save_name))

return save_name
<SYSTEM_TASK:> Used to sort keys when writing Entry to JSON format. <END_TASK> <USER_TASK:> Description: def sort_func(self, key): """Used to sort keys when writing Entry to JSON format. Should be supplemented/overridden by inheriting classes. """
if key == self._KEYS.SCHEMA:
    return 'aaa'
if key == self._KEYS.NAME:
    return 'aab'
if key == self._KEYS.SOURCES:
    return 'aac'
if key == self._KEYS.ALIAS:
    return 'aad'
if key == self._KEYS.MODELS:
    return 'aae'
if key == self._KEYS.PHOTOMETRY:
    return 'zzy'
if key == self._KEYS.SPECTRA:
    return 'zzz'
return key
<SYSTEM_TASK:> Set photometry dictionary from a flux density measurement. <END_TASK> <USER_TASK:> Description: def set_pd_mag_from_flux_density(photodict, fd='', efd='', lefd='', uefd='', sig=DEFAULT_UL_SIGMA): """Set photometry dictionary from a flux density measurement. `fd` is assumed to be in microjanskys. """
with localcontext() as ctx:
    if lefd == '' or uefd == '':
        lefd = efd
        uefd = efd

    prec = max(
        get_sig_digits(str(fd), strip_zeroes=False),
        get_sig_digits(str(lefd), strip_zeroes=False),
        get_sig_digits(str(uefd), strip_zeroes=False)) + 1
    ctx.prec = prec
    dlefd = Decimal(str(lefd))
    duefd = Decimal(str(uefd))
    if fd != '':
        dfd = Decimal(str(fd))
    dsig = Decimal(str(sig))

    if fd == '' or float(fd) < DEFAULT_UL_SIGMA * float(uefd):
        photodict[PHOTOMETRY.UPPER_LIMIT] = True
        photodict[PHOTOMETRY.UPPER_LIMIT_SIGMA] = str(sig)
        photodict[PHOTOMETRY.MAGNITUDE] = str(Decimal('23.9') - D25 * (
            dsig * duefd).log10())
        if fd:
            photodict[PHOTOMETRY.E_UPPER_MAGNITUDE] = str(D25 * (
                (dfd + duefd).log10() - dfd.log10()))
    else:
        photodict[PHOTOMETRY.MAGNITUDE] = str(Decimal('23.9') - D25 *
                                              dfd.log10())
        photodict[PHOTOMETRY.E_UPPER_MAGNITUDE] = str(D25 * (
            (dfd + duefd).log10() - dfd.log10()))
        photodict[PHOTOMETRY.E_LOWER_MAGNITUDE] = str(D25 * (
            dfd.log10() - (dfd - dlefd).log10()))
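The `23.9` zero point is the AB-magnitude conversion for flux densities in microjanskys, i.e. m = 23.9 - 2.5 log10(fd / 1 uJy). A numeric sanity-check sketch (assuming `D25` is `Decimal('2.5')`):

from decimal import Decimal

fd = Decimal('3631e6')  # 3631 Jy expressed in uJy: the AB zero-flux density
mag = Decimal('23.9') - Decimal('2.5') * fd.log10()
print(mag)  # -> approximately 0, as expected for the AB zero point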
<SYSTEM_TASK:> Check that entry attributes are legal. <END_TASK> <USER_TASK:> Description: def _check(self): """Check that entry attributes are legal."""
# Run the super method
super(Photometry, self)._check()

err_str = None
has_flux = self._KEYS.FLUX in self
has_flux_dens = self._KEYS.FLUX_DENSITY in self
has_u_flux = self._KEYS.U_FLUX in self
has_u_flux_dens = self._KEYS.U_FLUX_DENSITY in self

has_freq = self._KEYS.FREQUENCY in self
has_band = self._KEYS.BAND in self
has_ener = self._KEYS.ENERGY in self
has_u_freq = self._KEYS.U_FREQUENCY in self
has_u_ener = self._KEYS.U_ENERGY in self

if has_flux or has_flux_dens:
    if not any([has_freq, has_band, has_ener]):
        err_str = ("Has `{}` or `{}`".format(
            self._KEYS.FLUX, self._KEYS.FLUX_DENSITY) +
            " but None of `{}`, `{}`, `{}`".format(
                self._KEYS.FREQUENCY, self._KEYS.BAND,
                self._KEYS.ENERGY))
    elif has_flux and not has_u_flux:
        err_str = "`{}` provided without `{}`.".format(
            self._KEYS.FLUX, self._KEYS.U_FLUX)
    elif has_flux_dens and not has_u_flux_dens:
        err_str = "`{}` provided without `{}`.".format(
            self._KEYS.FLUX_DENSITY, self._KEYS.U_FLUX_DENSITY)
    elif has_freq and not has_u_freq:
        err_str = "`{}` provided without `{}`.".format(
            self._KEYS.FREQUENCY, self._KEYS.U_FREQUENCY)
    elif has_ener and not has_u_ener:
        err_str = "`{}` provided without `{}`.".format(
            self._KEYS.ENERGY, self._KEYS.U_ENERGY)

if err_str is not None:
    raise ValueError(err_str)

return
<SYSTEM_TASK:> Specify order for attributes. <END_TASK> <USER_TASK:> Description: def sort_func(self, key): """Specify order for attributes."""
if key == self._KEYS.TIME:
    return 'aaa'
if key == self._KEYS.MODEL:
    return 'zzy'
if key == self._KEYS.SOURCE:
    return 'zzz'
return key
<SYSTEM_TASK:> return all instances by user name, perm name and resource id <END_TASK> <USER_TASK:> Description: def by_resource_user_and_perm( cls, user_id, perm_name, resource_id, db_session=None ): """ return all instances by user name, perm name and resource id :param user_id: :param perm_name: :param resource_id: :param db_session: :return: """
db_session = get_db_session(db_session)
query = db_session.query(cls.model).filter(cls.model.user_id == user_id)
query = query.filter(cls.model.resource_id == resource_id)
query = query.filter(cls.model.perm_name == perm_name)
return query.first()
<SYSTEM_TASK:> Get the next sensor while iterating. <END_TASK> <USER_TASK:> Description: def tdSensor(self): """Get the next sensor while iterating. :return: a dict with the keys: protocol, model, id, datatypes. """
protocol = create_string_buffer(20)
model = create_string_buffer(20)
sid = c_int()
datatypes = c_int()
self._lib.tdSensor(protocol, sizeof(protocol), model, sizeof(model),
                   byref(sid), byref(datatypes))
return {'protocol': self._to_str(protocol),
        'model': self._to_str(model),
        'id': sid.value,
        'datatypes': datatypes.value}
<SYSTEM_TASK:> Get the sensor value for a given sensor. <END_TASK> <USER_TASK:> Description: def tdSensorValue(self, protocol, model, sid, datatype): """Get the sensor value for a given sensor. :return: a dict with the keys: value, timestamp. """
value = create_string_buffer(20)
timestamp = c_int()
self._lib.tdSensorValue(protocol, model, sid, datatype,
                        value, sizeof(value), byref(timestamp))
return {'value': self._to_str(value),
        'timestamp': timestamp.value}
<SYSTEM_TASK:> Get the next controller while iterating. <END_TASK> <USER_TASK:> Description: def tdController(self): """Get the next controller while iterating. :return: a dict with the keys: id, type, name, available. """
cid = c_int()
ctype = c_int()
name = create_string_buffer(255)
available = c_int()
self._lib.tdController(byref(cid), byref(ctype), name, sizeof(name),
                       byref(available))
return {'id': cid.value,
        'type': ctype.value,
        'name': self._to_str(name),
        'available': available.value}
<SYSTEM_TASK:> This function handles attaching model to service if model has one specified <END_TASK> <USER_TASK:> Description: def ziggurat_model_init( user=None, group=None, user_group=None, group_permission=None, user_permission=None, user_resource_permission=None, group_resource_permission=None, resource=None, external_identity=None, *args, **kwargs ): """ This function handles attaching model to service if model has one specified as `_ziggurat_service`, Also attached a proxy object holding all model definitions that services might use :param args: :param kwargs: :param passwordmanager, the password manager to override default one :param passwordmanager_schemes, list of schemes for default passwordmanager to use :return: """
models = ModelProxy()
models.User = user
models.Group = group
models.UserGroup = user_group
models.GroupPermission = group_permission
models.UserPermission = user_permission
models.UserResourcePermission = user_resource_permission
models.GroupResourcePermission = group_resource_permission
models.Resource = resource
models.ExternalIdentity = external_identity

model_service_mapping = import_model_service_mappings()

if kwargs.get("passwordmanager"):
    user.passwordmanager = kwargs["passwordmanager"]
else:
    user.passwordmanager = make_passwordmanager(
        kwargs.get("passwordmanager_schemes")
    )

for name, cls in models.items():
    # if the model has a manager attached, attach the class to the manager
    services = model_service_mapping.get(name, [])
    for service in services:
        setattr(service, "model", cls)
        setattr(service, "models_proxy", models)
<SYSTEM_TASK:> Show messages for the given query or day. <END_TASK> <USER_TASK:> Description: def messages(request, year=None, month=None, day=None, template="gnotty/messages.html"): """ Show messages for the given query or day. """
query = request.REQUEST.get("q") prev_url, next_url = None, None messages = IRCMessage.objects.all() if hide_joins_and_leaves(request): messages = messages.filter(join_or_leave=False) if query: search = Q(message__icontains=query) | Q(nickname__icontains=query) messages = messages.filter(search).order_by("-message_time") elif year and month and day: messages = messages.filter(message_time__year=year, message_time__month=month, message_time__day=day) day_delta = timedelta(days=1) this_date = date(int(year), int(month), int(day)) prev_date = this_date - day_delta next_date = this_date + day_delta prev_url = reverse("gnotty_day", args=prev_date.timetuple()[:3]) next_url = reverse("gnotty_day", args=next_date.timetuple()[:3]) else: return redirect("gnotty_year", year=datetime.now().year) context = dict(settings) context["messages"] = messages context["prev_url"] = prev_url context["next_url"] = next_url return render(request, template, context)
<SYSTEM_TASK:> Deletes all expired mutex locks if a ttl is provided. <END_TASK> <USER_TASK:> Description: def delete_expired_locks(self): """ Deletes all expired mutex locks if a ttl is provided. """
ttl_seconds = self.get_mutex_ttl_seconds()
if ttl_seconds is not None:
    DBMutex.objects.filter(
        creation_time__lte=timezone.now() - timedelta(seconds=ttl_seconds)
    ).delete()
<SYSTEM_TASK:> Acquires the db mutex lock. Takes the necessary steps to delete any stale locks. <END_TASK> <USER_TASK:> Description: def start(self): """ Acquires the db mutex lock. Takes the necessary steps to delete any stale locks. Throws a DBMutexError if it can't acquire the lock. """
# Delete any expired locks first
self.delete_expired_locks()

try:
    with transaction.atomic():
        self.lock = DBMutex.objects.create(lock_id=self.lock_id)
except IntegrityError:
    raise DBMutexError('Could not acquire lock: {0}'.format(self.lock_id))
<SYSTEM_TASK:> Releases the db mutex lock. Throws an error if the lock was released before the function finished. <END_TASK> <USER_TASK:> Description: def stop(self): """ Releases the db mutex lock. Throws an error if the lock was released before the function finished. """
if not DBMutex.objects.filter(id=self.lock.id).exists():
    raise DBMutexTimeoutError(
        'Lock {0} expired before function completed'.format(self.lock_id))
else:
    self.lock.delete()
<SYSTEM_TASK:> Decorates a function with the db_mutex decorator by using this class as a context manager around <END_TASK> <USER_TASK:> Description: def decorate_callable(self, func): """ Decorates a function with the db_mutex decorator by using this class as a context manager around it. """
def wrapper(*args, **kwargs):
    try:
        with self:
            result = func(*args, **kwargs)
        return result
    except DBMutexError as e:
        if self.suppress_acquisition_exceptions:
            LOG.error(e)
        else:
            raise e

functools.update_wrapper(wrapper, func)
return wrapper
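A usage sketch of the two entry points this enables; the `db_mutex` alias and the lock ids below are illustrative:

@db_mutex('nightly-import')  # hypothetical decorator alias for this class
def nightly_import():
    pass  # only one process at a time can run this body

with db_mutex('rebuild-index'):  # same guarantee, as a context manager
    pass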
<SYSTEM_TASK:> Default groupfinder implementaion for pyramid applications <END_TASK> <USER_TASK:> Description: def groupfinder(userid, request): """ Default groupfinder implementaion for pyramid applications :param userid: :param request: :return: """
if userid and hasattr(request, "user") and request.user:
    groups = ["group:%s" % g.id for g in request.user.groups]
    return groups
return []
<SYSTEM_TASK:> Iterate through the nodes or edges and apply the forces directly to the node objects. <END_TASK> <USER_TASK:> Description: def apply_repulsion(repulsion, nodes, barnes_hut_optimize=False, region=None, barnes_hut_theta=1.2): """ Iterate through the nodes or edges and apply the forces directly to the node objects. """
if not barnes_hut_optimize:
    for i in range(0, len(nodes)):
        for j in range(0, i):
            repulsion.apply_node_to_node(nodes[i], nodes[j])
else:
    for i in range(0, len(nodes)):
        region.apply_force(nodes[i], repulsion, barnes_hut_theta)
<SYSTEM_TASK:> Iterate through the nodes or edges and apply the gravity directly to the node objects. <END_TASK> <USER_TASK:> Description: def apply_gravity(repulsion, nodes, gravity, scaling_ratio): """ Iterate through the nodes or edges and apply the gravity directly to the node objects. """
for i in range(0, len(nodes)):
    repulsion.apply_gravitation(nodes[i], gravity / scaling_ratio)
<SYSTEM_TASK:> return by user and permission name <END_TASK> <USER_TASK:> Description: def by_user_and_perm(cls, user_id, perm_name, db_session=None): """ return by user and permission name :param user_id: :param perm_name: :param db_session: :return: """
db_session = get_db_session(db_session)
query = db_session.query(cls.model).filter(cls.model.user_id == user_id)
query = query.filter(cls.model.perm_name == perm_name)
return query.first()
<SYSTEM_TASK:> Checks if cls node has parent with subclass_name. <END_TASK> <USER_TASK:> Description: def node_is_subclass(cls, *subclass_names): """Checks if cls node has parent with subclass_name."""
if not isinstance(cls, (ClassDef, Instance)):
    return False

# if cls.bases == YES:
#     return False
for base_cls in cls.bases:
    try:
        for inf in base_cls.inferred():  # pragma no branch
            if inf.qname() in subclass_names:
                return True

            if inf != cls and node_is_subclass(  # pragma no branch
                    inf, *subclass_names):
                # check up the hierarchy in case we are a subclass of
                # a subclass of a subclass ...
                return True
    except InferenceError:  # pragma no cover
        continue

return False
<SYSTEM_TASK:> Checks if a call to a field instance method is valid. A call is <END_TASK> <USER_TASK:> Description: def is_field_method(node): """Checks if a call to a field instance method is valid. A call is valid if the call is a method of the underlying type. So, in a StringField the methods from str are valid, in a ListField the methods from list are valid and so on..."""
name = node.attrname
parent = node.last_child()
inferred = safe_infer(parent)
if not inferred:
    return False

for cls_name, inst in FIELD_TYPES.items():
    if node_is_instance(inferred, cls_name) and hasattr(inst, name):
        return True

return False
<SYSTEM_TASK:> Supposes that node is a mongoengine field in a class and tries to <END_TASK> <USER_TASK:> Description: def get_node_parent_class(node): """Supposes that node is a mongoengine field in a class and tries to get its parent class"""
while node.parent:  # pragma no branch
    if isinstance(node, ClassDef):
        return node
    node = node.parent
<SYSTEM_TASK:> node is a class attribute that is a mongoengine. Returns <END_TASK> <USER_TASK:> Description: def get_field_definition(node): """"node is a class attribute that is a mongoengine. Returns the definition statement for the attribute """
name = node.attrname
cls = get_node_parent_class(node)
definition = cls.lookup(name)[1][0].statement()
return definition
<SYSTEM_TASK:> Returns de ClassDef for the related embedded document in a <END_TASK> <USER_TASK:> Description: def get_field_embedded_doc(node): """Returns de ClassDef for the related embedded document in a embedded document field."""
definition = get_field_definition(node)
cls_name = definition.last_child().last_child()
cls = next(cls_name.infer())
return cls
<SYSTEM_TASK:> Checks if a node is a valid field or method in a embedded document. <END_TASK> <USER_TASK:> Description: def node_is_embedded_doc_attr(node): """Checks if a node is a valid field or method in a embedded document. """
embedded_doc = get_field_embedded_doc(node.last_child())
name = node.attrname
try:
    r = bool(embedded_doc.lookup(name)[1][0])
except IndexError:
    r = False
return r
<SYSTEM_TASK:> This is the method in ``SimpleIRCClient`` that all IRC events <END_TASK> <USER_TASK:> Description: def _dispatcher(self, connection, event): """ This is the method in ``SimpleIRCClient`` that all IRC events get passed through. Here we map events to our own custom event handlers, and call them. """
super(BaseBot, self)._dispatcher(connection, event)
for handler in self.events[event.eventtype()]:
    handler(self, connection, event)
<SYSTEM_TASK:> We won't receive our own messages, so log them manually. <END_TASK> <USER_TASK:> Description: def message_channel(self, message): """ We won't receive our own messages, so log them manually. """
self.log(None, message)
super(BaseBot, self).message_channel(message)
<SYSTEM_TASK:> Log any public messages, and also handle the command event. <END_TASK> <USER_TASK:> Description: def on_pubmsg(self, connection, event): """ Log any public messages, and also handle the command event. """
for message in event.arguments():
    self.log(event, message)
    command_args = list(filter(None, message.split()))
    command_name = command_args.pop(0)
    for handler in self.events["command"]:
        if handler.event.args["command"] == command_name:
            self.handle_command_event(event, handler, command_args)
<SYSTEM_TASK:> Command handler - treats each word in the message <END_TASK> <USER_TASK:> Description: def handle_command_event(self, event, command, args): """ Command handler - treats each word in the message that triggered the command as an argument to the command, and does some validation to ensure that the number of arguments match. """
argspec = getargspec(command)
num_all_args = len(argspec.args) - 2  # Ignore self/event args
num_pos_args = num_all_args - len(argspec.defaults or [])
if num_pos_args <= len(args) <= num_all_args:
    response = command(self, event, *args)
elif num_all_args == num_pos_args:
    s = "s are" if num_all_args != 1 else " is"
    response = "%s arg%s required" % (num_all_args, s)
else:
    bits = (num_pos_args, num_all_args)
    response = "between %s and %s args are required" % bits
response = "%s: %s" % (self.get_nickname(event), response)
self.message_channel(response)
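To make the arity check concrete, here is a standalone sketch of the same counting logic; the `greet` command is a made-up example, and `getfullargspec` stands in for `getargspec`, which was removed in Python 3.11:

from inspect import getfullargspec

def greet(self, event, name, greeting="hi"):  # hypothetical command
    return "%s, %s" % (greeting, name)

spec = getfullargspec(greet)
num_all = len(spec.args) - 2                  # 2: name and greeting
num_pos = num_all - len(spec.defaults or [])  # 1: only name is required
print(num_pos, num_all)  # calls with 1 or 2 args would be accepted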
<SYSTEM_TASK:> Runs each timer handler in a separate greenlet thread. <END_TASK> <USER_TASK:> Description: def handle_timer_event(self, handler): """ Runs each timer handler in a separate greenlet thread. """
while True:
    handler(self)
    sleep(handler.event.args["seconds"])
<SYSTEM_TASK:> Webhook handler - each handler for the webhook event <END_TASK> <USER_TASK:> Description: def handle_webhook_event(self, environ, url, params): """ Webhook handler - each handler for the webhook event takes an initial pattern argument for matching the URL requested. Here we match the URL to the pattern for each webhook handler, and bail out if it returns a response. """
for handler in self.events["webhook"]: urlpattern = handler.event.args["urlpattern"] if not urlpattern or match(urlpattern, url): response = handler(self, environ, url, params) if response: return response
<SYSTEM_TASK:> Create the correct device instance based on device type and return it. <END_TASK> <USER_TASK:> Description: def DeviceFactory(id, lib=None): """Create the correct device instance based on device type and return it. :return: a :class:`Device` or :class:`DeviceGroup` instance. """
lib = lib or Library()
if lib.tdGetDeviceType(id) == const.TELLSTICK_TYPE_GROUP:
    return DeviceGroup(id, lib=lib)
return Device(id, lib=lib)
<SYSTEM_TASK:> Dispatch a single callback in the current thread. <END_TASK> <USER_TASK:> Description: def process_callback(self, block=True): """Dispatch a single callback in the current thread. :param boolean block: If True, blocks waiting for a callback to come. :return: True if a callback was processed; otherwise False. """
try:
    (callback, args) = self._queue.get(block=block)
    try:
        callback(*args)
    finally:
        self._queue.task_done()
except queue.Empty:
    return False
return True
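A usage sketch for draining the queue without blocking (the `client` name is illustrative):

while client.process_callback(block=False):  # `client` is hypothetical
    pass  # each iteration dispatches one queued (callback, args) pair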
<SYSTEM_TASK:> Return all known devices. <END_TASK> <USER_TASK:> Description: def devices(self): """Return all known devices. :return: list of :class:`Device` or :class:`DeviceGroup` instances. """
devices = []
count = self.lib.tdGetNumberOfDevices()
for i in range(count):
    device = DeviceFactory(self.lib.tdGetDeviceId(i), lib=self.lib)
    devices.append(device)
return devices
<SYSTEM_TASK:> Return all known sensors. <END_TASK> <USER_TASK:> Description: def sensors(self): """Return all known sensors. :return: list of :class:`Sensor` instances. """
sensors = []
try:
    while True:
        sensor = self.lib.tdSensor()
        sensors.append(Sensor(lib=self.lib, **sensor))
except TelldusError as e:
    if e.error != const.TELLSTICK_ERROR_DEVICE_NOT_FOUND:
        raise
return sensors
<SYSTEM_TASK:> Return all known controllers. <END_TASK> <USER_TASK:> Description: def controllers(self): """Return all known controllers. Requires Telldus core library version >= 2.1.2. :return: list of :class:`Controller` instances. """
controllers = []
try:
    while True:
        controller = self.lib.tdController()
        del controller["name"]
        del controller["available"]
        controllers.append(Controller(lib=self.lib, **controller))
except TelldusError as e:
    if e.error != const.TELLSTICK_ERROR_NOT_FOUND:
        raise
return controllers
<SYSTEM_TASK:> Add a new device group. <END_TASK> <USER_TASK:> Description: def add_group(self, name, devices): """Add a new device group. :return: a :class:`DeviceGroup` instance. """
device = self.add_device(name, "group") device.add_to_group(devices) return device
<SYSTEM_TASK:> Connect a controller. <END_TASK> <USER_TASK:> Description: def connect_controller(self, vid, pid, serial): """Connect a controller."""
self.lib.tdConnectTellStickController(vid, pid, serial)
<SYSTEM_TASK:> Disconnect a controller. <END_TASK> <USER_TASK:> Description: def disconnect_controller(self, vid, pid, serial): """Disconnect a controller."""
self.lib.tdDisconnectTellStickController(vid, pid, serial)
<SYSTEM_TASK:> Get dict with all set parameters. <END_TASK> <USER_TASK:> Description: def parameters(self): """Get dict with all set parameters."""
parameters = {}
for name in self.PARAMETERS:
    try:
        parameters[name] = self.get_parameter(name)
    except AttributeError:
        # Parameter not set on this device; skip it.
        pass
return parameters
<SYSTEM_TASK:> Fetch list of devices in group. <END_TASK> <USER_TASK:> Description: def devices_in_group(self): """Fetch list of devices in group."""
try:
    devices = self.get_parameter('devices')
except AttributeError:
    return []
ctor = DeviceFactory
return [ctor(int(x), lib=self.lib) for x in devices.split(',') if x]
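Continuing the sketch above, membership can be read back from the group device (assuming `group` is the DeviceGroup created earlier):

for member in group.devices_in_group():
    print(member.id, member.name)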
<SYSTEM_TASK:> Move any off curves at the end of the contour <END_TASK> <USER_TASK:> Description: def _prepPointsForSegments(points): """ Move any off curves at the end of the contour to the beginning of the contour. This makes segmentation easier. """
while True:
    point = points[-1]
    if point.segmentType:
        break
    # Rotate the trailing off-curve to the front of the list.
    points.insert(0, points.pop())
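An illustrative demo with a minimal stand-in point class; only `segmentType` matters to this helper, the real point objects carry more attributes:

class FakePoint(object):
    def __init__(self, segmentType):
        self.segmentType = segmentType
    def __repr__(self):
        return "<%s>" % self.segmentType

points = [FakePoint("curve"), FakePoint(None), FakePoint(None)]
_prepPointsForSegments(points)
print(points)   # [<None>, <None>, <curve>]: trailing off-curves moved to the front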
<SYSTEM_TASK:> Reverse the points. This differs from the <END_TASK> <USER_TASK:> Description: def _reversePoints(points): """ Reverse the points. This differs from the reversal point pen in RoboFab in that it doesn't worry about maintaining the start point position. That has no benefit within the context of this module. """
# copy the points
points = _copyPoints(points)
# find the first on curve type and recycle
# it for the last on curve type
firstOnCurve = None
for index, point in enumerate(points):
    if point.segmentType is not None:
        firstOnCurve = index
        break
lastSegmentType = points[firstOnCurve].segmentType
# reverse the points
points = reversed(points)
# work through the reversed remaining points
final = []
for point in points:
    segmentType = point.segmentType
    if segmentType is not None:
        point.segmentType = lastSegmentType
        lastSegmentType = segmentType
    final.append(point)
# move any offcurves at the end of the points
# to the start of the points
_prepPointsForSegments(final)
# done
return final
<SYSTEM_TASK:> Finds a t value on a curve from a point. <END_TASK> <USER_TASK:> Description: def _tValueForPointOnCubicCurve(point, cubicCurve, isHorizontal=0): """ Finds a t value on a curve from a point. The point must originally lie on the curve. This will only back-trace the t value needed to split the curve into parts. """
pt1, pt2, pt3, pt4 = cubicCurve
a, b, c, d = bezierTools.calcCubicParameters(pt1, pt2, pt3, pt4)
solutions = bezierTools.solveCubic(a[isHorizontal], b[isHorizontal],
                                   c[isHorizontal], d[isHorizontal] - point[isHorizontal])
solutions = [t for t in solutions if 0 <= t < 1]
if not solutions and not isHorizontal:
    # It can happen that a horizontal line doesn't intersect; try the vertical axis.
    return _tValueForPointOnCubicCurve(point, (pt1, pt2, pt3, pt4), isHorizontal=1)
if len(solutions) > 1:
    # Multiple candidate t values: keep the one closest to the given point.
    intersectionLengths = {}
    for t in solutions:
        tp = _getCubicPoint(t, pt1, pt2, pt3, pt4)
        dist = _distance(tp, point)
        intersectionLengths[dist] = t
    minDist = min(intersectionLengths.keys())
    solutions = [intersectionLengths[minDist]]
return solutions
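A hedged round trip using the helpers above together with fontTools' bezierTools (already imported by this module); the curve coordinates are arbitrary:

from fontTools.misc import bezierTools

curve = ((0, 0), (50, 100), (150, 100), (200, 0))
point = _getCubicPoint(0.25, *curve)                  # a point known to lie on the curve
t = _tValueForPointOnCubicCurve(point, curve)[0]      # back-traces t, ~0.25
firstHalf, secondHalf = bezierTools.splitCubicAtT(*curve, t)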
<SYSTEM_TASK:> Scale points and optionally convert them to integers. <END_TASK> <USER_TASK:> Description: def _scalePoints(points, scale=1, convertToInteger=True): """ Scale points and optionally convert them to integers. """
if convertToInteger:
    points = [
        (int(round(x * scale)), int(round(y * scale)))
        for (x, y) in points
    ]
else:
    points = [(x * scale, y * scale) for (x, y) in points]
return points
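A quick demo of the two modes, assuming `_scalePoints` as defined above:

print(_scalePoints([(1.0, 2.5)], scale=2))                           # [(2, 5)]
print(_scalePoints([(1.0, 2.5)], scale=2, convertToInteger=False))   # [(2.0, 5.0)]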
<SYSTEM_TASK:> Scale a single point <END_TASK> <USER_TASK:> Description: def _scaleSinglePoint(point, scale=1, convertToInteger=True): """ Scale a single point """
x, y = point
if convertToInteger:
    return int(round(x * scale)), int(round(y * scale))
else:
    return (x * scale, y * scale)
<SYSTEM_TASK:> Estimate the length of this curve by iterating <END_TASK> <USER_TASK:> Description: def _estimateCubicCurveLength(pt0, pt1, pt2, pt3, precision=10): """ Estimate the length of this curve by iterating through it and summing the lengths of the flat segments. """
points = []
length = 0
step = 1.0 / precision
factors = range(0, precision + 1)
for i in factors:
    points.append(_getCubicPoint(i * step, pt0, pt1, pt2, pt3))
for i in range(len(points) - 1):
    pta = points[i]
    ptb = points[i + 1]
    length += _distance(pta, ptb)
return length
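A hedged convergence check against fontTools' closed-form arc length; this assumes a fontTools version that provides calcCubicArcLength:

from fontTools.misc import bezierTools

curve = ((0, 0), (50, 100), (150, 100), (200, 0))
reference = bezierTools.calcCubicArcLength(*curve)
for precision in (4, 10, 100):
    estimate = _estimateCubicCurveLength(*curve, precision=precision)
    print(precision, estimate, reference - estimate)   # error shrinks as precision grows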
<SYSTEM_TASK:> Split the segment according to the t values <END_TASK> <USER_TASK:> Description: def split(self, tValues): """ Split the segment according to the t values """
if self.segmentType == "curve": on1 = self.previousOnCurve off1 = self.points[0].coordinates off2 = self.points[1].coordinates on2 = self.points[2].coordinates return bezierTools.splitCubicAtT(on1, off1, off2, on2, *tValues) elif self.segmentType == "line": segments = [] x1, y1 = self.previousOnCurve x2, y2 = self.points[0].coordinates dx = x2 - x1 dy = y2 - y1 pp = x1, y1 for t in tValues: np = (x1+dx*t, y1+dy*t) segments.append([pp, np]) pp = np segments.append([pp, (x2, y2)]) return segments elif self.segmentType == "qcurve": raise NotImplementedError else: raise NotImplementedError
<SYSTEM_TASK:> Return a list of normalized InputPoint objects <END_TASK> <USER_TASK:> Description: def getData(self): """ Return a list of normalized InputPoint objects for the contour drawn with this pen. """
# organize the points into segments
# 1. make sure there is an on curve
haveOnCurve = False
for point in self._points:
    if point.segmentType is not None:
        haveOnCurve = True
        break
# 2. move the off curves to front of the list
if haveOnCurve:
    _prepPointsForSegments(self._points)
# 3. ignore double points on start and end
firstPoint = self._points[0]
lastPoint = self._points[-1]
if firstPoint.segmentType is not None and lastPoint.segmentType is not None:
    if firstPoint.coordinates == lastPoint.coordinates:
        if firstPoint.segmentType in ["line", "move"]:
            del self._points[0]
        else:
            raise AssertionError("Unhandled point type sequence")
# done
return self._points
<SYSTEM_TASK:> Match if entire input contour matches entire output contour, <END_TASK> <USER_TASK:> Description: def reCurveFromEntireInputContour(self, inputContour): """ Match if entire input contour matches entire output contour, allowing for different start point. """
if self.clockwise:
    inputFlat = inputContour.clockwiseFlat
else:
    inputFlat = inputContour.counterClockwiseFlat
outputFlat = []
for segment in self.segments:
    # XXX this could be expensive
    assert segment.segmentType == "flat"
    outputFlat += segment.points
# test lengths
haveMatch = False
if len(inputFlat) == len(outputFlat):
    if inputFlat == outputFlat:
        haveMatch = True
    else:
        inputStart = inputFlat[0]
        if inputStart in outputFlat:
            # there should be only one occurrence of the point
            # but handle it just in case
            if outputFlat.count(inputStart) > 1:
                startIndexes = [index for index, point in enumerate(outputFlat) if point == inputStart]
            else:
                startIndexes = [outputFlat.index(inputStart)]
            # slice and dice to test possible orders
            for startIndex in startIndexes:
                test = outputFlat[startIndex:] + outputFlat[:startIndex]
                if inputFlat == test:
                    haveMatch = True
                    break
if haveMatch:
    # clear out the flat points
    self.segments = []
    # replace with the appropriate points from the input
    if self.clockwise:
        inputSegments = inputContour.clockwiseSegments
    else:
        inputSegments = inputContour.counterClockwiseSegments
    for inputSegment in inputSegments:
        self.segments.append(
            OutputSegment(
                segmentType=inputSegment.segmentType,
                points=[
                    OutputPoint(
                        coordinates=point.coordinates,
                        segmentType=point.segmentType,
                        smooth=point.smooth,
                        name=point.name,
                        kwargs=point.kwargs
                    )
                    for point in inputSegment.points
                ],
                final=True
            )
        )
        inputSegment.used = True
    # reset the direction of the final contour
    self.clockwise = inputContour.clockwise
    return True
return False
<SYSTEM_TASK:> Checks if a function definition is a queryset manager created <END_TASK> <USER_TASK:> Description: def _is_custom_qs_manager(funcdef): """Checks if a function definition is a queryset manager created with the @queryset_manager decorator."""
decors = getattr(funcdef, 'decorators', None)
if decors:
    for dec in decors.get_children():
        try:
            if dec.name == 'queryset_manager':  # pragma no branch
                return True
        except AttributeError:
            continue
return False
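For reference, the decorator this checker detects is MongoEngine's queryset_manager; a typical definition that the helper above would recognize:

from mongoengine import Document, StringField, queryset_manager

class Post(Document):
    title = StringField()

    @queryset_manager
    def live_posts(doc_cls, queryset):
        # A custom manager: returns a filtered queryset instead of documents.
        return queryset.filter(title__ne="")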
<SYSTEM_TASK:> Checks if the call is being done to a custom queryset manager. <END_TASK> <USER_TASK:> Description: def _is_call2custom_manager(node): """Checks if the call is being done to a custom queryset manager."""
called = safe_infer(node.func)
funcdef = getattr(called, '_proxied', None)
return _is_custom_qs_manager(funcdef)
<SYSTEM_TASK:> Checks if the attribute is a valid attribute for a queryset manager. <END_TASK> <USER_TASK:> Description: def _is_custom_manager_attribute(node): """Checks if the attribute is a valid attribute for a queryset manager. """
attrname = node.attrname
if not name_is_from_qs(attrname):
    return False
for attr in node.get_children():
    inferred = safe_infer(attr)
    funcdef = getattr(inferred, '_proxied', None)
    if _is_custom_qs_manager(funcdef):
        return True
return False
<SYSTEM_TASK:> Fetch a group permission by group id and permission name. <END_TASK> <USER_TASK:> Description: def by_group_and_perm(cls, group_id, perm_name, db_session=None): """ Fetch a group permission by group id and permission name. :param group_id: id of the group :param perm_name: name of the permission :param db_session: optional db session to use :return: first matching permission row, or None """
db_session = get_db_session(db_session)
query = db_session.query(cls.model).filter(cls.model.group_id == group_id)
query = query.filter(cls.model.perm_name == perm_name)
return query.first()
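A hedged usage sketch; the service class name and the active SQLAlchemy `session` are assumptions (the method matches the shape of ziggurat_foundations' GroupPermissionService):

perm = GroupPermissionService.by_group_and_perm(
    group_id=1, perm_name="root_administration", db_session=session)
if perm is not None:
    print(perm.perm_name)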