text_prompt
stringlengths
100
17.7k
code_prompt
stringlengths
7
9.86k
def load_environment_vars(self):
    """
    Looks for any MACH9_ prefixed environment variables and applies
    them to the configuration if present.
    """
    # Scan the process environment for MACH9_-prefixed settings.
    for name, value in os.environ.items():
        if name.startswith(MACH9_PREFIX):
            # Strip the prefix; the remainder is the configuration key.
            _, config_key = name.split(MACH9_PREFIX, 1)
            self[config_key] = value
def copy(self, parent=None):
    """Copies an existing structure and all of it's children"""
    duplicate = Structure(None, parent=parent)
    # Mirror the scalar attributes of this node.
    duplicate.key = self.key
    duplicate.type_ = self.type_
    duplicate.val_guaranteed = self.val_guaranteed
    duplicate.key_guaranteed = self.key_guaranteed
    # Recursively copy children, re-parenting them to the new node.
    for child in self.children:
        duplicate.children.append(child.copy(duplicate))
    return duplicate
def generation(self):
    """Returns the number of ancestors that are dictionaries"""
    if not self.parent:
        # Root node: no ancestors at all.
        return 0
    # Count the parent itself only when it is a dict node, then recurse.
    increment = 1 if self.parent.is_dict else 0
    return increment + self.parent.generation
def type_string(self):
    """Returns a string representing the type of the structure"""
    # A leading '*' marks values that are not guaranteed to be present.
    prefix = '' if self.val_guaranteed else '*'
    if self.is_tuple:
        # Tuples render each element type, comma separated, in parens.
        subtypes = [item.type_string for item in self.children]
        return '{}({})'.format(prefix, ', '.join(subtypes))
    if self.is_list:
        # Lists render their (single) element type in brackets.
        return '{}[{}]'.format(prefix, self.children[0].type_string)
    # Scalar: just the Python type name.
    return '{}{}'.format(prefix, self.type_.__name__)
def set_field(obj, field_name, value):
    """Fancy setattr with debugging.

    Compares old and new values through normalized string/pk
    representations and, only when they differ, sets the attribute and
    appends a change record to a list stored on ``obj`` under the
    ``DIRTY`` attribute name.
    """
    old = getattr(obj, field_name)
    field = obj._meta.get_field(field_name)
    # is_relation is Django 1.8 only
    if field.is_relation:
        # If field_name is the `_id` field, then there is no 'pk' attr and
        # old/value *is* the pk
        old_repr = None if old is None else getattr(old, 'pk', old)
        new_repr = None if value is None else getattr(value, 'pk', value)
    elif field.__class__.__name__ == 'DateTimeField':
        # Datetimes are compared through a normalized representation.
        old_repr = None if old is None else datetime_repr(old)
        new_repr = None if value is None else datetime_repr(value)
    else:
        # All other field types: plain string comparison.
        old_repr = None if old is None else str(old)
        new_repr = None if value is None else str(value)
    if old_repr != new_repr:
        setattr(obj, field_name, value)
        # Lazily create the per-instance change log on first change.
        if not hasattr(obj, DIRTY):
            setattr(obj, DIRTY, [])
        getattr(obj, DIRTY).append(dict(
            field_name=field_name,
            old_value=old_repr,
            new_value=new_repr,
        ))
def obj_update(obj, data: dict, *, update_fields=UNSET, save: bool=True) -> bool:
    """
    Fancy way to update `obj` with `data` dict.

    Parameters
    ----------
    obj : Django model instance
    data
        The data to update ``obj`` with
    update_fields
        Use your ``update_fields`` instead of our generated one.
        If you need an auto_now or auto_now_add field to get updated,
        set this to ``None`` to get the default Django behavior.
    save
        If save=False, then don't actually save. This can be useful if you
        just want to utilize the verbose logging.
        DEPRECRATED in favor of the more standard ``update_fields=[]``

    Returns
    -------
    bool
        True if data changed
    """
    # Route every assignment through set_field so changes are tracked
    # on obj under the DIRTY attribute.
    for field_name, value in data.items():
        set_field(obj, field_name, value)
    dirty_data = getattr(obj, DIRTY, None)
    if not dirty_data:
        # Nothing actually changed: no save, no logging.
        return False
    logger.debug(
        human_log_formatter(dirty_data),
        extra={
            'model': obj._meta.object_name,
            'pk': obj.pk,
            'changes': json_log_formatter(dirty_data),
        }
    )
    if update_fields == UNSET:
        # Default: persist exactly the fields that changed.
        update_fields = list(map(itemgetter('field_name'), dirty_data))
    if not save:
        # Legacy no-op save: an empty update_fields makes Django's
        # save() skip the database write.
        update_fields = ()
    obj.save(update_fields=update_fields)
    # Clear the change log so subsequent updates start clean.
    delattr(obj, DIRTY)
    return True
def detect_scheme(filename):
    """Detects partitioning scheme of the source

    Args:
        filename (str): path to file or device for detection of \
        partitioning scheme.

    Returns:
        SCHEME_MBR, SCHEME_GPT or SCHEME_UNKNOWN

    Raises:
        IOError: The file doesn't exist or cannot be opened for reading

    >>> from rawdisk.scheme.common import *
    >>> scheme = detect_scheme('/dev/disk1')
    >>> if scheme == PartitionScheme.SCHEME_MBR:
    >>>     <...>
    """
    logger = logging.getLogger(__name__)
    logger.info('Detecting partitioning scheme')
    with open(filename, 'rb') as f:
        # Look for MBR signature first
        f.seek(mbr.MBR_SIG_OFFSET)
        data = f.read(mbr.MBR_SIG_SIZE)
        # MBR signature is a little-endian 16-bit value (0x55AA on disk).
        signature = struct.unpack("<H", data)[0]
        if signature != mbr.MBR_SIGNATURE:
            # Something else
            logger.debug('Unknown partitioning scheme')
            return PartitionScheme.SCHEME_UNKNOWN
        else:
            # Could be MBR or GPT, look for GPT header
            # (GPT disks carry a protective MBR, so the MBR check passes too).
            f.seek(gpt.GPT_HEADER_OFFSET)
            data = f.read(gpt.GPT_SIG_SIZE)
            signature = struct.unpack("<8s", data)[0]
            if signature != gpt.GPT_SIGNATURE:
                logger.debug('MBR scheme detected')
                return PartitionScheme.SCHEME_MBR
            else:
                logger.debug('GPT scheme detected')
                return PartitionScheme.SCHEME_GPT
<SYSTEM_TASK:> Check if the file has been rolled <END_TASK> <USER_TASK:> Description: def _has_file_rolled(self): """Check if the file has been rolled"""
# if the size is smaller then before, the file has # probabilly been rolled if self._fh: size = self._getsize_of_current_file() if size < self.oldsize: return True self.oldsize = size return False
def _filehandle(self):
    """
    Return a filehandle to the file being tailed

    Reopens the file transparently when it has been rolled (rotated),
    and on the very first open seeks to the end of the file.
    """
    # if file is opened and it has been rolled we need to close the file
    # and then to reopen it
    if self._fh and self._has_file_rolled():
        try:
            self._fh.close()
        except Exception:
            # Best-effort close: a failed close must not stop the reopen.
            pass
        self._fh = None
    # if the file is closed (or has been closed right now), open it
    if not self._fh:
        self._open_file(self.filename)
        if not self.opened_before:
            # First open only: start tailing from the end of the file so
            # historical content is not replayed.
            self.opened_before = True
            self._fh.seek(0, os.SEEK_END)
    return self._fh
def get_class(class_string):
    """
    Get a class from a dotted string

    :param class_string: dotted path such as ``'collections.OrderedDict'``;
        a bare name is treated as a top-level module.
    :return: the resolved object, or ``None`` when it cannot be imported.
    """
    # FIX: the original called class_string.encode('ascii') before
    # splitting, which on Python 3 yields bytes and makes
    # bytes.split('.') raise TypeError.  Plain str.split works on both
    # Python 2 and 3 for ASCII dotted paths.
    split_string = class_string.split('.')
    import_path = '.'.join(split_string[:-1])
    class_name = split_string[-1]

    if class_name:
        try:
            if import_path:
                # Import the module, then pull the attribute off it.
                mod = __import__(import_path, globals(), {}, [class_name])
                cls = getattr(mod, class_name)
            else:
                # No dots: the string names a top-level module.
                cls = __import__(class_name, globals(), {})
            if cls:
                return cls
        except (ImportError, AttributeError):
            # Fall through to the None return on any resolution failure.
            pass
    return None
def _register_handler(event, fun, external=False):
    """Register a function to be an event handler

    :param event: event name (string) or a BaseEvent subclass
    :param fun: the callable to register for the event
    :param external: when True, register in the external handler registry
    :return: ``fun`` unchanged, so this works inside decorators
    """
    registry = core.HANDLER_REGISTRY

    if external:
        registry = core.EXTERNAL_HANDLER_REGISTRY

    # NOTE: `basestring` implies this module targets Python 2.
    if not isinstance(event, basestring):
        # If not basestring, it is a BaseEvent subclass.
        # This occurs when class methods are registered as handlers
        event = core.parse_event_to_name(event)

    # Append to an existing handler list, or start a new one.
    if event in registry:
        registry[event].append(fun)
    else:
        registry[event] = [fun]

    return fun
def handler(param):
    """Decorator that associates a handler to an event class

    This decorator works for both methods and functions. Since it only
    registers the callable object and returns it without evaluating it.

    The name param should be informed in a dotted notation and should
    contain two informations: the django app name and the class
    name. Just like this:

        >>> @handler('deal.ActionLog')
        ... def blah(data):
        ...     sys.stdout.write('I love python!\n')

    You can also use this same decorator to mark class methods as
    handlers. Just notice that the class *must* inherit from
    `BaseEvent`.

        >>> class MyEvent(BaseEvent)
        ...     @handler('deal.ActionLog')
        ...     def another_blah(data):
        ...         sys.stdout.write('Stuff!\n')
    """
    if isinstance(param, basestring):
        # Used as @handler('app.Event'): return a decorator that
        # registers the wrapped callable under that event name.
        return lambda f: _register_handler(param, f)
    else:
        # Used directly on a method: registration is deferred; the
        # method is collected here and bound to its event later.
        core.HANDLER_METHOD_REGISTRY.append(param)
        return param
def log(name, data=None):
    """Entry point for the event lib that starts the logging process

    This function uses the `name` param to find the event class that
    will be processed to log stuff. This name must provide two
    informations separated by a dot: the app name and the event class
    name. Like this:

        >>> name = 'deal.ActionLog'

    The "ActionLog" is a class declared inside the 'deal.events' module
    and this function will raise an `EventNotFoundError` error if it's
    not possible to import the right event class.

    The `data` param *must* be a dictionary, otherwise a `TypeError`
    will be rised. All keys *must* be strings and all values *must* be
    serializable by the `json.dumps` function. If you need to pass any
    unsupported object, you will have to register a serializer
    function. Consult the RFC-00003-serialize-registry for more
    information.
    """
    data = data or {}
    # Merge in library-provided defaults (e.g. timestamps) before lookup.
    data.update(core.get_default_values(data))

    # InvalidEventNameError, EventNotFoundError
    event_cls = core.find_event(name)
    event = event_cls(name, data)
    event.validate()  # ValidationError

    # Strip values the event machinery should not forward, then serialize.
    data = core.filter_data_values(data)
    data = ejson.dumps(data)  # TypeError

    # We don't use celery when developing
    if conf.getsetting('DEBUG'):
        core.process(name, data)
    else:
        tasks.process_task.delay(name, data)
def validate_keys(self, *keys):
    """Validation helper to ensure that keys are present in data

    This method makes sure that all of keys received here are present
    in the data received from the caller.

    It is better to call this method in the `validate()` method of your
    event. Not in the `clean()` one, since the first will be called
    locally, making it easier to debug things and find problems.
    """
    present = set(self.data.keys())
    required = set(keys)
    missing = required.difference(present)
    if missing:
        raise ValidationError(
            'One of the following keys are missing from the '
            'event\'s data: {}'.format(', '.join(missing))
        )
    return True
def age(self, **kwargs):
    """
    Age this particle.

    parameters (optional, only one allowed):
        days (default)
        hours
        minutes
        seconds
    """
    # Internal age is stored in days; each unit maps to a divisor that
    # converts it into days.  Order matters: 'days' wins when several
    # units are supplied, matching the documented precedence.
    conversions = (
        ('days', 1.),
        ('hours', 24.),
        ('minutes', 24. * 60.),
        ('seconds', 24. * 60. * 60.),
    )
    for unit, divisor in conversions:
        amount = kwargs.get(unit, None)
        if amount is not None:
            # Days are added as-is (no float coercion); other units divide.
            self._age += amount if divisor == 1. else amount / divisor
            return
    raise KeyError("Could not age particle, please specify 'days', 'hours', 'minutes', or 'seconds' parameter")
def normalized_indexes(self, model_timesteps):
    """
    This function will normalize the particles locations
    to the timestep of the model that was run.  This is used
    in output, as we should only be outputting the model timestep
    that was chosen to be run.

    In most cases, the length of the model_timesteps and the
    particle's locations will be the same (unless it hits shore).

    If they are not the same length pull out of locations the timesteps
    that are closest to the model_timesteps
    """
    # Clean up locations
    # If duplicate time instances, remove the lower index
    clean_locs = []
    for i, loc in enumerate(self.locations):
        try:
            if loc.time == self.locations[i + 1].time:
                continue
            else:
                clean_locs.append(loc)
        except IndexError:
            # Last location has no successor; always keep it.
            # FIX: the original caught Python 2's StandardError, which
            # is a NameError on Python 3; IndexError is the exception
            # that actually occurs here.
            clean_locs.append(loc)

    if len(clean_locs) == len(model_timesteps):
        return [ind for ind, loc in enumerate(self.locations)
                if loc in clean_locs]
    elif len(model_timesteps) < len(clean_locs):
        # We have at least one internal timestep for this particle.
        # Pull out the matching location indexes
        indexes = [ind for ind, loc in enumerate(self.locations)
                   if loc in clean_locs]
        if len(model_timesteps) == len(indexes):
            return indexes
        raise ValueError("Can't normalize")
    elif len(model_timesteps) > len(clean_locs):
        # The particle stopped before forcing for all of the model timesteps
        raise ValueError("Particle has less locations than model timesteps")
def add_edge(self, fr, to):
    """
    Add an edge to the graph.  Multiple edges between the same vertices
    will quietly be ignored.  N-partite graphs can be used to permit
    multiple edges by partitioning the graph into vertices and edges.

    :param fr: The name of the origin vertex.
    :param to: The name of the destination vertex.
    :return:
    """
    # Normalize both endpoints through add_vertex (create-or-lookup).
    origin = self.add_vertex(fr)
    destination = self.add_vertex(to)
    # Sets make repeated edges a no-op, matching the docstring.
    self.adjacency[origin].children.add(destination)
    self.adjacency[destination].parents.add(origin)
def clone(self, source_id, backup_id, size, volume_id=None,
          source_host=None):
    """
    create a volume then clone the contents of
    the backup into the new volume
    """
    # Generate a fresh volume id when the caller did not supply one.
    new_volume_id = volume_id or str(uuid.uuid4())
    payload = self.unused({
        'source_host': source_host,
        'source_volume_id': source_id,
        'backup_id': backup_id,
        'size': size
    })
    return self.http_put('/volumes/%s' % new_volume_id, params=payload)
def board_names(hwpack='arduino'):
    """return installed board names."""
    # boards() returns a mapping keyed by board name; expose the keys sorted.
    return sorted(boards(hwpack).keys())
def find_lib_dir(root):
    """search for lib dir under root.

    Returns a ``(root, lib_dir)`` tuple.  ``root`` may be unwrapped or
    renamed along the way (e.g. the single wrapper directory of a
    github archive).
    """
    root = path(root)
    log.debug('files in dir: %s', root)
    for x in root.walkfiles():
        log.debug(' %s', x)

    # only 1 dir in root? (example: github)
    if not len(root.files()) and len(root.dirs()) == 1:
        log.debug('go inside root')
        root = root.dirs()[0]

    # keywords.txt directly in root means root itself is the library.
    if len(root.files('keywords.txt')):
        root = rename_root(root)
        return root, root

    keywords = list(root.walkfiles('keywords.txt'))
    if len(keywords):
        if len(keywords) > 1:
            log.warning('more keywords.txt found. Installing only one. %s',
                        keywords)
        # The library dir is wherever the (first) keywords.txt lives.
        lib_dir = keywords[0].parent
        lib_dir = fix_libdir(lib_dir)
        return root, lib_dir

    # No keywords.txt: fall back to header/source heuristics.
    header_only = len(list(noexample(root.walkfiles('*.cpp')))) == 0
    log.debug('header_only: %s', header_only)

    lib_dir = None
    headers = list(noexample(root.walkfiles('*.h')))
    for h in headers:
        cpp = h.stripext() + '.cpp'
        # A header whose directory name matches its own name marks the lib.
        if (header_only or cpp.exists()) and h.parent.name.lower() == h.namebase.lower():
            assert not lib_dir
            lib_dir = h.parent
            log.debug('found lib: %s', lib_dir)

    if not lib_dir:
        if len(headers) == 1 and len(list(root.files('*.h'))) == 0:
            log.debug('only 1 header, not in root')
            lib_dir = headers[0].parent
            lib_dir = rename_root(lib_dir)

    if not lib_dir:
        # xxx.cpp and xxx.h in root? -> rename root dir
        root = rename_root(root)
        return root, root

    assert lib_dir
    return root, lib_dir
def move_examples(root, lib_dir):
    """find examples not under lib dir, and move into ``examples``"""
    # Sketch files anywhere under root vs. those already under lib_dir.
    all_pde = files_multi_pattern(root, INO_PATTERNS)
    lib_pde = files_multi_pattern(lib_dir, INO_PATTERNS)
    stray_pde = all_pde.difference(lib_pde)
    # Only move strays when the lib dir has no sketches of its own.
    if len(stray_pde) and not len(lib_pde):
        log.debug(
            'examples found outside lib dir, moving them: %s', stray_pde)
        examples = lib_dir / EXAMPLES
        examples.makedirs()
        for x in stray_pde:
            # Each sketch gets its own subdirectory, as the IDE expects.
            d = examples / x.namebase
            d.makedirs()
            x.move(d)
def fix_examples_dir(lib_dir):
    """rename examples dir to ``examples``"""
    # A dir already matching 'examples' (case-insensitively): nothing to do.
    # FIX: the original had a second loop with this exact condition that
    # called _fix_dir(x) — it was unreachable, because this loop returns
    # on any match first; the dead loop has been removed.
    for x in lib_dir.dirs():
        if x.name.lower() == EXAMPLES:
            return
    # A dir merely containing 'example' in its name is renamed.
    for x in lib_dir.dirs():
        if 'example' in x.name.lower():
            _fix_dir(x)
            return
    # Fall back: any dir that contains sketch files becomes the examples dir.
    for x in lib_dir.dirs():
        if len(files_multi_pattern(x, INO_PATTERNS)):
            _fix_dir(x)
            return
def install_lib(url, replace_existing=False, fix_wprogram=True):
    """install library from web or local files system.

    :param url: web address or file path
    :param replace_existing: bool
    :rtype: None
    """
    # NOTE(review): tmpdir(tmpdir()) creates a nested temporary dir —
    # presumably to get a unique empty workspace; TODO confirm.
    d = tmpdir(tmpdir())
    f = download(url)
    Archive(f).extractall(d)
    clean_dir(d)
    # Locate the actual library dir inside the extracted tree and
    # normalize its layout.
    d, src_dlib = find_lib_dir(d)
    move_examples(d, src_dlib)
    fix_examples_dir(src_dlib)
    if fix_wprogram:
        fix_wprogram_in_files(src_dlib)

    targ_dlib = libraries_dir() / src_dlib.name
    if targ_dlib.exists():
        log.debug('library already exists: %s', targ_dlib)
        if replace_existing:
            log.debug('remove %s', targ_dlib)
            targ_dlib.rmtree()
        else:
            raise ConfduinoError('library already exists:' + targ_dlib)

    log.debug('move %s -> %s', src_dlib, targ_dlib)
    src_dlib.move(targ_dlib)
    # Propagate the libraries dir's permissions onto the installed tree.
    libraries_dir().copymode(targ_dlib)
    for x in targ_dlib.walk():
        libraries_dir().copymode(x)
    return targ_dlib.name
def _init_redis(redis_spec):
    """
    Return a StrictRedis instance or None based on redis_spec.

    redis_spec may be None, a Redis URL, or a StrictRedis instance
    """
    if not redis_spec:
        return None
    if isinstance(redis_spec, six.string_types):
        # A string is interpreted as a Redis connection URL.
        return redis.StrictRedis.from_url(redis_spec)
    # Anything else is assumed to already be a usable client instance.
    return redis_spec
<SYSTEM_TASK:> if 'deploy' resolves in this environment, use the hostname for which <END_TASK> <USER_TASK:> Description: def _get_base(): """ if 'deploy' resolves in this environment, use the hostname for which that name resolves. Override with 'VELOCIRAPTOR_URL' """
try: name, _aliaslist, _addresslist = socket.gethostbyname_ex('deploy') except socket.gaierror: name = 'deploy' fallback = 'https://{name}/'.format(name=name) return os.environ.get('VELOCIRAPTOR_URL', fallback)
def load_all(cls, vr, params=None):
    """
    Create instances of all objects found
    """
    # Query the API for raw documents, then wrap each one in cls.
    documents = vr.query(cls.base, params)
    return [cls(vr, doc) for doc in documents]
def dispatch(self, **changes):
    """
    Patch the swarm with changes and then trigger the swarm.
    """
    # Persist the requested changes first.
    self.patch(**changes)
    # Then hit the swarm trigger endpoint.
    trigger_url = self._vr._build_url(self.resource_uri, 'swarm/')
    response = self._vr.session.post(trigger_url)
    response.raise_for_status()
    try:
        return response.json()
    except ValueError:
        # Not every trigger response carries a JSON body.
        return None
def collection(self, user_id):
    """
    Get the song collection of a user.

    :param user_id: ID of a user.
    :rtype: list of :class:`Song`
    """
    # TODO further evaluation of the page param, I don't know where the
    # limit is.
    method = 'userGetSongsInLibrary'
    payload = {'userID': user_id, 'page': 0}
    result = self.connection.request(
        method, payload, self.connection.header(method))
    return [Song.from_response(song, self.connection)
            for song in result[1]['Songs']]
def hwpack_names():
    """return installed hardware package names."""
    entries = [x.name for x in hwpack_dir().listdir()]
    # 'tools' is a support dir, not a hardware package.
    entries = [x for x in entries if x != 'tools']
    has_arduino = 'arduino' in entries
    others = sorted(x for x in entries if x != 'arduino')
    # The stock 'arduino' package always sorts first when present.
    return (['arduino'] + others) if has_arduino else others
def main(cls, args=None):
    """
    Fill in command-line arguments from argv

    :param cls: the twisted usage.Options subclass to instantiate
    :param args: argument list; defaults to ``sys.argv[1:]``
    :return: process exit code (0 on success)
    """
    if args is None:
        args = sys.argv[1:]

    try:
        o = cls()
        # NOTE(review): if cls() itself raised UsageError, `o` would be
        # unbound in the handler below — TODO confirm cls() cannot raise it.
        o.parseOptions(args)
    except usage.UsageError as e:
        # Bad CLI usage: show synopsis/usage plus the error, exit 1.
        print(o.getSynopsis())
        print(o.getUsage())
        print(str(e))
        return 1
    except CLIError as ce:
        # Application-level failure carries its own exit code.
        print(str(ce))
        return ce.returnCode

    return 0
def files(self):
    """List of Phasics tif file names in the input zip file"""
    if self._files is not None:
        return self._files
    # First access: build and cache the index of tif files in the archive.
    self._files = SeriesZipTifPhasics._index_files(self.path)
    return self._files
def verify(path):
    """Verify that `path` is a zip file with Phasics TIFF files"""
    valid = False
    try:
        zf = zipfile.ZipFile(path)
    except (zipfile.BadZipfile, IsADirectoryError):
        # Not a readable zip archive at all.
        pass
    else:
        # Candidate entries: Phasics phase images ("SID PHA*.tif"), sorted.
        candidates = sorted(
            nn for nn in zf.namelist()
            if nn.endswith(".tif") and nn.startswith("SID PHA"))
        for name in candidates:
            # Read the entry fully into memory so the verifier can seek.
            with zf.open(name) as pt:
                fd = io.BytesIO(pt.read())
            if SingleTifPhasics.verify(fd):
                valid = True
                break
        zf.close()
    return valid
def excepthook(type, value, traceback):  # pylint: disable=unused-argument
    """Log exceptions instead of printing a traceback to stderr.

    Installed as ``sys.excepthook``; the parameter names (including the
    shadowed builtin ``type``) follow that hook's signature.
    """
    try:
        # Re-raise so the logger captures the full traceback, then catch
        # the very same exception class again immediately.
        six.reraise(type, value, traceback)
    except type:
        _LOGGER.exception(str(value))
    if isinstance(value, KeyboardInterrupt):
        message = "Cancelling at the user's request."
    else:
        message = handle_unexpected_exception(value)
    print(message, file=sys.stderr)
def handle_unexpected_exception(exc):
    # type: (BaseException) -> str
    """Return an error message and write a log file if logging was not enabled.

    Args:
        exc: The unexpected exception.

    Returns:
        A message to display to the user concerning the unexpected
        exception.
    """
    try:
        write_logfile()
        addendum = 'Please see the log file for more information.'
    except IOError:
        addendum = 'Unable to write log file.'
    try:
        message = str(exc)
        return '{}{}{}'.format(message, '\n' if message else '', addendum)
    except Exception:  # pylint: disable=broad-except
        # str(exc) can raise for badly-behaved exception classes.
        # NOTE(review): if str(exc) raised above it will likely raise
        # here as well — TODO confirm the intended fallback.
        return str(exc)
def get_log_level(args):
    # type: (typing.Dict[str, typing.Any]) -> int
    """Get the log level from the CLI arguments.

    Removes logging arguments from sys.argv.

    Args:
        args: The parsed docopt arguments to be used to determine the
            logging level.

    Returns:
        The correct log level based on the three CLI arguments given.

    Raises:
        ValueError: Raised if the given log level is not in the
            acceptable list of values.
    """
    index = -1
    log_level = None
    if '<command>' in args and args['<command>']:
        # Only flags that appear BEFORE the subcommand are stripped from
        # sys.argv; flags after it belong to the subcommand itself.
        index = sys.argv.index(args['<command>'])
    if args.get('--debug'):
        log_level = 'DEBUG'
        if '--debug' in sys.argv and sys.argv.index('--debug') < index:
            sys.argv.remove('--debug')
        elif '-d' in sys.argv and sys.argv.index('-d') < index:
            sys.argv.remove('-d')
    elif args.get('--verbose'):
        log_level = 'INFO'
        if '--verbose' in sys.argv and sys.argv.index('--verbose') < index:
            sys.argv.remove('--verbose')
        elif '-v' in sys.argv and sys.argv.index('-v') < index:
            sys.argv.remove('-v')
    elif args.get('--log-level'):
        # --log-level takes a value: remove both the flag and the value.
        log_level = args['--log-level']
        sys.argv.remove('--log-level')
        sys.argv.remove(log_level)
    # NOTE(review): the docstring says ValueError, but the code raises a
    # project-specific InvalidLogLevelError — TODO reconcile.
    if log_level not in (None, 'DEBUG', 'INFO', 'WARN', 'ERROR'):
        raise exceptions.InvalidLogLevelError(log_level)
    return getattr(logging, log_level) if log_level else None
def _logfile_sigterm_handler(*_):
    # type: (...) -> None
    """Handle exit signals and write out a log file.

    Raises:
        SystemExit: Contains the signal as the return code.
    """
    logging.error('Received SIGTERM.')
    write_logfile()
    print('Received signal. Please see the log file for more information.',
          file=sys.stderr)
    # NOTE(review): no local `signal` is bound in this handler (the
    # parameters are discarded via *_), so this resolves to the
    # module-level name `signal` — most likely the `signal` module
    # itself, not a signal number. TODO confirm the intended exit code.
    sys.exit(signal)
def format(self, record):
    # type: (logging.LogRecord) -> str
    """Format the log record with timestamps and level based colors.

    Args:
        record: The log record to format.

    Returns:
        The formatted log record.
    """
    # Map severity to a terminal color.
    if record.levelno >= logging.ERROR:
        color = colorama.Fore.RED
    elif record.levelno >= logging.WARNING:
        color = colorama.Fore.YELLOW
    elif record.levelno >= logging.INFO:
        color = colorama.Fore.RESET
    else:
        color = colorama.Fore.CYAN
    format_template = (
        '{}{}%(levelname)s{} [%(asctime)s][%(name)s]{} %(message)s')
    if sys.stdout.isatty():
        # Interactive terminal: colorize the level name.
        self._fmt = format_template.format(
            colorama.Style.BRIGHT,
            color,
            colorama.Fore.RESET,
            colorama.Style.RESET_ALL
        )
    else:
        # Output is redirected/piped: strip all color escapes.
        self._fmt = format_template.format(*[''] * 4)
    if six.PY3:
        # On Python 3 the logging formatter reads the format from its
        # style object, so mirror the format string there.
        self._style._fmt = self._fmt  # pylint: disable=protected-access
    return super(_LogColorFormatter, self).format(record)
def ask_question(self, question_text, question=None):
    """Ask Watson a question via the Question and Answer API

    :param question_text: question to ask Watson
    :type question_text: str
    :param question: if question_text is not provided, a Question object
        representing the question to ask Watson
    :type question: WatsonQuestion
    :return: Answer
    """
    # A fully-built question object wins over the plain text form.
    if question is not None:
        q = question.to_dict()
    else:
        q = WatsonQuestion(question_text).to_dict()

    r = requests.post(self.url + '/question', json={'question': q}, headers={
        'Accept': 'application/json',
        # NOTE(review): requests expects header values to be strings; an
        # int here may fail on newer requests versions — TODO confirm.
        'X-SyncTimeout': 30
    }, auth=(self.username, self.password))
    try:
        response_json = r.json()
    except ValueError:
        raise Exception('Failed to parse response JSON')
    return WatsonAnswer(response_json)
def process_queue(queue=None, **kwargs):
    """Loops and waits on queue calling queue's `next_task` method.

    If an exception occurs, log the error, log the exception,
    and break.

    A ``None`` item on the queue is the sentinel that stops the loop.
    """
    while True:
        item = queue.get()
        if item is None:
            # Sentinel received: acknowledge it and stop processing.
            queue.task_done()
            logger.info(f"{queue}: exiting process queue.")
            break
        # NOTE(review): `filename` is never used below — the log
        # messages say "(unknown)" instead; TODO confirm intent.
        filename = os.path.basename(item)
        try:
            queue.next_task(item, **kwargs)
        except Exception as e:
            # Any failure aborts the whole loop after logging it.
            queue.task_done()
            logger.warn(f"{queue}: item=(unknown). {e}\n")
            logger.exception(e)
            sys.stdout.write(
                style.ERROR(
                    f"{queue}. item=(unknown). {e}. Exception has been logged.\n"
                )
            )
            sys.stdout.flush()
            break
        else:
            logger.info(f"{queue}: Successfully processed (unknown).\n")
            queue.task_done()
def memoize(func):
    """
    Classic memoize decorator for non-class methods

    NOTE: the cache key is built from str() of each positional argument
    joined with '__', so distinct arguments with equal string forms
    share a cache slot.
    """
    results = {}

    @functools.wraps(func)
    def inner(*args):
        cache_key = "__".join(str(a) for a in args)
        if cache_key not in results:
            results[cache_key] = func(*args)
        return results[cache_key]

    return inner
def guard(func):
    """
    Prevents the decorated function from parallel execution.

    Internally, this decorator creates a Lock object and transparently
    obtains/releases it when calling the function.
    """
    lock = threading.Lock()

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Serialize all calls through a single shared lock; the context
        # manager releases it even when func raises.
        with lock:
            return func(*args, **kwargs)

    return wrapper
def invalidate_all(self):
    """
    Remove all files caching this function
    """
    # Cache files are named '<function name>.<suffix>'.
    prefix = self.func.__name__ + "."
    for entry in os.listdir(self.cache_path):
        if entry.startswith(prefix):
            os.remove(os.path.join(self.cache_path, entry))
def ctmc(data, numstates, transintv=1.0, toltime=1e-8, debug=False):
    """Continous Time Markov Chain

    Parameters
    ----------
    data : list of lists
        A python list of N examples. The i-th example consists of one
        list with M_i encoded state labels and M_i durations (the time
        each state lasted since recording started).
    numstates : int
        number of unique states
    transintv : float
        The time interval used when exponentiating the generator matrix.
    toltime : float
        (If debug=True) an exception is thrown if the aggregated state
        duration of any state is smaller than toltime.
    debug : bool
        (Default: False) If True, run data and aggregation checks and
        raise on malformed input.

    Returns
    -------
    transmat : ndarray
        The estimated transition/stochastic matrix.
    genmat : ndarray
        The estimated generator matrix.
    transcount : ndarray
        Raw transition counts between states.
    statetime : ndarray
        Aggregated time spent in each state.

    Notes
    -----
    ctmc assumes a clean data object and does not autocorrect errors.
    Typical error sources are un-aggregated consecutive identical
    states, and states that never occur in the data. Enable debug=True
    on a small dataset first; afterwards checks can be run manually
    with ``ctmc.datacheck`` / ``ctmc.errorcheck``.
    """
    # raise an exception if the data format is wrong
    if debug:
        datacheck(data, numstates, toltime)

    # aggregate event data
    transcount, statetime = aggregateevents(data, numstates)

    # raise an exception if the event data aggregation failed
    if debug:
        errorcheck(transcount, statetime, toltime)

    # create generator matrix
    genmat = generatormatrix(transcount, statetime)

    # compute matrix exponential of the generator matrix
    transmat = scipy.linalg.expm(genmat * transintv)

    # done
    return transmat, genmat, transcount, statetime
def browse_home_listpage_url(self,
                             state=None,
                             county=None,
                             zipcode=None,
                             street=None,
                             **kwargs):
    """
    Construct an url of home list page by state, county, zipcode, street.

    Example:

    - https://www.zillow.com/browse/homes/ca/
    - https://www.zillow.com/browse/homes/ca/los-angeles-county/
    - https://www.zillow.com/browse/homes/ca/los-angeles-county/91001/
    - https://www.zillow.com/browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/
    """
    # Append each provided component, most general first; falsy
    # components are skipped.
    segments = [item for item in (state, county, zipcode, street) if item]
    url = self.domain_browse_homes
    for segment in segments:
        url = url + "/%s" % segment
    # The listing URLs always end with a trailing slash.
    return url + "/"
def _render_bundle(bundle_name):
    """
    Renders the HTML for a bundle in place - one HTML tag or many depending on settings.USE_BUNDLES
    """
    try:
        bundle = get_bundles()[bundle_name]
    except KeyError:
        raise ImproperlyConfigured("Bundle '%s' is not defined" % bundle_name)

    if bundle.use_bundle:
        # Bundled mode: one combined tag for the whole bundle.
        return _render_file(bundle.bundle_type, bundle.get_url(), attrs=({'media':bundle.media} if bundle.media else {}))

    # Render files individually
    bundle_files = []
    for bundle_file in bundle.files:
        # NOTE(review): the media condition below checks bundle.media,
        # not bundle_file.media, while the attr value uses
        # bundle_file.media — TODO confirm this asymmetry is intended.
        if bundle_file.precompile_in_debug:
            # Precompiled asset: render its precompiled URL and type.
            bundle_files.append(_render_file(bundle_file.bundle_type, bundle_file.precompile_url, attrs=({'media':bundle_file.media} if bundle.media else {})))
        else:
            bundle_files.append(_render_file(bundle_file.file_type, bundle_file.file_url, attrs=({'media':bundle_file.media} if bundle.media else {})))
    return '\n'.join(bundle_files)
def from_string(self, string_representation, resource=None):
    """
    Extracts resource data from the given string and converts them
    to a new resource or updates the given resource from it.
    """
    # Delegate to the stream-based implementation via an in-memory buffer.
    buffer = NativeIO(string_representation)
    return self.from_stream(buffer, resource=resource)
<SYSTEM_TASK:> Converts the given resource to a string representation and returns <END_TASK> <USER_TASK:> Description: def to_string(self, obj): """ Converts the given resource to a string representation and returns it. """
stream = NativeIO() self.to_stream(obj, stream) return text_(stream.getvalue(), encoding=self.encoding)
<SYSTEM_TASK:> Converts the given bytes representation to resource data. <END_TASK> <USER_TASK:> Description: def data_from_bytes(self, byte_representation): """ Converts the given bytes representation to resource data. """
text = byte_representation.decode(self.encoding) return self.data_from_string(text)
<SYSTEM_TASK:> Converts the given data element into a string representation. <END_TASK> <USER_TASK:> Description: def data_to_string(self, data_element): """ Converts the given data element into a string representation. :param data_element: object implementing :class:`everest.representers.interfaces.IExplicitDataElement` :returns: string representation (using the MIME content type configured for this representer) """
stream = NativeIO() self.data_to_stream(data_element, stream) return stream.getvalue()
<SYSTEM_TASK:> Creates a new representer for the given resource class. <END_TASK> <USER_TASK:> Description: def create_from_resource_class(cls, resource_class): """ Creates a new representer for the given resource class. The representer obtains a reference to the (freshly created or looked up) mapping for the resource class. """
mp_reg = get_mapping_registry(cls.content_type) mp = mp_reg.find_or_create_mapping(resource_class) return cls(resource_class, mp)
<SYSTEM_TASK:> Creates a data element reading a representation from the given stream. <END_TASK> <USER_TASK:> Description: def data_from_stream(self, stream): """ Creates a data element reading a representation from the given stream. :returns: object implementing :class:`everest.representers.interfaces.IExplicitDataElement` """
parser = self._make_representation_parser(stream, self.resource_class, self._mapping) return parser.run()
<SYSTEM_TASK:> Writes the given data element to the given stream. <END_TASK> <USER_TASK:> Description: def data_to_stream(self, data_element, stream): """ Writes the given data element to the given stream. """
# Build a generator bound to this representer's resource class and
# mapping, then have it write the data element to the stream.
rep_generator = self._make_representation_generator(stream,
                                                    self.resource_class,
                                                    self._mapping)
rep_generator.run(data_element)
<SYSTEM_TASK:> Converts the given data element to a resource. <END_TASK> <USER_TASK:> Description: def resource_from_data(self, data_element, resource=None): """ Converts the given data element to a resource. :param data_element: object implementing :class:`everest.representers.interfaces.IExplicitDataElement` """
# Delegate to the mapping, which knows how to turn a data element tree
# into a new resource (or update the one passed in).
return self._mapping.map_to_resource(data_element, resource=resource)
<SYSTEM_TASK:> Configures the options and attribute options of the mapping associated <END_TASK> <USER_TASK:> Description: def configure(self, options=None, attribute_options=None): # pylint: disable=W0221 """ Configures the options and attribute options of the mapping associated with this representer with the given dictionaries. :param dict options: configuration options for the mapping associated with this representer. :param dict attribute_options: attribute options for the mapping associated with this representer. """
# Forward both configuration dictionaries to the underlying mapping.
self._mapping.update(options=options,
                     attribute_options=attribute_options)
<SYSTEM_TASK:> Returns a context in which this representer is updated with the <END_TASK> <USER_TASK:> Description: def with_updated_configuration(self, options=None, attribute_options=None): """ Returns a context in which this representer is updated with the given options and attribute options. """
# The mapping supplies the context manager; presumably the configuration
# changes are scoped to the context -- confirm in the mapping implementation.
return self._mapping.with_updated_configuration(options=options,
                                                attribute_options=
                                                    attribute_options)
<SYSTEM_TASK:> Removes old temp files that is older than expiration_hours. <END_TASK> <USER_TASK:> Description: def remove_old_tmp_files(profiles=None, max_lifetime=(7 * 24)): """ Removes old temp files that is older than expiration_hours. If profiles is None then will be use all profiles. """
# NOTE(review): assert is stripped under -O; consider raising TypeError.
assert isinstance(profiles, (list, tuple)) or profiles is None
if profiles is None:
    profiles = dju_settings.DJU_IMG_UPLOAD_PROFILES.keys()
# The 'default' profile is always scanned, in addition to those requested.
profiles = set(('default',) + tuple(profiles))
total = removed = 0
# Files whose embedded timestamp predates this cutoff are deleted.
old_dt = datetime.datetime.utcnow() - datetime.timedelta(hours=max_lifetime)
for profile in profiles:
    conf = get_profile_configs(profile=profile)
    root_path = os.path.join(settings.MEDIA_ROOT, dju_settings.DJU_IMG_UPLOAD_SUBDIR, conf['PATH'])
    for file_path in get_files_recursive(root_path):
        # Only files matching the temp-file naming pattern are candidates.
        m = re_tmp.match(os.path.basename(file_path))
        if m is None:
            continue
        total += 1
        # Timestamp is parsed out of the filename; unparseable names are kept.
        fdt = dtstr_to_datetime(m.group('dtstr'))
        if fdt and old_dt > fdt:
            os.remove(file_path)
            removed += 1
return removed, total
<SYSTEM_TASK:> Calls import_batch for the next filename in the queue <END_TASK> <USER_TASK:> Description: def next_task(self, item, **kwargs): """Calls import_batch for the next filename in the queue and "archives" the file. The archive folder is typically the folder for the deserializer queue. """
# Import the batch named by this queue item; only archive it if the
# import succeeded (the `else` clause runs on the no-exception path).
filename = os.path.basename(item)
try:
    self.tx_importer.import_batch(filename=filename)
except TransactionImporterError as e:
    # Re-raise as the queue-level error, preserving the cause chain.
    raise TransactionsFileQueueError(e) from e
else:
    self.archive(filename)
<SYSTEM_TASK:> Get visible comments for the model. <END_TASK> <USER_TASK:> Description: def get_public_comments_for_model(model): """ Get visible comments for the model. """
if not IS_INSTALLED: # No local comments, return empty queryset. # The project might be using DISQUS or Facebook comments instead. return CommentModelStub.objects.none() else: return CommentModel.objects.for_model(model).filter(is_public=True, is_removed=False)
<SYSTEM_TASK:> Check if comments are open for the instance <END_TASK> <USER_TASK:> Description: def get_comments_are_open(instance): """ Check if comments are open for the instance """
if not IS_INSTALLED:
    # No comments app at all -> nothing can be open.
    return False
try:
    # Get the moderator which is installed for this model.
    mod = moderator._registry[instance.__class__]
except KeyError:
    # No moderator = no restrictions
    return True
# Check the 'enable_field', 'auto_close_field' and 'close_after',
# by reusing the basic Django policies.
# NOTE(review): allow() is invoked unbound with `mod` as self and None
# for the comment/request arguments -- presumably the policy only reads
# the instance; confirm against django_comments' CommentModerator.
return CommentModerator.allow(mod, None, instance, None)
<SYSTEM_TASK:> Check if comments are moderated for the instance <END_TASK> <USER_TASK:> Description: def get_comments_are_moderated(instance): """ Check if comments are moderated for the instance """
if not IS_INSTALLED:
    # No comments app -> nothing is moderated.
    return False
try:
    # Get the moderator which is installed for this model.
    mod = moderator._registry[instance.__class__]
except KeyError:
    # No moderator = no moderation
    return False
# Check the 'auto_moderate_field', 'moderate_after',
# by reusing the basic Django policies.
# NOTE(review): moderate() is invoked unbound with `mod` as self and
# None for the comment/request arguments -- confirm this is safe.
return CommentModerator.moderate(mod, None, instance, None)
<SYSTEM_TASK:> calculate local indices, return start and stop index per dimension per process for local data field <END_TASK> <USER_TASK:> Description: def calc_local_indices(shape, num_partitions, coordinate): """ calculate local indices, return start and stop index per dimension per process for local data field :param shape: global shape of data :param num_partitions: number of partition for each dimension (from MPI.Compute_dims()) :param coordinate: cartesian coordinate descriptor (from CARTESIAN_COMMUNICATOR.Get_coords(rank)) :return: tuple of start/stop index per dimension ((start_x, stop_x), (start_y, stop_y), ...) """
dimension = len(shape) # check matching of cartesian communicator and shape assert dimension == len(num_partitions) decomposed_shapes = [] # build shape list for every dimension for idx in range(dimension): local_shape = shape[idx] // num_partitions[idx] temp_shape_list = [] for _ in range(num_partitions[idx]): temp_shape_list.append(local_shape) # expand local partitions to match global shape for j in range(shape[idx] % num_partitions[idx]): temp_shape_list[j] += 1 # decomposed_shapes[dimension][partition] decomposed_shapes.append(temp_shape_list) # calculate indices for partitions indices = [] # TODO: redefine calculation -> first select and calculate for i in range(dimension): temp_index_list = [] start_idx = 0 end_idx = 0 for j in range(num_partitions[i]): end_idx = end_idx + decomposed_shapes[i][j] temp_index_list.append([start_idx, end_idx]) start_idx = end_idx indices.append(temp_index_list) start_index = [] stop_index = [] shape = [] # select partition, start and stop index for idx in range(dimension): start_index.append(indices[idx][coordinate[idx]][0]) stop_index.append(indices[idx][coordinate[idx]][1]) shape.append(decomposed_shapes[idx][coordinate[idx]]) shape = tuple(shape) start_index = tuple(start_index) stop_index = tuple(stop_index) return start_index, stop_index, shape
<SYSTEM_TASK:> Read in file contents and set the current string. <END_TASK> <USER_TASK:> Description: def load_file(self, filename): """Read in file contents and set the current string."""
# Read the whole file and install it as the working string (this also
# resets the scanner position via set_string).
with open(filename, 'r') as handle:
    self.set_string(handle.read())
<SYSTEM_TASK:> Set the working string and its length then reset positions. <END_TASK> <USER_TASK:> Description: def set_string(self, string): """Set the working string and its length then reset positions."""
# Install the new working string, cache its length, and rewind the
# scanner to the start.
self.string, self.length = string, len(string)
self.reset_position()
<SYSTEM_TASK:> Add to the working string and its length and reset eos. <END_TASK> <USER_TASK:> Description: def add_string(self, string): """Add to the working string and its length and reset eos."""
# Append to the working string, grow the cached length accordingly,
# and clear end-of-string since there is new material to consume.
self.string = self.string + string
self.length = self.length + len(string)
self.eos = 0
<SYSTEM_TASK:> Reset all current positions. <END_TASK> <USER_TASK:> Description: def reset_position(self): """Reset all current positions."""
# Rewind the scanner: character offset, column and eos flag to zero,
# row back to the first line (rows are 1-based).
self.pos = self.col = self.eos = 0
self.row = 1
<SYSTEM_TASK:> Returns boolean if self.pos + length < working string length. <END_TASK> <USER_TASK:> Description: def has_space(self, length=1, offset=0): """Returns boolean if self.pos + length < working string length."""
return self.pos + (length + offset) - 1 < self.length
<SYSTEM_TASK:> Return the amount of characters until the next newline. <END_TASK> <USER_TASK:> Description: def eol_distance_next(self, offset=0): """Return the amount of characters until the next newline."""
distance = 0 for char in self.string[self.pos + offset:]: if char == '\n': break else: distance += 1 return distance
<SYSTEM_TASK:> Return the amount of characters until the last newline. <END_TASK> <USER_TASK:> Description: def eol_distance_last(self, offset=0): """Return the amount of characters until the last newline."""
distance = 0 for char in reversed(self.string[:self.pos + offset]): if char == '\n': break else: distance += 1 return distance
<SYSTEM_TASK:> Move current position backwards by length. <END_TASK> <USER_TASK:> Description: def spew_length(self, length): """Move current position backwards by length."""
pos = self.pos
# Cannot rewind from the very start, or further back than we have moved.
if not pos or length > pos:
    return None
row = self.row
# Walk backwards over the spewed span, decrementing the row for every
# newline crossed.
for char in reversed(self.string[pos - length:pos]):
    pos -= 1
    if char == '\n':  # handle a newline char
        row -= 1
self.pos = pos
# Column is recomputed as the distance back to the previous newline.
self.col = self.eol_distance_last()
self.row = row
if self.has_space():  # Moving backwards reopened space: clear eos.
    self.eos = 0
<SYSTEM_TASK:> Move current position forward by length and sets eos if needed. <END_TASK> <USER_TASK:> Description: def eat_length(self, length): """Move current position forward by length and sets eos if needed."""
pos = self.pos
# Refuse to advance past the end or when already at end-of-string.
if self.eos or pos + length > self.length:
    return None
col = self.col
row = self.row
# Advance over each consumed character, tracking column and row.
for char in self.string[pos:pos + length]:
    col += 1
    pos += 1
    if char == '\n':  # handle a newline char
        col = 0
        row += 1
self.pos = pos
self.col = col
self.row = row
if not self.has_space():  # Set eos if there is no more space left.
    self.eos = 1
<SYSTEM_TASK:> Move current position by length of string and count lines by \n. <END_TASK> <USER_TASK:> Description: def eat_string(self, string): """Move current position by length of string and count lines by \n."""
pos = self.pos
# Refuse to advance past the end or when already at end-of-string.
if self.eos or pos + len(string) > self.length:
    return None
col = self.col
row = self.row
# NOTE(review): newlines are counted in the *given* string, not in the
# source -- assumes `string` matches the source text at pos; confirm.
for char in string:
    col += 1
    pos += 1
    if char == '\n':  # handle a newline char
        col = 0
        row += 1
self.pos = pos
self.col = col
self.row = row
if not self.has_space():  # Set eos if there is no more space left.
    self.eos = 1
<SYSTEM_TASK:> Move current position forward until the next line. <END_TASK> <USER_TASK:> Description: def eat_line(self): """Move current position forward until the next line."""
if self.eos: return None eat_length = self.eat_length get_char = self.get_char has_space = self.has_space while has_space() and get_char() != '\n': eat_length(1) eat_length(1)
<SYSTEM_TASK:> Return the current character in the working string. <END_TASK> <USER_TASK:> Description: def get_char(self, offset=0): """Return the current character in the working string."""
if not self.has_space(offset=offset): return '' return self.string[self.pos + offset]
<SYSTEM_TASK:> Return string at current position + length. <END_TASK> <USER_TASK:> Description: def get_length(self, length, trim=0, offset=0): """Return string at current position + length. If trim == true then get as much as possible before eos. """
# With trim set and not enough room, return whatever remains.
# (has_space is called with offset+length as its *length* argument,
# which is equivalent to asking for length chars at pos+offset.)
if trim and not self.has_space(offset + length):
    return self.string[self.pos + offset:]
elif self.has_space(offset + length):
    return self.string[self.pos + offset:self.pos + offset + length]
else:
    # Not enough room and trimming disabled: nothing to return.
    return ''
<SYSTEM_TASK:> Return non space chars from current position until a whitespace. <END_TASK> <USER_TASK:> Description: def get_string(self, offset=0): """Return non space chars from current position until a whitespace."""
if not self.has_space(offset=offset):
    return ''
# Get a char for each char in the current string from pos onward
# so long as the char is not whitespace.
string = self.string
pos = self.pos + offset
for i, char in enumerate(string[pos:]):
    if char.isspace():
        return string[pos:pos + i]
else:
    # for/else: no whitespace found at all -- the token runs to the end.
    return string[pos:]
<SYSTEM_TASK:> A copy of the current position till the end of the source string. <END_TASK> <USER_TASK:> Description: def rest_of_string(self, offset=0): """A copy of the current position till the end of the source string."""
if self.has_space(offset=offset): return self.string[self.pos + offset:] else: return ''
<SYSTEM_TASK:> Return a SourceLine of the current line. <END_TASK> <USER_TASK:> Description: def get_current_line(self): """Return a SourceLine of the current line."""
if not self.has_space():
    return None
# Rewind to the start of the current line using the tracked column.
pos = self.pos - self.col
string = self.string
end = self.length
output = []
# Collect chars up to the newline; the inner check stops at the buffer
# end first, so the while/else below cannot index out of range.
while pos < len(string) and string[pos] != '\n':
    output.append(string[pos])
    pos += 1
    if pos == end:
        break
else:
    # Loop ended on a '\n' (not via break): include the newline.
    output.append(string[pos])
if not output:
    return None
return SourceLine(''.join(output), self.row)
<SYSTEM_TASK:> Return SourceLines for lines between and including first & last. <END_TASK> <USER_TASK:> Description: def get_lines(self, first, last): """Return SourceLines for lines between and including first & last."""
line = 1
linestring = []
linestrings = []
# Single pass over the whole string, collecting chars while the line
# counter is within [first, last].
for char in self.string:
    if line >= first and line <= last:
        linestring.append(char)
        if char == '\n':
            linestrings.append((''.join(linestring), line))
            linestring = []
    elif line > last:
        break
    if char == '\n':
        line += 1
# Flush a final line that did not end with a newline.
if linestring:
    linestrings.append((''.join(linestring), line))
elif not linestrings:
    return None
return [SourceLine(string, lineno) for string, lineno in linestrings]
<SYSTEM_TASK:> Return the current line and x,y previous and future lines. <END_TASK> <USER_TASK:> Description: def get_surrounding_lines(self, past=1, future=1): """Return the current line and x,y previous and future lines. Returns a list of SourceLine's. """
string = self.string
# Start of the current line.
pos = self.pos - self.col
end = self.length
row = self.row
linesback = 0
# Walk backwards `past` line starts (string[pos-2] == '\n' detects the
# character just before a line start).
while linesback > -past:
    if pos <= 0:
        break
    elif string[pos - 2] == '\n':
        linesback -= 1
    pos -= 1
output = []
linestring = []
lines = future + 1
# Walk forward emitting SourceLines until `future` lines past current.
while linesback < lines:
    if pos >= end:
        # Hit the buffer end: flush the partial line (the [:-1] drops the
        # duplicated final char appended just above).
        linestring.append(string[pos - 1])
        output.append(
            SourceLine(''.join(linestring[:-1]), row + linesback))
        break
    elif string[pos] == '\n':
        linestring.append(string[pos])
        pos += 1
        output.append(
            SourceLine(''.join(linestring), row + linesback))
        linesback += 1
        linestring = []
    # NOTE(review): this append runs even right after the newline branch
    # above, consuming the following char unconditionally -- blank lines
    # ('\n\n') appear to be miscounted; confirm against upstream.
    linestring.append(string[pos])
    pos += 1
return output
<SYSTEM_TASK:> Return all lines of the SourceString as a list of SourceLine's. <END_TASK> <USER_TASK:> Description: def get_all_lines(self): """Return all lines of the SourceString as a list of SourceLine's."""
output = [] line = [] lineno = 1 for char in self.string: line.append(char) if char == '\n': output.append(SourceLine(''.join(line), lineno)) line = [] lineno += 1 if line: output.append(SourceLine(''.join(line), lineno)) return output
<SYSTEM_TASK:> Returns 1 if string can be matched against SourceString's <END_TASK> <USER_TASK:> Description: def match_string(self, string, word=0, offset=0): """Returns 1 if string can be matched against SourceString's current position. If word is >= 1 then it will only match string followed by whitespace. """
if word: return self.get_string(offset) == string return self.get_length(len(string), offset) == string
<SYSTEM_TASK:> Attempts to match each string in strings in order. <END_TASK> <USER_TASK:> Description: def match_any_string(self, strings, word=0, offset=0): """Attempts to match each string in strings in order. Will return the string that matches or an empty string if no match. If word arg >= 1 then only match if string is followed by a whitespace which is much higher performance. If word is 0 then you should sort the strings argument yourself by length. """
if word: current = self.get_string(offset) return current if current in strings else '' current = '' currentlength = 0 length = 0 for string in strings: length = len(string) if length != currentlength: current = self.get_length(length, offset) if string == current: return string return ''
<SYSTEM_TASK:> Match and return the current SourceString char if its in chars. <END_TASK> <USER_TASK:> Description: def match_any_char(self, chars, offset=0): """Match and return the current SourceString char if its in chars."""
if not self.has_space(offset=offset): return '' current = self.string[self.pos + offset] return current if current in chars else ''
<SYSTEM_TASK:> Match each char sequentially from current SourceString position <END_TASK> <USER_TASK:> Description: def match_function_pattern(self, first, rest=None, least=1, offset=0): """Match each char sequentially from current SourceString position until the pattern doesnt match and return all maches. Integer argument least defines and minimum amount of chars that can be matched. This version takes functions instead of string patterns. Each function must take one argument, a string, and return a value that can be evauluated as True or False. If rest is defined then first is used only to match the first arg and the rest of the chars are matched against rest. """
if not self.has_space(offset=offset):
    return ''
# The first character must satisfy the `first` predicate.
firstchar = self.string[self.pos + offset]
if not first(firstchar):
    return ''
output = [firstchar]
# Subsequent characters are matched against `rest` when given,
# otherwise against `first` throughout.
pattern = first if rest is None else rest
for char in self.string[self.pos + offset + 1:]:
    if pattern(char):
        output.append(char)
    else:
        break
# Enforce the minimum match length.
if len(output) < least:
    return ''
return ''.join(output)
<SYSTEM_TASK:> Finds the last meaningful line and returns its indent level. <END_TASK> <USER_TASK:> Description: def count_indents_last_line(self, spacecount, tabs=0, back=5): """Finds the last meaningful line and returns its indent level. Back specifies the amount of lines to look back for a none whitespace line. """
if not self.has_space(): return 0 lines = self.get_surrounding_lines(back, 0) for line in reversed(lines): if not line.string.isspace(): return line.count_indents(spacecount, tabs) return 0
<SYSTEM_TASK:> Finds the last meaningful line and returns its indent level and <END_TASK> <USER_TASK:> Description: def count_indents_length_last_line(self, spacecount, tabs=0, back=5): """Finds the last meaningful line and returns its indent level and character length. Back specifies the amount of lines to look back for a none whitespace line. """
if not self.has_space(): return 0 lines = self.get_surrounding_lines(back, 0) for line in reversed(lines): if not line.string.isspace(): return line.count_indents_length(spacecount, tabs) return (0, 0)
<SYSTEM_TASK:> Moves the position forwards to the next non newline space character. <END_TASK> <USER_TASK:> Description: def skip_whitespace(self, newlines=0): """Moves the position forwards to the next non newline space character. If newlines >= 1 include newlines as spaces. """
if newlines:
    # Newlines count as whitespace: consume every space-class char.
    while not self.eos:
        if self.get_char().isspace():
            self.eat_length(1)
        else:
            break
else:
    # Stop at newlines so line structure is preserved.
    char = ''
    while not self.eos:
        char = self.get_char()
        if char.isspace() and char != '\n':
            self.eat_length(1)
        else:
            break
<SYSTEM_TASK:> Return a string of this line including linenumber. <END_TASK> <USER_TASK:> Description: def pretty_print(self, carrot=False): """Return a string of this line including linenumber. If carrot is True then a line is added under the string with a carrot under the current character position. """
lineno = self.lineno padding = 0 if lineno < 1000: padding = 1 if lineno < 100: padding = 2 if lineno < 10: padding = 3 string = str(lineno) + (' ' * padding) + '|' + self.string if carrot: string += '\n' + (' ' * (self.col + 5)) return string
<SYSTEM_TASK:> exit without breaking pipes. <END_TASK> <USER_TASK:> Description: def safe_exit(output): """exit without breaking pipes."""
try:
    sys.stdout.write(output)
    sys.stdout.flush()
except IOError:
    # Deliberately swallow broken-pipe style errors (e.g. piping into
    # `head`) so the program exits quietly.
    pass
<SYSTEM_TASK:> returns Markdown text of selected fragment. <END_TASK> <USER_TASK:> Description: def frag2text(endpoint, stype, selector, clean=False, raw=False, verbose=False): """returns Markdown text of selected fragment. Args: endpoint: URL, file, or HTML string stype: { 'css' | 'xpath' } selector: CSS selector or XPath expression Returns: Markdown text Options: clean: cleans fragment (lxml.html.clean defaults) raw: returns raw HTML fragment verbose: show http status, encoding, headers """
try:
    return main(endpoint, stype, selector, clean, raw, verbose)
except StandardError as err:
    # NOTE(review): StandardError exists only in Python 2; this module
    # appears py2-era (see iteritems usage elsewhere). The exception is
    # *returned*, not raised -- presumably deliberate for CLI use; confirm.
    return err
<SYSTEM_TASK:> return local file contents as endpoint. <END_TASK> <USER_TASK:> Description: def read(self, _file): """return local file contents as endpoint."""
with open(_file) as fh: data = fh.read() if self.verbose: sys.stdout.write("read %d bytes from %s\n" % (fh.tell(), _file)) return data
<SYSTEM_TASK:> Directive for registering a file-system based repository. <END_TASK> <USER_TASK:> Description: def filesystem_repository(_context, name=None, make_default=False, aggregate_class=None, repository_class=None, directory=None, content_type=None): """ Directive for registering a file-system based repository. """
# Collect only the options that were explicitly supplied.
# Fixed idiom: `x is not None` instead of `not x is None` (PEP 8).
cnf = {}
if directory is not None:
    cnf['directory'] = directory
if content_type is not None:
    cnf['content_type'] = content_type
# Delegate registration to the shared _repository helper.
_repository(_context, name, make_default, aggregate_class,
            repository_class, REPOSITORY_TYPES.FILE_SYSTEM,
            'add_filesystem_repository', cnf)
<SYSTEM_TASK:> Directive for registering a RDBM based repository. <END_TASK> <USER_TASK:> Description: def rdb_repository(_context, name=None, make_default=False, aggregate_class=None, repository_class=None, db_string=None, metadata_factory=None): """ Directive for registering a RDBM based repository. """
# Collect only the options that were explicitly supplied.
# Fixed idiom: `x is not None` instead of `not x is None` (PEP 8).
cnf = {}
if db_string is not None:
    cnf['db_string'] = db_string
if metadata_factory is not None:
    cnf['metadata_factory'] = metadata_factory
# Delegate registration to the shared _repository helper.
_repository(_context, name, make_default, aggregate_class,
            repository_class, REPOSITORY_TYPES.RDB,
            'add_rdb_repository', cnf)
<SYSTEM_TASK:> Directive for setting up the user message resource in the appropriate <END_TASK> <USER_TASK:> Description: def messaging(_context, repository, reset_on_start=False): """ Directive for setting up the user message resource in the appropriate repository. :param str repository: The repository to create the user messages resource in. """
# One action per (directive, repository) pair so duplicate registrations
# conflict at configuration time.
discriminator = ('messaging', repository)
reg = get_current_registry()
config = Configurator(reg, package=_context.package)
# Defer the actual setup to ZCML action-execution time.
_context.action(discriminator=discriminator, # pylint: disable=E1101
                callable=config.setup_system_repository,
                args=(repository,),
                kw=dict(reset_on_start=reset_on_start))
<SYSTEM_TASK:> Remove any keys not in 'keep' <END_TASK> <USER_TASK:> Description: def _filter(self, dict, keep): """ Remove any keys not in 'keep' """
# NOTE(review): the parameter `dict` shadows the builtin, and
# iteritems() is Python 2 only -- kept as-is for compatibility.
if not keep:
    # Empty/None keep-list means "keep everything".
    return dict
result = {}
for key, value in dict.iteritems():
    if key in keep:
        result[key] = value
return result
<SYSTEM_TASK:> Creates new estimate from a census series. <END_TASK> <USER_TASK:> Description: def write_county_estimate(self, table, variable, code, datum): """ Creates new estimate from a census series. Data has following signature from API: { 'B00001_001E': '5373', 'NAME': 'Anderson County, Texas', 'county': '001', 'state': '48' } """
try:
    # County divisions are keyed by the concatenated state+county FIPS code.
    division = Division.objects.get(
        code="{}{}".format(datum["state"], datum["county"]),
        level=self.COUNTY_LEVEL,
    )
    # Upsert so re-running an import refreshes existing estimates;
    # missing/empty API values are stored as 0.
    CensusEstimate.objects.update_or_create(
        division=division,
        variable=variable,
        defaults={"estimate": datum[code] or 0},
    )
except ObjectDoesNotExist:
    # Unknown division: log and continue with the rest of the batch.
    print("ERROR: {}, {}".format(datum["NAME"], datum["state"]))
<SYSTEM_TASK:> Calls API for all districts in a state and a given estimate. <END_TASK> <USER_TASK:> Description: def get_district_estimates_by_state( self, api, table, variable, estimate, state ): """ Calls API for all districts in a state and a given estimate. """
# Resolve the state division (rebinds `state` from code string to model).
state = Division.objects.get(level=self.STATE_LEVEL, code=state)
# One API call for every congressional district in the state.
district_data = api.get(
    ("NAME", estimate),
    {
        "for": "congressional district:*",
        "in": "state:{}".format(state.code),
    },
    year=int(table.year),
)
for datum in district_data:
    self.write_district_estimate(table, variable, estimate, datum)
<SYSTEM_TASK:> Calls API for all counties in a state and a given estimate. <END_TASK> <USER_TASK:> Description: def get_county_estimates_by_state( self, api, table, variable, estimate, state ): """ Calls API for all counties in a state and a given estimate. """
# Resolve the state division (rebinds `state` from code string to model).
state = Division.objects.get(level=self.STATE_LEVEL, code=state)
# One API call for every county in the state.
county_data = api.get(
    ("NAME", estimate),
    {"for": "county:*", "in": "state:{}".format(state.code)},
    year=int(table.year),
)
for datum in county_data:
    self.write_county_estimate(table, variable, estimate, datum)
<SYSTEM_TASK:> Calls API for a state and a given estimate. <END_TASK> <USER_TASK:> Description: def get_state_estimates_by_state( self, api, table, variable, estimate, state ): """ Calls API for a state and a given estimate. """
# Resolve the state division (rebinds `state` from code string to model).
state = Division.objects.get(level=self.STATE_LEVEL, code=state)
# Single API call for the state-level figure.
state_data = api.get(
    ("NAME", estimate),
    {"for": "state:{}".format(state.code)},
    year=int(table.year),
)
for datum in state_data:
    self.write_state_estimate(table, variable, estimate, datum)
<SYSTEM_TASK:> Aggregate census table variables by a custom label. <END_TASK> <USER_TASK:> Description: def aggregate_variable(estimate, id): """ Aggregate census table variables by a custom label. """
estimates = [ variable.estimates.get(division__id=id).estimate for variable in estimate.variable.label.variables.all() ] method = estimate.variable.label.aggregation if method == "s": aggregate = sum(estimates) elif method == "a": aggregate = statistics.mean(estimates) elif method == "m": aggregate = statistics.median(estimates) else: aggregate = None return aggregate