<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wait_for_web_available(self): """ Wait for the web server to become available or raise DatacatsError if it fails to start. """
try:
    if not wait_for_service_available(
            self._get_container_name('web'),
            self.web_address(),
            WEB_START_TIMEOUT_SECONDS):
        raise DatacatsError('Error while starting web container:\n' +
                            container_logs(self._get_container_name('web'),
                                           "all", False, None))
except ServiceTimeout:
    raise DatacatsError('Timeout while starting web container. Logs:' +
                        container_logs(self._get_container_name('web'),
                                       "all", False, None))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _choose_port(self): """ Return a port number from 5000-5999 based on the environment name to be used as a default when the user hasn't selected one. """
# instead of random let's base it on the name chosen (and the site name)
return 5000 + unpack('Q',
                     sha((self.name + self.site_name)
                         .decode('ascii')).digest()[:8])[0] % 1000
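As a standalone illustration of the derivation above — a minimal sketch, assuming `sha` is `hashlib.sha1` and `unpack` comes from `struct`, as the call sites suggest (Python 2):

# Hedged sketch of the deterministic port derivation.
from hashlib import sha1 as sha
from struct import unpack

def choose_port(name, site_name):
    # Hash the environment + site name so the same pair always
    # maps to the same default port in the 5000-5999 range.
    digest = sha((name + site_name).decode('ascii')).digest()
    return 5000 + unpack('Q', digest[:8])[0] % 1000

# choose_port('myenv', 'primary') returns the same port on every run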
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _next_port(self, port): """ Return another port from the 5000-5999 range """
port = 5000 + (port + 1) % 1000
if port == self.port:
    raise DatacatsError('Too many instances running')
return port
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop_ckan(self): """ Stop and remove the web container """
remove_container(self._get_container_name('web'), force=True)
remove_container(self._get_container_name('datapusher'), force=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _current_web_port(self): """ return just the port number for the web container, or None if not running """
info = inspect_container(self._get_container_name('web'))
if info is None:
    return None
try:
    if not info['State']['Running']:
        return None
    return info['NetworkSettings']['Ports']['5000/tcp'][0]['HostPort']
except TypeError:
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def web_address(self): """ Return the url of the web server or None if not running """
port = self._current_web_port()
address = self.address or '127.0.0.1'
if port is None:
    return None
return 'http://{0}:{1}/'.format(
    address if address and not is_boot2docker() else docker_host(),
    port)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_admin_set_password(self, password): """ create 'admin' account with given password """
with open(self.sitedir + '/run/admin.json', 'w') as out:
    json.dump({
        'name': 'admin',
        'email': 'none',
        'password': password,
        'sysadmin': True},
        out)
self.user_run_script(
    script=scripts.get_script_path('update_add_admin.sh'),
    args=[],
    db_links=True,
    ro={
        self.sitedir + '/run/admin.json': '/input/admin.json'
    },
)
remove(self.sitedir + '/run/admin.json')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def interactive_shell(self, command=None, paster=False, detach=False): """ launch interactive shell session with all writable volumes :param command: list of strings to execute instead of bash """
if not exists(self.target + '/.bash_profile'):
    # this file is required for activating the virtualenv
    self.create_bash_profile()

if not command:
    command = []
use_tty = sys.stdin.isatty() and sys.stdout.isatty()

background = environ.get('CIRCLECI', False) or detach

if is_boot2docker():
    venv_volumes = ['--volumes-from', self._get_container_name('venv')]
else:
    venv_volumes = ['-v', self.datadir + '/venv:/usr/lib/ckan:rw']

self._create_run_ini(self.port, production=False, output='run.ini')
self._create_run_ini(self.port, production=True, output='test.ini',
                     source='ckan/test-core.ini', override_site_url=False)

script = scripts.get_script_path('shell.sh')
if paster:
    script = scripts.get_script_path('paster.sh')
    if command and command != ['help'] and command != ['--help']:
        command += ['--config=/project/development.ini']
    command = [self.extension_dir] + command

proxy_settings = self._proxy_settings()
if proxy_settings:
    venv_volumes += ['-v',
                     self.sitedir
                     + '/run/proxy-environment:/etc/environment:ro']

links = {self._get_container_name('solr'): 'solr',
         self._get_container_name('postgres'): 'db'}
links.update({self._get_container_name(container): container
              for container in self.extra_containers})

link_params = []
for link in links:
    link_params.append('--link')
    link_params.append(link + ':' + links[link])

if 'datapusher' in self.containers_running():
    link_params.append('--link')
    link_params.append(self._get_container_name('datapusher') + ':datapusher')

# FIXME: consider switching this to dockerpty
# using subprocess for docker client's interactive session
return subprocess.call([
    DOCKER_EXE, 'run',
    ] + (['--rm'] if not background else []) + [
    '-t' if use_tty else '',
    '-d' if detach else '-i',
    ] + venv_volumes + [
    '-v', self.target + ':/project:rw',
    '-v', self.sitedir + '/files:/var/www/storage:rw',
    '-v', script + ':/scripts/shell.sh:ro',
    '-v', scripts.get_script_path('paster_cd.sh') + ':/scripts/paster_cd.sh:ro',
    '-v', self.sitedir + '/run/run.ini:/project/development.ini:ro',
    '-v', self.sitedir + '/run/test.ini:/project/ckan/test-core.ini:ro'
    ] + link_params + [
    '--hostname', self.name,
    'datacats/web', '/scripts/shell.sh'] + command)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def install_package_requirements(self, psrc, stream_output=None): """ Install from requirements.txt file found in psrc :param psrc: name of directory in environment directory """
package = self.target + '/' + psrc
assert isdir(package), package
reqname = '/requirements.txt'
if not exists(package + reqname):
    reqname = '/pip-requirements.txt'
    if not exists(package + reqname):
        return
return self.user_run_script(
    script=scripts.get_script_path('install_reqs.sh'),
    args=['/project/' + psrc + reqname],
    rw_venv=True,
    rw_project=True,
    stream_output=stream_output
)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def purge_data(self, which_sites=None, never_delete=False): """ Remove uploaded files, postgres db, solr index, venv """
if not exists(self.datadir + '/.version'):
    format_version = 1
else:
    with open(self.datadir + '/.version') as f:
        format_version = int(f.read().strip())

if format_version == 1:
    print 'WARNING: Defaulting to old purge for version 1.'
    datadirs = ['files', 'solr']
    if is_boot2docker():
        remove_container('datacats_pgdata_{}'.format(self.name))
        remove_container('datacats_venv_{}'.format(self.name))
    else:
        datadirs += ['postgres', 'venv']

    web_command(
        command=['/scripts/purge.sh']
        + ['/project/data/' + d for d in datadirs],
        ro={scripts.get_script_path('purge.sh'): '/scripts/purge.sh'},
        rw={self.datadir: '/project/data'},
    )
    shutil.rmtree(self.datadir)
elif format_version == 2:
    # Default to the set of all sites
    if not which_sites:
        which_sites = self.sites

    datadirs = []
    boot2docker = is_boot2docker()

    if which_sites:
        if self.target:
            cp = SafeConfigParser()
            cp.read([self.target + '/.datacats-environment'])

        for site in which_sites:
            if boot2docker:
                remove_container(self._get_container_name('pgdata'))
            else:
                datadirs += [site + '/postgres']
            # Always rm the site dir & solr & files
            datadirs += [site, site + '/files', site + '/solr']
            if self.target:
                cp.remove_section('site_' + site)
            self.sites.remove(site)

        if self.target:
            with open(self.target + '/.datacats-environment', 'w') as conf:
                cp.write(conf)

    datadirs = ['sites/' + datadir for datadir in datadirs]

    if not self.sites and not never_delete:
        datadirs.append('venv')

    web_command(
        command=['/scripts/purge.sh']
        + ['/project/data/' + d for d in datadirs],
        ro={scripts.get_script_path('purge.sh'): '/scripts/purge.sh'},
        rw={self.datadir: '/project/data'},
    )
    if not self.sites and not never_delete:
        shutil.rmtree(self.datadir)
else:
    raise DatacatsError('Unknown format version {}'.format(format_version))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def less(environment, opts): # pylint: disable=unused-argument """Recompiles less files in an environment. Usage: datacats less [ENVIRONMENT] ENVIRONMENT may be an environment name or a path to an environment directory. Default: '.' """
require_extra_image(LESSC_IMAGE)

print 'Converting .less files to .css...'
for log in environment.compile_less():
    print log
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fetch_and_convert_dataset(source_files, target_filename): """ Decorator applied to a dataset conversion function that converts acquired source files into a dataset file that BatchUp can use. Parameters source_files: list of `AbstractSourceFile` instances A list of files to be acquired target_filename: str or callable The name of the target file in which to store the converted data either as a string or as a function of the form `fn() -> str` that returns it. The conversion function is of the form `fn(source_paths, target_path)`. It should return `target_path` if successful, `None` otherwise. After the conversion function is successfully applied, the temporary source files that were downloaded or copied into BatchUp's temporary directory are deleted, unless the conversion function moved or deleted them in which case no action is taken. Example ------- In this example, we will show how to acquire the USPS dataset from an online source. USPS is provided as an HDF5 file anyway, so the conversion function simply moves it to the target path: In this example, the USPS dataset will be acquired from a file on the filesystem. Note that the source path is fixed; the next example shows how we can determine the source path dynamically: The source path is provided as an argument to the decorated fetch function: """
if not isinstance(target_filename, six.string_types) and \
        not callable(target_filename):
    raise TypeError(
        'target_filename must either be a string or be callable (it is '
        'a {})'.format(type(target_filename)))
for src in source_files:
    if not isinstance(src, AbstractSourceFile):
        raise TypeError('source_files should contain '
                        '`AbstractSourceFile` instances, '
                        'not {}'.format(type(src)))

def decorate_fetcher(convert_function):
    def fetch(**kwargs):
        target_fn = path_string(target_filename)
        target_path = config.get_data_path(target_fn)

        # If the target file does not exist, we need to acquire the
        # source files and convert them
        if not os.path.exists(target_path):
            # Acquire the source files
            source_paths = []
            for src in source_files:
                p = src.acquire(**kwargs)
                if p is not None:
                    if p in source_paths:
                        raise ValueError(
                            'Duplicate source file {}'.format(p))
                    source_paths.append(p)
                else:
                    print('Failed to acquire {}'.format(src))
                    return None

            # Got the source files; convert
            converted_path = convert_function(source_paths, target_path)

            # If successful, delete the source files
            if converted_path is not None:
                for src in source_files:
                    src.clean_up()

            return converted_path
        else:
            # Target file already exists
            return target_path

    fetch.__name__ = convert_function.__name__
    return fetch

return decorate_fetcher
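The docstring's elided USPS example presumably resembled the following sketch; `DownloadSourceFile`, the URL and the digest here are illustrative assumptions rather than verbatim BatchUp code:

# Hypothetical usage sketch -- names, URL and digest are placeholders:
import shutil

@fetch_and_convert_dataset(
    [DownloadSourceFile('usps.h5',
                        url='http://example.org/usps.h5',  # placeholder URL
                        sha256='0000...')],                # placeholder digest
    'usps.h5')
def fetch_usps(source_paths, target_path):
    # USPS already ships as HDF5, so conversion is just a move
    shutil.move(source_paths[0], target_path)
    return target_path

# path = fetch_usps()  # acquires the source if needed, returns the local path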
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def acquire(self, **kwargs): """ Download the file and return its path Returns ------- str or None The path of the file in BatchUp's temporary directory or None if the download failed. """
return config.download_data(self.temp_filename, self.url, self.sha256)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def retrieve(self): """ Retrieve a result from executing a task. Note that tasks are executed in order and that if the next task has not yet completed, this call will block until the result is available. Returns ------- A result from the result buffer. """
if len(self.__result_buffer) > 0:
    res = self.__result_buffer.popleft()
    value = res.get()
else:
    return None
self.__populate_buffer()
return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def install(environment, opts): """Install or reinstall Python packages within this environment Usage: datacats install [-c] [-q] [--address=IP] [ENVIRONMENT] Options: --address=IP The address to bind to when reloading after install -c --clean Reinstall packages into a clean virtualenv -q --quiet Do not show output from installing packages and requirements. ENVIRONMENT may be an environment name or a path to an environment directory. Default: '.' """
environment.require_data()
install_all(environment, opts['--clean'], verbose=not opts['--quiet'],
            packages=opts['PACKAGE'])

for site in environment.sites:
    environment = Environment.load(environment.name, site)
    if 'web' in environment.containers_running():
        # FIXME: reload without changing debug setting?
        manage.reload_(environment, {
            '--address': opts['--address'],
            '--background': False,
            '--no-watch': False,
            '--production': False,
            'PORT': None,
            '--syslog': False,
            '--site-url': None,
            '--interactive': False,
        })
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def migrate(opts): """Migrate an environment to a given revision of the datadir format. Usage: datacats migrate [-y] [-r VERSION] [ENVIRONMENT_DIR] Options: -r --revision=VERSION The version of the datadir format you want to convert to [default: 2] -y --yes Answer yes to all questions. Defaults to '.' if ENVIRONMENT_DIR isn't specified. """
try:
    version = int(opts['--revision'])
except:
    raise DatacatsError('--revision parameter must be an integer.')

always_yes = opts['--yes']

if 'ENVIRONMENT_DIR' not in opts or not opts['ENVIRONMENT_DIR']:
    cwd = getcwd()
    # Get the dirname
    opts['ENVIRONMENT_DIR'] = split(cwd if cwd[-1] != '/' else cwd[:-1])[1]

datadir = expanduser('~/.datacats/' + opts['ENVIRONMENT_DIR'])
if needs_format_conversion(datadir, version):
    convert_environment(datadir, version, always_yes)
    print 'Successfully converted datadir {} to format version {}'.format(
        datadir, version)
else:
    print 'datadir {} is already at version {}.'.format(datadir, version)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _trim_batch(batch, length): """Trim the mini-batch `batch` to the size `length`. `batch` can be: - a NumPy array, in which case it's first axis will be trimmed to size `length` - a tuple, in which case `_trim_batch` applied recursively to each element and the resulting tuple returned As a consequence, mini-batches can be structured; lists and tuples can be nested arbitrarily deep. Parameters batch: tuple or NumPy array the mini-batch to trim length: int the size to which `batch` is to be trimmed Returns ------- tuple or NumPy array of same structure as `batch` The trimmed mini-batch """
if isinstance(batch, tuple):
    return tuple([_trim_batch(b, length) for b in batch])
else:
    return batch[:length]
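For instance, trimming a nested mini-batch structure works recursively; a small sketch using the function above:

import numpy as np

X = np.zeros((64, 10))
y = np.zeros((64,))
batch = (X, (y, y))               # arbitrarily nested tuple structure
trimmed = _trim_batch(batch, 50)
# trimmed[0].shape == (50, 10); trimmed[1][0].shape == (50,)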
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _trim_batch(batch, length): """Trim the mini-batch `batch` to the size `length`. `batch` can be: - a NumPy array, in which case its first axis will be trimmed to size `length` - a tuple, in which case `_trim_batch` is applied recursively to each element and the resulting tuple returned As a consequence, mini-batches can be structured; lists and tuples can be nested arbitrarily deep. Parameters batch: tuple or NumPy array the mini-batch to trim length: int the size to which `batch` is to be trimmed Returns ------- tuple or NumPy array of same structure as `batch` The trimmed mini-batch """
# Accumulator for results and number of samples
results = []

# If `progress_iter_func` is not `None`, apply it
if progress_iter_func is not None:
    batch_iter = progress_iter_func(batch_iter, total=n_batches,
                                    leave=False)

# Apply `func` to each batch
n_processed = 0
for batch in batch_iter:
    # Apply on batch and check the type of the results
    if prepend_args is not None:
        batch_results = func(*(prepend_args + tuple(batch)))
    else:
        batch_results = func(*batch)
    if batch_results is None:
        pass
    elif isinstance(batch_results, np.ndarray):
        batch_results = (batch_results,)
    elif isinstance(batch_results, tuple):
        pass
    else:
        raise TypeError(
            'Batch function should return a tuple of results, a '
            'single result as a NumPy array, or None, '
            'not {}'.format(type(batch_results)))

    # Accumulate training results
    if batch_results is not None:
        results.append(batch_results)

    n_processed += 1
    if n_batches is not None and n_processed >= n_batches:
        break

# Concatenate result arrays
if len(results) > 0:
    results = zip(*results)
    results = tuple([np.concatenate(list(r), axis=0) for r in results])
    return results
else:
    return None
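A hedged reconstruction of the docstring's elided example, assuming the `ArrayDataSource` API described later in this section:

import numpy as np

def sqr_sum(x):
    # per-sample result: sum of squares along the feature axis
    return (x ** 2).sum(axis=1)

X = np.random.normal(size=(7, 10))
ds = ArrayDataSource([X])
(X_sqr_sum,) = batch_map_concat(sqr_sum, ds.batch_iterator(batch_size=5))
assert X_sqr_sum.shape == (7,)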
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def batch_map_mean(func, batch_iter, progress_iter_func=None, sum_axis=None, n_batches=None, prepend_args=None): """ Apply a function to all the samples that are accessed as mini-batches obtained from an iterator. Returns the across-samples mean of the results returned by `func` The `sum_axis` argument tells `mean_batch_map` how to process the results of `func` before accumulating them: - If `sum_axis` is `None`, `func` should return the across-samples SUM of the results of operating on the mini-batch the sum of the values for the samples, e.g. for loss and error it should - Otherwise, `sum_axis` should specify the axis or axes over which the batch results should be summed, e.g. if `func` returns a per-sample loss and error in two arrays a value of `0` to sum over axis 0 to get the per-batch loss and error. These results will be accumulated and divided by the number of samples at the end to get the mean. Parameters func: callable `func(*batch) -> results` The function to call on each mini-batch. Note that the results must be `None`, a tuple or a NumPy array batch_iter: data set iterator Iterator that generates mini-batches of data progress_iter_func: [optional] callable `progress_iter_func(iterator, total=total, leave=leave)` A `tqdm` style function that will be passed the iterator that generates training batches along with the total number of batches and `False` for the `leave` parameter. By passing either `tqdm.tqdm` or `tqdm.tqdm_notebook` as this argument you can have the training loop display a progress bar. sum_axis: (default=`None`) int, tuple of ints or None If an integer or a tuple of integers, the results returned by `func` will be summed across this axis / these axes before being accumulated; e.g. if `func` returns an array of per-sample losses, with axis 0 being the sample dimension, passing a value of `0` as `sum_axis` will cause these results to be summed along axis 0 to get the per-batch sum before accumulating the losses. The total summed loss will be divided by the number of samples at the end in order to compute the mean loss. n_batches: [optional] integer that specifies the number of mini-batches to process before returning prepend_args: [optional] tuple Arguments to prepend to the arguments passed to `func` Returns ------- tuple The sum of the results of the function `fn` divided by the number of samples processed, e.g. `(sum(outA_per_batch) / n_samples, sum(outB_per_batch) / n_samples, Examples -------- The following examples will demonstrate the use of `mean_batch_map` to compute binary cross entropy loss over a data set. A few variants will be demonstrated: - the default behaviour in which the function being applied should return the sum over the batch sample axis - having the function return per sample results and having `mean_batch_map` perform the sum operation. This is easier to understand but less efficient as a Theano function would have to move more data back from the GPU. 
- limiting the number of batches that will be processed in order to get partial results when dealing with a large data set Define a function to compute the per-sample binary cross entropy loss: Now define a function that computes the *SUM* of the binary cross entropy losses over the sample axis (axis 0), as the default behaviour of `mean_batch_map` will sum them up and divide by the number of samples at the end: Construct prediction and target data Apply the loss sum function defined above: Have `mean_batch_map` sum over axis 0: Construct a large data set and use `batch """
# Accumulator for results and number of samples
results_accum = None
n_samples_accum = 0

# If `progress_iter_func` is not `None`, apply it
if progress_iter_func is not None:
    batch_iter = progress_iter_func(batch_iter, total=n_batches,
                                    leave=False)

# Train on each batch
n_processed = 0
for batch in batch_iter:
    # Get number of samples in batch; can vary
    batch_n = _length_of_batch(batch)

    # Apply on batch and check the type of the results
    if prepend_args is not None:
        batch_results = func(*(prepend_args + tuple(batch)))
    else:
        batch_results = func(*batch)
    if batch_results is None:
        pass
    elif isinstance(batch_results, (np.ndarray, float)):
        batch_results = (batch_results,)
    elif isinstance(batch_results, tuple):
        pass
    else:
        raise TypeError(
            'Batch function should return a tuple of results, a '
            'single result as a NumPy array or float, or None, '
            'not {}'.format(type(batch_results)))

    # Accumulate results and number of samples
    if results_accum is None:
        # Initialise the accumulator to the batch results if `func`
        # returns summed results or if it returned None;
        # don't attempt to iterate over None and sum each item
        if batch_results is None:
            pass
        elif sum_axis is None:
            results_accum = list(batch_results)
        else:
            results_accum = [br.sum(axis=sum_axis) for br in batch_results]
    else:
        if batch_results is not None:
            for i in range(len(results_accum)):
                br = batch_results[i]
                if sum_axis is not None:
                    br = br.sum(axis=sum_axis)
                results_accum[i] += br
    n_samples_accum += batch_n

    n_processed += 1
    if n_batches is not None and n_processed >= n_batches:
        break

# Divide by the number of training examples used to compute mean
if results_accum is not None:
    results_accum = tuple([np.array(r).astype(float) / n_samples_accum
                           for r in results_accum])

return results_accum
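A hedged sketch of the elided cross-entropy example, assuming `ArrayDataSource` as above and the default (`sum_axis=None`) contract, under which `func` returns the SUM over the batch axis:

import numpy as np

def binary_xent_sum(pred, tgt):
    # per-sample binary cross entropy, summed over the batch axis
    e = -tgt * np.log(pred) - (1 - tgt) * np.log(1 - pred)
    return e.sum(axis=0)

pred = np.random.uniform(0.1, 0.9, size=(15, 1))
tgt = (np.random.uniform(size=(15, 1)) > 0.5).astype(float)
ds = ArrayDataSource([pred, tgt])
(mean_xent,) = batch_map_mean(binary_xent_sum, ds.batch_iterator(batch_size=5))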
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def coerce_data_source(x): """ Helper function to coerce an object into a data source, selecting the appropriate data source class for the given object. If `x` is already a data source it is returned as is. Parameters x: any The object to coerce. If `x` is a data source, it is returned as is. If it is a list or tuple of array-like objects they will be wrapped in an `ArrayDataSource` that will be returned. If `x` is an iterator it will be wrapped in an `IteratorDataSource`. If it is a callable it will be wrapped in a `CallableDataSource`. Returns ------- `x` coerced into a data source Raises ------ `TypeError` if `x` is not a data source, a list or tuple of array-like objects, an iterator or a callable. """
if isinstance(x, AbstractDataSource):
    return x
elif isinstance(x, (list, tuple)):
    # Sequence of array-likes
    items = []
    for item in x:
        if _is_array_like(item):
            items.append(item)
        else:
            raise TypeError(
                'Cannot convert x to a data source; x is a sequence and '
                'one of the elements is not an array-like object, rather '
                'a {}'.format(type(item)))
    if len(items) == 0:
        raise ValueError('Cannot convert x to a data source; x is an '
                         'empty sequence')
    return ArrayDataSource(items)
elif isinstance(x, collections.Iterator):
    return IteratorDataSource(x)
elif callable(x):
    return CallableDataSource(x)
else:
    raise TypeError('Cannot convert x to a data source; can only handle '
                    'iterators, callables, non-empty sequences of '
                    'array-like objects; cannot '
                    'handle {}'.format(type(x)))
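A brief sketch of the coercion rules, again assuming `ArrayDataSource` from the surrounding module:

import numpy as np

X = np.arange(10)
ds = coerce_data_source([X])   # list of arrays -> ArrayDataSource
ds2 = coerce_data_source(ds)   # an existing data source passes through
assert ds2 is ds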
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def batch_map_concat(self, func, batch_size, progress_iter_func=None, n_batches=None, prepend_args=None, **kwargs): """A batch oriented implementation of `map`. Applies a function to all the samples in this data source by breaking the data into mini-batches and applying the function to each mini-batch. Returns the per-sample results. This method is a wrapper around the :func:`batch_map` function; please see its documentation for more information and examples. The function `func` should return the result for each sample in the mini-batch as an array. To return multiple results (e.g. loss and errors) return a tuple of arrays (e.g. `(loss_array, error_array)`) Parameters func: callable `func(*batch) -> results` The function to call on each mini-batch. Note that the results must be `None`, a tuple or a NumPy array batch_size: int The mini-batch size progress_iter_func: [optional] callable `progress_iter_func(iterator, total=total, leave=leave)` A `tqdm` style function that will be passed the iterator that generates training batches along with the total number of batches and `False` for the `leave` parameter. By passing either `tqdm.tqdm` or `tqdm.tqdm_notebook` as this argument you can have the training loop display a progress bar. n_batches: [optional] integer that specifies the number of mini-batches to process before returning prepend_args: [optional] tuple Arguments to prepend to the arguments passed to `func` Returns ------- tuple The per-sample sum of the results of the function `func` e.g. Returns an empty tuple if there were 0 samples in the data set. Examples -------- Define a function to apply to samples: Construct data to process and create a data source: Apply the function defined above: """
if n_batches is None:
    n = self.num_samples(**kwargs)
    if n == np.inf:
        raise ValueError('Data set has infinite size or sampler will '
                         'generate infinite samples but no n_batches '
                         'limit specified')
    elif n is not None:
        n_batches = sampling.num_batches(n, batch_size)
batch_iter = self.batch_iterator(batch_size, **kwargs)
return batch_map_concat(func, batch_iter, progress_iter_func,
                        n_batches, prepend_args)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def batch_indices_iterator(self, batch_size, shuffle=None, **kwargs): """ Create an iterator that generates mini-batch sample indices. The batches will have `batch_size` elements, with the exception of the final batch which will have fewer if there are insufficient elements left to make a complete batch. If `shuffle` is `None` or `False` elements will be extracted in order. If it is a `numpy.random.RandomState`, it will be used to randomise the order in which elements are extracted from the data. If it is `True`, NumPy's default random number generator will be used to shuffle elements. If an array of indices was provided to the constructor, the subset of samples identified in that array is used, rather than the complete set of samples. The generated mini-batch indices take the form of 1D NumPy integer arrays. Parameters batch_size: int Mini-batch size shuffle: `numpy.random.RandomState` or `True` or `None` Used to randomise element order. If `None`, elements will be extracted in order. If it is a `RandomState` instance, that RNG will be used to shuffle elements. If it is `True`, NumPy's default RNG will be used. Returns ------- iterator An iterator that generates mini-batches in the form of 1D NumPy integer arrays. """
shuffle_rng = self._get_shuffle_rng(shuffle)
if shuffle_rng is not None:
    return self.sampler.shuffled_indices_batch_iterator(
        batch_size, shuffle_rng)
else:
    return self.sampler.in_order_indices_batch_iterator(batch_size)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def batch_iterator(self, batch_size, shuffle=None, **kwargs): """ Create an iterator that generates mini-batches extracted from this data source. The batches will have `batch_size` elements, with the exception of the final batch which will have fewer if there are insufficient elements left to make a complete batch. If `shuffle` is `None` or `False` elements will be extracted in order. If it is a `numpy.random.RandomState`, it will be used to randomise the order in which elements are extracted from the data. If it is `True`, NumPy's default random number generator will be used to shuffle elements. If an array of indices was provided to the constructor, the subset of samples identified in that array is used, rather than the complete set of samples. Parameters batch_size: int Mini-batch size shuffle: `numpy.random.RandomState` or `True` or `None` Used to randomise element order. If `None`, elements will be extracted in order. If it is a `RandomState` instance, that RNG will be used to shuffle elements. If it is `True`, NumPy's default RNG will be used. Returns ------- iterator where `batch_x`, `batch_y`, etc are themselves arrays. """
for batch_ndx in self.batch_indices_iterator(
        batch_size, shuffle=shuffle, **kwargs):
    yield self.samples_by_indices_nomapping(batch_ndx)
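A short usage sketch of the iterator, assuming `ArrayDataSource` as in the earlier examples:

import numpy as np

X = np.arange(100).reshape((50, 2))
ds = ArrayDataSource([X])
rng = np.random.RandomState(12345)
for (batch_X,) in ds.batch_iterator(batch_size=16, shuffle=rng):
    # 16-sample batches; the final batch holds the 2 left-over samples
    pass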
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def num_samples(self, **kwargs): """ Get the number of samples in this data source. Returns ------- int, `np.inf` or `None`. An int if the number of samples is known, `np.inf` if it is infinite or `None` if the number of samples is unknown. """
if self.num_samples_fn is None:
    return None
elif callable(self.num_samples_fn):
    return self.num_samples_fn(**kwargs)
else:
    return self.num_samples_fn
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def samples_by_indices(self, indices): """ Gather a batch of samples by indices, applying any index mapping defined by the underlying data sources. Parameters indices: 1D-array of ints or slice An index array or a slice that selects the samples to retrieve Returns ------- nested list of arrays A mini-batch """
if not self._random_access:
    raise TypeError('samples_by_indices method not supported as one '
                    'or more of the underlying data sources does '
                    'not support random access')
batch = self.source.samples_by_indices(indices)
return self.fn(*batch)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def purge(opts): """Purge environment database and uploaded files Usage: datacats purge [-s NAME | --delete-environment] [-y] [ENVIRONMENT] Options: --delete-environment Delete environment directory as well as its data, as well as the data for **all** sites. -s --site=NAME Specify a site to be purged [default: primary] -y --yes Respond yes to all prompts (i.e. force) ENVIRONMENT may be an environment name or a path to an environment directory. Default: '.' """
old = False
try:
    environment = Environment.load(opts['ENVIRONMENT'], opts['--site'])
except DatacatsError:
    environment = Environment.load(opts['ENVIRONMENT'], opts['--site'],
                                   data_only=True)

if get_format_version(environment.datadir) == 1:
    old = True
    environment = Environment.load(opts['ENVIRONMENT'], opts['--site'],
                                   allow_old=True)

# We need a valid site if they don't want to blow away everything.
if not opts['--delete-environment'] and not old:
    environment.require_valid_site()

sites = [opts['--site']] if not opts['--delete-environment'] else \
    environment.sites

if not opts['--yes']:
    y_or_n_prompt('datacats purge will delete all stored data')

environment.stop_ckan()
environment.stop_supporting_containers()

environment.purge_data(sites)

if opts['--delete-environment']:
    if environment.target:
        rmtree(environment.target)
    else:
        # the error was constructed but never raised in the original
        raise DatacatsError(
            "Unable to find the environment source"
            " directory so that it can be deleted.\n"
            "Chances are it's because it already does not exist")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pretty_print(self): """ Print the error message to stdout with colors and borders """
print colored.blue("-" * 40)
print colored.red("datacats: problem was encountered:")
print self.message
print colored.blue("-" * 40)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_password(): """ Return a 16-character alphanumeric random string generated by the operating system's secure pseudo random number generator """
chars = uppercase + lowercase + digits
return ''.join(SystemRandom().choice(chars) for x in xrange(16))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _machine_check_connectivity(): """ This method calls to docker-machine on the command line and makes sure that it is up and ready. Potential improvements to be made: - Support multiple machine names (run a `docker-machine ls` and then see which machines are active. Use a priority list) """
with open(devnull, 'w') as devnull_f:
    try:
        status = subprocess.check_output(
            ['docker-machine', 'status', 'dev'],
            stderr=devnull_f).strip()
        if status == 'Stopped':
            raise DatacatsError('Please start your docker-machine '
                                'VM with "docker-machine start dev"')

        # XXX HACK: This exists because of
        # http://github.com/datacats/datacats/issues/63,
        # as a temporary fix.
        if 'tls' in _docker_kwargs:
            # It will print out messages to the user otherwise.
            _docker_kwargs['tls'].assert_hostname = False
    except subprocess.CalledProcessError:
        raise DatacatsError('Please create a docker-machine with '
                            '"docker-machine create dev"')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def web_command(command, ro=None, rw=None, links=None, image='datacats/web', volumes_from=None, commit=False, clean_up=False, stream_output=None, entrypoint=None): """ Run a single command in a web image optionally preloaded with the ckan source and virtual environment. :param command: command to execute :param ro: {localdir: binddir} dict for read-only volumes :param rw: {localdir: binddir} dict for read-write volumes :param links: links passed to start :param image: docker image name to use :param volumes_from: :param commit: True to create a new image based on result :param clean_up: True to remove container even on error :param stream_output: file to write stderr+stdout from command :param entrypoint: override entrypoint (script that runs command) :returns: image id if commit=True """
binds = ro_rw_to_binds(ro, rw)
c = _get_docker().create_container(
    image=image,
    command=command,
    volumes=binds_to_volumes(binds),
    detach=False,
    host_config=_get_docker().create_host_config(
        binds=binds,
        volumes_from=volumes_from,
        links=links),
    entrypoint=entrypoint)
_get_docker().start(
    container=c['Id'],
)
if stream_output:
    for output in _get_docker().attach(
            c['Id'], stdout=True, stderr=True, stream=True):
        stream_output.write(output)
if _get_docker().wait(c['Id']):
    # Before the (potential) cleanup, grab the logs!
    logs = _get_docker().logs(c['Id'])

    if clean_up:
        remove_container(c['Id'])
    raise WebCommandError(command, c['Id'][:12], logs)
if commit:
    rval = _get_docker().commit(c['Id'])
if not remove_container(c['Id']):
    # circle ci doesn't let us remove containers, quiet the warnings
    if not environ.get('CIRCLECI', False):
        warn('failed to remove container: {0}'.format(c['Id']))
if commit:
    return rval['Id']
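A typical invocation, mirroring the `purge_data` call earlier in this section; the datadir path below is a stand-in for illustration:

# Hedged sketch -- the datadir path is a placeholder
datadir = '/home/user/.datacats/myenv'
web_command(
    command=['/scripts/purge.sh', '/project/data/files'],
    ro={scripts.get_script_path('purge.sh'): '/scripts/purge.sh'},
    rw={datadir: '/project/data'},
)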
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_container(name, image, command=None, environment=None, ro=None, rw=None, links=None, detach=True, volumes_from=None, port_bindings=None, log_syslog=False): """ Wrapper for docker create_container, start calls :param log_syslog: bool flag to redirect container's logs to host's syslog :returns: container info dict or None if container couldn't be created Raises PortAllocatedError if container couldn't start on the requested port. """
binds = ro_rw_to_binds(ro, rw)
log_config = LogConfig(type=LogConfig.types.JSON)
if log_syslog:
    log_config = LogConfig(
        type=LogConfig.types.SYSLOG,
        config={'syslog-tag': name})
host_config = _get_docker().create_host_config(
    binds=binds,
    log_config=log_config,
    links=links,
    volumes_from=volumes_from,
    port_bindings=port_bindings)
c = _get_docker().create_container(
    name=name,
    image=image,
    command=command,
    environment=environment,
    volumes=binds_to_volumes(binds),
    detach=detach,
    stdin_open=False,
    tty=False,
    ports=list(port_bindings) if port_bindings else None,
    host_config=host_config)
try:
    _get_docker().start(
        container=c['Id'],
    )
except APIError as e:
    if 'address already in use' in e.explanation:
        try:
            _get_docker().remove_container(name, force=True)
        except APIError:
            pass
        raise PortAllocatedError()
    raise
return c
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_container(name, force=False): """ Wrapper for docker remove_container :returns: True if container was found and removed """
try:
    if not force:
        _get_docker().stop(name)
except APIError:
    pass
try:
    _get_docker().remove_container(name, force=True)
    return True
except APIError:
    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def container_logs(name, tail, follow, timestamps): """ Wrapper for docker logs, attach commands. """
if follow:
    return _get_docker().attach(
        name,
        stdout=True,
        stderr=True,
        stream=True
    )
# use the shared client accessor for consistency with the rest of
# this module (the original referenced a bare `_docker` here)
return _get_docker().logs(
    name,
    stdout=True,
    stderr=True,
    tail=tail,
    timestamps=timestamps,
)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def collect_logs(name): """ Returns a string representation of the logs from a container. This is similar to container_logs but uses the `follow` option and flattens the logs into a string instead of a generator. :param name: The container name to grab logs for :return: A string representation of the logs """
logs = container_logs(name, "all", True, None)
string = ""
for s in logs:
    string += s
return string
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pull_stream(image): """ Return generator of pull status objects """
return (json.loads(s) for s in _get_docker().pull(image, stream=True))
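Consuming the generator might look like this; the `'status'` key is standard in Docker pull progress objects, though treat it as an assumption here:

for status in pull_stream('datacats/web'):
    # each item is a decoded JSON progress object from the Docker API
    print status.get('status', '')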
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def data_only_container(name, volumes): """ create "data-only container" if it doesn't already exist. We'd like to avoid these, but postgres + boot2docker make it difficult, see issue #5 """
info = inspect_container(name)
if info:
    return

c = _get_docker().create_container(
    name=name,
    image='datacats/postgres',  # any image will do
    command='true',
    volumes=volumes,
    detach=True)
return c
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(): """ The main entry point for datacats cli tool (as defined in setup.py's entry_points) It parses the cli arguments for corresponding options and runs the corresponding command """
# pylint: disable=bare-except
try:
    command_fn, opts = _parse_arguments(sys.argv[1:])
    # purge handles loading differently
    # 1 - Bail and just call the command if it doesn't have ENVIRONMENT.
    if command_fn == purge.purge or 'ENVIRONMENT' not in opts:
        return command_fn(opts)

    environment = Environment.load(
        opts['ENVIRONMENT'] or '.',
        opts['--site'] if '--site' in opts else 'primary')

    if command_fn not in COMMANDS_THAT_USE_SSH:
        return command_fn(environment, opts)

    # for commands that communicate with a remote server
    # we load UserProfile and test our communication
    user_profile = UserProfile()
    user_profile.test_ssh_key(environment)

    return command_fn(environment, opts, user_profile)
except DatacatsError as e:
    _error_exit(e)
except SystemExit:
    raise
except:
    exc_info = "\n".join([line.rstrip() for line in
                          traceback.format_exception(*sys.exc_info())])
    user_message = ("Something that should not"
                    " have happened happened when attempting"
                    " to run this command:\n"
                    "     datacats {args}\n\n"
                    "It seems to be a bug.\n"
                    "Please report this issue to us by"
                    " creating an issue ticket at\n\n"
                    "     https://github.com/datacats/datacats/issues\n\n"
                    "so that we can look into it"
                    " and fix the issue."
                    ).format(args=" ".join(sys.argv[1:]))
    _error_exit(DatacatsError(user_message,
                              parent_exception=UndocumentedError(exc_info)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(environment, opts): """Create containers and start serving environment Usage: datacats start [-b] [--site-url SITE_URL] [-p|--no-watch] [-s NAME] [-i] [--syslog] [--address=IP] [ENVIRONMENT [PORT]] datacats start -r [-b] [--site-url SITE_URL] [-s NAME] [--syslog] [-i] [--address=IP] [ENVIRONMENT] Options: --address=IP Address to listen on (Linux-only) -b --background Don't wait for response from web server --no-watch Do not automatically reload templates and .py files on change -i --interactive Calls out to docker via the command line, allowing for interactivity with the web image. -p --production Start with apache and debug=false -s --site=NAME Specify a site to start [default: primary] --syslog Log to the syslog --site-url SITE_URL The site_url to use in API responses. Defaults to old setting or will attempt to determine it. (e.g. http://example.org:{port}/) ENVIRONMENT may be an environment name or a path to an environment directory. Default: '.' """
environment.require_data()
if environment.fully_running():
    print 'Already running at {0}'.format(environment.web_address())
    return
reload_(environment, opts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reload_(environment, opts): """Reload environment source and configuration Usage: datacats reload [-b] [-p|--no-watch] [--syslog] [-s NAME] [--site-url=SITE_URL] [-i] [--address=IP] [ENVIRONMENT [PORT]] datacats reload -r [-b] [--syslog] [-s NAME] [--address=IP] [--site-url=SITE_URL] [-i] [ENVIRONMENT] Options: --address=IP Address to listen on (Linux-only) -i --interactive Calls out to docker via the command line, allowing for interactivity with the web image. --site-url=SITE_URL The site_url to use in API responses. Can use Python template syntax to insert the port and address (e.g. http://example.org:{port}/) -b --background Don't wait for response from web server --no-watch Do not automatically reload templates and .py files on change -p --production Reload with apache and debug=false -s --site=NAME Specify a site to reload [default: primary] --syslog Log to the syslog ENVIRONMENT may be an environment name or a path to an environment directory. Default: '.' """
if opts['--interactive']:
    # We can't wait for the server if we're tty'd
    opts['--background'] = True

if opts['--address'] and is_boot2docker():
    raise DatacatsError('Cannot specify address on boot2docker.')

environment.require_data()
environment.stop_ckan()

if opts['PORT'] or opts['--address'] or opts['--site-url']:
    if opts['PORT']:
        environment.port = int(opts['PORT'])
    if opts['--address']:
        environment.address = opts['--address']
    if opts['--site-url']:
        site_url = opts['--site-url']
        # TODO: Check it against a regex or use urlparse
        try:
            site_url = site_url.format(address=environment.address,
                                       port=environment.port)
            environment.site_url = site_url
            environment.save_site(False)
        except (KeyError, IndexError, ValueError) as e:
            raise DatacatsError('Could not parse site_url: {}'.format(e))
    environment.save()

for container in environment.extra_containers:
    require_extra_image(EXTRA_IMAGE_MAPPING[container])

environment.stop_supporting_containers()
environment.start_supporting_containers()

environment.start_ckan(
    production=opts['--production'],
    paster_reload=not opts['--no-watch'],
    log_syslog=opts['--syslog'],
    interactive=opts['--interactive'])

write('Starting web server at {0} ...'.format(environment.web_address()))
if opts['--background']:
    write('\n')
    return

try:
    environment.wait_for_web_available()
finally:
    write('\n')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def info(environment, opts): """Display information about environment and running containers Usage: datacats info [-qr] [ENVIRONMENT] Options: -q --quiet Echo only the web URL or nothing if not running ENVIRONMENT may be an environment name or a path to an environment directory. Default: '.' """
damaged = False
sites = environment.sites
if not environment.sites:
    sites = []
    damaged = True

if opts['--quiet']:
    if damaged:
        raise DatacatsError('Damaged datadir: cannot get address.')
    for site in sites:
        environment.site_name = site
        print '{}: {}'.format(site, environment.web_address())
    return

datadir = environment.datadir
if not environment.data_exists():
    datadir = ''
elif damaged:
    datadir += ' (damaged)'

print 'Environment name: ' + environment.name
print ' Environment dir: ' + environment.target
print '        Data dir: ' + datadir
print '           Sites: ' + ' '.join(environment.sites)

for site in environment.sites:
    print
    environment.site_name = site
    print '            Site: ' + site
    print '      Containers: ' + ' '.join(environment.containers_running())
    sitedir = environment.sitedir + (
        ' (damaged)' if not environment.data_complete() else '')
    print '        Site dir: ' + sitedir
    addr = environment.web_address()
    if addr:
        print '    Available at: ' + addr
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def logs(environment, opts): """Display or follow container logs Usage: datacats logs [--postgres | --solr | --datapusher] [-s NAME] [-tr] [--tail=LINES] [ENVIRONMENT] datacats logs -f [--postgres | --solr | --datapusher] [-s NAME] [-r] [ENVIRONMENT] Options: --datapusher Show logs for datapusher instead of web logs --postgres Show postgres database logs instead of web logs -f --follow Follow logs instead of exiting immediately --solr Show solr search logs instead of web logs -t --timestamps Add timestamps to log lines -s --site=NAME Specify a site for logs if needed [default: primary] --tail=LINES Number of lines to show [default: all] ENVIRONMENT may be an environment name or a path to an environment directory. Default: '.' """
container = 'web'
if opts['--solr']:
    container = 'solr'
if opts['--postgres']:
    container = 'postgres'
if opts['--datapusher']:
    container = 'datapusher'

tail = opts['--tail']
if tail != 'all':
    tail = int(tail)

l = environment.logs(container, tail, opts['--follow'],
                     opts['--timestamps'])
if not opts['--follow']:
    print l
    return
try:
    for message in l:
        write(message)
except KeyboardInterrupt:
    print
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def open_(environment, opts): # pylint: disable=unused-argument """Open web browser window to this environment Usage: datacats open [-r] [-s NAME] [ENVIRONMENT] Options: -s --site=NAME Choose a site to open [default: primary] ENVIRONMENT may be an environment name or a path to an environment directory. Default: '.' """
environment.require_data()
addr = environment.web_address()
if not addr:
    print "Site not currently running"
else:
    webbrowser.open(addr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tweak(environment, opts): """Commands operating on environment data Usage: datacats tweak --install-postgis [ENVIRONMENT] datacats tweak --add-redis [ENVIRONMENT] datacats tweak --admin-password [ENVIRONMENT] Options: --install-postgis Install postgis in ckan database --add-redis Adds redis next time this environment reloads -s --site=NAME Choose a site to tweak [default: primary] -p --admin-password Prompt to change the admin password ENVIRONMENT may be an environment name or a path to an environment directory. Default: '.' """
environment.require_data()
if opts['--install-postgis']:
    print "Installing postgis"
    environment.install_postgis_sql()

if opts['--add-redis']:
    # Let the user know if they are trying to add it and it is already there
    print ('Adding redis extra container... Please note that you will have '
           'to reload your environment for these changes to take effect '
           '("datacats reload {}")'.format(environment.name))
    environment.add_extra_container('redis', error_on_exists=True)

if opts['--admin-password']:
    environment.create_admin_set_password(confirm_password())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_history_by_flight_number(self, flight_number, page=1, limit=100): """Fetch the history of a flight by its number. This method can be used to get the history of a flight route by the number. It checks the user authentication and returns the data accordingly. Args: flight_number (str): The flight number, e.g. AI101 page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A list of dicts with the data; one dict for each row of data from flightradar24 Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_history_by_flight_number('AI101') f.get_history_by_flight_number('AI101',page=1,limit=10) """
url = FLT_BASE.format(flight_number, str(self.AUTH_TOKEN), page, limit)
return self._fr24.get_data(url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_history_by_tail_number(self, tail_number, page=1, limit=100): """Fetch the history of a particular aircraft by its tail number. This method can be used to get the history of a particular aircraft by its tail number. It checks the user authentication and returns the data accordingly. Args: tail_number (str): The tail number, e.g. VT-ANL page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A list of dicts with the data; one dict for each row of data from flightradar24 Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_history_by_tail_number('VT-ANL') f.get_history_by_tail_number('VT-ANL',page=1,limit=10) """
url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)
return self._fr24.get_data(url, True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_airports(self, country): """Returns a list of all the airports For a given country this returns a list of dicts, one for each airport, with information like the iata code of the airport etc Args: country (str): The country for which the airports will be fetched Example:: from pyflightdata import FlightData f=FlightData() f.get_airports('India') """
url = AIRPORT_BASE.format(country.replace(" ", "-"))
return self._fr24.get_airports_data(url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_info_by_tail_number(self, tail_number, page=1, limit=100): """Fetch the details of a particular aircraft by its tail number. This method can be used to get the details of a particular aircraft by its tail number. Details include the serial number, age etc along with links to the images of the aircraft. It checks the user authentication and returns the data accordingly. Args: tail_number (str): The tail number, e.g. VT-ANL page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A list of dicts with the data; one dict for each row of data from flightradar24 Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_info_by_tail_number('VT-ANL') f.get_info_by_tail_number('VT-ANL',page=1,limit=10) """
url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)
return self._fr24.get_aircraft_data(url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_fleet(self, airline_key): """Get the fleet for a particular airline. Given an airline code from the get_airlines() method output, this method returns the fleet for the airline. Args: airline_key (str): The code for the airline on flightradar24 Returns: A list of dicts, one for each aircraft in the airlines fleet Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_fleet('ai-aic') """
url = AIRLINE_FLEET_BASE.format(airline_key) return self._fr24.get_airline_fleet_data(url, self.AUTH_TOKEN != '')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_flights(self, search_key): """Get the flights for a particular airline. Given a full or partial flight number string, this method returns the first 100 flights matching that string. Please note this method was different in earlier versions. The older versions took an airline code and returned all scheduled flights for that airline. Args: search_key (str): Full or partial flight number for any airline e.g. MI47 to get all SilkAir flights starting with MI47 Returns: A list of dicts, one for each scheduled flight in the airline's network Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_flights('MI47') """
# assume limit 100 to return first 100 of any wild card search url = AIRLINE_FLT_BASE.format(search_key, 100) return self._fr24.get_airline_flight_data(url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_flights_from_to(self, origin, destination): """Get the flights for a particular origin and destination. Given an origin and destination this method returns the upcoming scheduled flights between these two points. The data returned has the airline, airport and schedule information - this is subject to change in future. Args: origin (str): The origin airport code destination (str): The destination airport code Returns: A list of dicts, one for each scheduled flight between the two points. Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_flights_from_to('SIN','HYD') """
# assume limit 100 to return first 100 of any wild card search url = AIRLINE_FLT_BASE_POINTS.format(origin, destination) return self._fr24.get_airline_flight_data(url, by_airports=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_airport_weather(self, iata, page=1, limit=100): """Retrieve the weather at an airport. Given the IATA code of an airport, this method returns the weather information. Args: iata (str): The IATA code for an airport, e.g. HYD page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A list of dicts with the data; one dict for each row of data from flightradar24 Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_airport_weather('HYD') f.get_airport_weather('HYD',page=1,limit=10) """
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit) weather = self._fr24.get_airport_weather(url) mi = weather['sky']['visibility']['mi'] if (mi is not None) and (mi != "None"): mi = float(mi) km = mi * 1.6094 weather['sky']['visibility']['km'] = km return weather
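The statute-mile to kilometre conversion applied above is easy to check in isolation. Below is a minimal sketch with made-up visibility values, mimicking the "None" string sentinel the API response can contain; the readings are hypothetical, not real flightradar24 data.

# Hypothetical visibility readings in statute miles; the string "None"
# mimics the sentinel sometimes present in the API response.
for mi in [None, "None", "6.0", "10"]:
    if (mi is not None) and (mi != "None"):
        km = float(mi) * 1.6094  # 1 statute mile is roughly 1.6094 km
        print(mi, "mi ->", round(km, 2), "km")
    else:
        print(mi, "-> no visibility data")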
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_airport_metars(self, iata, page=1, limit=100): """Retrieve the metar data at the current time. Given the IATA code of an airport, this method returns the metar information. Args: iata (str): The IATA code for an airport, e.g. HYD page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: The metar data for the airport Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_airport_metars('HYD') """
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit) w = self._fr24.get_airport_weather(url) return w['metar']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_airport_metars_hist(self, iata): """Retrieve the metar data for the past 72 hours. The data will not be parsed into a readable format. Given the IATA code of an airport, this method returns the metar information for the last 72 hours. Args: iata (str): The IATA code for an airport, e.g. HYD Returns: The metar data for the airport Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_airport_metars_hist('HYD') """
url = AIRPORT_BASE.format(iata) + "/weather" return self._fr24.get_airport_metars_hist(url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_airport_stats(self, iata, page=1, limit=100): """Retrieve the performance statistics at an airport. Given the IATA code of an airport, this method returns the performance statistics for the airport. Args: iata (str): The IATA code for an airport, e.g. HYD page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A list of dicts with the data; one dict for each row of data from flightradar24 Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_airport_stats('HYD') f.get_airport_stats('HYD',page=1,limit=10) """
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit) return self._fr24.get_airport_stats(url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_airport_details(self, iata, page=1, limit=100): """Retrieve the details of an airport. Given the IATA code of an airport, this method returns the detailed information like lat lon, full name, URL, codes etc. Args: iata (str): The IATA code for an airport, e.g. HYD page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A list of dicts with the data; one dict for each row of data from flightradar24 Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_airport_details('HYD') f.get_airport_details('HYD',page=1,limit=10) """
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit) details = self._fr24.get_airport_details(url) weather = self._fr24.get_airport_weather(url) # weather has more correct and standard elevation details in feet and meters details['position']['elevation'] = weather['elevation'] return details
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_images_by_tail_number(self, tail_number, page=1, limit=100): """Fetch the images of a particular aircraft by its tail number. This method can be used to get the images of the aircraft. The images are in 3 sizes and you can use what suits your need. Args: tail_number (str): The tail number, e.g. VT-ANL page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A dict with the images of the aircraft in various sizes Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_images_by_tail_number('VT-ANL') f.get_images_by_tail_number('VT-ANL',page=1,limit=10) """
url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit) return self._fr24.get_aircraft_image_data(url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def login(self, email, password): """Login to the flightradar24 session. The API currently uses flightradar24 as the primary data source. The site provides different levels of data based on user plans. For users who have signed up for a plan, this method allows logging in with the credentials from flightradar24. The API obtains a token that will be passed on all the requests; this obtains the data as per the plan limits. Args: email (str): The email ID which is used to log in to flightradar24 password (str): The password for the user ID Example:: from pyflightdata import FlightData f=FlightData() f.login(myemail,mypassword) """
response = FlightData.session.post( url=LOGIN_URL, data={ 'email': email, 'password': password, 'remember': 'true', 'type': 'web' }, headers={ 'Origin': 'https://www.flightradar24.com', 'Referer': 'https://www.flightradar24.com', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0' } ) response = self._fr24.json_loads_byteified( response.content) if response.status_code == 200 else None if response: token = response['userData']['subscriptionKey'] self.AUTH_TOKEN = token
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decode_metar(self, metar): """ Simple method that decodes a given metar string. Args: metar (str): The metar data Returns: The metar data in readable format Example:: from pyflightdata import FlightData f=FlightData() f.decode_metar('WSSS 181030Z 04009KT 010V080 9999 FEW018TCU BKN300 29/22 Q1007 NOSIG') """
try: from metar import Metar except ImportError: return "Unable to parse metars. Please install parser from https://github.com/tomp/python-metar." m = Metar.Metar(metar) return m.string()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _perform_radius_auth(self, client, packet): """ Perform the actual radius authentication by passing the given packet to the server which `client` is bound to. Returns True or False depending on whether the user is authenticated successfully. """
try: reply = client.SendPacket(packet) except Timeout as e: logging.error("RADIUS timeout occurred contacting %s:%s" % ( client.server, client.authport)) return False except Exception as e: logging.error("RADIUS error: %s" % e) return False if reply.code == AccessReject: logging.warning("RADIUS access rejected for user '%s'" % ( packet['User-Name'])) return False elif reply.code != AccessAccept: logging.error("RADIUS access error for user '%s' (code %s)" % ( packet['User-Name'], reply.code)) return False logging.info("RADIUS access granted for user '%s'" % ( packet['User-Name'])) return True
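For context, here is a rough sketch of how the client and packet handed to _perform_radius_auth might be built with the pyrad library. The server address, shared secret, dictionary path, and credentials below are placeholders, and the exact attribute names depend on your RADIUS dictionary file.

from pyrad.client import Client
from pyrad.dictionary import Dictionary
from pyrad import packet

# Placeholder RADIUS server details -- replace with your own setup.
client = Client(server="127.0.0.1", secret=b"testing123",
                dict=Dictionary("dictionary"))
client.timeout = 3

pkt = client.CreateAuthPacket(code=packet.AccessRequest,
                              User_Name="alice")
pkt["User-Password"] = pkt.PwCrypt("s3cret")  # PAP-encrypt the password
# The backend would then call self._perform_radius_auth(client, pkt).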
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def authenticate(self, request, username=None, password=None): """ Check credentials against RADIUS server and return a User object or None. """
if isinstance(username, basestring): username = username.encode('utf-8') if isinstance(password, basestring): password = password.encode('utf-8') server = self._get_server_from_settings() result = self._radius_auth(server, username, password) if result: return self.get_django_user(username, password) return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def move(self, dst): "Closes then moves the file to dst." self.close() shutil.move(self.path, dst)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sigma_clipping(date, mag, err, threshold=3, iteration=1): """ Remove outlying data points by sigma-clipping on magnitude. Parameters date : array_like An array of dates. mag : array_like An array of magnitudes. err : array_like An array of magnitude errors. threshold : float, optional Threshold for sigma-clipping. iteration : int, optional The number of iterations. Returns ------- date : array_like Sigma-clipped dates. mag : array_like Sigma-clipped magnitudes. err : array_like Sigma-clipped magnitude errors. """
# Check length. if (len(date) != len(mag)) \ or (len(date) != len(err)) \ or (len(mag) != len(err)): raise RuntimeError('The length of date, mag, and err must be the same.') # By magnitudes for i in range(int(iteration)): mean = np.median(mag) std = np.std(mag) index = (mag >= mean - threshold*std) & (mag <= mean + threshold*std) date = date[index] mag = mag[index] err = err[index] return date, mag, err
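A quick way to sanity-check the clipping is synthetic data with one obvious outlier; with threshold=3 the 22.0 magnitude falls outside median +/- 3*std and is removed. A self-contained sketch:

import numpy as np

date = np.arange(10, dtype=float)
mag = np.array([15.0, 15.1, 14.9, 15.0, 22.0,  # 22.0 is the outlier
                15.2, 14.8, 15.0, 15.1, 14.9])
err = np.full(10, 0.05)

d, m, e = sigma_clipping(date, mag, err, threshold=3, iteration=1)
print(len(mag), '->', len(m))  # 10 -> 9; the outlier is gone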
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_spec(spec): """Return a schema object from a spec. A spec is either a string for a scalar type, a list of 0 or 1 specs for a list schema, or a spec with optional 'fields' and 'required' entries for a tuple schema. """
if spec == '': return any_schema if framework.is_str(spec): # Scalar type if spec not in SCALAR_TYPES: raise exceptions.SchemaError('Not a valid schema type: %r' % spec) return ScalarSchema(spec) if framework.is_list(spec): return ListSchema(spec[0] if len(spec) else any_schema) if framework.is_tuple(spec): return TupleSchema(spec.get('fields', {}), spec.get('required', [])) raise exceptions.SchemaError('Not valid schema spec; %r' % spec)
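Based on the branches above, a few illustrative spec shapes and the schema objects they would map to (a sketch; the exact contents of SCALAR_TYPES are defined elsewhere in the module):

from_spec('')       # -> any_schema; anything validates
from_spec('int')    # -> ScalarSchema('int'), assuming 'int' is in SCALAR_TYPES
from_spec(['int'])  # -> ListSchema with an 'int' element schema
from_spec([])       # -> ListSchema whose elements may be anything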
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate(obj, schema): """Validate an object according to its own AND an externally imposed schema."""
if not framework.EvaluationContext.current().validate: # Short circuit evaluation when disabled return obj # Validate returned object according to its own schema if hasattr(obj, 'tuple_schema'): obj.tuple_schema.validate(obj) # Validate object according to externally imposed schema if schema: schema.validate(obj) return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def attach(obj, schema): """Attach the given schema to the given object."""
# We have a silly exception for lists, since they have no 'attach_schema' # method, and I don't feel like making a subclass for List just to add it. # So, we recursively search the list for tuples and attach the schema in # there. if framework.is_list(obj) and isinstance(schema, ListSchema): for x in obj: attach(x, schema.element_schema) return # Otherwise, the object should be able to handle its own schema attachment. getattr(obj, 'attach_schema', nop)(schema)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_feature_set_all(): """ Return the full list of features. The set contains every feature, regardless of whether it is used to train a model or to predict a class. Returns ------- feature_names : list A list of features' names. """
features = get_feature_set() features.append('cusum') features.append('eta') features.append('n_points') features.append('period_SNR') features.append('period_log10FAP') features.append('period_uncertainty') features.append('weighted_mean') features.append('weighted_std') features.sort() return features
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parameters(self): """ A property that returns all of the model's parameters. """
parameters = [] for hl in self.hidden_layers: parameters.extend(hl.parameters) parameters.extend(self.top_layer.parameters) return parameters
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parameters(self, value): """ Used to set all of the model's parameters to new values. **Parameters:** value : array_like New values for the model parameters. Must be of length ``self.n_parameters``. """
if len(value) != self.n_parameters: raise ValueError("Incorrect length of parameter vector. " "Model has %d parameters, but got %d" % (self.n_parameters, len(value))) i = 0 for hl in self.hidden_layers: hl.parameters = value[i:i + hl.n_parameters] i += hl.n_parameters self.top_layer.parameters = value[-self.top_layer.n_parameters:]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def checksum(self): """ Returns an MD5 digest of the model. This can be used to easily identify whether two models have the same architecture. """
m = md5() for hl in self.hidden_layers: m.update(str(hl.architecture)) m.update(str(self.top_layer.architecture)) return m.hexdigest()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def evaluate(self, input_data, targets, return_cache=False, prediction=True): """ Evaluate the loss function without computing gradients. **Parameters:** input_data : GPUArray Data to evaluate targets: GPUArray Targets return_cache : bool, optional Whether to return intermediary variables from the computation and the hidden activations. prediction : bool, optional Whether to use prediction model. Only relevant when using dropout. If true, then weights are multiplied by 1 - dropout if the layer uses dropout. **Returns:** loss : float The value of the loss function. hidden_cache : list, only returned if ``return_cache == True`` Cache as returned by :meth:`hebel.models.NeuralNet.feed_forward`. activations : list, only returned if ``return_cache == True`` Hidden activations as returned by :meth:`hebel.models.NeuralNet.feed_forward`. """
# Forward pass activations, hidden_cache = self.feed_forward( input_data, return_cache=True, prediction=prediction) loss = self.top_layer.train_error(None, targets, average=False, cache=activations, prediction=prediction) for hl in self.hidden_layers: if hl.l1_penalty_weight: loss += hl.l1_penalty if hl.l2_penalty_weight: loss += hl.l2_penalty if self.top_layer.l1_penalty_weight: loss += self.top_layer.l1_penalty if self.top_layer.l2_penalty_weight: loss += self.top_layer.l2_penalty if not return_cache: return loss else: return loss, hidden_cache, activations
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def training_pass(self, input_data, targets): """ Perform a full forward and backward pass through the model. **Parameters:** input_data : GPUArray Data to train the model with. targets : GPUArray Training targets. **Returns:** loss : float Value of loss function as evaluated on the data and targets. gradients : list of GPUArray Gradients obtained from backpropagation in the backward pass. """
# Forward pass loss, hidden_cache, logistic_cache = self.evaluate( input_data, targets, return_cache=True, prediction=False) if not np.isfinite(loss): raise ValueError('Infinite activations!') # Backpropagation if self.hidden_layers: hidden_activations = hidden_cache[-1][0] else: hidden_activations = input_data df_top_layer = \ self.top_layer.backprop(hidden_activations, targets, cache=logistic_cache) gradients = list(df_top_layer[0][::-1]) df_hidden = df_top_layer[1] if self.hidden_layers: hidden_inputs = [input_data] + [c[0] for c in hidden_cache[:-1]] for hl, hc, hi in \ zip(self.hidden_layers[::-1], hidden_cache[::-1], hidden_inputs[::-1]): g, df_hidden = hl.backprop(hi, df_hidden, cache=hc) gradients.extend(g[::-1]) gradients.reverse() return loss, gradients
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def feed_forward(self, input_data, return_cache=False, prediction=True): """ Run data forward through the model. **Parameters:** input_data : GPUArray Data to run through the model. return_cache : bool, optional Whether to return the intermediary results. prediction : bool, optional Whether to run in prediction mode. Only relevant when using dropout. If true, weights are multiplied by 1 - dropout. If false, then half of hidden units are randomly dropped and the dropout mask is returned in case ``return_cache==True``. **Returns:** prediction : GPUArray Predictions from the model. cache : list of GPUArray, only returned if ``return_cache == True`` Results of intermediary computations. """
hidden_cache = None # Create variable in case there are no hidden layers if self.hidden_layers: # Forward pass hidden_cache = [] for i in range(len(self.hidden_layers)): hidden_activations = hidden_cache[i - 1][0] if i else input_data # Use dropout predict if previous layer has dropout hidden_cache.append(self.hidden_layers[i] .feed_forward(hidden_activations, prediction=prediction)) hidden_activations = hidden_cache[-1][0] else: hidden_activations = input_data # Use dropout_predict if last hidden layer has dropout activations = \ self.top_layer.feed_forward(hidden_activations, prediction=False) if return_cache: return activations, hidden_cache return activations
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shallow_run(self): """Derive features that are not period-based."""
# Number of data points self.n_points = len(self.date) # Weight calculation. # All zero values. if not self.err.any(): self.err = np.ones(len(self.mag)) * np.std(self.mag) # Some zero values. elif not self.err.all(): np.putmask(self.err, self.err==0, np.median(self.err)) self.weight = 1. / self.err self.weighted_sum = np.sum(self.weight) # Simple statistics, mean, median and std. self.mean = np.mean(self.mag) self.median = np.median(self.mag) self.std = np.std(self.mag) # Weighted mean and std. self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \ * self.weight) / self.weighted_sum) # Skewness and kurtosis. self.skewness = ss.skew(self.mag) self.kurtosis = ss.kurtosis(self.mag) # Normalization-test. Shapiro-Wilk test. shapiro = ss.shapiro(self.mag) self.shapiro_w = shapiro[0] # self.shapiro_log10p = np.log10(shapiro[1]) # Percentile features. self.quartile31 = np.percentile(self.mag, 75) \ - np.percentile(self.mag, 25) # Stetson K. self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err) # Ratio between higher and lower amplitude than average. self.hl_amp_ratio = self.half_mag_amplitude_ratio( self.mag, self.median, self.weight) # This second function's value is very similar with the above one. # self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2( # self.mag, self.median) # Cusum self.cusum = self.get_cusum(self.mag) # Eta self.eta = self.get_eta(self.mag, self.weighted_std)
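The weighted mean and weighted standard deviation computed above can be reproduced with plain numpy; a small self-contained check (with made-up magnitudes) where the low-error points dominate the result:

import numpy as np

mag = np.array([15.0, 15.2, 14.9, 15.1])
err = np.array([0.02, 0.10, 0.03, 0.05])

weight = 1.0 / err
weighted_sum = weight.sum()
weighted_mean = np.sum(mag * weight) / weighted_sum
weighted_std = np.sqrt(np.sum((mag - weighted_mean) ** 2 * weight) / weighted_sum)
print(weighted_mean, weighted_std)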
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def deep_run(self): """Derive period-based features."""
# Lomb-Scargle period finding. self.get_period_LS(self.date, self.mag, self.n_threads, self.min_period) # Features based on a phase-folded light curve # such as Eta, slope-percentile, etc. # Should be called after the getPeriodLS() is called. # Created phased a folded light curve. # We use period * 2 to take eclipsing binaries into account. phase_folded_date = self.date % (self.period * 2.) sorted_index = np.argsort(phase_folded_date) folded_date = phase_folded_date[sorted_index] folded_mag = self.mag[sorted_index] # phase Eta self.phase_eta = self.get_eta(folded_mag, self.weighted_std) # Slope percentile. self.slope_per10, self.slope_per90 = \ self.slope_percentile(folded_date, folded_mag) # phase Cusum self.phase_cusum = self.get_cusum(folded_mag)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_period_LS(self, date, mag, n_threads, min_period): """ Period finding using the Lomb-Scargle algorithm. Finding two periods. The second period is estimated after whitening the first period. Calculating various other features as well using derived periods. Parameters date : array_like An array of observed date, in days. mag : array_like An array of observed magnitude. n_threads : int The number of threads to use. min_period : float The minimum period to calculate. """
# DO NOT CHANGE THESE PARAMETERS. oversampling = 3. hifac = int((max(date) - min(date)) / len(date) / min_period * 2.) # Minimum hifac if hifac < 100: hifac = 100 # Lomb-Scargle. fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac, n_threads) self.f = fx[jmax] self.period = 1. / self.f self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax) self.period_log10FAP = \ np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax]) # self.f_SNR1 = fy[jmax] / np.median(fy) self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy) # Fit Fourier Series of order 3. order = 3 # Initial guess of Fourier coefficients. p0 = np.ones(order * 2 + 1) date_period = (date % self.period) / self.period p1, success = leastsq(self.residuals, p0, args=(date_period, mag, order)) # fitted_y = self.FourierSeries(p1, date_period, order) # print p1, self.mean, self.median # plt.plot(date_period, self.mag, 'b+') # plt.show() # Derive Fourier features for the first period. # Petersen, J. O., 1986, A&A self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2) self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude self.f_phase = np.arctan(-p1[1] / p1[2]) self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase """ # Derive a second period. # Whitening a light curve. residual_mag = mag - fitted_y # Lomb-Scargle again to find the second period. omega_top, power_top = search_frequencies(date, residual_mag, err, #LS_kwargs={'generalized':True, 'subtract_mean':True}, n_eval=5000, n_retry=3, n_save=50) self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0] self.f2 = 1. / self.period2 self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \ * (len(self.date) - 1) / 2. # Fit Fourier Series again. p0 = [1.] * order * 2 date_period = (date % self.period) / self.period p2, success = leastsq(self.residuals, p0, args=(date_period, residual_mag, order)) fitted_y = self.FourierSeries(p2, date_period, order) #plt.plot(date%self.period2, residual_mag, 'b+') #plt.show() # Derive Fourier features for the first second. self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2) self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp self.f2_phase = np.arctan(-p2[1] / p2[2]) self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase # Calculate features using the first and second periods. self.f12_ratio = self.f2 / self.f1 self.f12_remain = self.f1 % self.f2 \ if self.f1 > self.f2 else self.f2 % self.f1 self.f12_amp = self.f2_amp / self.f1_amp self.f12_phase = self.f2_phase - self.f1_phase """
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_period_uncertainty(self, fx, fy, jmax, fx_width=100): """ Get the uncertainty of a period. The uncertainty is defined as the half width of the range of frequencies around the peak whose power drops below the average plus one standard deviation of the power spectrum. Since we may not have fine resolution around the peak, we do not assume it is Gaussian, so no scaling factor of 2.355 (= 2 * sqrt(2 * ln2)) is applied. Parameters fx : array_like An array of frequencies. fy : array_like An array of amplitudes. jmax : int An index at the peak frequency. fx_width : int, optional Width of power spectrum to calculate uncertainty. Returns ------- p_uncertain : float Period uncertainty. """
# Get subset start_index = jmax - fx_width end_index = jmax + fx_width if start_index < 0: start_index = 0 if end_index > len(fx) - 1: end_index = len(fx) - 1 fx_subset = fx[start_index:end_index] fy_subset = fy[start_index:end_index] fy_mean = np.median(fy_subset) fy_std = np.std(fy_subset) # Find peak max_index = np.argmax(fy_subset) # Find list whose powers become lower than average + std. index = np.where(fy_subset <= fy_mean + fy_std)[0] # Find the edge at left and right. This is the full width. left_index = index[(index < max_index)] if len(left_index) == 0: left_index = 0 else: left_index = left_index[-1] right_index = index[(index > max_index)] if len(right_index) == 0: right_index = len(fy_subset) - 1 else: right_index = right_index[0] # We assume the half of the full width is the period uncertainty. half_width = (1. / fx_subset[left_index] - 1. / fx_subset[right_index]) / 2. period_uncertainty = half_width return period_uncertainty
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def residuals(self, pars, x, y, order): """ Residual of Fourier Series. Parameters pars : array_like Fourier series parameters. x : array_like An array of date. y : array_like An array of true values to fit. order : int An order of Fourier Series. """
return y - self.fourier_series(pars, x, order)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fourier_series(self, pars, x, order): """ Function to fit Fourier Series. Parameters x : array_like An array of date divided by period. It doesn't need to be sorted. pars : array_like Fourier series parameters. order : int An order of Fourier series. """
sum = pars[0] for i in range(order): sum += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \ + pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x) return sum
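Together with residuals above, this is exactly the shape of least-squares fit performed in get_period_LS. Below is a self-contained sketch on synthetic phased data, with the methods rewritten as plain functions; the coefficient values are made up for illustration:

import numpy as np
from scipy.optimize import leastsq

def fourier_series(pars, x, order):
    total = pars[0]
    for i in range(order):
        total += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
            + pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
    return total

def residuals(pars, x, y, order):
    return y - fourier_series(pars, x, order)

order = 3
phase = np.linspace(0., 1., 200)
true_pars = np.array([15., 0.3, 0.1, 0.05, 0.02, 0.01, 0.005])
mag = fourier_series(true_pars, phase, order) + np.random.normal(0., 0.01, 200)

p0 = np.ones(order * 2 + 1)
p1, success = leastsq(residuals, p0, args=(phase, mag, order))
amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)  # same derivation as get_period_LS
print(amplitude)  # should recover roughly sqrt(0.3**2 + 0.1**2)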
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_stetson_k(self, mag, avg, err): """ Return Stetson K feature. Parameters mag : array_like An array of magnitude. avg : float An average value of magnitudes. err : array_like An array of magnitude errors. Returns ------- stetson_k : float Stetson K value. """
residual = (mag - avg) / err stetson_k = np.sum(np.fabs(residual)) \ / np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag)) return stetson_k
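For reference values of this formula: a pure sinusoid gives K of about 0.90 (2*sqrt(2)/pi) and pure Gaussian noise about 0.80 (sqrt(2/pi)). A standalone numeric check of the same computation, assuming numpy:

import numpy as np

mag = 15. + 0.3 * np.sin(np.linspace(0., 20. * np.pi, 2000))
err = np.full_like(mag, 0.05)
avg = np.median(mag)

residual = (mag - avg) / err
stetson_k = np.sum(np.fabs(residual)) \
    / np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag))
print(stetson_k)  # ~0.90 for sinusoidal variability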
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_eta(self, mag, std): """ Return Eta feature. Parameters mag : array_like An array of magnitudes. std : float The standard deviation of the magnitudes. Returns ------- eta : float The value of Eta index. """
diff = mag[1:] - mag[:len(mag) - 1] eta = np.sum(diff * diff) / (len(mag) - 1.) / std / std return eta
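Eta compares the mean squared successive difference with the variance, so uncorrelated noise gives a value near 2 while a smooth, densely sampled signal gives a value near 0. A standalone check of the same formula:

import numpy as np

for mag in (np.random.normal(0., 1., 10000),              # white noise
            np.sin(np.linspace(0., 2. * np.pi, 10000))):  # smooth signal
    std = np.std(mag)
    diff = mag[1:] - mag[:-1]
    eta = np.sum(diff * diff) / (len(mag) - 1.) / std / std
    print(eta)  # ~2 for the noise, ~0 for the smooth signal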
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def slope_percentile(self, date, mag): """ Return 10% and 90% percentile of slope. Parameters date : array_like An array of phase-folded date. Sorted. mag : array_like An array of phase-folded magnitudes. Sorted by date. Returns ------- per_10 : float 10% percentile values of slope. per_90 : float 90% percentile values of slope. """
date_diff = date[1:] - date[:len(date) - 1] mag_diff = mag[1:] - mag[:len(mag) - 1] # Remove zero mag_diff. index = np.where(mag_diff != 0.) date_diff = date_diff[index] mag_diff = mag_diff[index] # Derive slope. slope = date_diff / mag_diff percentile_10 = np.percentile(slope, 10.) percentile_90 = np.percentile(slope, 90.) return percentile_10, percentile_90
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_cusum(self, mag): """ Return max - min of cumulative sum. Parameters mag : array_like An array of magnitudes. Returns ------- mm_cusum : float Max - min of cumulative sum. """
c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std return np.max(c) - np.min(c)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def init(device_id=None, random_seed=None): """Initialize Hebel. This function creates a CUDA context, CUBLAS context and initializes and seeds the pseudo-random number generator. **Parameters:** device_id : integer, optional The ID of the GPU device to use. If this is omitted, PyCUDA's default context is used, which by default uses the fastest available device on the system. Alternatively, you can put the device id in the environment variable ``CUDA_DEVICE`` or into the file ``.cuda-device`` in the user's home directory. random_seed : integer, optional The seed to use for the pseudo-random number generator. If this is omitted, the seed is taken from the environment variable ``RANDOM_SEED`` and if that is not defined, a random integer is used as a seed. """
if device_id is None: device_id = _os.environ.get('CUDA_DEVICE') if random_seed is None: random_seed = _os.environ.get('RANDOM_SEED') global is_initialized if not is_initialized: is_initialized = True global context context.init_context(device_id) from pycuda import gpuarray, driver, curandom # Initialize memory pool global memory_pool memory_pool.init() # Initialize PRG global sampler sampler.set_seed(random_seed) # Initialize pycuda_ops from hebel import pycuda_ops pycuda_ops.init()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def inflate_context_tuple(ast_rootpath, root_env): """Instantiate a Tuple from a TupleNode. Walking the AST tree upwards, evaluate from the root down again. """
with util.LogTime('inflate_context_tuple'): # We only need to look at tuple members going down. inflated = ast_rootpath[0].eval(root_env) current = inflated env = root_env try: for node in ast_rootpath[1:]: if is_tuple_member_node(node): assert framework.is_tuple(current) with util.LogTime('into tuple'): thunk, env = inflated.get_thunk_env(node.name) current = framework.eval(thunk, env) elif framework.is_list(current): with util.LogTime('eval thing'): current = framework.eval(node, env) if framework.is_tuple(current): inflated = current except (gcl.EvaluationError, ast.UnparseableAccess): # Eat evaluation error, probably means the rightmost tuplemember wasn't complete. # Return what we have so far. pass return inflated
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_identifier_position(rootpath): """Return whether the cursor is in identifier-position in a member declaration."""
if len(rootpath) >= 2 and is_tuple_member_node(rootpath[-2]) and is_identifier(rootpath[-1]): return True if len(rootpath) >= 1 and is_tuple_node(rootpath[-1]): # No deeper node than tuple? Must be identifier position, otherwise we'd have a TupleMemberNode. return True return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_completions_at_cursor(ast_tree, filename, line, col, root_env=gcl.default_env): """Find completions at the cursor. Return a dict of { name => Completion } objects. """
q = gcl.SourceQuery(filename, line, col - 1) rootpath = ast_tree.find_tokens(q) if is_identifier_position(rootpath): return find_inherited_key_completions(rootpath, root_env) try: ret = find_deref_completions(rootpath, root_env) or enumerate_scope(rootpath, root_env=root_env) assert isinstance(ret, dict) return ret except gcl.EvaluationError: # Probably an unbound value or something--just return an empty list return {}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_inherited_key_completions(rootpath, root_env): """Return completion keys from INHERITED tuples. Easiest way to get those is to evaluate the tuple, check if it is a CompositeTuple, then enumerate the keys that are NOT in the rightmost tuple. """
tup = inflate_context_tuple(rootpath, root_env) if isinstance(tup, runtime.CompositeTuple): keys = set(k for t in tup.tuples[:-1] for k in t.keys()) return {n: get_completion(tup, n) for n in keys} return {}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_value_at_cursor(ast_tree, filename, line, col, root_env=gcl.default_env): """Find the value of the object under the cursor."""
q = gcl.SourceQuery(filename, line, col) rootpath = ast_tree.find_tokens(q) rootpath = path_until(rootpath, is_thunk) if len(rootpath) <= 1: # Just the file tuple itself, or some non-thunk element at the top level return None tup = inflate_context_tuple(rootpath, root_env) try: if isinstance(rootpath[-1], ast.Inherit): # Special case handling of 'Inherit' nodes, show the value that's being # inherited. return tup[rootpath[-1].name] return rootpath[-1].eval(tup.env(tup)) except gcl.EvaluationError as e: return e
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_vec_to_mat(mat, vec, axis=None, inplace=False, target=None, substract=False): """ Add a vector to a matrix. With axis=0 the vector is treated as a column vector and must have length mat.shape[0]; with axis=1 it is treated as a row vector of length mat.shape[1]. If axis is None, it is inferred from the vector's length. Set substract=True to subtract the vector instead, and inplace=True to write the result into mat. """
assert mat.flags.c_contiguous if axis is None: if vec.shape[0] == mat.shape[0]: axis = 0 elif vec.shape[0] == mat.shape[1]: axis = 1 else: raise ValueError('Vector length must be equal ' 'to one side of the matrix') n, m = mat.shape block = (_compilation_constants['add_vec_block_size'], _compilation_constants['add_vec_block_size'], 1) gridx = ceil_div(n, block[0]) gridy = ceil_div(m, block[1]) grid = (gridx, gridy, 1) if inplace: target = mat elif target is None: target = gpuarray.empty_like(mat) if axis == 0: assert vec.shape[0] == mat.shape[0] add_col_vec_kernel.prepared_call( grid, block, mat.gpudata, vec.gpudata, target.gpudata, np.uint32(n), np.uint32(m), np.int32(substract)) elif axis == 1: assert vec.shape[0] == mat.shape[1] add_row_vec_kernel.prepared_call( grid, block, mat.gpudata, vec.gpudata, target.gpudata, np.uint32(n), np.uint32(m), np.int32(substract)) return target
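On the host, the kernel above corresponds to numpy broadcasting, which makes a convenient reference for testing; the sketch below computes what add_vec_to_mat would produce, without touching the GPU:

import numpy as np

mat = np.arange(12, dtype=np.float32).reshape(3, 4)
col = np.array([1., 2., 3.], dtype=np.float32)          # len == mat.shape[0] -> axis 0
row = np.array([10., 20., 30., 40.], dtype=np.float32)  # len == mat.shape[1] -> axis 1

ref_axis0 = mat + col[:, None]  # add_vec_to_mat(mat, col)
ref_axis1 = mat + row[None, :]  # add_vec_to_mat(mat, row)
ref_sub = mat - row[None, :]    # add_vec_to_mat(mat, row, substract=True)
print(ref_axis0, ref_axis1, ref_sub, sep='\n')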
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def vector_normalize(mat, max_vec_norm=1.): """ Normalize each column vector in mat to length max_vec_norm if it is longer than max_vec_norm """
assert mat.flags.c_contiguous n, m = mat.shape vector_normalize_kernel.prepared_call( (m, 1, 1), (32, 1, 1), mat.gpudata, np.float32(max_vec_norm), np.int32(m), np.int32(n))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tokenize_by_number(s): """ Splits a string into a list of tokens; each token is either a string containing no numbers or a float. """
r = find_number(s) if r is None: return [ s ] else: tokens = [] if r[0] > 0: tokens.append(s[0:r[0]]) tokens.append( float(s[r[0]:r[1]]) ) if r[1] < len(s): tokens.extend(tokenize_by_number(s[r[1]:])) return tokens assert False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def number_aware_alphabetical_cmp(str1, str2): """ cmp function for sorting a list of strings by alphabetical order, but with numbers sorted numerically. i.e., foo1, foo2, foo10, foo11 instead of foo1, foo10 """
def flatten_tokens(tokens): l = [] for token in tokens: if isinstance(token, str): for char in token: l.append(char) else: assert isinstance(token, float) l.append(token) return l seq1 = flatten_tokens(tokenize_by_number(str1)) seq2 = flatten_tokens(tokenize_by_number(str2)) l = min(len(seq1),len(seq2)) i = 0 while i < l: if seq1[i] < seq2[i]: return -1 elif seq1[i] > seq2[i]: return 1 i += 1 if len(seq1) < len(seq2): return -1 elif len(seq1) > len(seq2): return 1 return 0
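Under Python 3 a cmp-style function has to be adapted with functools.cmp_to_key; a usage sketch assuming the helper find_number from the same module is available (the comparison works as long as numeric and alphabetic parts line up positionally across the strings):

from functools import cmp_to_key

names = ['foo10', 'foo2', 'foo1', 'foo11']
print(sorted(names, key=cmp_to_key(number_aware_alphabetical_cmp)))
# -> ['foo1', 'foo2', 'foo10', 'foo11'] instead of lexicographic order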
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_period_alias(period): """ Check if a given period is possibly an alias. Parameters period : float A period to test if it is a possible alias or not. Returns ------- is_alias : boolean True if the given period is in a range of period alias. """
# Based on the period vs periodSN plot of EROS-2 dataset (Kim+ 2014). # Period alias occurs mostly at ~1 and ~30. # Check each 1, 2, 3, 4, 5 factors. for i in range(1, 6): # One-day and one-month alias if (.99 / float(i)) < period < (1.004 / float(i)): return True if (1.03 / float(i)) < period < (1.04 / float(i)): return True if (29.2 / float(i)) < period < (29.9 / float(i)): return True # From candidates from the two fields 01, 08. # All of them are close to one day (or sidereal) alias. if (0.96465 / float(i)) < period < (0.96485 / float(i)): return True if (0.96725 / float(i)) < period < (0.96745 / float(i)): return True if (0.98190 / float(i)) < period < (0.98230 / float(i)): return True if (1.01034 / float(i)) < period < (1.01076 / float(i)): return True if (1.01568 / float(i)) < period < (1.01604 / float(i)): return True if (1.01718 / float(i)) < period < (1.01742 / float(i)): return True # From the all candidates from the entire LMC fields. # Some of these could be overlapped with the above cuts. if (0.50776 / float(i)) < period < (0.50861 / float(i)): return True if (0.96434 / float(i)) < period < (0.9652 / float(i)): return True if (0.96688 / float(i)) < period < (0.96731 / float(i)): return True if (1.0722 / float(i)) < period < (1.0729 / float(i)): return True if (27.1 / float(i)) < period < (27.5 / float(i)): return True # Not in the range of any alias. return False
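Typical usage is to discard candidate periods that fall inside these windows; for instance (the sample values are chosen to hit the one-day and one-month windows):

for period in [0.997, 1.02, 29.53, 3.7]:
    print(period, is_period_alias(period))
# 0.997 and 29.53 land in alias windows (True); 1.02 and 3.7 do not (False)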
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(filepath, obj, on_overwrite = 'ignore'): """ Serialize `object` to a file denoted by `filepath`. Parameters filepath : str A filename. If the suffix is `.joblib` and joblib can be imported, `joblib.dump` is used in place of the regular pickling mechanisms; this results in much faster saves by saving arrays as separate .npy files on disk. If the file suffix is `.npy` then `numpy.save` is attempted on `obj`. Otherwise, (c)pickle is used. obj : object A Python object to be serialized. on_overwrite: A string specifying what to do if the file already exists. ignore: just overwrite it backup: make a copy of the file (<filepath>.bak) and delete it when done saving the new copy. this allows recovery of the old version of the file if saving the new one fails """
filepath = preprocess(filepath) if os.path.exists(filepath): if on_overwrite == 'backup': backup = filepath + '.bak' shutil.move(filepath, backup) save(filepath, obj) try: os.remove(backup) except Exception, e: warnings.warn("Got an error while trying to remove "+backup+":"+str(e)) return else: assert on_overwrite == 'ignore' try: _save(filepath, obj) except RuntimeError, e: """ Sometimes for large theano graphs, pickle/cPickle exceed the maximum recursion depth. This seems to me like a fundamental design flaw in pickle/cPickle. The workaround I employ here is the one recommended to someone who had a similar problem on stackexchange: http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle Obviously this does not scale and could cause a crash but I don't see another solution short of writing our own implementation of pickle. """ if str(e).find('recursion') != -1: warnings.warn('pylearn2.utils.save encountered the following ' 'error: ' + str(e) + '\nAttempting to resolve this error by calling ' + 'sys.setrecursionlimit and retrying') old_limit = sys.getrecursionlimit() try: sys.setrecursionlimit(50000) _save(filepath, obj) finally: sys.setrecursionlimit(old_limit)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_pickle_protocol(): """ Allow configuration of the pickle protocol on a per-machine basis. This way, if you use multiple platforms with different versions of pickle, you can configure each of them to use the highest protocol supported by all of the machines that you want to be able to communicate. """
try: protocol_str = os.environ['PYLEARN2_PICKLE_PROTOCOL'] except KeyError: # If not defined, we default to 0 because this is the default # protocol used by cPickle.dump (and because it results in # maximum portability) protocol_str = '0' if protocol_str == 'pickle.HIGHEST_PROTOCOL': return pickle.HIGHEST_PROTOCOL return int(protocol_str)
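Configuration happens purely through the environment, typically set before the process starts; a small usage sketch of the function above:

import os

os.environ['PYLEARN2_PICKLE_PROTOCOL'] = 'pickle.HIGHEST_PROTOCOL'
print(get_pickle_protocol())  # highest protocol this interpreter supports

os.environ['PYLEARN2_PICKLE_PROTOCOL'] = '2'
print(get_pickle_protocol())  # -> 2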
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_train_file(config_file_path): """Loads and parses a yaml file for a Train object. Publishes the relevant training environment variables"""
from pylearn2.config import yaml_parse suffix_to_strip = '.yaml' # publish environment variables related to file name if config_file_path.endswith(suffix_to_strip): config_file_full_stem = config_file_path[0:-len(suffix_to_strip)] else: config_file_full_stem = config_file_path for varname in ["PYLEARN2_TRAIN_FILE_NAME", #this one is deprecated "PYLEARN2_TRAIN_FILE_FULL_STEM"]: #this is the new, accepted name environ.putenv(varname, config_file_full_stem) directory = config_file_path.split('/')[:-1] directory = '/'.join(directory) if directory != '': directory += '/' environ.putenv("PYLEARN2_TRAIN_DIR", directory) environ.putenv("PYLEARN2_TRAIN_BASE_NAME", config_file_path.split('/')[-1] ) environ.putenv("PYLEARN2_TRAIN_FILE_STEM", config_file_full_stem.split('/')[-1] ) return yaml_parse.load_path(config_file_path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def POINTER(obj): """ Create ctypes pointer to object. Notes ----- This function converts None to a real NULL pointer because of bug in how ctypes handles None on 64-bit platforms. """
p = ctypes.POINTER(obj) if not isinstance(p.from_param, classmethod): def from_param(cls, x): if x is None: return cls() else: return x p.from_param = classmethod(from_param) return p
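The effect of the patched from_param is that None becomes a typed NULL pointer instead of raising on 64-bit platforms; a minimal demonstration using plain ctypes, no CUDA required:

import ctypes

FloatPtr = POINTER(ctypes.c_float)
null_ptr = FloatPtr.from_param(None)
print(bool(null_ptr))  # False -> NULL pointer

buf = (ctypes.c_float * 4)()
real_ptr = FloatPtr.from_param(ctypes.cast(buf, FloatPtr))
print(bool(real_ptr))  # True -> non-NULL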
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gpuarray_ptr(g): """ Return ctypes pointer to data in GPUArray object. """
addr = int(g.gpudata) if g.dtype == np.int8: return ctypes.cast(addr, POINTER(ctypes.c_byte)) if g.dtype == np.uint8: return ctypes.cast(addr, POINTER(ctypes.c_ubyte)) if g.dtype == np.int16: return ctypes.cast(addr, POINTER(ctypes.c_short)) if g.dtype == np.uint16: return ctypes.cast(addr, POINTER(ctypes.c_ushort)) if g.dtype == np.int32: return ctypes.cast(addr, POINTER(ctypes.c_int)) if g.dtype == np.uint32: return ctypes.cast(addr, POINTER(ctypes.c_uint)) if g.dtype == np.int64: return ctypes.cast(addr, POINTER(ctypes.c_long)) if g.dtype == np.uint64: return ctypes.cast(addr, POINTER(ctypes.c_ulong)) if g.dtype == np.float32: return ctypes.cast(addr, POINTER(ctypes.c_float)) elif g.dtype == np.float64: return ctypes.cast(addr, POINTER(ctypes.c_double)) elif g.dtype == np.complex64: return ctypes.cast(addr, POINTER(cuFloatComplex)) elif g.dtype == np.complex128: return ctypes.cast(addr, POINTER(cuDoubleComplex)) else: raise ValueError('unrecognized type')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cudaMalloc(count, ctype=None): """ Allocate device memory. Allocate memory on the device associated with the current active context. Parameters count : int Number of bytes of memory to allocate ctype : _ctypes.SimpleType, optional ctypes type to cast returned pointer. Returns ------- ptr : ctypes pointer Pointer to allocated device memory. """
ptr = ctypes.c_void_p() status = _libcudart.cudaMalloc(ctypes.byref(ptr), count) cudaCheckStatus(status) if ctype is not None: ptr = ctypes.cast(ptr, ctypes.POINTER(ctype)) return ptr
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cudaMallocPitch(pitch, rows, cols, elesize): """ Allocate pitched device memory. Allocate pitched memory on the device associated with the current active context. Parameters pitch : int Pitch for allocation. rows : int Requested pitched allocation height. cols : int Requested pitched allocation width. elesize : int Size of memory element. Returns ------- ptr : ctypes pointer Pointer to allocated device memory. """
ptr = ctypes.c_void_p() status = _libcudart.cudaMallocPitch(ctypes.byref(ptr), ctypes.c_size_t(pitch), cols*elesize, rows) cudaCheckStatus(status) return ptr, pitch