text_prompt | code_prompt
---|---
<SYSTEM_TASK:>
Query for OPUS data between times t1 and t2.
<END_TASK>
<USER_TASK:>
Description:
def get_between_times(self, t1, t2, target=None):
"""
Query for OPUS data between times t1 and t2.
Parameters
----------
t1, t2 : datetime.datetime or str
Start and end times for the query. datetime objects are converted to
their isoformat strings; strings must already be in an accepted
international format for time strings.
target : str, optional
Target body to restrict the observation query to. This usually reduces
the number of matching results considerably.
Returns
-------
None. The new query results are stored in self.obsids.
""" |
try:
# checking if times have isoformat() method (datetimes have)
t1 = t1.isoformat()
t2 = t2.isoformat()
except AttributeError:
# if not, should already be a string, so do nothing.
pass
myquery = self._get_time_query(t1, t2)
if target is not None:
myquery["target"] = target
self.create_files_request(myquery, fmt="json")
self.unpack_json_response() |
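A brief usage sketch of this query method; the class name OPUS and its import path are assumptions made for illustration:
from datetime import datetime
from pyciss.opusapi import OPUS  # assumed import path

opus = OPUS()
opus.get_between_times(datetime(2005, 10, 1), datetime(2005, 10, 3), target="SATURN")
print(len(opus.obsids), "observations found")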
<SYSTEM_TASK:>
Shows preview images using the Jupyter notebook HTML display.
<END_TASK>
<USER_TASK:>
Description:
def show_images(self, size="small"):
"""Shows preview images using the Jupyter notebook HTML display.
Parameters
==========
size : {'small', 'med', 'thumb', 'full'}
Determines the size of the preview image to be shown.
""" |
d = dict(small=256, med=512, thumb=100, full=1024)
try:
width = d[size]
except KeyError:
print("Allowed keys:", d.keys())
return
img_urls = [i._get_img_url(size) for i in self.obsids]
imagesList = "".join(
[
"<img style='width: {0}px; margin: 0px; float: "
"left; border: 1px solid black;' "
"src='{1}' />".format(width, s)
for s in img_urls
]
)
display(HTML(imagesList)) |
<SYSTEM_TASK:>
Download the previously found and stored Opus obsids.
<END_TASK>
<USER_TASK:>
Description:
def download_results(self, savedir=None, raw=True, calib=False, index=None):
"""Download the previously found and stored Opus obsids.
Parameters
==========
savedir : str or pathlib.Path, optional
If the database root folder as defined by the config.ini should not be used,
provide a different savedir here. It will be handed to PathManager.
raw : bool, optional
If True (the default), download the raw image URLs for each obsid.
calib : bool, optional
If True, also download the calibrated image URLs for each obsid. Default False.
index : int, optional
If given, download only the obsid at this index of self.obsids.
""" |
obsids = self.obsids if index is None else [self.obsids[index]]
for obsid in obsids:
pm = io.PathManager(obsid.img_id, savedir=savedir)
pm.basepath.mkdir(exist_ok=True)
to_download = []
if raw is True:
to_download.extend(obsid.raw_urls)
if calib is True:
to_download.extend(obsid.calib_urls)
for url in to_download:
basename = Path(url).name
print("Downloading", basename)
store_path = str(pm.basepath / basename)
try:
urlretrieve(url, store_path)
except Exception:
# fall back to plain HTTP if the HTTPS download fails
urlretrieve(url.replace("https", "http"), store_path)
return str(pm.basepath) |
<SYSTEM_TASK:>
Download preview files for the previously found and stored Opus obsids.
<END_TASK>
<USER_TASK:>
Description:
def download_previews(self, savedir=None):
"""Download preview files for the previously found and stored Opus obsids.
Parameters
==========
savedir: str or pathlib.Path, optional
If the database root folder as defined by the config.ini should not be used,
provide a different savedir here. It will be handed to PathManager.
""" |
for obsid in self.obsids:
pm = io.PathManager(obsid.img_id, savedir=savedir)
pm.basepath.mkdir(exist_ok=True)
basename = Path(obsid.medium_img_url).name
print("Downloading", basename)
urlretrieve(obsid.medium_img_url, str(pm.basepath / basename)) |
<SYSTEM_TASK:>
Find which swap situation we are in by time.
<END_TASK>
<USER_TASK:>
Description:
def which_epi_janus_resonance(name, time):
"""Find which swap situtation we are in by time.
Starting from 2006-01-21 where a Janus-Epimetheus swap occured, and
defining the next 4 years until the next swap as `scenario1, and the 4
years after that `scenario2`.
Calculate in units of 4 years, in which scenario the given time falls.
Parameters
----------
time : timestring, datetime
Time of the image. The astropy Time object can deal with both formats.
Returns
-------
str
The given name string (either `janus` or `epimetheus`) and attach
a 1 or 2, as appropriate.
""" |
t1 = Time('2002-01-21').to_datetime()
delta = Time(time).to_datetime() - t1
yearfraction = delta.days / 365
if int(yearfraction / 4) % 2 == 0:
return name + '2'
else:
return name + '1' |
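A minimal sketch of the scenario arithmetic above; the sample date is illustrative only:
from astropy.time import Time

delta = Time('2008-06-01').to_datetime() - Time('2002-01-21').to_datetime()
yearfraction = delta.days / 365  # roughly 6.4 years since the swap
# int(6.4 / 4) == 1, which is odd, so the function returns the '1' scenario
print(which_epi_janus_resonance('janus', '2008-06-01'))  # -> 'janus1'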
<SYSTEM_TASK:>
Get a specific version of a layer.
<END_TASK>
<USER_TASK:>
Description:
def get_version(self, layer_id, version_id, expand=[]):
"""
Get a specific version of a layer.
""" |
target_url = self.client.get_url('VERSION', 'GET', 'single', {'layer_id': layer_id, 'version_id': version_id})
return self._get(target_url, expand=expand) |
<SYSTEM_TASK:>
Creates a new draft version.
<END_TASK>
<USER_TASK:>
Description:
def create_draft(self, layer_id):
"""
Creates a new draft version.
If anything in the data object has changed then an import will begin immediately.
Otherwise to force a re-import from the previous sources call :py:meth:`koordinates.layers.LayerManager.start_import`.
:rtype: Layer
:return: the new version
:raises Conflict: if there is already a draft version for this layer.
""" |
target_url = self.client.get_url('VERSION', 'POST', 'create', {'layer_id': layer_id})
r = self.client.request('POST', target_url, json={})
return self.create_from_result(r.json()) |
<SYSTEM_TASK:>
Set the XML metadata on a layer draft version.
<END_TASK>
<USER_TASK:>
Description:
def set_metadata(self, layer_id, version_id, fp):
"""
Set the XML metadata on a layer draft version.
:param file fp: file-like object to read the XML metadata from.
:raises NotAllowed: if the version is already published.
""" |
base_url = self.client.get_url('VERSION', 'GET', 'single', {'layer_id': layer_id, 'version_id': version_id})
self._metadata.set(base_url, fp) |
<SYSTEM_TASK:>
Return if this version is the published version of a layer
<END_TASK>
<USER_TASK:>
Description:
def is_published_version(self):
""" Return if this version is the published version of a layer """ |
pub_ver = getattr(self, 'published_version', None)
this_ver = getattr(self, 'this_version', None)
return this_ver and pub_ver and (this_ver == pub_ver) |
<SYSTEM_TASK:>
Return if this version is the draft version of a layer
<END_TASK>
<USER_TASK:>
Description:
def is_draft_version(self):
""" Return if this version is the draft version of a layer """ |
pub_ver = getattr(self, 'published_version', None)
latest_ver = getattr(self, 'latest_version', None)
this_ver = getattr(self, 'this_version', None)
return this_ver and latest_ver and (this_ver == latest_ver) and (latest_ver != pub_ver) |
<SYSTEM_TASK:>
Get a specific version of this layer
<END_TASK>
<USER_TASK:>
Description:
def get_version(self, version_id, expand=[]):
"""
Get a specific version of this layer
""" |
target_url = self._client.get_url('VERSION', 'GET', 'single', {'layer_id': self.id, 'version_id': version_id})
return self._manager._get(target_url, expand=expand) |
<SYSTEM_TASK:>
Creates a publish task just for this version, which publishes as soon as any import is complete.
<END_TASK>
<USER_TASK:>
Description:
def publish(self, version_id=None):
"""
Creates a publish task just for this version, which publishes as soon as any import is complete.
:return: the publish task
:rtype: Publish
:raises Conflict: If the version is already published, or already has a publish job.
""" |
if not version_id:
version_id = self.version.id
target_url = self._client.get_url('VERSION', 'POST', 'publish', {'layer_id': self.id, 'version_id': version_id})
r = self._client.request('POST', target_url, json={})
return self._client.get_manager(Publish).create_from_result(r.json()) |
<SYSTEM_TASK:>
Create and return poly1d objects.
<END_TASK>
<USER_TASK:>
Description:
def create_polynoms():
"""Create and return poly1d objects.
Uses the parameters from Morgan to create poly1d objects for
calculations.
""" |
fname = pr.resource_filename('pyciss', 'data/soliton_prediction_parameters.csv')
res_df = pd.read_csv(fname)
polys = {}
for resorder, row in zip('65 54 43 21'.split(),
range(4)):
p = poly1d([res_df.loc[row, 'Slope (km/yr)'], res_df.loc[row, 'Intercept (km)']])
polys['janus ' + ':'.join(resorder)] = p
return polys |
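A short usage sketch; the key follows the 'janus x:y' pattern built above, and the numeric result depends on the CSV parameters:
polys = create_polynoms()
predicted_radius_km = polys['janus 6:5'](5.0)  # radius predicted 5 years after resonance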
<SYSTEM_TASK:>
Workhorse function.
<END_TASK>
<USER_TASK:>
Description:
def check_for_soliton(img_id):
"""Workhorse function.
Creates the prediction polynomials and calculates radius constraints from
attributes of the `RingCube` built for `img_id`.
Parameters
----------
img_id : str
Image identifier used by `io.PathManager` to locate the ring-projected
ISS cube file (a `pyciss.ringcube.RingCube`).
Returns
-------
dict or None
Dictionary with all solitons found. A dict is used because more than one
soliton can fall inside a single image. Returns None if none are found.
""" |
pm = io.PathManager(img_id)
try:
ringcube = RingCube(pm.cubepath)
except FileNotFoundError:
ringcube = RingCube(pm.undestriped)
polys = create_polynoms()
minrad = ringcube.minrad.to(u.km)
maxrad = ringcube.maxrad.to(u.km)
delta_years = get_year_since_resonance(ringcube)
soliton_radii = {}
for k, p in polys.items():
current_r = p(delta_years) * u.km
if minrad < current_r < maxrad:
soliton_radii[k] = current_r
return soliton_radii if soliton_radii else None |
<SYSTEM_TASK:>
Takes the supplied headers and adds in any which
<END_TASK>
<USER_TASK:>
Description:
def _assemble_headers(self, method, user_headers=None):
"""
Takes the supplied headers and adds in any which
are defined at a client level and then returns
the result.
:param user_headers: a `dict` containing headers defined at the
request level, optional.
:return: a `dict` instance
""" |
headers = copy.deepcopy(user_headers or {})
if method not in ('GET', 'HEAD'):
headers.setdefault('Content-Type', 'application/json')
return headers |
<SYSTEM_TASK:>
Extracts parameters from a populated URL
<END_TASK>
<USER_TASK:>
Description:
def reverse_url(self, datatype, url, verb='GET', urltype='single', api_version=None):
"""
Extracts parameters from a populated URL
:param datatype: a string identifying the data the url accesses.
:param url: the fully-qualified URL to extract parameters from.
:param verb: the HTTP verb needed for use with the url.
:param urltype: an adjective describing the nature of the request.
:return: dict
""" |
api_version = api_version or 'v1'
templates = getattr(self, 'URL_TEMPLATES__%s' % api_version)
# this is fairly simplistic, if necessary we could use the parse lib
template_url = r"https://(?P<api_host>.+)/services/api/(?P<api_version>.+)"
template_url += re.sub(r'{([^}]+)}', r'(?P<\1>.+)', templates[datatype][verb][urltype])
# /foo/{foo_id}/bar/{id}/
m = re.match(template_url, url or '')
if not m:
raise KeyError("No reverse match from '%s' to %s.%s.%s" % (url, datatype, verb, urltype))
r = m.groupdict()
del r['api_host']
if r.pop('api_version') != api_version:
raise ValueError("API version mismatch")
return r |
<SYSTEM_TASK:>
Returns a fully formed url
<END_TASK>
<USER_TASK:>
Description:
def get_url(self, datatype, verb, urltype, params={}, api_host=None, api_version=None):
"""Returns a fully formed url
:param datatype: a string identifying the data the url will access.
:param verb: the HTTP verb needed for use with the url.
:param urltype: an adjective describing the nature of the request.
:param params: substitution variables for the URL.
:return: a fully formed url.
:rtype: str
""" |
api_version = api_version or 'v1'
api_host = api_host or self.host
subst = params.copy()
subst['api_host'] = api_host
subst['api_version'] = api_version
url = "https://{api_host}/services/api/{api_version}"
url += self.get_url_path(datatype, verb, urltype, params, api_version)
return url.format(**subst) |
<SYSTEM_TASK:>
Turn CDMRemote variable into something like a numpy.ndarray.
<END_TASK>
<USER_TASK:>
Description:
def open_store_variable(self, name, var):
"""Turn CDMRemote variable into something like a numpy.ndarray.""" |
data = indexing.LazilyOuterIndexedArray(CDMArrayWrapper(name, self))
return Variable(var.dimensions, data, {a: getattr(var, a) for a in var.ncattrs()}) |
<SYSTEM_TASK:>
Get the global attributes from underlying data set.
<END_TASK>
<USER_TASK:>
Description:
def get_attrs(self):
"""Get the global attributes from underlying data set.""" |
return FrozenOrderedDict((a, getattr(self.ds, a)) for a in self.ds.ncattrs()) |
<SYSTEM_TASK:>
Get the dimensions from underlying data set.
<END_TASK>
<USER_TASK:>
Description:
def get_dimensions(self):
"""Get the dimensions from underlying data set.""" |
return FrozenOrderedDict((k, len(v)) for k, v in self.ds.dimensions.items()) |
<SYSTEM_TASK:>
Identify the base URL of the THREDDS server from the catalog URL.
<END_TASK>
<USER_TASK:>
Description:
def _find_base_tds_url(catalog_url):
"""Identify the base URL of the THREDDS server from the catalog URL.
Will retain URL scheme, host, port and username/password when present.
""" |
url_components = urlparse(catalog_url)
if url_components.path:
return catalog_url.split(url_components.path)[0]
else:
return catalog_url |
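For example (the catalog URL is illustrative):
_find_base_tds_url('https://thredds.ucar.edu/thredds/catalog/grib/catalog.xml')
# -> 'https://thredds.ucar.edu'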
<SYSTEM_TASK:>
Filter keys for an item closest to the desired time.
<END_TASK>
<USER_TASK:>
Description:
def filter_time_nearest(self, time, regex=None):
"""Filter keys for an item closest to the desired time.
Loops over all keys in the collection and uses `regex` to extract and build
`datetime`s. The collection of `datetime`s is compared to `start` and the value that
has a `datetime` closest to that requested is returned. If none of the keys in the
collection match the regex, indicating that the keys are not date/time-based,
a ``ValueError`` is raised.
Parameters
----------
time : ``datetime.datetime``
The desired time
regex : str, optional
The regular expression to use to extract date/time information from the key. If
given, this should contain named groups: 'year', 'month', 'day', 'hour', 'minute',
'second', and 'microsecond', as appropriate. When a match is found, any of those
groups missing from the pattern will be assigned a value of 0. The default pattern
looks for patterns like: 20171118_2356.
Returns
-------
The value with a time closest to that desired
""" |
return min(self._get_datasets_with_times(regex),
key=lambda i: abs((i[0] - time).total_seconds()))[-1] |
<SYSTEM_TASK:>
Filter keys for all items within the desired time range.
<END_TASK>
<USER_TASK:>
Description:
def filter_time_range(self, start, end, regex=None):
"""Filter keys for all items within the desired time range.
Loops over all keys in the collection and uses `regex` to extract and build
`datetime`s. From the collection of `datetime`s, all values within `start` and `end`
(inclusive) are returned. If none of the keys in the collection match the regex,
indicating that the keys are not date/time-based, a ``ValueError`` is raised.
Parameters
----------
start : ``datetime.datetime``
The start of the desired time range, inclusive
end : ``datetime.datetime``
The end of the desired time range, inclusive
regex : str, optional
The regular expression to use to extract date/time information from the key. If
given, this should contain named groups: 'year', 'month', 'day', 'hour', 'minute',
'second', and 'microsecond', as appropriate. When a match is found, any of those
groups missing from the pattern will be assigned a value of 0. The default pattern
looks for patterns like: 20171118_2356.
Returns
-------
All values corresponding to times within the specified range
""" |
return [item[-1] for item in self._get_datasets_with_times(regex)
if start <= item[0] <= end] |
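A hedged usage sketch of both time filters on a catalog's dataset collection; the catalog URL is illustrative:
from datetime import datetime, timedelta
from siphon.catalog import TDSCatalog

cat = TDSCatalog('https://thredds.ucar.edu/thredds/catalog/grib/NCEP/GFS/Global_0p5deg/catalog.xml')
now = datetime.utcnow()
nearest = cat.datasets.filter_time_nearest(now)
last_day = cat.datasets.filter_time_range(now - timedelta(hours=24), now)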
<SYSTEM_TASK:>
Remove and return the value associated with case-insensitive ``key``.
<END_TASK>
<USER_TASK:>
Description:
def pop(self, key, *args, **kwargs):
"""Remove and return the value associated with case-insensitive ``key``.""" |
return super(CaseInsensitiveDict, self).pop(CaseInsensitiveStr(key)) |
<SYSTEM_TASK:>
Resolve the url of the dataset when reading latest.xml.
<END_TASK>
<USER_TASK:>
Description:
def resolve_url(self, catalog_url):
"""Resolve the url of the dataset when reading latest.xml.
Parameters
----------
catalog_url : str
The catalog url to be resolved
""" |
if catalog_url != '':
resolver_base = catalog_url.split('catalog.xml')[0]
resolver_url = resolver_base + self.url_path
resolver_xml = session_manager.urlopen(resolver_url)
tree = ET.parse(resolver_xml)
root = tree.getroot()
if 'name' in root.attrib:
self.catalog_name = root.attrib['name']
else:
self.catalog_name = 'No name found'
resolved_url = ''
found = False
for child in root.iter():
if not found:
tag_type = child.tag.split('}')[-1]
if tag_type == 'dataset':
if 'urlPath' in child.attrib:
ds = Dataset(child)
resolved_url = ds.url_path
found = True
if found:
return resolved_url
else:
log.warning('no dataset url path found in latest.xml!') |
<SYSTEM_TASK:>
Make fully qualified urls for the access methods enabled on the dataset.
<END_TASK>
<USER_TASK:>
Description:
def make_access_urls(self, catalog_url, all_services, metadata=None):
"""Make fully qualified urls for the access methods enabled on the dataset.
Parameters
----------
catalog_url : str
The top level server url
all_services : List[SimpleService]
list of :class:`SimpleService` objects associated with the dataset
metadata : dict
Metadata from the :class:`TDSCatalog`
""" |
all_service_dict = CaseInsensitiveDict({})
for service in all_services:
all_service_dict[service.name] = service
if isinstance(service, CompoundService):
for subservice in service.services:
all_service_dict[subservice.name] = subservice
service_name = metadata.get('serviceName', None)
access_urls = CaseInsensitiveDict({})
server_url = _find_base_tds_url(catalog_url)
# process access urls for datasets that reference top
# level catalog services (individual or compound service
# types).
if service_name in all_service_dict:
service = all_service_dict[service_name]
if service.service_type != 'Resolver':
# if service is a CompoundService, create access url
# for each SimpleService
if isinstance(service, CompoundService):
for subservice in service.services:
server_base = urljoin(server_url, subservice.base)
access_urls[subservice.service_type] = urljoin(server_base,
self.url_path)
else:
server_base = urljoin(server_url, service.base)
access_urls[service.service_type] = urljoin(server_base, self.url_path)
# process access children of dataset elements
for service_type in self.access_element_info:
url_path = self.access_element_info[service_type]
if service_type in all_service_dict:
server_base = urljoin(server_url, all_service_dict[service_type].base)
access_urls[service_type] = urljoin(server_base, url_path)
self.access_urls = access_urls |
<SYSTEM_TASK:>
Create an access method from a catalog element.
<END_TASK>
<USER_TASK:>
Description:
def add_access_element_info(self, access_element):
"""Create an access method from a catalog element.""" |
service_name = access_element.attrib['serviceName']
url_path = access_element.attrib['urlPath']
self.access_element_info[service_name] = url_path |
<SYSTEM_TASK:>
Download the dataset to a local file.
<END_TASK>
<USER_TASK:>
Description:
def download(self, filename=None):
"""Download the dataset to a local file.
Parameters
----------
filename : str, optional
The full path to which the dataset will be saved
""" |
if filename is None:
filename = self.name
with self.remote_open() as infile:
with open(filename, 'wb') as outfile:
outfile.write(infile.read()) |
<SYSTEM_TASK:>
Access the remote dataset.
<END_TASK>
<USER_TASK:>
Description:
def remote_access(self, service=None, use_xarray=None):
"""Access the remote dataset.
Open the remote dataset and get a netCDF4-compatible `Dataset` object providing
index-based subsetting capabilities.
Parameters
----------
service : str, optional
The name of the service to use for access to the dataset, either
'CdmRemote' or 'OPENDAP'. Defaults to 'CdmRemote' if available, otherwise 'OPENDAP'.
Returns
-------
Dataset
Object for netCDF4-like access to the dataset
""" |
if service is None:
service = 'CdmRemote' if 'CdmRemote' in self.access_urls else 'OPENDAP'
if service not in (CaseInsensitiveStr('CdmRemote'), CaseInsensitiveStr('OPENDAP')):
raise ValueError(service + ' is not a valid service for remote_access')
return self.access_with_service(service, use_xarray) |
<SYSTEM_TASK:>
Subset the dataset.
<END_TASK>
<USER_TASK:>
Description:
def subset(self, service=None):
"""Subset the dataset.
Open the remote dataset and get a client for talking to ``service``.
Parameters
----------
service : str, optional
The name of the service for subsetting the dataset. Defaults to 'NetcdfSubset'
or 'NetcdfServer', in that order, depending on the services listed in the
catalog.
Returns
-------
a client for communicating using ``service``
""" |
if service is None:
for serviceName in self.ncssServiceNames:
if serviceName in self.access_urls:
service = serviceName
break
else:
raise RuntimeError('Subset access is not available for this dataset.')
elif service not in self.ncssServiceNames:
raise ValueError(service + ' is not a valid service for subset. Options are: '
+ ', '.join(self.ncssServiceNames))
return self.access_with_service(service) |
<SYSTEM_TASK:>
Access the dataset using a particular service.
<END_TASK>
<USER_TASK:>
Description:
def access_with_service(self, service, use_xarray=None):
"""Access the dataset using a particular service.
Return a Python object capable of communicating with the server using the particular
service. For instance, for 'HTTPServer' this is a file-like object capable of
HTTP communication; for OPENDAP this is a netCDF4 dataset.
Parameters
----------
service : str
The name of the service for accessing the dataset
Returns
-------
An instance appropriate for communicating using ``service``.
""" |
service = CaseInsensitiveStr(service)
if service == 'CdmRemote':
if use_xarray:
from .cdmr.xarray_support import CDMRemoteStore
try:
import xarray as xr
provider = lambda url: xr.open_dataset(CDMRemoteStore(url)) # noqa: E731
except ImportError:
raise ImportError('CdmRemote access needs xarray to be installed.')
else:
from .cdmr import Dataset as CDMRDataset
provider = CDMRDataset
elif service == 'OPENDAP':
if use_xarray:
try:
import xarray as xr
provider = xr.open_dataset
except ImportError:
raise ImportError('xarray needs to be installed if `use_xarray` is True.')
else:
try:
from netCDF4 import Dataset as NC4Dataset
provider = NC4Dataset
except ImportError:
raise ImportError('OPENDAP access needs netCDF4-python to be installed.')
elif service in self.ncssServiceNames:
from .ncss import NCSS
provider = NCSS
elif service == 'HTTPServer':
provider = session_manager.urlopen
else:
raise ValueError(service + ' is not an access method supported by Siphon')
try:
return provider(self.access_urls[service])
except KeyError:
raise ValueError(service + ' is not available for this dataset') |
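A hedged end-to-end sketch of how these access helpers are typically used together; the catalog URL is illustrative:
from siphon.catalog import TDSCatalog

cat = TDSCatalog('https://thredds.ucar.edu/thredds/catalog/grib/NCEP/GFS/Global_0p5deg/latest.xml')
ds = list(cat.datasets.values())[0]
nc = ds.remote_access()   # netCDF4-like object via CdmRemote or OPENDAP
ncss = ds.subset()        # NCSS client, if a subset service is offered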
<SYSTEM_TASK:>
Get header information and store as metadata for the endpoint.
<END_TASK>
<USER_TASK:>
Description:
def _get_metadata(self):
"""Get header information and store as metadata for the endpoint.""" |
self.metadata = self.fetch_header()
self.variables = {g.name for g in self.metadata.grids} |
<SYSTEM_TASK:>
Make a header request to the endpoint.
<END_TASK>
<USER_TASK:>
Description:
def fetch_header(self):
"""Make a header request to the endpoint.""" |
query = self.query().add_query_parameter(req='header')
return self._parse_messages(self.get_query(query).content)[0] |
<SYSTEM_TASK:>
Request the featureType from the endpoint.
<END_TASK>
<USER_TASK:>
Description:
def fetch_feature_type(self):
"""Request the featureType from the endpoint.""" |
query = self.query().add_query_parameter(req='featureType')
return self.get_query(query).content |
<SYSTEM_TASK:>
Pull down coordinate data from the endpoint.
<END_TASK>
<USER_TASK:>
Description:
def fetch_coords(self, query):
"""Pull down coordinate data from the endpoint.""" |
q = query.add_query_parameter(req='coord')
return self._parse_messages(self.get_query(q).content) |
<SYSTEM_TASK:>
Retrieve IGRA version 2 data for one station.
<END_TASK>
<USER_TASK:>
Description:
def request_data(cls, time, site_id, derived=False):
"""Retreive IGRA version 2 data for one station.
Parameters
--------
site_id : str
11-character IGRA2 station identifier.
time : datetime
The date and time of the desired observation. If list of two times is given,
dataframes for all dates within the two dates will be returned.
Returns
-------
:class: `pandas.DataFrame` containing the data.
""" |
igra2 = cls()
# Set parameters for data query
if derived:
igra2.ftpsite = igra2.ftpsite + 'derived/derived-por/'
igra2.suffix = igra2.suffix + '-drvd.txt'
else:
igra2.ftpsite = igra2.ftpsite + 'data/data-por/'
igra2.suffix = igra2.suffix + '-data.txt'
if type(time) == datetime.datetime:
igra2.begin_date = time
igra2.end_date = time
else:
igra2.begin_date, igra2.end_date = time
igra2.site_id = site_id
df, headers = igra2._get_data()
return df, headers |
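Usage sketch; the class name IGRAUpperAir, its import path, and the station/date are given only for illustration:
from datetime import datetime
from siphon.simplewebservice.igra2 import IGRAUpperAir  # assumed import path

df, header = IGRAUpperAir.request_data(datetime(2014, 9, 10, 0), 'USM00070026')
print(df.units['pressure'])  # -> 'hPa'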
<SYSTEM_TASK:>
Process the IGRA2 text file for observations at site_id matching time.
<END_TASK>
<USER_TASK:>
Description:
def _get_data(self):
"""Process the IGRA2 text file for observations at site_id matching time.
Returns
-------
:class:`pandas.DataFrame` containing the body data.
:class:`pandas.DataFrame` containing the header data.
""" |
# Split the list of times into begin and end dates. If only
# one date is supplied, set both begin and end dates equal to that date.
body, header, dates_long, dates = self._get_data_raw()
params = self._get_fwf_params()
df_body = pd.read_fwf(StringIO(body), **params['body'])
df_header = pd.read_fwf(StringIO(header), **params['header'])
df_body['date'] = dates_long
df_body = self._clean_body_df(df_body)
df_header = self._clean_header_df(df_header)
df_header['date'] = dates
return df_body, df_header |
<SYSTEM_TASK:>
Download observations matching the time range.
<END_TASK>
<USER_TASK:>
Description:
def _get_data_raw(self):
"""Download observations matching the time range.
Returns a tuple with a string for the body, string for the headers,
and a list of dates.
""" |
# Imports need to be here so we can monkeypatch urlopen for testing and avoid
# downloading live data for testing
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
with closing(urlopen(self.ftpsite + self.site_id + self.suffix + '.zip')) as url:
f = ZipFile(BytesIO(url.read()), 'r').open(self.site_id + self.suffix)
lines = [line.decode('utf-8') for line in f.readlines()]
body, header, dates_long, dates = self._select_date_range(lines)
return body, header, dates_long, dates |
<SYSTEM_TASK:>
Identify lines containing headers within the range begin_date to end_date.
<END_TASK>
<USER_TASK:>
Description:
def _select_date_range(self, lines):
"""Identify lines containing headers within the range begin_date to end_date.
Parameters
-----
lines: list
list of lines from the IGRA2 data file.
""" |
headers = []
num_lev = []
dates = []
# Get indices of headers, and make a list of dates and num_lev
for idx, line in enumerate(lines):
if line[0] == '#':
year, month, day, hour = map(int, line[13:26].split())
# All soundings have YMD, most have hour
try:
date = datetime.datetime(year, month, day, hour)
except ValueError:
date = datetime.datetime(year, month, day)
# Check date
if self.begin_date <= date <= self.end_date:
headers.append(idx)
num_lev.append(int(line[32:36]))
dates.append(date)
if date > self.end_date:
break
if len(dates) == 0:
# Break if no matched dates.
# Could improve this later by showing the date range for the station.
raise ValueError('No dates match selection.')
# Compress body of data into a string
begin_idx = min(headers)
end_idx = max(headers) + num_lev[-1]
# Make a boolean vector that selects only list indices within the time range
selector = np.zeros(len(lines), dtype=bool)
selector[begin_idx:end_idx + 1] = True
selector[headers] = False
body = ''.join([line for line in itertools.compress(lines, selector)])
selector[begin_idx:end_idx + 1] = ~selector[begin_idx:end_idx + 1]
header = ''.join([line for line in itertools.compress(lines, selector)])
# expand date vector to match length of the body dataframe.
dates_long = np.repeat(dates, num_lev)
return body, header, dates_long, dates |
<SYSTEM_TASK:>
Format the dataframe, remove empty rows, and add units attribute.
<END_TASK>
<USER_TASK:>
Description:
def _clean_body_df(self, df):
"""Format the dataframe, remove empty rows, and add units attribute.""" |
if self.suffix == '-drvd.txt':
df = df.dropna(subset=('temperature', 'reported_relative_humidity',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
df.units = {'pressure': 'hPa',
'reported_height': 'meter',
'calculated_height': 'meter',
'temperature': 'Kelvin',
'temperature_gradient': 'Kelvin / kilometer',
'potential_temperature': 'Kelvin',
'potential_temperature_gradient': 'Kelvin / kilometer',
'virtual_temperature': 'Kelvin',
'virtual_potential_temperature': 'Kelvin',
'vapor_pressure': 'Pascal',
'saturation_vapor_pressure': 'Pascal',
'reported_relative_humidity': 'percent',
'calculated_relative_humidity': 'percent',
'u_wind': 'meter / second',
'u_wind_gradient': '(meter / second) / kilometer',
'v_wind': 'meter / second',
'v_wind_gradient': '(meter / second) / kilometer',
'refractive_index': 'unitless'}
else:
df['u_wind'], df['v_wind'] = get_wind_components(df['speed'],
np.deg2rad(df['direction']))
df['u_wind'] = np.round(df['u_wind'], 1)
df['v_wind'] = np.round(df['v_wind'], 1)
df = df.dropna(subset=('temperature', 'direction', 'speed',
'dewpoint_depression', 'u_wind', 'v_wind'),
how='all').reset_index(drop=True)
df['dewpoint'] = df['temperature'] - df['dewpoint_depression']
df.drop('dewpoint_depression', axis=1, inplace=True)
df.units = {'etime': 'second',
'pressure': 'hPa',
'height': 'meter',
'temperature': 'degC',
'dewpoint': 'degC',
'direction': 'degrees',
'speed': 'meter / second',
'u_wind': 'meter / second',
'v_wind': 'meter / second'}
return df |
<SYSTEM_TASK:>
Format the header dataframe and add units.
<END_TASK>
<USER_TASK:>
Description:
def _clean_header_df(self, df):
"""Format the header dataframe and add units.""" |
if self.suffix == '-drvd.txt':
df.units = {'release_time': 'second',
'precipitable_water': 'millimeter',
'inv_pressure': 'hPa',
'inv_height': 'meter',
'inv_strength': 'Kelvin',
'mixed_layer_pressure': 'hPa',
'mixed_layer_height': 'meter',
'freezing_point_pressure': 'hPa',
'freezing_point_height': 'meter',
'lcl_pressure': 'hPa',
'lcl_height': 'meter',
'lfc_pressure': 'hPa',
'lfc_height': 'meter',
'lnb_pressure': 'hPa',
'lnb_height': 'meter',
'lifted_index': 'degC',
'showalter_index': 'degC',
'k_index': 'degC',
'total_totals_index': 'degC',
'cape': 'Joule / kilogram',
'convective_inhibition': 'Joule / kilogram'}
else:
df.units = {'release_time': 'second',
'latitude': 'degrees',
'longitude': 'degrees'}
return df |
<SYSTEM_TASK:>
Retrieve the realtime buoy data from NDBC.
<END_TASK>
<USER_TASK:>
Description:
def realtime_observations(cls, buoy, data_type='txt'):
"""Retrieve the realtime buoy data from NDBC.
Parameters
----------
buoy : str
Name of buoy
data_type : str
Type of data requested, must be one of
'txt' standard meteorological data
'drift' meteorological data from drifting buoys and limited moored buoy data
mainly from international partners
'cwind' continuous winds data (10 minute average)
'spec' spectral wave summaries
'ocean' oceanographic data
'srad' solar radiation data
'dart' water column height
'supl' supplemental measurements data
'rain' hourly rain data
Returns
-------
Raw data string
""" |
endpoint = cls()
parsers = {'txt': endpoint._parse_met,
'drift': endpoint._parse_drift,
'cwind': endpoint._parse_cwind,
'spec': endpoint._parse_spec,
'ocean': endpoint._parse_ocean,
'srad': endpoint._parse_srad,
'dart': endpoint._parse_dart,
'supl': endpoint._parse_supl,
'rain': endpoint._parse_rain}
if data_type not in parsers:
raise KeyError('Data type must be txt, drift, cwind, spec, ocean, srad, dart, '
'supl, or rain for parsed realtime data.')
raw_data = endpoint.raw_buoy_data(buoy, data_type=data_type)
return parsers[data_type](raw_data) |
<SYSTEM_TASK:>
Determine which types of data are available for a given buoy.
<END_TASK>
<USER_TASK:>
Description:
def buoy_data_types(cls, buoy):
"""Determine which types of data are available for a given buoy.
Parameters
----------
buoy : str
Buoy name
Returns
-------
dict of valid file extensions and their descriptions
""" |
endpoint = cls()
file_types = {'txt': 'standard meteorological data',
'drift': 'meteorological data from drifting buoys and limited moored '
'buoy data mainly from international partners',
'cwind': 'continuous wind data (10 minute average)',
'spec': 'spectral wave summaries',
'data_spec': 'raw spectral wave data',
'swdir': 'spectral wave data (alpha1)',
'swdir2': 'spectral wave data (alpha2)',
'swr1': 'spectral wave data (r1)',
'swr2': 'spectral wave data (r2)',
'adcp': 'acoustic doppler current profiler',
'ocean': 'oceanographic data',
'tide': 'tide data',
'srad': 'solar radiation data',
'dart': 'water column height',
'supl': 'supplemental measurements data',
'rain': 'hourly rain data'}
available_data = {}
buoy_url = 'https://www.ndbc.noaa.gov/data/realtime2/' + buoy + '.'
for key in file_types:
if endpoint._check_if_url_valid(buoy_url + key):
available_data[key] = file_types[key]
return available_data |
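Usage sketch; the class name NDBC, its import path, and the buoy id are illustrative assumptions:
from siphon.simplewebservice.ndbc import NDBC  # assumed import path

print(NDBC.buoy_data_types('41002'))      # which file types exist for this buoy
df = NDBC.realtime_observations('41002')  # parsed standard meteorological data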
<SYSTEM_TASK:>
Retrieve the raw buoy data contents from NDBC.
<END_TASK>
<USER_TASK:>
Description:
def raw_buoy_data(cls, buoy, data_type='txt'):
"""Retrieve the raw buoy data contents from NDBC.
Parameters
----------
buoy : str
Name of buoy
data_type : str
Type of data requested, must be one of
'txt' standard meteorological data
'drift' meteorological data from drifting buoys and limited moored buoy data
mainly from international partners
'cwind' continuous winds data (10 minute average)
'spec' spectral wave summaries
'data_spec' raw spectral wave data
'swdir' spectral wave data (alpha1)
'swdir2' spectral wave data (alpha2)
'swr1' spectral wave data (r1)
'swr2' spectral wave data (r2)
'adcp' acoustic doppler current profiler
'ocean' oceanographic data
'tide' tide data
'srad' solar radiation data
'dart' water column height
'supl' supplemental measurements data
'rain' hourly rain data
Returns
-------
Raw data string
""" |
endpoint = cls()
resp = endpoint.get_path('data/realtime2/{}.{}'.format(buoy, data_type))
return resp.text |
<SYSTEM_TASK:>
Create a new HTTP session with our user-agent set.
<END_TASK>
<USER_TASK:>
Description:
def create_session(self):
"""Create a new HTTP session with our user-agent set.
Returns
-------
session : requests.Session
The created session
See Also
--------
urlopen, set_session_options
""" |
ret = requests.Session()
ret.headers['User-Agent'] = self.user_agent
for k, v in self.options.items():
setattr(ret, k, v)
return ret |
<SYSTEM_TASK:>
GET a file-like object for a URL using HTTP.
<END_TASK>
<USER_TASK:>
Description:
def urlopen(self, url, **kwargs):
"""GET a file-like object for a URL using HTTP.
This is a thin wrapper around :meth:`requests.Session.get` that returns a file-like
object wrapped around the resulting content.
Parameters
----------
url : str
The URL to request
kwargs : arbitrary keyword arguments
Additional keyword arguments to pass to :meth:`requests.Session.get`.
Returns
-------
fobj : file-like object
A file-like interface to the content in the response
See Also
--------
:meth:`requests.Session.get`
""" |
return BytesIO(self.create_session().get(url, **kwargs).content) |
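A minimal sketch using the module-level session manager; the URL is illustrative:
from siphon.http_util import session_manager  # assumed import path

fobj = session_manager.urlopen('https://www.ndbc.noaa.gov/data/realtime2/41002.txt')
first_line = fobj.readline()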
<SYSTEM_TASK:>
Add a request for a specific time to the query.
<END_TASK>
<USER_TASK:>
Description:
def time(self, time):
"""Add a request for a specific time to the query.
This modifies the query in-place, but returns `self` so that multiple queries
can be chained together on one line.
This replaces any existing temporal queries that have been set.
Parameters
----------
time : datetime.datetime
The time to request
Returns
-------
self : DataQuery
Returns self for chaining calls
""" |
self._set_query(self.time_query, time=self._format_time(time))
return self |
<SYSTEM_TASK:>
Add a request for a time range to the query.
<END_TASK>
<USER_TASK:>
Description:
def time_range(self, start, end):
"""Add a request for a time range to the query.
This modifies the query in-place, but returns `self` so that multiple queries
can be chained together on one line.
This replaces any existing temporal queries that have been set.
Parameters
----------
start : datetime.datetime
The start of the requested time range
end : datetime.datetime
The end of the requested time range
Returns
-------
self : DataQuery
Returns self for chaining calls
""" |
self._set_query(self.time_query, time_start=self._format_time(start),
time_end=self._format_time(end))
return self |
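A hedged sketch of chaining these query builders against an NCSS endpoint; the endpoint URL and variable name are illustrative:
from datetime import datetime, timedelta
from siphon.ncss import NCSS  # assumed import path

ncss = NCSS('https://thredds.ucar.edu/thredds/ncss/grib/NCEP/GFS/Global_0p5deg/Best')
query = ncss.query().lonlat_point(-105, 40)
query.time_range(datetime.utcnow(), datetime.utcnow() + timedelta(hours=24))
query.variables('Temperature_isobaric')
data = ncss.get_data(query)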
<SYSTEM_TASK:>
Make a GET request, including a query, to the endpoint.
<END_TASK>
<USER_TASK:>
Description:
def get_query(self, query):
"""Make a GET request, including a query, to the endpoint.
The path of the request is to the base URL assigned to the endpoint.
Parameters
----------
query : DataQuery
The query to pass when making the request
Returns
-------
resp : requests.Response
The server's response to the request
See Also
--------
get_path, get
""" |
url = self._base[:-1] if self._base[-1] == '/' else self._base
return self.get(url, query) |
<SYSTEM_TASK:>
Make a GET request, optionally including a query, to a relative path.
<END_TASK>
<USER_TASK:>
Description:
def get_path(self, path, query=None):
"""Make a GET request, optionally including a query, to a relative path.
The path of the request includes a path on top of the base URL
assigned to the endpoint.
Parameters
----------
path : str
The path to request, relative to the endpoint
query : DataQuery, optional
The query to pass when making the request
Returns
-------
resp : requests.Response
The server's response to the request
See Also
--------
get_query, get, url_path
""" |
return self.get(self.url_path(path), query) |
<SYSTEM_TASK:>
Make a GET request, optionally including parameters, to a path.
<END_TASK>
<USER_TASK:>
Description:
def get(self, path, params=None):
"""Make a GET request, optionally including a parameters, to a path.
The path of the request is the full URL.
Parameters
----------
path : str
The URL to request
params : DataQuery, optional
The query to pass when making the request
Returns
-------
resp : requests.Response
The server's response to the request
Raises
------
HTTPError
If the server returns anything other than a 200 (OK) code
See Also
--------
get_query, get
""" |
resp = self._session.get(path, params=params)
if resp.status_code != 200:
if resp.headers.get('Content-Type', '').startswith('text/html'):
text = resp.reason
else:
text = resp.text
raise requests.HTTPError('Error accessing {0}\n'
'Server Error ({1:d}: {2})'.format(resp.request.url,
resp.status_code,
text))
return resp |
<SYSTEM_TASK:>
Return the full path to the Group, including any parent Groups.
<END_TASK>
<USER_TASK:>
Description:
def path(self):
"""Return the full path to the Group, including any parent Groups.""" |
# The root group contributes an empty string, so child paths start with '/'
if self.dataset is self:
return ''
else: # Otherwise recurse
return self.dataset.path + '/' + self.name |
<SYSTEM_TASK:>
Populate the Variable from an NCStream object.
<END_TASK>
<USER_TASK:>
Description:
def load_from_stream(self, var):
"""Populate the Variable from an NCStream object.""" |
dims = []
for d in var.shape:
dim = Dimension(None, d.name)
dim.load_from_stream(d)
dims.append(dim)
self.dimensions = tuple(dim.name for dim in dims)
self.shape = tuple(dim.size for dim in dims)
self.ndim = len(var.shape)
self._unpack_attrs(var.atts)
data, dt, type_name = unpack_variable(var)
if data is not None:
data = data.reshape(self.shape)
self._data = data
self.dtype = dt
self.datatype = type_name
if hasattr(var, 'enumType') and var.enumType:
self.datatype = var.enumType
self._enum = True |
<SYSTEM_TASK:>
Get the needed header information to initialize dataset.
<END_TASK>
<USER_TASK:>
Description:
def _read_header(self):
"""Get the needed header information to initialize dataset.""" |
self._header = self.cdmrf.fetch_header()
self.load_from_stream(self._header) |
<SYSTEM_TASK:>
Retrieve upper air observations from Iowa State's archive for a single station.
<END_TASK>
<USER_TASK:>
Description:
def request_data(cls, time, site_id, **kwargs):
"""Retrieve upper air observations from Iowa State's archive for a single station.
Parameters
----------
time : datetime
The date and time of the desired observation.
site_id : str
The three letter ICAO identifier of the station for which data should be
downloaded.
kwargs
Arbitrary keyword arguments to use to initialize source
Returns
-------
:class:`pandas.DataFrame` containing the data
""" |
endpoint = cls()
df = endpoint._get_data(time, site_id, None, **kwargs)
return df |
<SYSTEM_TASK:>
Retrieve upper air observations from Iowa State's archive for all stations.
<END_TASK>
<USER_TASK:>
Description:
def request_all_data(cls, time, pressure=None, **kwargs):
"""Retrieve upper air observations from Iowa State's archive for all stations.
Parameters
----------
time : datetime
The date and time of the desired observation.
pressure : float, optional
The mandatory pressure level at which to request data (in hPa). If none is given,
all the available data in the profiles is returned.
kwargs
Arbitrary keyword arguments to use to initialize source
Returns
-------
:class:`pandas.DataFrame` containing the data
""" |
endpoint = cls()
df = endpoint._get_data(time, None, pressure, **kwargs)
return df |
<SYSTEM_TASK:>
Download data from Iowa State's upper air archive.
<END_TASK>
<USER_TASK:>
Description:
def _get_data(self, time, site_id, pressure=None):
"""Download data from Iowa State's upper air archive.
Parameters
----------
time : datetime
Date and time for which data should be downloaded
site_id : str
Site id for which data should be downloaded
pressure : float, optional
Mandatory pressure level at which to request data (in hPa).
Returns
-------
:class:`pandas.DataFrame` containing the data
""" |
json_data = self._get_data_raw(time, site_id, pressure)
data = {}
for profile in json_data['profiles']:
for pt in profile['profile']:
for field in ('drct', 'dwpc', 'hght', 'pres', 'sknt', 'tmpc'):
data.setdefault(field, []).append(np.nan if pt[field] is None
else pt[field])
for field in ('station', 'valid'):
data.setdefault(field, []).append(np.nan if profile[field] is None
else profile[field])
# Make sure that the first entry has a valid temperature and dewpoint
idx = np.argmax(~(np.isnan(data['tmpc']) | np.isnan(data['dwpc'])))
# Stuff data into a pandas dataframe
df = pd.DataFrame()
df['pressure'] = ma.masked_invalid(data['pres'][idx:])
df['height'] = ma.masked_invalid(data['hght'][idx:])
df['temperature'] = ma.masked_invalid(data['tmpc'][idx:])
df['dewpoint'] = ma.masked_invalid(data['dwpc'][idx:])
df['direction'] = ma.masked_invalid(data['drct'][idx:])
df['speed'] = ma.masked_invalid(data['sknt'][idx:])
df['station'] = data['station'][idx:]
df['time'] = [datetime.strptime(valid, '%Y-%m-%dT%H:%M:%SZ')
for valid in data['valid'][idx:]]
# Calculate the u and v winds
df['u_wind'], df['v_wind'] = get_wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
# Add unit dictionary
df.units = {'pressure': 'hPa',
'height': 'meter',
'temperature': 'degC',
'dewpoint': 'degC',
'direction': 'degrees',
'speed': 'knot',
'u_wind': 'knot',
'v_wind': 'knot',
'station': None,
'time': None}
return df |
<SYSTEM_TASK:>
r"""Download data from the Iowa State's upper air archive.
<END_TASK>
<USER_TASK:>
Description:
def _get_data_raw(self, time, site_id, pressure=None):
r"""Download data from the Iowa State's upper air archive.
Parameters
----------
time : datetime
Date and time for which data should be downloaded
site_id : str
Site id for which data should be downloaded
pressure : float, optional
Mandatory pressure level at which to request data (in hPa).
Returns
-------
list of json data
""" |
query = {'ts': time.strftime('%Y%m%d%H00')}
if site_id is not None:
query['station'] = site_id
if pressure is not None:
query['pressure'] = pressure
resp = self.get_path('raob.py', query)
json_data = json.loads(resp.text)
# See if the return is valid, but has no data
if not (json_data['profiles'] and json_data['profiles'][0]['profile']):
message = 'No data available '
if time is not None:
message += 'for {time:%Y-%m-%d %HZ} '.format(time=time)
if site_id is not None:
message += 'for station {stid}'.format(stid=site_id)
if pressure is not None:
message += 'for pressure {pres}'.format(pres=pressure)
message = message[:-1] + '.'
raise ValueError(message)
return json_data |
<SYSTEM_TASK:>
Specify one or more stations for the query.
<END_TASK>
<USER_TASK:>
Description:
def stations(self, *stns):
"""Specify one or more stations for the query.
This modifies the query in-place, but returns `self` so that multiple
queries can be chained together on one line.
This replaces any existing spatial queries that have been set.
Parameters
----------
stns : one or more strings
One or more names of variables to request
Returns
-------
self : RadarQuery
Returns self for chaining calls
""" |
self._set_query(self.spatial_query, stn=stns)
return self |
<SYSTEM_TASK:>
Fetch a parsed THREDDS catalog from the radar server.
<END_TASK>
<USER_TASK:>
Description:
def get_catalog(self, query):
"""Fetch a parsed THREDDS catalog from the radar server.
Requests a catalog of radar data files data from the radar server given the
parameters in `query` and returns a :class:`~siphon.catalog.TDSCatalog` instance.
Parameters
----------
query : RadarQuery
The parameters to send to the radar server
Returns
-------
catalog : TDSCatalog
The catalog of matching data files
Raises
------
:class:`~siphon.http_util.BadQueryError`
When the query cannot be handled by the server
See Also
--------
get_catalog_raw
""" |
# TODO: Refactor TDSCatalog so we don't need two requests, or to do URL munging
try:
url = self._base[:-1] if self._base[-1] == '/' else self._base
url += '?' + str(query)
return TDSCatalog(url)
except ET.ParseError:
raise BadQueryError(self.get_catalog_raw(query)) |
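Usage sketch for the radar query and catalog fetch; the server URL and station are illustrative, and the class name RadarServer is an assumption here:
from datetime import datetime
from siphon.radarserver import RadarServer  # assumed import path

rs = RadarServer('https://thredds.ucar.edu/thredds/radarServer/nexrad/level2/IDD/')
query = rs.query().stations('KTLX').time(datetime.utcnow())
catalog = rs.get_catalog(query)
print(list(catalog.datasets))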
<SYSTEM_TASK:>
Request data from the ACIS Web Services API.
<END_TASK>
<USER_TASK:>
Description:
def acis_request(method, params):
"""Request data from the ACIS Web Services API.
Makes a request from the ACIS Web Services API for data
based on a given method (StnMeta,StnData,MultiStnData,GridData,General)
and parameters string. Information about the parameters can be obtained at:
http://www.rcc-acis.org/docs_webservices.html
If a connection to the API fails, then it will raise an exception. Some bad
calls will also return empty dictionaries.
ACIS Web Services is a distributed system! A call to the main URL can be
delivered to any climate center running a public instance of the service.
This makes the calls efficient, but also occasionally results in failed
calls when a server you are directed to is having problems. Generally,
reconnecting after waiting a few seconds will resolve a problem. If problems
are persistent, contact ACIS developers at the High Plains Regional Climate
Center or Northeast Regional Climate Center who will look into server
issues.
Parameters
----------
method : str
The Web Services request method (StnMeta, StnData, MultiStnData, GridData, General)
params : dict
A JSON array of parameters (See Web Services API)
Returns
-------
A dictionary of data based on the JSON parameters
Raises
------
:class: `ACIS_API_Exception`
When the API is unable to establish a connection or returns
unparsable data.
""" |
base_url = 'http://data.rcc-acis.org/' # ACIS Web API URL
timeout = 300 if method == 'MultiStnData' else 60
try:
response = session_manager.create_session().post(base_url + method, json=params,
timeout=timeout)
return response.json()
except requests.exceptions.Timeout:
raise AcisApiException('Connection Timeout')
except requests.exceptions.TooManyRedirects:
raise AcisApiException('Bad URL. Check your ACIS connection method string.')
except ValueError:
raise AcisApiException('No data returned! The ACIS parameter dictionary '
'may be incorrectly formatted') |
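A usage sketch; the station id and element name are illustrative ACIS parameters:
params = {'sid': 'KDEN', 'sdate': '2019-01-01', 'edate': '2019-01-31', 'elems': 'maxt'}
result = acis_request('StnData', params)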
<SYSTEM_TASK:>
Combine multiple Point tags into an array.
<END_TASK>
<USER_TASK:>
Description:
def combine_xml_points(l, units, handle_units):
"""Combine multiple Point tags into an array.""" |
ret = {}
for item in l:
for key, value in item.items():
ret.setdefault(key, []).append(value)
for key, value in ret.items():
if key != 'date':
ret[key] = handle_units(value, units.get(key, None))
return ret |
<SYSTEM_TASK:>
Parse the CSV header returned by TDS.
<END_TASK>
<USER_TASK:>
Description:
def parse_csv_header(line):
"""Parse the CSV header returned by TDS.""" |
units = {}
names = []
for var in line.split(','):
start = var.find('[')
if start < 0:
names.append(str(var))
continue
else:
names.append(str(var[:start]))
end = var.find(']', start)
unitstr = var[start + 1:end]
eq = unitstr.find('=')
if eq >= 0:
# go past = and ", skip final "
units[names[-1]] = unitstr[eq + 2:-1]
return names, units |
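The header format this parser expects, inferred from the logic above; the values are illustrative:
names, units = parse_csv_header('time[unit="ISO"],lat[unit="degrees_north"],station')
# names -> ['time', 'lat', 'station']
# units -> {'time': 'ISO', 'lat': 'degrees_north'}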
<SYSTEM_TASK:>
Fetch parsed data from a THREDDS server using NCSS.
<END_TASK>
<USER_TASK:>
Description:
def get_data(self, query):
"""Fetch parsed data from a THREDDS server using NCSS.
Requests data from the NCSS endpoint given the parameters in `query` and
handles parsing of the returned content based on the mimetype.
Parameters
----------
query : NCSSQuery
The parameters to send to the NCSS endpoint
Returns
-------
Parsed data response from the server. Exact format depends on the format of the
response.
See Also
--------
get_data_raw
""" |
resp = self.get_query(query)
return response_handlers(resp, self.unit_handler) |
<SYSTEM_TASK:>
Register a function to handle a particular mimetype.
<END_TASK>
<USER_TASK:>
Description:
def register(self, mimetype):
"""Register a function to handle a particular mimetype.""" |
def dec(func):
self._reg[mimetype] = func
return func
return dec |
<SYSTEM_TASK:>
Translate typed values into the appropriate python object.
<END_TASK>
<USER_TASK:>
Description:
def handle_typed_values(val, type_name, value_type):
"""Translate typed values into the appropriate python object.
Takes an element name, value, and type and returns a list
with the string value(s) properly converted to a python type.
TypedValues are handled in ucar.ma2.DataType in netcdfJava
in the DataType enum. Possibilities are:
"boolean"
"byte"
"char"
"short"
"int"
"long"
"float"
"double"
"Sequence"
"String"
"Structure"
"enum1"
"enum2"
"enum4"
"opaque"
"object"
All of these are values written as strings in the xml, so simply
applying int, float to the values will work in most cases (i.e.
the TDS encodes them as string values properly).
Example XML element:
<attribute name="scale_factor" type="double" value="0.0010000000474974513"/>
Parameters
----------
val : string
The string representation of the value attribute of the xml element
type_name : string
The string representation of the name attribute of the xml element
value_type : string
The string representation of the type attribute of the xml element
Returns
-------
val : list
A list containing the properly typed python values.
""" |
if value_type in ['byte', 'short', 'int', 'long']:
try:
val = [int(v) for v in re.split('[ ,]', val) if v]
except ValueError:
log.warning('Cannot convert "%s" to int. Keeping type as str.', val)
elif value_type in ['float', 'double']:
try:
val = [float(v) for v in re.split('[ ,]', val) if v]
except ValueError:
log.warning('Cannot convert "%s" to float. Keeping type as str.', val)
elif value_type == 'boolean':
try:
# special case for boolean type
val = val.split()
# values must be either true or false
for potential_bool in val:
if potential_bool not in ['true', 'false']:
raise ValueError
val = [True if item == 'true' else False for item in val]
except ValueError:
msg = 'Cannot convert values %s to boolean.'
msg += ' Keeping type as str.'
log.warning(msg, val)
elif value_type == 'String':
# nothing special for String type
pass
else:
# possibilities - Sequence, Structure, enum, opaque, object,
# and char.
# Not sure how to handle these as I do not have an example
# of how they would show up in dataset.xml
log.warning('%s type %s not understood. Keeping as String.',
type_name, value_type)
if not isinstance(val, list):
val = [val]
return val |
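A few illustrative conversions; the element names are made up for the example:
handle_typed_values('0.0010000000474974513', 'scale_factor', 'double')  # -> [0.0010000000474974513]
handle_typed_values('1, 2, 3', 'flag_values', 'int')                    # -> [1, 2, 3]
handle_typed_values('true false', 'flag_meanings', 'boolean')           # -> [True, False]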
<SYSTEM_TASK:>
r"""Download and parse upper air observations from an online archive.
<END_TASK>
<USER_TASK:>
Description:
def _get_data(self, time, site_id):
r"""Download and parse upper air observations from an online archive.
Parameters
----------
time : datetime
The date and time of the desired observation.
site_id : str
The three letter ICAO identifier of the station for which data should be
downloaded.
Returns
-------
:class:`pandas.DataFrame` containing the data
""" |
raw_data = self._get_data_raw(time, site_id)
soup = BeautifulSoup(raw_data, 'html.parser')
tabular_data = StringIO(soup.find_all('pre')[0].contents[0])
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(tabular_data, skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = get_wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
# Parse metadata
meta_data = soup.find_all('pre')[1].contents[0]
lines = meta_data.splitlines()
# If the station doesn't have a name identified we need to insert a
# record showing this for parsing to proceed.
if 'Station number' in lines[1]:
lines.insert(1, 'Station identifier: ')
station = lines[1].split(':')[1].strip()
station_number = int(lines[2].split(':')[1].strip())
sounding_time = datetime.strptime(lines[3].split(':')[1].strip(), '%y%m%d/%H%M')
latitude = float(lines[4].split(':')[1].strip())
longitude = float(lines[5].split(':')[1].strip())
elevation = float(lines[6].split(':')[1].strip())
df['station'] = station
df['station_number'] = station_number
df['time'] = sounding_time
df['latitude'] = latitude
df['longitude'] = longitude
df['elevation'] = elevation
# Add unit dictionary
df.units = {'pressure': 'hPa',
'height': 'meter',
'temperature': 'degC',
'dewpoint': 'degC',
'direction': 'degrees',
'speed': 'knot',
'u_wind': 'knot',
'v_wind': 'knot',
'station': None,
'station_number': None,
'time': None,
'latitude': 'degrees',
'longitude': 'degrees',
'elevation': 'meter'}
return df |
<SYSTEM_TASK:>
Download data from the University of Wyoming's upper air archive.
<END_TASK>
<USER_TASK:>
Description:
def _get_data_raw(self, time, site_id):
"""Download data from the University of Wyoming's upper air archive.
Parameters
----------
time : datetime
Date and time for which data should be downloaded
site_id : str
Site id for which data should be downloaded
Returns
-------
text of the server response
""" |
path = ('?region=naconf&TYPE=TEXT%3ALIST'
'&YEAR={time:%Y}&MONTH={time:%m}&FROM={time:%d%H}&TO={time:%d%H}'
'&STNM={stid}').format(time=time, stid=site_id)
resp = self.get_path(path)
# See if the return is valid, but has no data
if resp.text.find('Can\'t') != -1:
raise ValueError(
'No data available for {time:%Y-%m-%d %HZ} '
'for station {stid}.'.format(time=time, stid=site_id))
return resp.text |
<SYSTEM_TASK:>
Handle reading an NcStream v1 data block from a file-like object.
<END_TASK>
<USER_TASK:>
Description:
def read_ncstream_data(fobj):
"""Handle reading an NcStream v1 data block from a file-like object.""" |
data = read_proto_object(fobj, stream.Data)
if data.dataType in (stream.STRING, stream.OPAQUE) or data.vdata:
log.debug('Reading string/opaque/vlen')
num_obj = read_var_int(fobj)
log.debug('Num objects: %d', num_obj)
blocks = [read_block(fobj) for _ in range(num_obj)]
if data.dataType == stream.STRING:
blocks = [b.decode('utf-8', errors='ignore') for b in blocks]
# Again endian isn't coded properly
dt = data_type_to_numpy(data.dataType).newbyteorder('>')
if data.vdata:
return np.array([np.frombuffer(b, dtype=dt) for b in blocks])
else:
return np.array(blocks, dtype=dt)
elif data.dataType in _dtypeLookup:
log.debug('Reading array data')
bin_data = read_block(fobj)
log.debug('Binary data: %s', bin_data)
# Hard code to big endian for now since it's not encoded correctly
dt = data_type_to_numpy(data.dataType).newbyteorder('>')
# Handle decompressing the bytes
if data.compress == stream.DEFLATE:
bin_data = zlib.decompress(bin_data)
assert len(bin_data) == data.uncompressedSize
elif data.compress != stream.NONE:
raise NotImplementedError('Compression type {0} not implemented!'.format(
data.compress))
# Turn bytes into an array
return reshape_array(data, np.frombuffer(bin_data, dtype=dt))
elif data.dataType == stream.STRUCTURE:
sd = read_proto_object(fobj, stream.StructureData)
# Make a datatype appropriate to the rows of struct
endian = '>' if data.bigend else '<'
dt = np.dtype([(endian, np.void, sd.rowLength)])
# Turn bytes into an array
return reshape_array(data, np.frombuffer(sd.data, dtype=dt))
elif data.dataType == stream.SEQUENCE:
log.debug('Reading sequence')
blocks = []
magic = read_magic(fobj)
while magic != MAGIC_VEND:
if magic == MAGIC_VDATA:
log.error('Bad magic for struct/seq data!')
blocks.append(read_proto_object(fobj, stream.StructureData))
magic = read_magic(fobj)
return data, blocks
else:
raise NotImplementedError("Don't know how to handle data type: {0}".format(
data.dataType)) |
<SYSTEM_TASK:>
Handle reading an NcStream error from a file-like object and raise as error.
<END_TASK>
<USER_TASK:>
Description:
def read_ncstream_err(fobj):
"""Handle reading an NcStream error from a file-like object and raise as error.""" |
err = read_proto_object(fobj, stream.Error)
raise RuntimeError(err.message) |
<SYSTEM_TASK:>
Read messages from a file-like object until stream is exhausted.
<END_TASK>
<USER_TASK:>
Description:
def read_messages(fobj, magic_table):
"""Read messages from a file-like object until stream is exhausted.""" |
messages = []
while True:
magic = read_magic(fobj)
if not magic:
break
func = magic_table.get(magic)
if func is not None:
messages.append(func(fobj))
else:
log.error('Unknown magic: ' + str(' '.join('{0:02x}'.format(b)
for b in bytearray(magic))))
return messages |
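A minimal sketch of what the magic_table argument might look like; the magic byte strings below are illustrative assumptions, not the actual NcStream constants:

# Hypothetical dispatch table: each magic byte string maps to a reader function
# defined elsewhere in this module.
magic_table = {
    b'\xab\xec\xce\xba': read_ncstream_data,  # assumed data magic
    b'\xab\xad\xba\xda': read_ncstream_err,   # assumed error magic
}
with open('response.bin', 'rb') as fobj:      # hypothetical saved NcStream response
    messages = read_messages(fobj, magic_table)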
<SYSTEM_TASK:>
Read a block of data and parse using the given protobuf object.
<END_TASK>
<USER_TASK:>
Description:
def read_proto_object(fobj, klass):
"""Read a block of data and parse using the given protobuf object.""" |
log.debug('%s chunk', klass.__name__)
obj = klass()
obj.ParseFromString(read_block(fobj))
log.debug('Header: %s', str(obj))
return obj |
<SYSTEM_TASK:>
Read a block.
<END_TASK>
<USER_TASK:>
Description:
def read_block(fobj):
"""Read a block.
Reads a block from a file object by first reading the number of bytes to read, which must
be encoded as a variable-length integer.
Parameters
----------
fobj : file-like object
The file to read from.
Returns
-------
bytes
block of bytes read
""" |
num = read_var_int(fobj)
log.debug('Next block: %d bytes', num)
return fobj.read(num) |
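The framing can be exercised with an in-memory stream; this sketch assumes only the behavior shown above:

import io

# A 3-byte payload prefixed by its length encoded as a varint (3 fits in one byte).
buf = io.BytesIO(b'\x03abc')
assert read_block(buf) == b'abc'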
<SYSTEM_TASK:>
Process vlen coming back from NCStream v2.
<END_TASK>
<USER_TASK:>
Description:
def process_vlen(data_header, array):
"""Process vlen coming back from NCStream v2.
This takes the array of values and slices it into an object array, with entries containing
the appropriate pieces of the original array. Sizes are controlled by the passed-in
`data_header`.
Parameters
----------
data_header : Header
array : :class:`numpy.ndarray`
Returns
-------
ndarray
object array containing sub-sequences from the original primitive array
""" |
source = iter(array)
return np.array([np.fromiter(itertools.islice(source, size), dtype=array.dtype)
for size in data_header.vlens]) |
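A small worked example of the slicing behavior; the header object is a stand-in that carries only the vlens attribute process_vlen relies on:

import numpy as np
from types import SimpleNamespace

flat = np.array([1, 2, 3, 4, 5, 6], dtype='i4')
header = SimpleNamespace(vlens=[2, 1, 3])   # hypothetical stand-in for the protobuf header
ragged = process_vlen(header, flat)
# ragged[0] -> array([1, 2]), ragged[1] -> array([3]), ragged[2] -> array([4, 5, 6])
# Note: on NumPy >= 1.24 the ragged np.array call inside process_vlen needs dtype=object.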
<SYSTEM_TASK:>
Convert DataCol from NCStream v2 into an array with appropriate type.
<END_TASK>
<USER_TASK:>
Description:
def datacol_to_array(datacol):
"""Convert DataCol from NCStream v2 into an array with appropriate type.
Depending on the data type specified, this extracts data from the appropriate members
and packs into a :class:`numpy.ndarray`, recursing as necessary for compound data types.
Parameters
----------
datacol : DataCol
Returns
-------
ndarray
array containing extracted data
""" |
if datacol.dataType == stream.STRING:
arr = np.array(datacol.stringdata, dtype=np.object)
elif datacol.dataType == stream.OPAQUE:
arr = np.array(datacol.opaquedata, dtype=np.object)
elif datacol.dataType == stream.STRUCTURE:
members = OrderedDict((mem.name, datacol_to_array(mem))
for mem in datacol.structdata.memberData)
log.debug('Struct members:\n%s', str(members))
# str() around name necessary because protobuf gives unicode names, but dtype doesn't
# support them on Python 2
dt = np.dtype([(str(name), arr.dtype) for name, arr in members.items()])
log.debug('Struct dtype: %s', str(dt))
arr = np.empty((datacol.nelems,), dtype=dt)
for name, arr_data in members.items():
arr[name] = arr_data
else:
# Make an appropriate datatype
endian = '>' if datacol.bigend else '<'
dt = data_type_to_numpy(datacol.dataType).newbyteorder(endian)
# Turn bytes into an array
arr = np.frombuffer(datacol.primdata, dtype=dt)
if arr.size != datacol.nelems:
log.warning('Array size %d does not agree with nelems %d',
arr.size, datacol.nelems)
if datacol.isVlen:
arr = process_vlen(datacol, arr)
if arr.dtype == np.object_:
arr = reshape_array(datacol, arr)
else:
# In this case the array collapsed, so we need a different reshape that
# sizes correctly from the individual elements
shape = tuple(r.size for r in datacol.section.range) + (datacol.vlens[0],)
arr = arr.reshape(*shape)
else:
arr = reshape_array(datacol, arr)
return arr |
<SYSTEM_TASK:>
Extract the appropriate array shape from the header.
<END_TASK>
<USER_TASK:>
Description:
def reshape_array(data_header, array):
"""Extract the appropriate array shape from the header.
Can handle taking a data header and either bytes containing data or a StructureData
instance, which will have binary data as well as some additional information.
Parameters
----------
array : :class:`numpy.ndarray`
data_header : Data
""" |
shape = tuple(r.size for r in data_header.section.range)
if shape:
return array.reshape(*shape)
else:
return array |
<SYSTEM_TASK:>
Convert an ncstream datatype to a numpy one.
<END_TASK>
<USER_TASK:>
Description:
def data_type_to_numpy(datatype, unsigned=False):
"""Convert an ncstream datatype to a numpy one.""" |
basic_type = _dtypeLookup[datatype]
if datatype in (stream.STRING, stream.OPAQUE):
return np.dtype(basic_type)
if unsigned:
basic_type = basic_type.replace('i', 'u')
return np.dtype('=' + basic_type) |
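The unsigned handling is just a string substitution on the lookup result. Assuming, for illustration only, that the lookup maps a 32-bit signed integer to 'i4':

import numpy as np

basic_type = 'i4'                                    # assumed lookup result
print(np.dtype('=' + basic_type))                    # int32, native byte order
print(np.dtype('=' + basic_type.replace('i', 'u')))  # uint32 when unsigned=True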
<SYSTEM_TASK:>
Convert a Structure specification to a numpy structured dtype.
<END_TASK>
<USER_TASK:>
Description:
def struct_to_dtype(struct):
"""Convert a Structure specification to a numpy structured dtype.""" |
# str() around name necessary because protobuf gives unicode names, but dtype doesn't
# support them on Python 2
fields = [(str(var.name), data_type_to_numpy(var.dataType, var.unsigned))
for var in struct.vars]
for s in struct.structs:
fields.append((str(s.name), struct_to_dtype(s)))
log.debug('Structure fields: %s', fields)
dt = np.dtype(fields)
return dt |
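The return value is an ordinary nested numpy structured dtype; the field names and types below are hypothetical and only meant to show the shape of the output:

import numpy as np

point = np.dtype([('x', 'f8'), ('y', 'f8')])
dt = np.dtype([('station', 'i4'), ('elevation', 'f4'), ('location', point)])
print(dt.names)   # ('station', 'elevation', 'location')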
<SYSTEM_TASK:>
Unpack an NCStream Variable into information we can use.
<END_TASK>
<USER_TASK:>
Description:
def unpack_variable(var):
"""Unpack an NCStream Variable into information we can use.""" |
# If we actually get a structure instance, handle turning that into a variable
if var.dataType == stream.STRUCTURE:
return None, struct_to_dtype(var), 'Structure'
elif var.dataType == stream.SEQUENCE:
log.warning('Sequence support not implemented!')
dt = data_type_to_numpy(var.dataType, var.unsigned)
if var.dataType == stream.OPAQUE:
type_name = 'opaque'
elif var.dataType == stream.STRING:
type_name = 'string'
else:
type_name = dt.name
if var.data:
log.debug('Storing variable data: %s %s', dt, var.data)
if var.dataType == stream.STRING:
data = var.data
else:
# Always sent big endian
data = np.frombuffer(var.data, dtype=dt.newbyteorder('>'))
else:
data = None
return data, dt, type_name |
<SYSTEM_TASK:>
Unpack an embedded attribute into a python or numpy object.
<END_TASK>
<USER_TASK:>
Description:
def unpack_attribute(att):
"""Unpack an embedded attribute into a python or numpy object.""" |
if att.unsigned:
log.warning('Unsupported unsigned attribute!')
# TDS 5.0 now has a dataType attribute that takes precedence
if att.len == 0: # Empty
val = None
elif att.dataType == stream.STRING: # Then look for new datatype string
val = att.sdata
elif att.dataType: # Then a non-zero new data type
val = np.frombuffer(att.data,
dtype='>' + _dtypeLookup[att.dataType], count=att.len)
elif att.type:  # Then a non-zero old data type
val = np.frombuffer(att.data,
dtype=_attrConverters[att.type], count=att.len)
elif att.sdata: # This leaves both 0, try old string
val = att.sdata
else: # Assume new datatype is Char (0)
val = np.array(att.data, dtype=_dtypeLookup[att.dataType])
if att.len == 1:
val = val[0]
return att.name, val |
<SYSTEM_TASK:>
Read a variable-length integer.
<END_TASK>
<USER_TASK:>
Description:
def read_var_int(file_obj):
"""Read a variable-length integer.
Parameters
----------
file_obj : file-like object
The file to read from.
Returns
-------
int
the variable-length value read
""" |
# Read all bytes from here, stopping with the first one that does not have
# the MSB set. Save the lower 7 bits, and keep stacking to the *left*.
val = 0
shift = 0
while True:
# Read next byte
next_val = ord(file_obj.read(1))
val |= ((next_val & 0x7F) << shift)
shift += 7
if not next_val & 0x80:
break
return val |
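A worked example of the 7-bits-per-byte decoding, least significant group first: the two bytes 0xAC 0x02 decode to 300.

import io

# 0xAC = 1010 1100 -> continuation bit set, low 7 bits = 44
# 0x02 = 0000 0010 -> no continuation, low 7 bits = 2, shifted left 7 -> 256
# 256 + 44 = 300
assert read_var_int(io.BytesIO(b'\xac\x02')) == 300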
<SYSTEM_TASK:>
Retrieve data from CDMRemote for one or more variables.
<END_TASK>
<USER_TASK:>
Description:
def fetch_data(self, **var):
"""Retrieve data from CDMRemote for one or more variables.""" |
varstr = ','.join(name + self._convert_indices(ind)
for name, ind in var.items())
query = self.query().add_query_parameter(req='data', var=varstr)
return self._fetch(query) |
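Usage is keyword-driven: each keyword names a variable and its value gives the indices for that variable. The endpoint URL, variable name and slices below are hypothetical, and the accepted index forms depend on _convert_indices (not shown here):

cdm = CDMRemote('http://example.test/thredds/cdmremote/some/dataset')  # hypothetical URL
data = cdm.fetch_data(Temperature=(0, slice(None), slice(None)))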
<SYSTEM_TASK:>
Generate a new query for CDMRemote.
<END_TASK>
<USER_TASK:>
Description:
def query(self):
"""Generate a new query for CDMRemote.
This handles turning on compression if necessary.
Returns
-------
HTTPQuery
The created query.
""" |
q = super(CDMRemote, self).query()
# Turn on compression if it's been set on the object
if self.deflate:
q.add_query_parameter(deflate=self.deflate)
return q |
<SYSTEM_TASK:>
Log in to the Verisure app API
<END_TASK>
<USER_TASK:>
Description:
def login(self):
""" Login to verisure app api
Login before calling any read or write commands
""" |
if os.path.exists(self._cookieFileName):
with open(self._cookieFileName, 'r') as cookieFile:
self._vid = cookieFile.read().strip()
try:
self._get_installations()
except ResponseError:
self._vid = None
os.remove(self._cookieFileName)
if self._vid is None:
self._create_cookie()
with open(self._cookieFileName, 'w') as cookieFile:
cookieFile.write(self._vid)
self._get_installations()
self._giid = self.installations[0]['giid'] |
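A hedged usage sketch of the session object these methods belong to; the class name and constructor signature are assumed from the public python-verisure package:

import verisure

session = verisure.Session('user@example.com', 'secret')   # assumed constructor
session.login()                      # reuses the cached cookie file when still valid
history = session.get_history(filters=('ARM', 'DISARM'), pagesize=5)
print(history)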
<SYSTEM_TASK:>
Get information about installations
<END_TASK>
<USER_TASK:>
Description:
def _get_installations(self):
""" Get information about installations """ |
response = None
for base_url in urls.BASE_URLS:
urls.BASE_URL = base_url
try:
response = requests.get(
urls.get_installations(self._username),
headers={
'Cookie': 'vid={}'.format(self._vid),
'Accept': 'application/json,'
'text/javascript, */*; q=0.01',
})
if 2 == response.status_code // 100:
break
elif 503 == response.status_code:
continue
else:
raise ResponseError(response.status_code, response.text)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
self.installations = json.loads(response.text) |
<SYSTEM_TASK:>
Turn on or off smartplug
<END_TASK>
<USER_TASK:>
Description:
def set_smartplug_state(self, device_label, state):
""" Turn on or off smartplug
Args:
device_label (str): Smartplug device label
state (boolean): new status, 'True' or 'False'
""" |
response = None
try:
response = requests.post(
urls.smartplug(self._giid),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps([{
"deviceLabel": device_label,
"state": state}]))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response) |
<SYSTEM_TASK:>
Get recent events
<END_TASK>
<USER_TASK:>
Description:
def get_history(self, filters=(), pagesize=15, offset=0):
""" Get recent events
Args:
filters (string set): 'ARM', 'DISARM', 'FIRE', 'INTRUSION',
'TECHNICAL', 'SOS', 'WARNING', 'LOCK',
'UNLOCK'
pagesize (int): Number of events to display
offset (int): Skip the first pagesize * offset events
""" |
response = None
try:
response = requests.get(
urls.history(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"offset": int(offset),
"pagesize": int(pagesize),
"notificationCategories": filters})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text) |
<SYSTEM_TASK:>
Lock or unlock
<END_TASK>
<USER_TASK:>
Description:
def set_lock_state(self, code, device_label, state):
""" Lock or unlock
Args:
code (str): Lock code
device_label (str): device label of lock
state (str): 'lock' or 'unlock'
""" |
response = None
try:
response = requests.put(
urls.set_lockstate(self._giid, device_label, state),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps({"code": str(code)}))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text) |
<SYSTEM_TASK:>
Get lock configuration
<END_TASK>
<USER_TASK:>
Description:
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
""" |
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text) |
<SYSTEM_TASK:>
Set lock configuration
<END_TASK>
<USER_TASK:>
Description:
def set_lock_config(self, device_label, volume=None, voice_level=None,
auto_lock_enabled=None):
""" Set lock configuration
Args:
device_label (str): device label of lock
volume (str): 'SILENCE', 'LOW' or 'HIGH'
voice_level (str): 'ESSENTIAL' or 'NORMAL'
auto_lock_enabled (boolean): auto lock enabled
""" |
response = None
data = {}
if volume:
data['volume'] = volume
if voice_level:
data['voiceLevel'] = voice_level
if auto_lock_enabled is not None:
data['autoLockEnabled'] = auto_lock_enabled
try:
response = requests.put(
urls.lockconfig(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)},
data=json.dumps(data))
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response) |
<SYSTEM_TASK:>
Capture smartcam image
<END_TASK>
<USER_TASK:>
Description:
def capture_image(self, device_label):
""" Capture smartcam image
Args:
device_label (str): device label of camera
""" |
response = None
try:
response = requests.post(
urls.imagecapture(self._giid, device_label),
headers={
'Content-Type': 'application/json',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response) |
<SYSTEM_TASK:>
Get smartcam image series
<END_TASK>
<USER_TASK:>
Description:
def get_camera_imageseries(self, number_of_imageseries=10, offset=0):
""" Get smartcam image series
Args:
number_of_imageseries (int): number of image series to get
offset (int): skip the first `offset` image series
""" |
response = None
try:
response = requests.get(
urls.get_imageseries(self._giid),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)},
params={
"numberOfImageSeries": int(number_of_imageseries),
"offset": int(offset),
"fromDate": "",
"toDate": "",
"onlyNotViewed": "",
"_": self._giid})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text) |
<SYSTEM_TASK:>
Download image taken by a smartcam
<END_TASK>
<USER_TASK:>
Description:
def download_image(self, device_label, image_id, file_name):
""" Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
""" |
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk) |
<SYSTEM_TASK:>
Print the result of a verisure request
<END_TASK>
<USER_TASK:>
Description:
def print_result(overview, *names):
""" Print the result of a verisure request """ |
if names:
for name in names:
toprint = overview
for part in name.split('/'):
toprint = toprint[part]
print(json.dumps(toprint, indent=4, separators=(',', ': ')))
else:
print(json.dumps(overview, indent=4, separators=(',', ': '))) |
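The optional names walk nested keys with '/'; a tiny self-contained example:

overview = {'climate': {'kitchen': {'temperature': 21.5, 'humidity': 40}}}

print_result(overview, 'climate/kitchen/temperature')
# 21.5
print_result(overview, 'climate/kitchen')
# {
#     "temperature": 21.5,
#     "humidity": 40
# }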
<SYSTEM_TASK:>
Shortcut to retrieving the ContentType id of the model.
<END_TASK>
<USER_TASK:>
Description:
def type_id(self):
"""
Shortcut to retrieving the ContentType id of the model.
""" |
try:
return ContentType.objects.get_for_model(self.model, for_concrete_model=False).id
except DatabaseError as e:
raise DatabaseError("Unable to fetch ContentType object, is a plugin being registered before the initial syncdb? (original error: {0})".format(str(e))) |
<SYSTEM_TASK:>
Return a cache key for the content item output.
<END_TASK>
<USER_TASK:>
Description:
def get_rendering_cache_key(placeholder_name, contentitem):
"""
Return a cache key for the content item output.
.. seealso::
The :func:`ContentItem.clear_cache() <fluent_contents.models.ContentItem.clear_cache>` function
can be used to remove the cache keys of a retrieved object.
""" |
if not contentitem.pk:
return None
return "contentitem.@{0}.{1}.{2}".format(
placeholder_name,
contentitem.plugin.type_name, # always returns the upcasted name.
contentitem.pk, # already unique per language_code
) |
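The resulting key is a plain string; the stand-in object below only mimics the pk and plugin.type_name attributes the function touches, with hypothetical values:

from types import SimpleNamespace

item = SimpleNamespace(pk=42, plugin=SimpleNamespace(type_name='TextItem'))
print(get_rendering_cache_key('main', item))
# contentitem.@main.TextItem.42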
<SYSTEM_TASK:>
Return a cache key for an existing placeholder object.
<END_TASK>
<USER_TASK:>
Description:
def get_placeholder_cache_key(placeholder, language_code):
"""
Return a cache key for an existing placeholder object.
This key is used to cache the entire output of a placeholder.
""" |
return _get_placeholder_cache_key_for_id(
placeholder.parent_type_id,
placeholder.parent_id,
placeholder.slot,
language_code
) |
<SYSTEM_TASK:>
Return a cache key for a placeholder.
<END_TASK>
<USER_TASK:>
Description:
def get_placeholder_cache_key_for_parent(parent_object, placeholder_name, language_code):
"""
Return a cache key for a placeholder.
This key is used to cache the entire output of a placeholder.
""" |
parent_type = ContentType.objects.get_for_model(parent_object)
return _get_placeholder_cache_key_for_id(
parent_type.id,
parent_object.pk,
placeholder_name,
language_code
) |
<SYSTEM_TASK:>
See if there are items that point to a removed model.
<END_TASK>
<USER_TASK:>
Description:
def remove_stale_items(self, stale_cts):
"""
See if there are items that point to a removed model.
""" |
stale_ct_ids = list(stale_cts.keys())
items = (ContentItem.objects
.non_polymorphic() # very important, or polymorphic skips them on fetching derived data
.filter(polymorphic_ctype__in=stale_ct_ids)
.order_by('polymorphic_ctype', 'pk')
)
if not items:
self.stdout.write("No stale items found.")
return
if self.dry_run:
self.stdout.write("The following content items are stale:")
else:
self.stdout.write("The following content items were stale:")
for item in items:
ct = stale_cts[item.polymorphic_ctype_id]
self.stdout.write("- #{id} points to removed {app_label}.{model}".format(
id=item.pk, app_label=ct.app_label, model=ct.model
))
if not self.dry_run:
try:
item.delete()
except PluginNotFound:
Model.delete(item) |
<SYSTEM_TASK:>
See if there are items that no longer point to an existing parent.
<END_TASK>
<USER_TASK:>
Description:
def remove_unreferenced_items(self, stale_cts):
"""
See if there are items that no longer point to an existing parent.
""" |
stale_ct_ids = list(stale_cts.keys())
parent_types = (ContentItem.objects.order_by()
.exclude(polymorphic_ctype__in=stale_ct_ids)
.values_list('parent_type', flat=True).distinct())
num_unreferenced = 0
for ct_id in parent_types:
parent_ct = ContentType.objects.get_for_id(ct_id)
unreferenced_items = (ContentItem.objects
.filter(parent_type=ct_id)
.order_by('polymorphic_ctype', 'pk'))
if parent_ct.model_class() is not None:
# Only select the items that are part of removed pages,
# unless the parent type was removed - then removing all is correct.
unreferenced_items = unreferenced_items.exclude(
parent_id__in=parent_ct.get_all_objects_for_this_type()
)
if unreferenced_items:
for item in unreferenced_items:
self.stdout.write(
"- {cls}#{id} points to nonexisting {app_label}.{model}".format(
cls=item.__class__.__name__, id=item.pk,
app_label=parent_ct.app_label, model=parent_ct.model
))
num_unreferenced += 1
if not self.dry_run and self.remove_unreferenced:
item.delete()
if not num_unreferenced:
self.stdout.write("No unreferenced items found.")
else:
self.stdout.write("{0} unreferenced items found.".format(num_unreferenced))
if not self.remove_unreferenced:
self.stdout.write("Re-run this command with --remove-unreferenced to remove these items") |