sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
---|---|---|
def set_metadata(self, set_id, fp):
"""
Set the XML metadata on a set.
:param file fp: file-like object to read the XML metadata from.
"""
base_url = self.client.get_url('SET', 'GET', 'single', {'id': set_id})
self._metadata.set(base_url, fp) | Set the XML metadata on a set.
:param file fp: file-like object to read the XML metadata from. | entailment |
def set_metadata(self, fp):
"""
Set the XML metadata on a set.
:param file fp: file-like object to read the XML metadata from.
"""
base_url = self._client.get_url('SET', 'GET', 'single', {'id': self.id})
self._manager._metadata.set(base_url, fp)
# reload myself
r = self._client.request('GET', base_url)
return self._deserialize(r.json(), self._manager) | Set the XML metadata on a set.
:param file fp: file-like object to read the XML metadata from. | entailment |
def set(self, parent_url, fp):
"""
If the parent object already has XML metadata, it will be overwritten.
Accepts XML metadata in any of the three supported formats.
The format will be detected from the XML content.
The Metadata object becomes invalid after setting
:param file fp: A reference to an open file-like object which the content will be read from.
"""
url = parent_url + self.client.get_url_path('METADATA', 'POST', 'set', {})
r = self.client.request('POST', url, data=fp, headers={'Content-Type': 'text/xml'})
if r.status_code not in [200, 201]:
raise exceptions.ServerError("Expected success response, got %s: %s" % (r.status_code, url)) | If the parent object already has XML metadata, it will be overwritten.
Accepts XML metadata in any of the three supported formats.
The format will be detected from the XML content.
The Metadata object becomes invalid after setting
:param file fp: A reference to an open file-like object which the content will be read from. | entailment |
def get_xml(self, fp, format=FORMAT_NATIVE):
"""
Returns the XML metadata for this source, converted to the requested format.
Converted metadata may not contain all the same information as the native format.
:param file fp: A path, or an open file-like object which the content should be written to.
:param str format: desired format for the output. This should be one of the available
formats from :py:meth:`.get_formats`, or :py:attr:`.FORMAT_NATIVE` for the native format.
If you pass this function an open file-like object as the fp parameter, the function will
not close that file for you.
"""
r = self._client.request('GET', getattr(self, format), stream=True)
filename = stream.stream_response_to_file(r, path=fp)
return filename | Returns the XML metadata for this source, converted to the requested format.
Converted metadata may not contain all the same information as the native format.
:param file fp: A path, or an open file-like object which the content should be written to.
:param str format: desired format for the output. This should be one of the available
formats from :py:meth:`.get_formats`, or :py:attr:`.FORMAT_NATIVE` for the native format.
If you pass this function an open file-like object as the fp parameter, the function will
not close that file for you. | entailment |
def get_formats(self):
""" Return the available format names for this metadata """
formats = []
for key in (self.FORMAT_DC, self.FORMAT_FGDC, self.FORMAT_ISO):
if hasattr(self, key):
formats.append(key)
return formats | Return the available format names for this metadata | entailment |
def is_bound(method):
"""
Decorator that asserts the model instance is bound.
Requires:
1. an ``id`` attribute
2. a ``url`` attribute
2. a manager set
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self._is_bound:
raise ValueError("%r must be bound to call %s()" % (self, method.__name__))
return method(self, *args, **kwargs)
return wrapper | Decorator that asserts the model instance is bound.
Requires:
1. an ``id`` attribute
2. a ``url`` attribute
2. a manager set | entailment |
def query_image_id(self, image_id):
"""Query OPUS via the image_id.
This is a query using the 'primaryfilespec' field of the OPUS database.
It returns a list of URLS into the `obsids` attribute.
This example queries for an image of Titan:
>>> opus = opusapi.OPUS()
>>> opus.query_image_id('N1695760475_1')
After this, one can call `download_results()` to retrieve the found
data into the standard locations into the database_path as defined in
`.pyciss.yaml` (the config file),
"""
myquery = {"primaryfilespec": image_id}
self.create_files_request(myquery, fmt="json")
self.unpack_json_response()
return self.obsids | Query OPUS via the image_id.
This is a query using the 'primaryfilespec' field of the OPUS database.
It returns a list of URLS into the `obsids` attribute.
This example queries for an image of Titan:
>>> opus = opusapi.OPUS()
>>> opus.query_image_id('N1695760475_1')
After this, one can call `download_results()` to retrieve the found
data into the standard locations into the database_path as defined in
`.pyciss.yaml` (the config file), | entailment |
def create_request_with_query(self, kind, query, size="thumb", fmt="json"):
"""api/data.[fmt], api/images/[size].[fmt] api/files.[fmt]
kind = ['data', 'images', 'files']
"""
if kind == "data" or kind == "files":
url = "{}/{}.{}".format(base_url, kind, fmt)
elif kind == "images":
url = "{}/images/{}.{}".format(base_url, size, fmt)
self.url = url
self.r = requests.get(url, params=unquote(urlencode(query))) | api/data.[fmt], api/images/[size].[fmt] api/files.[fmt]
kind = ['data', 'images', 'files'] | entailment |
def get_between_times(self, t1, t2, target=None):
"""
Query for OPUS data between times t1 and t2.
Parameters
----------
t1, t2 : datetime.datetime, strings
Start and end time for the query. If type is datetime, will be
converted to isoformat string. If type is string already, it needs
to be in an accepted international format for time strings.
target : str
Potential target for the observation query. Most likely will reduce
the amount of data matching the query a lot.
Returns
-------
None, but set's state of the object to have new query results stored
in self.obsids.
"""
try:
# checking if times have isoformat() method (datetimes have)
t1 = t1.isoformat()
t2 = t2.isoformat()
except AttributeError:
# if not, should already be a string, so do nothing.
pass
myquery = self._get_time_query(t1, t2)
if target is not None:
myquery["target"] = target
self.create_files_request(myquery, fmt="json")
self.unpack_json_response() | Query for OPUS data between times t1 and t2.
Parameters
----------
t1, t2 : datetime.datetime, strings
Start and end time for the query. If type is datetime, will be
converted to isoformat string. If type is string already, it needs
to be in an accepted international format for time strings.
target : str
Potential target for the observation query. Most likely will reduce
the amount of data matching the query a lot.
Returns
-------
None, but set's state of the object to have new query results stored
in self.obsids. | entailment |
def show_images(self, size="small"):
"""Shows preview images using the Jupyter notebook HTML display.
Parameters
==========
size : {'small', 'med', 'thumb', 'full'}
Determines the size of the preview image to be shown.
"""
d = dict(small=256, med=512, thumb=100, full=1024)
try:
width = d[size]
except KeyError:
print("Allowed keys:", d.keys())
return
img_urls = [i._get_img_url(size) for i in self.obsids]
imagesList = "".join(
[
"<img style='width: {0}px; margin: 0px; float: "
"left; border: 1px solid black;' "
"src='{1}' />".format(width, s)
for s in img_urls
]
)
display(HTML(imagesList)) | Shows preview images using the Jupyter notebook HTML display.
Parameters
==========
size : {'small', 'med', 'thumb', 'full'}
Determines the size of the preview image to be shown. | entailment |
def download_results(self, savedir=None, raw=True, calib=False, index=None):
"""Download the previously found and stored Opus obsids.
Parameters
==========
savedir: str or pathlib.Path, optional
If the database root folder as defined by the config.ini should not be used,
provide a different savedir here. It will be handed to PathManager.
"""
obsids = self.obsids if index is None else [self.obsids[index]]
for obsid in obsids:
pm = io.PathManager(obsid.img_id, savedir=savedir)
pm.basepath.mkdir(exist_ok=True)
to_download = []
if raw is True:
to_download.extend(obsid.raw_urls)
if calib is True:
to_download.extend(obsid.calib_urls)
for url in to_download:
basename = Path(url).name
print("Downloading", basename)
store_path = str(pm.basepath / basename)
try:
urlretrieve(url, store_path)
except Exception as e:
urlretrieve(url.replace("https", "http"), store_path)
return str(pm.basepath) | Download the previously found and stored Opus obsids.
Parameters
==========
savedir: str or pathlib.Path, optional
If the database root folder as defined by the config.ini should not be used,
provide a different savedir here. It will be handed to PathManager. | entailment |
def download_previews(self, savedir=None):
"""Download preview files for the previously found and stored Opus obsids.
Parameters
==========
savedir: str or pathlib.Path, optional
If the database root folder as defined by the config.ini should not be used,
provide a different savedir here. It will be handed to PathManager.
"""
for obsid in self.obsids:
pm = io.PathManager(obsid.img_id, savedir=savedir)
pm.basepath.mkdir(exist_ok=True)
basename = Path(obsid.medium_img_url).name
print("Downloading", basename)
urlretrieve(obsid.medium_img_url, str(pm.basepath / basename)) | Download preview files for the previously found and stored Opus obsids.
Parameters
==========
savedir: str or pathlib.Path, optional
If the database root folder as defined by the config.ini should not be used,
provide a different savedir here. It will be handed to PathManager. | entailment |
def which_epi_janus_resonance(name, time):
"""Find which swap situtation we are in by time.
Starting from 2006-01-21 where a Janus-Epimetheus swap occured, and
defining the next 4 years until the next swap as `scenario1, and the 4
years after that `scenario2`.
Calculate in units of 4 years, in which scenario the given time falls.
Parameters
----------
time : timestring, datetime
Time of the image. The astropy Time object can deal with both formats.
Returns
-------
str
The given name string (either `janus` or `epimetheus`) and attach
a 1 or 2, as appropriate.
"""
t1 = Time('2002-01-21').to_datetime()
delta = Time(time).to_datetime() - t1
yearfraction = delta.days / 365
if int(yearfraction / 4) % 2 == 0:
return name + '2'
else:
return name + '1' | Find which swap situtation we are in by time.
Starting from 2006-01-21 where a Janus-Epimetheus swap occured, and
defining the next 4 years until the next swap as `scenario1, and the 4
years after that `scenario2`.
Calculate in units of 4 years, in which scenario the given time falls.
Parameters
----------
time : timestring, datetime
Time of the image. The astropy Time object can deal with both formats.
Returns
-------
str
The given name string (either `janus` or `epimetheus`) and attach
a 1 or 2, as appropriate. | entailment |
def list_drafts(self):
"""
A filterable list views of layers, returning the draft version of each layer.
If the most recent version of a layer or table has been published already,
it won’t be returned here.
"""
target_url = self.client.get_url('LAYER', 'GET', 'multidraft')
return base.Query(self, target_url) | A filterable list views of layers, returning the draft version of each layer.
If the most recent version of a layer or table has been published already,
it won’t be returned here. | entailment |
def list_versions(self, layer_id):
"""
Filterable list of versions of a layer, always ordered newest to oldest.
If the version’s source supports revisions, you can get a specific revision using
``.filter(data__source__revision=value)``. Specific values depend on the source type.
Use ``data__source_revision__lt`` or ``data__source_revision__gte`` to filter
using ``<`` or ``>=`` operators respectively.
"""
target_url = self.client.get_url('VERSION', 'GET', 'multi', {'layer_id': layer_id})
return base.Query(self, target_url, valid_filter_attributes=('data',), valid_sort_attributes=()) | Filterable list of versions of a layer, always ordered newest to oldest.
If the version’s source supports revisions, you can get a specific revision using
``.filter(data__source__revision=value)``. Specific values depend on the source type.
Use ``data__source_revision__lt`` or ``data__source_revision__gte`` to filter
using ``<`` or ``>=`` operators respectively. | entailment |
def get_version(self, layer_id, version_id, expand=[]):
"""
Get a specific version of a layer.
"""
target_url = self.client.get_url('VERSION', 'GET', 'single', {'layer_id': layer_id, 'version_id': version_id})
return self._get(target_url, expand=expand) | Get a specific version of a layer. | entailment |
def get_draft(self, layer_id, expand=[]):
"""
Get the current draft version of a layer.
:raises NotFound: if there is no draft version.
"""
target_url = self.client.get_url('VERSION', 'GET', 'draft', {'layer_id': layer_id})
return self._get(target_url, expand=expand) | Get the current draft version of a layer.
:raises NotFound: if there is no draft version. | entailment |
def get_published(self, layer_id, expand=[]):
"""
Get the latest published version of this layer.
:raises NotFound: if there is no published version.
"""
target_url = self.client.get_url('VERSION', 'GET', 'published', {'layer_id': layer_id})
return self._get(target_url, expand=expand) | Get the latest published version of this layer.
:raises NotFound: if there is no published version. | entailment |
def create_draft(self, layer_id):
"""
Creates a new draft version.
If anything in the data object has changed then an import will begin immediately.
Otherwise to force a re-import from the previous sources call :py:meth:`koordinates.layers.LayerManager.start_import`.
:rtype: Layer
:return: the new version
:raises Conflict: if there is already a draft version for this layer.
"""
target_url = self.client.get_url('VERSION', 'POST', 'create', {'layer_id': layer_id})
r = self.client.request('POST', target_url, json={})
return self.create_from_result(r.json()) | Creates a new draft version.
If anything in the data object has changed then an import will begin immediately.
Otherwise to force a re-import from the previous sources call :py:meth:`koordinates.layers.LayerManager.start_import`.
:rtype: Layer
:return: the new version
:raises Conflict: if there is already a draft version for this layer. | entailment |
def start_import(self, layer_id, version_id):
"""
Starts importing the specified draft version (cancelling any running import),
even if the data object hasn’t changed from the previous version.
"""
target_url = self.client.get_url('VERSION', 'POST', 'import', {'layer_id': layer_id, 'version_id': version_id})
r = self.client.request('POST', target_url, json={})
return self.create_from_result(r.json()) | Starts importing the specified draft version (cancelling any running import),
even if the data object hasn’t changed from the previous version. | entailment |
def start_update(self, layer_id):
"""
A shortcut to create a new version and start importing it.
Effectively the same as :py:meth:`koordinates.layers.LayerManager.create_draft` followed by :py:meth:`koordinates.layers.LayerManager.start_import`.
"""
target_url = self.client.get_url('LAYER', 'POST', 'update', {'layer_id': layer_id})
r = self.client.request('POST', target_url, json={})
return self.parent.create_from_result(r.json()) | A shortcut to create a new version and start importing it.
Effectively the same as :py:meth:`koordinates.layers.LayerManager.create_draft` followed by :py:meth:`koordinates.layers.LayerManager.start_import`. | entailment |
def set_metadata(self, layer_id, version_id, fp):
"""
Set the XML metadata on a layer draft version.
:param file fp: file-like object to read the XML metadata from.
:raises NotAllowed: if the version is already published.
"""
base_url = self.client.get_url('VERSION', 'GET', 'single', {'layer_id': layer_id, 'version_id': version_id})
self._metadata.set(base_url, fp) | Set the XML metadata on a layer draft version.
:param file fp: file-like object to read the XML metadata from.
:raises NotAllowed: if the version is already published. | entailment |
def is_published_version(self):
""" Return if this version is the published version of a layer """
pub_ver = getattr(self, 'published_version', None)
this_ver = getattr(self, 'this_version', None)
return this_ver and pub_ver and (this_ver == pub_ver) | Return if this version is the published version of a layer | entailment |
def is_draft_version(self):
""" Return if this version is the draft version of a layer """
pub_ver = getattr(self, 'published_version', None)
latest_ver = getattr(self, 'latest_version', None)
this_ver = getattr(self, 'this_version', None)
return this_ver and latest_ver and (this_ver == latest_ver) and (latest_ver != pub_ver) | Return if this version is the draft version of a layer | entailment |
def list_versions(self):
"""
Filterable list of versions of a layer, always ordered newest to oldest.
If the version’s source supports revisions, you can get a specific revision using
``.filter(data__source__revision=value)``. Specific values depend on the source type.
Use ``data__source_revision__lt`` or ``data__source_revision__gte`` to filter
using ``<`` or ``>=`` operators respectively.
"""
target_url = self._client.get_url('VERSION', 'GET', 'multi', {'layer_id': self.id})
return base.Query(self._manager, target_url, valid_filter_attributes=('data',), valid_sort_attributes=()) | Filterable list of versions of a layer, always ordered newest to oldest.
If the version’s source supports revisions, you can get a specific revision using
``.filter(data__source__revision=value)``. Specific values depend on the source type.
Use ``data__source_revision__lt`` or ``data__source_revision__gte`` to filter
using ``<`` or ``>=`` operators respectively. | entailment |
def get_version(self, version_id, expand=[]):
"""
Get a specific version of this layer
"""
target_url = self._client.get_url('VERSION', 'GET', 'single', {'layer_id': self.id, 'version_id': version_id})
return self._manager._get(target_url, expand=expand) | Get a specific version of this layer | entailment |
def get_draft_version(self, expand=[]):
"""
Get the current draft version of this layer.
:raises NotFound: if there is no draft version.
"""
target_url = self._client.get_url('VERSION', 'GET', 'draft', {'layer_id': self.id})
return self._manager._get(target_url, expand=expand) | Get the current draft version of this layer.
:raises NotFound: if there is no draft version. | entailment |
def start_import(self, version_id=None):
"""
Starts importing this draft layerversion (cancelling any running import), even
if the data object hasn’t changed from the previous version.
:raises Conflict: if this version is already published.
"""
if not version_id:
version_id = self.version.id
target_url = self._client.get_url('VERSION', 'POST', 'import', {'layer_id': self.id, 'version_id': version_id})
r = self._client.request('POST', target_url, json={})
return self._deserialize(r.json(), self._manager) | Starts importing this draft layerversion (cancelling any running import), even
if the data object hasn’t changed from the previous version.
:raises Conflict: if this version is already published. | entailment |
def start_update(self):
"""
A shortcut to create a new version and start importing it.
Effectively the same as :py:meth:`.create_draft_version` followed by :py:meth:`koordinates.layers.Layer.start_import`.
:rtype: Layer
:return: the new version
:raises Conflict: if there is already a draft version for this layer.
"""
target_url = self._client.get_url('LAYER', 'POST', 'update', {'layer_id': self.id})
r = self._client.request('POST', target_url, json={})
return self._manager.create_from_result(r.json()) | A shortcut to create a new version and start importing it.
Effectively the same as :py:meth:`.create_draft_version` followed by :py:meth:`koordinates.layers.Layer.start_import`.
:rtype: Layer
:return: the new version
:raises Conflict: if there is already a draft version for this layer. | entailment |
def publish(self, version_id=None):
"""
Creates a publish task just for this version, which publishes as soon as any import is complete.
:return: the publish task
:rtype: Publish
:raises Conflict: If the version is already published, or already has a publish job.
"""
if not version_id:
version_id = self.version.id
target_url = self._client.get_url('VERSION', 'POST', 'publish', {'layer_id': self.id, 'version_id': version_id})
r = self._client.request('POST', target_url, json={})
return self._client.get_manager(Publish).create_from_result(r.json()) | Creates a publish task just for this version, which publishes as soon as any import is complete.
:return: the publish task
:rtype: Publish
:raises Conflict: If the version is already published, or already has a publish job. | entailment |
def save(self, with_data=False):
"""
Edits this draft layerversion.
# If anything in the data object has changed, cancel any existing import and start a new one.
:param bool with_data: if ``True``, send the data object, which will start a new import and cancel
any existing one. If ``False``, the data object will *not* be sent, and no import will start.
:raises NotAllowed: if the version is already published.
"""
target_url = self._client.get_url('VERSION', 'PUT', 'edit', {'layer_id': self.id, 'version_id': self.version.id})
r = self._client.request('PUT', target_url, json=self._serialize(with_data=with_data))
return self._deserialize(r.json(), self._manager) | Edits this draft layerversion.
# If anything in the data object has changed, cancel any existing import and start a new one.
:param bool with_data: if ``True``, send the data object, which will start a new import and cancel
any existing one. If ``False``, the data object will *not* be sent, and no import will start.
:raises NotAllowed: if the version is already published. | entailment |
def delete_version(self, version_id=None):
"""
Deletes this draft version (revert to published)
:raises NotAllowed: if this version is already published.
:raises Conflict: if this version is already deleted.
"""
if not version_id:
version_id = self.version.id
target_url = self._client.get_url('VERSION', 'DELETE', 'single', {'layer_id': self.id, 'version_id': version_id})
r = self._client.request('DELETE', target_url)
logger.info("delete_version(): %s", r.status_code) | Deletes this draft version (revert to published)
:raises NotAllowed: if this version is already published.
:raises Conflict: if this version is already deleted. | entailment |
def _get_item_class(self, url):
""" Return the model class matching a URL """
if '/layers/' in url:
return Layer
elif '/tables/' in url:
return Table
elif '/sets/' in url:
return Set
# elif '/documents/' in url:
# return Document
else:
raise NotImplementedError("No support for catalog results of type %s" % url) | Return the model class matching a URL | entailment |
def get_year_since_resonance(ringcube):
"Calculate the fraction of the year since moon swap."
t0 = dt(2006, 1, 21)
td = ringcube.imagetime - t0
return td.days / 365.25 | Calculate the fraction of the year since moon swap. | entailment |
def create_polynoms():
"""Create and return poly1d objects.
Uses the parameters from Morgan to create poly1d objects for
calculations.
"""
fname = pr.resource_filename('pyciss', 'data/soliton_prediction_parameters.csv')
res_df = pd.read_csv(fname)
polys = {}
for resorder, row in zip('65 54 43 21'.split(),
range(4)):
p = poly1d([res_df.loc[row, 'Slope (km/yr)'], res_df.loc[row, 'Intercept (km)']])
polys['janus ' + ':'.join(resorder)] = p
return polys | Create and return poly1d objects.
Uses the parameters from Morgan to create poly1d objects for
calculations. | entailment |
def check_for_soliton(img_id):
"""Workhorse function.
Creates the polynom.
Calculates radius constraints from attributes in `ringcube` object.
Parameters
----------
ringcube : pyciss.ringcube.RingCube
A containter class for a ring-projected ISS image file.
Returns
-------
dict
Dictionary with all solitons found. Reason why it is a dict is
that it could be more than one in one image.
"""
pm = io.PathManager(img_id)
try:
ringcube = RingCube(pm.cubepath)
except FileNotFoundError:
ringcube = RingCube(pm.undestriped)
polys = create_polynoms()
minrad = ringcube.minrad.to(u.km)
maxrad = ringcube.maxrad.to(u.km)
delta_years = get_year_since_resonance(ringcube)
soliton_radii = {}
for k, p in polys.items():
current_r = p(delta_years) * u.km
if minrad < current_r < maxrad:
soliton_radii[k] = current_r
return soliton_radii if soliton_radii else None | Workhorse function.
Creates the polynom.
Calculates radius constraints from attributes in `ringcube` object.
Parameters
----------
ringcube : pyciss.ringcube.RingCube
A containter class for a ring-projected ISS image file.
Returns
-------
dict
Dictionary with all solitons found. Reason why it is a dict is
that it could be more than one in one image. | entailment |
def get_manager(self, model):
"""
Return the active manager for the given model.
:param model: Model class to look up the manager instance for.
:return: Manager instance for the model associated with this client.
"""
if isinstance(model, six.string_types):
# undocumented string lookup
for k, m in self._manager_map.items():
if k.__name__ == model:
return m
else:
raise KeyError(model)
return self._manager_map[model] | Return the active manager for the given model.
:param model: Model class to look up the manager instance for.
:return: Manager instance for the model associated with this client. | entailment |
def _assemble_headers(self, method, user_headers=None):
"""
Takes the supplied headers and adds in any which
are defined at a client level and then returns
the result.
:param user_headers: a `dict` containing headers defined at the
request level, optional.
:return: a `dict` instance
"""
headers = copy.deepcopy(user_headers or {})
if method not in ('GET', 'HEAD'):
headers.setdefault('Content-Type', 'application/json')
return headers | Takes the supplied headers and adds in any which
are defined at a client level and then returns
the result.
:param user_headers: a `dict` containing headers defined at the
request level, optional.
:return: a `dict` instance | entailment |
def reverse_url(self, datatype, url, verb='GET', urltype='single', api_version=None):
"""
Extracts parameters from a populated URL
:param datatype: a string identifying the data the url accesses.
:param url: the fully-qualified URL to extract parameters from.
:param verb: the HTTP verb needed for use with the url.
:param urltype: an adjective used to the nature of the request.
:return: dict
"""
api_version = api_version or 'v1'
templates = getattr(self, 'URL_TEMPLATES__%s' % api_version)
# this is fairly simplistic, if necessary we could use the parse lib
template_url = r"https://(?P<api_host>.+)/services/api/(?P<api_version>.+)"
template_url += re.sub(r'{([^}]+)}', r'(?P<\1>.+)', templates[datatype][verb][urltype])
# /foo/{foo_id}/bar/{id}/
m = re.match(template_url, url or '')
if not m:
raise KeyError("No reverse match from '%s' to %s.%s.%s" % (url, datatype, verb, urltype))
r = m.groupdict()
del r['api_host']
if r.pop('api_version') != api_version:
raise ValueError("API version mismatch")
return r | Extracts parameters from a populated URL
:param datatype: a string identifying the data the url accesses.
:param url: the fully-qualified URL to extract parameters from.
:param verb: the HTTP verb needed for use with the url.
:param urltype: an adjective used to the nature of the request.
:return: dict | entailment |
def get_url(self, datatype, verb, urltype, params={}, api_host=None, api_version=None):
"""Returns a fully formed url
:param datatype: a string identifying the data the url will access.
:param verb: the HTTP verb needed for use with the url.
:param urltype: an adjective used to the nature of the request.
:param \*\*params: substitution variables for the URL.
:return: string
:rtype: A fully formed url.
"""
api_version = api_version or 'v1'
api_host = api_host or self.host
subst = params.copy()
subst['api_host'] = api_host
subst['api_version'] = api_version
url = "https://{api_host}/services/api/{api_version}"
url += self.get_url_path(datatype, verb, urltype, params, api_version)
return url.format(**subst) | Returns a fully formed url
:param datatype: a string identifying the data the url will access.
:param verb: the HTTP verb needed for use with the url.
:param urltype: an adjective used to the nature of the request.
:param \*\*params: substitution variables for the URL.
:return: string
:rtype: A fully formed url. | entailment |
def open_store_variable(self, name, var):
"""Turn CDMRemote variable into something like a numpy.ndarray."""
data = indexing.LazilyOuterIndexedArray(CDMArrayWrapper(name, self))
return Variable(var.dimensions, data, {a: getattr(var, a) for a in var.ncattrs()}) | Turn CDMRemote variable into something like a numpy.ndarray. | entailment |
def get_attrs(self):
"""Get the global attributes from underlying data set."""
return FrozenOrderedDict((a, getattr(self.ds, a)) for a in self.ds.ncattrs()) | Get the global attributes from underlying data set. | entailment |
def get_dimensions(self):
"""Get the dimensions from underlying data set."""
return FrozenOrderedDict((k, len(v)) for k, v in self.ds.dimensions.items()) | Get the dimensions from underlying data set. | entailment |
def _find_base_tds_url(catalog_url):
"""Identify the base URL of the THREDDS server from the catalog URL.
Will retain URL scheme, host, port and username/password when present.
"""
url_components = urlparse(catalog_url)
if url_components.path:
return catalog_url.split(url_components.path)[0]
else:
return catalog_url | Identify the base URL of the THREDDS server from the catalog URL.
Will retain URL scheme, host, port and username/password when present. | entailment |
def filter_time_nearest(self, time, regex=None):
"""Filter keys for an item closest to the desired time.
Loops over all keys in the collection and uses `regex` to extract and build
`datetime`s. The collection of `datetime`s is compared to `start` and the value that
has a `datetime` closest to that requested is returned.If none of the keys in the
collection match the regex, indicating that the keys are not date/time-based,
a ``ValueError`` is raised.
Parameters
----------
time : ``datetime.datetime``
The desired time
regex : str, optional
The regular expression to use to extract date/time information from the key. If
given, this should contain named groups: 'year', 'month', 'day', 'hour', 'minute',
'second', and 'microsecond', as appropriate. When a match is found, any of those
groups missing from the pattern will be assigned a value of 0. The default pattern
looks for patterns like: 20171118_2356.
Returns
-------
The value with a time closest to that desired
"""
return min(self._get_datasets_with_times(regex),
key=lambda i: abs((i[0] - time).total_seconds()))[-1] | Filter keys for an item closest to the desired time.
Loops over all keys in the collection and uses `regex` to extract and build
`datetime`s. The collection of `datetime`s is compared to `start` and the value that
has a `datetime` closest to that requested is returned.If none of the keys in the
collection match the regex, indicating that the keys are not date/time-based,
a ``ValueError`` is raised.
Parameters
----------
time : ``datetime.datetime``
The desired time
regex : str, optional
The regular expression to use to extract date/time information from the key. If
given, this should contain named groups: 'year', 'month', 'day', 'hour', 'minute',
'second', and 'microsecond', as appropriate. When a match is found, any of those
groups missing from the pattern will be assigned a value of 0. The default pattern
looks for patterns like: 20171118_2356.
Returns
-------
The value with a time closest to that desired | entailment |
def filter_time_range(self, start, end, regex=None):
"""Filter keys for all items within the desired time range.
Loops over all keys in the collection and uses `regex` to extract and build
`datetime`s. From the collection of `datetime`s, all values within `start` and `end`
(inclusive) are returned. If none of the keys in the collection match the regex,
indicating that the keys are not date/time-based, a ``ValueError`` is raised.
Parameters
----------
start : ``datetime.datetime``
The start of the desired time range, inclusive
end : ``datetime.datetime``
The end of the desired time range, inclusive
regex : str, optional
The regular expression to use to extract date/time information from the key. If
given, this should contain named groups: 'year', 'month', 'day', 'hour', 'minute',
'second', and 'microsecond', as appropriate. When a match is found, any of those
groups missing from the pattern will be assigned a value of 0. The default pattern
looks for patterns like: 20171118_2356.
Returns
-------
All values corresponding to times within the specified range
"""
return [item[-1] for item in self._get_datasets_with_times(regex)
if start <= item[0] <= end] | Filter keys for all items within the desired time range.
Loops over all keys in the collection and uses `regex` to extract and build
`datetime`s. From the collection of `datetime`s, all values within `start` and `end`
(inclusive) are returned. If none of the keys in the collection match the regex,
indicating that the keys are not date/time-based, a ``ValueError`` is raised.
Parameters
----------
start : ``datetime.datetime``
The start of the desired time range, inclusive
end : ``datetime.datetime``
The end of the desired time range, inclusive
regex : str, optional
The regular expression to use to extract date/time information from the key. If
given, this should contain named groups: 'year', 'month', 'day', 'hour', 'minute',
'second', and 'microsecond', as appropriate. When a match is found, any of those
groups missing from the pattern will be assigned a value of 0. The default pattern
looks for patterns like: 20171118_2356.
Returns
-------
All values corresponding to times within the specified range | entailment |
def pop(self, key, *args, **kwargs):
"""Remove and return the value associated with case-insensitive ``key``."""
return super(CaseInsensitiveDict, self).pop(CaseInsensitiveStr(key)) | Remove and return the value associated with case-insensitive ``key``. | entailment |
def _keys_to_lower(self):
"""Convert key set to lowercase."""
for k in list(self.keys()):
val = super(CaseInsensitiveDict, self).__getitem__(k)
super(CaseInsensitiveDict, self).__delitem__(k)
self.__setitem__(CaseInsensitiveStr(k), val) | Convert key set to lowercase. | entailment |
def resolve_url(self, catalog_url):
"""Resolve the url of the dataset when reading latest.xml.
Parameters
----------
catalog_url : str
The catalog url to be resolved
"""
if catalog_url != '':
resolver_base = catalog_url.split('catalog.xml')[0]
resolver_url = resolver_base + self.url_path
resolver_xml = session_manager.urlopen(resolver_url)
tree = ET.parse(resolver_xml)
root = tree.getroot()
if 'name' in root.attrib:
self.catalog_name = root.attrib['name']
else:
self.catalog_name = 'No name found'
resolved_url = ''
found = False
for child in root.iter():
if not found:
tag_type = child.tag.split('}')[-1]
if tag_type == 'dataset':
if 'urlPath' in child.attrib:
ds = Dataset(child)
resolved_url = ds.url_path
found = True
if found:
return resolved_url
else:
log.warning('no dataset url path found in latest.xml!') | Resolve the url of the dataset when reading latest.xml.
Parameters
----------
catalog_url : str
The catalog url to be resolved | entailment |
def make_access_urls(self, catalog_url, all_services, metadata=None):
"""Make fully qualified urls for the access methods enabled on the dataset.
Parameters
----------
catalog_url : str
The top level server url
all_services : List[SimpleService]
list of :class:`SimpleService` objects associated with the dataset
metadata : dict
Metadata from the :class:`TDSCatalog`
"""
all_service_dict = CaseInsensitiveDict({})
for service in all_services:
all_service_dict[service.name] = service
if isinstance(service, CompoundService):
for subservice in service.services:
all_service_dict[subservice.name] = subservice
service_name = metadata.get('serviceName', None)
access_urls = CaseInsensitiveDict({})
server_url = _find_base_tds_url(catalog_url)
# process access urls for datasets that reference top
# level catalog services (individual or compound service
# types).
if service_name in all_service_dict:
service = all_service_dict[service_name]
if service.service_type != 'Resolver':
# if service is a CompoundService, create access url
# for each SimpleService
if isinstance(service, CompoundService):
for subservice in service.services:
server_base = urljoin(server_url, subservice.base)
access_urls[subservice.service_type] = urljoin(server_base,
self.url_path)
else:
server_base = urljoin(server_url, service.base)
access_urls[service.service_type] = urljoin(server_base, self.url_path)
# process access children of dataset elements
for service_type in self.access_element_info:
url_path = self.access_element_info[service_type]
if service_type in all_service_dict:
server_base = urljoin(server_url, all_service_dict[service_type].base)
access_urls[service_type] = urljoin(server_base, url_path)
self.access_urls = access_urls | Make fully qualified urls for the access methods enabled on the dataset.
Parameters
----------
catalog_url : str
The top level server url
all_services : List[SimpleService]
list of :class:`SimpleService` objects associated with the dataset
metadata : dict
Metadata from the :class:`TDSCatalog` | entailment |
def add_access_element_info(self, access_element):
"""Create an access method from a catalog element."""
service_name = access_element.attrib['serviceName']
url_path = access_element.attrib['urlPath']
self.access_element_info[service_name] = url_path | Create an access method from a catalog element. | entailment |
def download(self, filename=None):
"""Download the dataset to a local file.
Parameters
----------
filename : str, optional
The full path to which the dataset will be saved
"""
if filename is None:
filename = self.name
with self.remote_open() as infile:
with open(filename, 'wb') as outfile:
outfile.write(infile.read()) | Download the dataset to a local file.
Parameters
----------
filename : str, optional
The full path to which the dataset will be saved | entailment |
def remote_access(self, service=None, use_xarray=None):
"""Access the remote dataset.
Open the remote dataset and get a netCDF4-compatible `Dataset` object providing
index-based subsetting capabilities.
Parameters
----------
service : str, optional
The name of the service to use for access to the dataset, either
'CdmRemote' or 'OPENDAP'. Defaults to 'CdmRemote'.
Returns
-------
Dataset
Object for netCDF4-like access to the dataset
"""
if service is None:
service = 'CdmRemote' if 'CdmRemote' in self.access_urls else 'OPENDAP'
if service not in (CaseInsensitiveStr('CdmRemote'), CaseInsensitiveStr('OPENDAP')):
raise ValueError(service + ' is not a valid service for remote_access')
return self.access_with_service(service, use_xarray) | Access the remote dataset.
Open the remote dataset and get a netCDF4-compatible `Dataset` object providing
index-based subsetting capabilities.
Parameters
----------
service : str, optional
The name of the service to use for access to the dataset, either
'CdmRemote' or 'OPENDAP'. Defaults to 'CdmRemote'.
Returns
-------
Dataset
Object for netCDF4-like access to the dataset | entailment |
def subset(self, service=None):
"""Subset the dataset.
Open the remote dataset and get a client for talking to ``service``.
Parameters
----------
service : str, optional
The name of the service for subsetting the dataset. Defaults to 'NetcdfSubset'
or 'NetcdfServer', in that order, depending on the services listed in the
catalog.
Returns
-------
a client for communicating using ``service``
"""
if service is None:
for serviceName in self.ncssServiceNames:
if serviceName in self.access_urls:
service = serviceName
break
else:
raise RuntimeError('Subset access is not available for this dataset.')
elif service not in self.ncssServiceNames:
raise ValueError(service + ' is not a valid service for subset. Options are: '
+ ', '.join(self.ncssServiceNames))
return self.access_with_service(service) | Subset the dataset.
Open the remote dataset and get a client for talking to ``service``.
Parameters
----------
service : str, optional
The name of the service for subsetting the dataset. Defaults to 'NetcdfSubset'
or 'NetcdfServer', in that order, depending on the services listed in the
catalog.
Returns
-------
a client for communicating using ``service`` | entailment |
def access_with_service(self, service, use_xarray=None):
"""Access the dataset using a particular service.
Return an Python object capable of communicating with the server using the particular
service. For instance, for 'HTTPServer' this is a file-like object capable of
HTTP communication; for OPENDAP this is a netCDF4 dataset.
Parameters
----------
service : str
The name of the service for accessing the dataset
Returns
-------
An instance appropriate for communicating using ``service``.
"""
service = CaseInsensitiveStr(service)
if service == 'CdmRemote':
if use_xarray:
from .cdmr.xarray_support import CDMRemoteStore
try:
import xarray as xr
provider = lambda url: xr.open_dataset(CDMRemoteStore(url)) # noqa: E731
except ImportError:
raise ImportError('CdmRemote access needs xarray to be installed.')
else:
from .cdmr import Dataset as CDMRDataset
provider = CDMRDataset
elif service == 'OPENDAP':
if use_xarray:
try:
import xarray as xr
provider = xr.open_dataset
except ImportError:
raise ImportError('xarray to be installed if `use_xarray` is True.')
else:
try:
from netCDF4 import Dataset as NC4Dataset
provider = NC4Dataset
except ImportError:
raise ImportError('OPENDAP access needs netCDF4-python to be installed.')
elif service in self.ncssServiceNames:
from .ncss import NCSS
provider = NCSS
elif service == 'HTTPServer':
provider = session_manager.urlopen
else:
raise ValueError(service + ' is not an access method supported by Siphon')
try:
return provider(self.access_urls[service])
except KeyError:
raise ValueError(service + ' is not available for this dataset') | Access the dataset using a particular service.
Return an Python object capable of communicating with the server using the particular
service. For instance, for 'HTTPServer' this is a file-like object capable of
HTTP communication; for OPENDAP this is a netCDF4 dataset.
Parameters
----------
service : str
The name of the service for accessing the dataset
Returns
-------
An instance appropriate for communicating using ``service``. | entailment |
def get_wind_components(speed, wdir):
r"""Calculate the U, V wind vector components from the speed and direction.
Parameters
----------
speed : array_like
The wind speed (magnitude)
wdir : array_like
The wind direction, specified as the direction from which the wind is
blowing, with 0 being North.
Returns
-------
u, v : tuple of array_like
The wind components in the X (East-West) and Y (North-South)
directions, respectively.
"""
u = -speed * np.sin(wdir)
v = -speed * np.cos(wdir)
return u, v | r"""Calculate the U, V wind vector components from the speed and direction.
Parameters
----------
speed : array_like
The wind speed (magnitude)
wdir : array_like
The wind direction, specified as the direction from which the wind is
blowing, with 0 being North.
Returns
-------
u, v : tuple of array_like
The wind components in the X (East-West) and Y (North-South)
directions, respectively. | entailment |
def _get_metadata(self):
"""Get header information and store as metadata for the endpoint."""
self.metadata = self.fetch_header()
self.variables = {g.name for g in self.metadata.grids} | Get header information and store as metadata for the endpoint. | entailment |
def fetch_header(self):
"""Make a header request to the endpoint."""
query = self.query().add_query_parameter(req='header')
return self._parse_messages(self.get_query(query).content)[0] | Make a header request to the endpoint. | entailment |
def fetch_feature_type(self):
"""Request the featureType from the endpoint."""
query = self.query().add_query_parameter(req='featureType')
return self.get_query(query).content | Request the featureType from the endpoint. | entailment |
def fetch_coords(self, query):
"""Pull down coordinate data from the endpoint."""
q = query.add_query_parameter(req='coord')
return self._parse_messages(self.get_query(q).content) | Pull down coordinate data from the endpoint. | entailment |
def request_data(cls, time, site_id, derived=False):
"""Retreive IGRA version 2 data for one station.
Parameters
--------
site_id : str
11-character IGRA2 station identifier.
time : datetime
The date and time of the desired observation. If list of two times is given,
dataframes for all dates within the two dates will be returned.
Returns
-------
:class: `pandas.DataFrame` containing the data.
"""
igra2 = cls()
# Set parameters for data query
if derived:
igra2.ftpsite = igra2.ftpsite + 'derived/derived-por/'
igra2.suffix = igra2.suffix + '-drvd.txt'
else:
igra2.ftpsite = igra2.ftpsite + 'data/data-por/'
igra2.suffix = igra2.suffix + '-data.txt'
if type(time) == datetime.datetime:
igra2.begin_date = time
igra2.end_date = time
else:
igra2.begin_date, igra2.end_date = time
igra2.site_id = site_id
df, headers = igra2._get_data()
return df, headers | Retreive IGRA version 2 data for one station.
Parameters
--------
site_id : str
11-character IGRA2 station identifier.
time : datetime
The date and time of the desired observation. If list of two times is given,
dataframes for all dates within the two dates will be returned.
Returns
-------
:class: `pandas.DataFrame` containing the data. | entailment |
def _get_data(self):
"""Process the IGRA2 text file for observations at site_id matching time.
Return:
-------
:class: `pandas.DataFrame` containing the body data.
:class: `pandas.DataFrame` containing the header data.
"""
# Split the list of times into begin and end dates. If only
# one date is supplied, set both begin and end dates equal to that date.
body, header, dates_long, dates = self._get_data_raw()
params = self._get_fwf_params()
df_body = pd.read_fwf(StringIO(body), **params['body'])
df_header = pd.read_fwf(StringIO(header), **params['header'])
df_body['date'] = dates_long
df_body = self._clean_body_df(df_body)
df_header = self._clean_header_df(df_header)
df_header['date'] = dates
return df_body, df_header | Process the IGRA2 text file for observations at site_id matching time.
Return:
-------
:class: `pandas.DataFrame` containing the body data.
:class: `pandas.DataFrame` containing the header data. | entailment |
def _get_data_raw(self):
"""Download observations matching the time range.
Returns a tuple with a string for the body, string for the headers,
and a list of dates.
"""
# Import need to be here so we can monkeypatch urlopen for testing and avoid
# downloading live data for testing
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
with closing(urlopen(self.ftpsite + self.site_id + self.suffix + '.zip')) as url:
f = ZipFile(BytesIO(url.read()), 'r').open(self.site_id + self.suffix)
lines = [line.decode('utf-8') for line in f.readlines()]
body, header, dates_long, dates = self._select_date_range(lines)
return body, header, dates_long, dates | Download observations matching the time range.
Returns a tuple with a string for the body, string for the headers,
and a list of dates. | entailment |
def _select_date_range(self, lines):
"""Identify lines containing headers within the range begin_date to end_date.
Parameters
-----
lines: list
list of lines from the IGRA2 data file.
"""
headers = []
num_lev = []
dates = []
# Get indices of headers, and make a list of dates and num_lev
for idx, line in enumerate(lines):
if line[0] == '#':
year, month, day, hour = map(int, line[13:26].split())
# All soundings have YMD, most have hour
try:
date = datetime.datetime(year, month, day, hour)
except ValueError:
date = datetime.datetime(year, month, day)
# Check date
if self.begin_date <= date <= self.end_date:
headers.append(idx)
num_lev.append(int(line[32:36]))
dates.append(date)
if date > self.end_date:
break
if len(dates) == 0:
# Break if no matched dates.
# Could improve this later by showing the date range for the station.
raise ValueError('No dates match selection.')
# Compress body of data into a string
begin_idx = min(headers)
end_idx = max(headers) + num_lev[-1]
# Make a boolean vector that selects only list indices within the time range
selector = np.zeros(len(lines), dtype=bool)
selector[begin_idx:end_idx + 1] = True
selector[headers] = False
body = ''.join([line for line in itertools.compress(lines, selector)])
selector[begin_idx:end_idx + 1] = ~selector[begin_idx:end_idx + 1]
header = ''.join([line for line in itertools.compress(lines, selector)])
# expand date vector to match length of the body dataframe.
dates_long = np.repeat(dates, num_lev)
return body, header, dates_long, dates | Identify lines containing headers within the range begin_date to end_date.
Parameters
-----
lines: list
list of lines from the IGRA2 data file. | entailment |
def _get_fwf_params(self):
"""Produce a dictionary with names, colspecs, and dtype for IGRA2 data.
Returns a dict with entries 'body' and 'header'.
"""
def _cdec(power=1):
"""Make a function to convert string 'value*10^power' to float."""
def _cdec_power(val):
if val in ['-9999', '-8888', '-99999']:
return np.nan
else:
return float(val) / 10**power
return _cdec_power
def _cflag(val):
"""Replace alphabetic flags A and B with numeric."""
if val == 'A':
return 1
elif val == 'B':
return 2
else:
return 0
def _ctime(strformat='MMMSS'):
"""Return a function converting a string from MMMSS or HHMM to seconds."""
def _ctime_strformat(val):
time = val.strip().zfill(5)
if int(time) < 0:
return np.nan
elif int(time) == 9999:
return np.nan
else:
if strformat == 'MMMSS':
minutes = int(time[0:3])
seconds = int(time[3:5])
time_seconds = minutes * 60 + seconds
elif strformat == 'HHMM':
hours = int(time[0:2])
minutes = int(time[2:4])
time_seconds = hours * 3600 + minutes * 60
else:
sys.exit('Unrecognized time format')
return time_seconds
return _ctime_strformat
def _clatlon(x):
n = len(x)
deg = x[0:n - 4]
dec = x[n - 4:]
return float(deg + '.' + dec)
if self.suffix == '-drvd.txt':
names_body = ['pressure', 'reported_height', 'calculated_height',
'temperature', 'temperature_gradient', 'potential_temperature',
'potential_temperature_gradient', 'virtual_temperature',
'virtual_potential_temperature', 'vapor_pressure',
'saturation_vapor_pressure', 'reported_relative_humidity',
'calculated_relative_humidity', 'u_wind', 'u_wind_gradient',
'v_wind', 'v_wind_gradient', 'refractive_index']
colspecs_body = [(0, 7), (8, 15), (16, 23), (24, 31), (32, 39),
(40, 47), (48, 55), (56, 63), (64, 71), (72, 79),
(80, 87), (88, 95), (96, 103), (104, 111), (112, 119),
(120, 127), (128, 135), (137, 143), (144, 151)]
conv_body = {'pressure': _cdec(power=2),
'reported_height': int,
'calculated_height': int,
'temperature': _cdec(),
'temperature_gradient': _cdec(),
'potential_temperature': _cdec(),
'potential_temperature_gradient': _cdec(),
'virtual_temperature': _cdec(),
'virtual_potential_temperature': _cdec(),
'vapor_pressure': _cdec(power=3),
'saturation_vapor_pressure': _cdec(power=3),
'reported_relative_humidity': _cdec(),
'calculated_relative_humidity': _cdec(),
'u_wind': _cdec(),
'u_wind_gradient': _cdec(),
'v_wind': _cdec(),
'v_wind_gradient': _cdec(),
'refractive_index': int}
names_header = ['site_id', 'year', 'month', 'day', 'hour', 'release_time',
'number_levels', 'precipitable_water', 'inv_pressure',
'inv_height', 'inv_strength', 'mixed_layer_pressure',
'mixed_layer_height', 'freezing_point_pressure',
'freezing_point_height', 'lcl_pressure', 'lcl_height',
'lfc_pressure', 'lfc_height', 'lnb_pressure', 'lnb_height',
'lifted_index', 'showalter_index', 'k_index', 'total_totals_index',
'cape', 'convective_inhibition']
colspecs_header = [(1, 12), (13, 17), (18, 20), (21, 23), (24, 26),
(27, 31), (31, 36), (37, 43), (43, 48), (49, 55),
(55, 61), (61, 67), (67, 73), (73, 79), (79, 85),
(85, 91), (91, 97), (97, 103), (103, 109), (109, 115),
(115, 121), (121, 127), (127, 133), (133, 139),
(139, 145), (145, 151), (151, 157)]
conv_header = {'site_id': str,
'year': int,
'month': int,
'day': int,
'hour': int,
'release_time': _ctime(strformat='HHMM'),
'number_levels': int,
'precipitable_water': _cdec(power=2),
'inv_pressure': _cdec(power=2),
'inv_height': int,
'inv_strength': _cdec(),
'mixed_layer_pressure': _cdec(power=2),
'mixed_layer_height': int,
'freezing_point_pressure': _cdec(power=2),
'freezing_point_height': int,
'lcl_pressure': _cdec(power=2),
'lcl_height': int,
'lfc_pressure': _cdec(power=2),
'lfc_height': int,
'lnb_pressure': _cdec(power=2),
'lnb_height': int,
'lifted_index': int,
'showalter_index': int,
'k_index': int,
'total_totals_index': int,
'cape': int,
'convective_inhibition': int}
na_vals = ['-99999']
else:
names_body = ['lvltyp1', 'lvltyp2', 'etime', 'pressure',
'pflag', 'height', 'zflag', 'temperature', 'tflag',
'relative_humidity', 'dewpoint_depression',
'direction', 'speed']
colspecs_body = [(0, 1), (1, 2), (3, 8), (9, 15), (15, 16),
(16, 21), (21, 22), (22, 27), (27, 28),
(28, 33), (34, 39), (40, 45), (46, 51)]
conv_body = {'lvltyp1': int,
'lvltyp2': int,
'etime': _ctime(strformat='MMMSS'),
'pressure': _cdec(power=2),
'pflag': _cflag,
'height': int,
'zflag': _cflag,
'temperature': _cdec(),
'tflag': _cflag,
'relative_humidity': _cdec(),
'dewpoint_depression': _cdec(),
'direction': int,
'speed': _cdec()}
names_header = ['site_id', 'year', 'month', 'day', 'hour', 'release_time',
'number_levels', 'pressure_source_code',
'non_pressure_source_code',
'latitude', 'longitude']
colspecs_header = [(1, 12), (13, 17), (18, 20), (21, 23), (24, 26),
(27, 31), (32, 36), (37, 45), (46, 54), (55, 62), (63, 71)]
na_vals = ['-8888', '-9999']
conv_header = {'release_time': _ctime(strformat='HHMM'),
'number_levels': int,
'latitude': _clatlon,
'longitude': _clatlon}
return {'body': {'names': names_body,
'colspecs': colspecs_body,
'converters': conv_body,
'na_values': na_vals,
'index_col': False},
'header': {'names': names_header,
'colspecs': colspecs_header,
'converters': conv_header,
'na_values': na_vals,
'index_col': False}} | Produce a dictionary with names, colspecs, and dtype for IGRA2 data.
Returns a dict with entries 'body' and 'header'. | entailment |
def _clean_body_df(self, df):
"""Format the dataframe, remove empty rows, and add units attribute."""
if self.suffix == '-drvd.txt':
df = df.dropna(subset=('temperature', 'reported_relative_humidity',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
df.units = {'pressure': 'hPa',
'reported_height': 'meter',
'calculated_height': 'meter',
'temperature': 'Kelvin',
'temperature_gradient': 'Kelvin / kilometer',
'potential_temperature': 'Kelvin',
'potential_temperature_gradient': 'Kelvin / kilometer',
'virtual_temperature': 'Kelvin',
'virtual_potential_temperature': 'Kelvin',
'vapor_pressure': 'Pascal',
'saturation_vapor_pressure': 'Pascal',
'reported_relative_humidity': 'percent',
'calculated_relative_humidity': 'percent',
'u_wind': 'meter / second',
'u_wind_gradient': '(meter / second) / kilometer)',
'v_wind': 'meter / second',
'v_wind_gradient': '(meter / second) / kilometer)',
'refractive_index': 'unitless'}
else:
df['u_wind'], df['v_wind'] = get_wind_components(df['speed'],
np.deg2rad(df['direction']))
df['u_wind'] = np.round(df['u_wind'], 1)
df['v_wind'] = np.round(df['v_wind'], 1)
df = df.dropna(subset=('temperature', 'direction', 'speed',
'dewpoint_depression', 'u_wind', 'v_wind'),
how='all').reset_index(drop=True)
df['dewpoint'] = df['temperature'] - df['dewpoint_depression']
df.drop('dewpoint_depression', axis=1, inplace=True)
df.units = {'etime': 'second',
'pressure': 'hPa',
'height': 'meter',
'temperature': 'degC',
'dewpoint': 'degC',
'direction': 'degrees',
'speed': 'meter / second',
'u_wind': 'meter / second',
'v_wind': 'meter / second'}
return df | Format the dataframe, remove empty rows, and add units attribute. | entailment |
def _clean_header_df(self, df):
"""Format the header dataframe and add units."""
if self.suffix == '-drvd.txt':
df.units = {'release_time': 'second',
'precipitable_water': 'millimeter',
'inv_pressure': 'hPa',
'inv_height': 'meter',
'inv_strength': 'Kelvin',
'mixed_layer_pressure': 'hPa',
'mixed_layer_height': 'meter',
'freezing_point_pressure': 'hPa',
'freezing_point_height': 'meter',
'lcl_pressure': 'hPa',
'lcl_height': 'meter',
'lfc_pressure': 'hPa',
'lfc_height': 'meter',
'lnb_pressure': 'hPa',
'lnb_height': 'meter',
'lifted_index': 'degC',
'showalter_index': 'degC',
'k_index': 'degC',
'total_totals_index': 'degC',
'cape': 'Joule / kilogram',
'convective_inhibition': 'Joule / kilogram'}
else:
df.units = {'release_time': 'second',
'latitude': 'degrees',
'longitude': 'degrees'}
return df | Format the header dataframe and add units. | entailment |
def realtime_observations(cls, buoy, data_type='txt'):
"""Retrieve the realtime buoy data from NDBC.
Parameters
----------
buoy : str
Name of buoy
data_type : str
Type of data requested, must be one of
'txt' standard meteorological data
'drift' meteorological data from drifting buoys and limited moored buoy data
mainly from international partners
'cwind' continuous winds data (10 minute average)
'spec' spectral wave summaries
'ocean' oceanographic data
'srad' solar radiation data
'dart' water column height
'supl' supplemental measurements data
'rain' hourly rain data
Returns
-------
Raw data string
"""
endpoint = cls()
parsers = {'txt': endpoint._parse_met,
'drift': endpoint._parse_drift,
'cwind': endpoint._parse_cwind,
'spec': endpoint._parse_spec,
'ocean': endpoint._parse_ocean,
'srad': endpoint._parse_srad,
'dart': endpoint._parse_dart,
'supl': endpoint._parse_supl,
'rain': endpoint._parse_rain}
if data_type not in parsers:
raise KeyError('Data type must be txt, drift, cwind, spec, ocean, srad, dart,'
'supl, or rain for parsed realtime data.')
raw_data = endpoint.raw_buoy_data(buoy, data_type=data_type)
return parsers[data_type](raw_data) | Retrieve the realtime buoy data from NDBC.
Parameters
----------
buoy : str
Name of buoy
data_type : str
Type of data requested, must be one of
'txt' standard meteorological data
'drift' meteorological data from drifting buoys and limited moored buoy data
mainly from international partners
'cwind' continuous winds data (10 minute average)
'spec' spectral wave summaries
'ocean' oceanographic data
'srad' solar radiation data
'dart' water column height
'supl' supplemental measurements data
'rain' hourly rain data
Returns
-------
Raw data string | entailment |
def _parse_met(content):
"""Parse standard meteorological data from NDBC buoys.
Parameters
----------
content : str
Data to parse
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
col_names = ['year', 'month', 'day', 'hour', 'minute',
'wind_direction', 'wind_speed', 'wind_gust',
'wave_height', 'dominant_wave_period', 'average_wave_period',
'dominant_wave_direction', 'pressure',
'air_temperature', 'water_temperature', 'dewpoint',
'visibility', '3hr_pressure_tendency', 'water_level_above_mean']
col_units = {'wind_direction': 'degrees',
'wind_speed': 'meters/second',
'wind_gust': 'meters/second',
'wave_height': 'meters',
'dominant_wave_period': 'seconds',
'average_wave_period': 'seconds',
'dominant_wave_direction': 'degrees',
'pressure': 'hPa',
'air_temperature': 'degC',
'water_temperature': 'degC',
'dewpoint': 'degC',
'visibility': 'nautical_mile',
'3hr_pressure_tendency': 'hPa',
'water_level_above_mean': 'feet',
'time': None}
df = pd.read_table(StringIO(content), comment='#', na_values='MM',
names=col_names, sep=r'\s+')
df['time'] = pd.to_datetime(df[['year', 'month', 'day', 'hour', 'minute']], utc=True)
df = df.drop(columns=['year', 'month', 'day', 'hour', 'minute'])
df.units = col_units
return df | Parse standard meteorological data from NDBC buoys.
Parameters
----------
content : str
Data to parse
Returns
-------
:class:`pandas.DataFrame` containing the data | entailment |
def _parse_supl(content):
"""Parse supplemental measurements data.
Parameters
----------
content : str
Data to parse
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
col_names = ['year', 'month', 'day', 'hour', 'minute',
'hourly_low_pressure', 'hourly_low_pressure_time',
'hourly_high_wind', 'hourly_high_wind_direction',
'hourly_high_wind_time']
col_units = {'hourly_low_pressure': 'hPa',
'hourly_low_pressure_time': None,
'hourly_high_wind': 'meters/second',
'hourly_high_wind_direction': 'degrees',
'hourly_high_wind_time': None,
'time': None}
df = pd.read_table(StringIO(content), comment='#', na_values='MM',
names=col_names, sep=r'\s+')
df['time'] = pd.to_datetime(df[['year', 'month', 'day', 'hour', 'minute']], utc=True)
df['hours'] = np.floor(df['hourly_low_pressure_time'] / 100)
df['minutes'] = df['hourly_low_pressure_time'] - df['hours'] * 100
df['hours'] = df['hours'].replace(99, np.nan)
df['minutes'] = df['minutes'].replace(99, np.nan)
df['hourly_low_pressure_time'] = pd.to_datetime(df[['year', 'month', 'day', 'hours',
'minutes']], utc=True)
df['hours'] = np.floor(df['hourly_high_wind_time'] / 100)
df['minutes'] = df['hourly_high_wind_time'] - df['hours'] * 100
df['hours'] = df['hours'].replace(99, np.nan)
df['minutes'] = df['minutes'].replace(99, np.nan)
df['hourly_high_wind_time'] = pd.to_datetime(df[['year', 'month', 'day',
'hours', 'minutes']], utc=True)
df = df.drop(columns=['year', 'month', 'day', 'hour', 'minute', 'hours', 'minutes'])
df.units = col_units
return df | Parse supplemental measurements data.
Parameters
----------
content : str
Data to parse
Returns
-------
:class:`pandas.DataFrame` containing the data | entailment |
def _check_if_url_valid(url):
"""Check if a url is valid (returns 200) or not.
Parameters
----------
url : str
URL to check
Returns
-------
bool if url is valid
"""
r = requests.head(url)
if r.status_code == 200:
return True
else:
return False | Check if a url is valid (returns 200) or not.
Parameters
----------
url : str
URL to check
Returns
-------
bool if url is valid | entailment |
def buoy_data_types(cls, buoy):
"""Determine which types of data are available for a given buoy.
Parameters
----------
buoy : str
Buoy name
Returns
-------
dict of valid file extensions and their descriptions
"""
endpoint = cls()
file_types = {'txt': 'standard meteorological data',
'drift': 'meteorological data from drifting buoys and limited moored'
'buoy data mainly from international partners',
'cwind': 'continuous wind data (10 minute average)',
'spec': 'spectral wave summaries',
'data_spec': 'raw spectral wave data',
'swdir': 'spectral wave data (alpha1)',
'swdir2': 'spectral wave data (alpha2)',
'swr1': 'spectral wave data (r1)',
'swr2': 'spectral wave data (r2)',
'adcp': 'acoustic doppler current profiler',
'ocean': 'oceanographic data',
'tide': 'tide data',
'srad': 'solar radiation data',
'dart': 'water column height',
'supl': 'supplemental measurements data',
'rain': 'hourly rain data'}
available_data = {}
buoy_url = 'https://www.ndbc.noaa.gov/data/realtime2/' + buoy + '.'
for key in file_types:
if endpoint._check_if_url_valid(buoy_url + key):
available_data[key] = file_types[key]
return available_data | Determine which types of data are available for a given buoy.
Parameters
----------
buoy : str
Buoy name
Returns
-------
dict of valid file extensions and their descriptions | entailment |
def raw_buoy_data(cls, buoy, data_type='txt'):
"""Retrieve the raw buoy data contents from NDBC.
Parameters
----------
buoy : str
Name of buoy
data_type : str
Type of data requested, must be one of
'txt' standard meteorological data
'drift' meteorological data from drifting buoys and limited moored buoy data
mainly from international partners
'cwind' continuous winds data (10 minute average)
'spec' spectral wave summaries
'data_spec' raw spectral wave data
'swdir' spectral wave data (alpha1)
'swdir2' spectral wave data (alpha2)
'swr1' spectral wave data (r1)
'swr2' spectral wave data (r2)
'adcp' acoustic doppler current profiler
'ocean' oceanographic data
'tide' tide data
'srad' solar radiation data
'dart' water column height
'supl' supplemental measurements data
'rain' hourly rain data
Returns
-------
Raw data string
"""
endpoint = cls()
resp = endpoint.get_path('data/realtime2/{}.{}'.format(buoy, data_type))
return resp.text | Retrieve the raw buoy data contents from NDBC.
Parameters
----------
buoy : str
Name of buoy
data_type : str
Type of data requested, must be one of
'txt' standard meteorological data
'drift' meteorological data from drifting buoys and limited moored buoy data
mainly from international partners
'cwind' continuous winds data (10 minute average)
'spec' spectral wave summaries
'data_spec' raw spectral wave data
'swdir' spectral wave data (alpha1)
'swdir2' spectral wave data (alpha2)
'swr1' spectral wave data (r1)
'swr2' spectral wave data (r2)
'adcp' acoustic doppler current profiler
'ocean' oceanographic data
'tide' tide data
'srad' solar radiation data
'dart' water column height
'supl' supplemental measurements data
'rain' hourly rain data
Returns
-------
Raw data string | entailment |
def create_session(self):
"""Create a new HTTP session with our user-agent set.
Returns
-------
session : requests.Session
The created session
See Also
--------
urlopen, set_session_options
"""
ret = requests.Session()
ret.headers['User-Agent'] = self.user_agent
for k, v in self.options.items():
setattr(ret, k, v)
return ret | Create a new HTTP session with our user-agent set.
Returns
-------
session : requests.Session
The created session
See Also
--------
urlopen, set_session_options | entailment |
def urlopen(self, url, **kwargs):
"""GET a file-like object for a URL using HTTP.
This is a thin wrapper around :meth:`requests.Session.get` that returns a file-like
object wrapped around the resulting content.
Parameters
----------
url : str
The URL to request
kwargs : arbitrary keyword arguments
Additional keyword arguments to pass to :meth:`requests.Session.get`.
Returns
-------
fobj : file-like object
A file-like interface to the content in the response
See Also
--------
:meth:`requests.Session.get`
"""
return BytesIO(self.create_session().get(url, **kwargs).content) | GET a file-like object for a URL using HTTP.
This is a thin wrapper around :meth:`requests.Session.get` that returns a file-like
object wrapped around the resulting content.
Parameters
----------
url : str
The URL to request
kwargs : arbitrary keyword arguments
Additional keyword arguments to pass to :meth:`requests.Session.get`.
Returns
-------
fobj : file-like object
A file-like interface to the content in the response
See Also
--------
:meth:`requests.Session.get` | entailment |
def lonlat_box(self, west, east, south, north):
"""Add a latitude/longitude bounding box to the query.
This adds a request for a spatial bounding box, bounded by ('north', 'south')
for latitude and ('east', 'west') for the longitude. This modifies the query
in-place, but returns `self` so that multiple queries can be chained together
on one line.
This replaces any existing spatial queries that have been set.
Parameters
----------
west: float
The bounding longitude to the west, in degrees east of the prime meridian
east : float
The bounding longitude to the east, in degrees east of the prime meridian
south : float
The bounding latitude to the south, in degrees north of the equator
north : float
The bounding latitude to the north, in degrees north of the equator
Returns
-------
self : DataQuery
Returns self for chaining calls
"""
self._set_query(self.spatial_query, west=west, east=east, south=south,
north=north)
return self | Add a latitude/longitude bounding box to the query.
This adds a request for a spatial bounding box, bounded by ('north', 'south')
for latitude and ('east', 'west') for the longitude. This modifies the query
in-place, but returns `self` so that multiple queries can be chained together
on one line.
This replaces any existing spatial queries that have been set.
Parameters
----------
west: float
The bounding longitude to the west, in degrees east of the prime meridian
east : float
The bounding longitude to the east, in degrees east of the prime meridian
south : float
The bounding latitude to the south, in degrees north of the equator
north : float
The bounding latitude to the north, in degrees north of the equator
Returns
-------
self : DataQuery
Returns self for chaining calls | entailment |
def lonlat_point(self, lon, lat):
"""Add a latitude/longitude point to the query.
This adds a request for a (`lon`, `lat`) point. This modifies the query
in-place, but returns `self` so that multiple queries can be chained together on
one line.
This replaces any existing spatial queries that have been set.
Parameters
----------
lon: float
The longitude to request
lat : float
The latitude to request
Returns
-------
self : DataQuery
Returns self for chaining calls
"""
self._set_query(self.spatial_query, longitude=lon, latitude=lat)
return self | Add a latitude/longitude point to the query.
This adds a request for a (`lon`, `lat`) point. This modifies the query
in-place, but returns `self` so that multiple queries can be chained together on
one line.
This replaces any existing spatial queries that have been set.
Parameters
----------
lon: float
The longitude to request
lat : float
The latitude to request
Returns
-------
self : DataQuery
Returns self for chaining calls | entailment |
def time(self, time):
"""Add a request for a specific time to the query.
This modifies the query in-place, but returns `self` so that multiple queries
can be chained together on one line.
This replaces any existing temporal queries that have been set.
Parameters
----------
time : datetime.datetime
The time to request
Returns
-------
self : DataQuery
Returns self for chaining calls
"""
self._set_query(self.time_query, time=self._format_time(time))
return self | Add a request for a specific time to the query.
This modifies the query in-place, but returns `self` so that multiple queries
can be chained together on one line.
This replaces any existing temporal queries that have been set.
Parameters
----------
time : datetime.datetime
The time to request
Returns
-------
self : DataQuery
Returns self for chaining calls | entailment |
def time_range(self, start, end):
"""Add a request for a time range to the query.
This modifies the query in-place, but returns `self` so that multiple queries
can be chained together on one line.
This replaces any existing temporal queries that have been set.
Parameters
----------
start : datetime.datetime
The start of the requested time range
end : datetime.datetime
The end of the requested time range
Returns
-------
self : DataQuery
Returns self for chaining calls
"""
self._set_query(self.time_query, time_start=self._format_time(start),
time_end=self._format_time(end))
return self | Add a request for a time range to the query.
This modifies the query in-place, but returns `self` so that multiple queries
can be chained together on one line.
This replaces any existing temporal queries that have been set.
Parameters
----------
start : datetime.datetime
The start of the requested time range
end : datetime.datetime
The end of the requested time range
Returns
-------
self : DataQuery
Returns self for chaining calls | entailment |
def get_query(self, query):
"""Make a GET request, including a query, to the endpoint.
The path of the request is to the base URL assigned to the endpoint.
Parameters
----------
query : DataQuery
The query to pass when making the request
Returns
-------
resp : requests.Response
The server's response to the request
See Also
--------
get_path, get
"""
url = self._base[:-1] if self._base[-1] == '/' else self._base
return self.get(url, query) | Make a GET request, including a query, to the endpoint.
The path of the request is to the base URL assigned to the endpoint.
Parameters
----------
query : DataQuery
The query to pass when making the request
Returns
-------
resp : requests.Response
The server's response to the request
See Also
--------
get_path, get | entailment |
def get_path(self, path, query=None):
"""Make a GET request, optionally including a query, to a relative path.
The path of the request includes a path on top of the base URL
assigned to the endpoint.
Parameters
----------
path : str
The path to request, relative to the endpoint
query : DataQuery, optional
The query to pass when making the request
Returns
-------
resp : requests.Response
The server's response to the request
See Also
--------
get_query, get, url_path
"""
return self.get(self.url_path(path), query) | Make a GET request, optionally including a query, to a relative path.
The path of the request includes a path on top of the base URL
assigned to the endpoint.
Parameters
----------
path : str
The path to request, relative to the endpoint
query : DataQuery, optional
The query to pass when making the request
Returns
-------
resp : requests.Response
The server's response to the request
See Also
--------
get_query, get, url_path | entailment |
def get(self, path, params=None):
"""Make a GET request, optionally including a parameters, to a path.
The path of the request is the full URL.
Parameters
----------
path : str
The URL to request
params : DataQuery, optional
The query to pass when making the request
Returns
-------
resp : requests.Response
The server's response to the request
Raises
------
HTTPError
If the server returns anything other than a 200 (OK) code
See Also
--------
get_query, get
"""
resp = self._session.get(path, params=params)
if resp.status_code != 200:
if resp.headers.get('Content-Type', '').startswith('text/html'):
text = resp.reason
else:
text = resp.text
raise requests.HTTPError('Error accessing {0}\n'
'Server Error ({1:d}: {2})'.format(resp.request.url,
resp.status_code,
text))
return resp | Make a GET request, optionally including a parameters, to a path.
The path of the request is the full URL.
Parameters
----------
path : str
The URL to request
params : DataQuery, optional
The query to pass when making the request
Returns
-------
resp : requests.Response
The server's response to the request
Raises
------
HTTPError
If the server returns anything other than a 200 (OK) code
See Also
--------
get_query, get | entailment |
def path(self):
"""Return the full path to the Group, including any parent Groups."""
# If root, return '/'
if self.dataset is self:
return ''
else: # Otherwise recurse
return self.dataset.path + '/' + self.name | Return the full path to the Group, including any parent Groups. | entailment |
def load_from_stream(self, group):
"""Load a Group from an NCStream object."""
self._unpack_attrs(group.atts)
self.name = group.name
for dim in group.dims:
new_dim = Dimension(self, dim.name)
self.dimensions[dim.name] = new_dim
new_dim.load_from_stream(dim)
for var in group.vars:
new_var = Variable(self, var.name)
self.variables[var.name] = new_var
new_var.load_from_stream(var)
for grp in group.groups:
new_group = Group(self)
self.groups[grp.name] = new_group
new_group.load_from_stream(grp)
for struct in group.structs:
new_var = Variable(self, struct.name)
self.variables[struct.name] = new_var
new_var.load_from_stream(struct)
if group.enumTypes:
for en in group.enumTypes:
self.types[en.name] = enum.Enum(en.name,
[(typ.value, typ.code) for typ in en.map]) | Load a Group from an NCStream object. | entailment |
def load_from_stream(self, var):
"""Populate the Variable from an NCStream object."""
dims = []
for d in var.shape:
dim = Dimension(None, d.name)
dim.load_from_stream(d)
dims.append(dim)
self.dimensions = tuple(dim.name for dim in dims)
self.shape = tuple(dim.size for dim in dims)
self.ndim = len(var.shape)
self._unpack_attrs(var.atts)
data, dt, type_name = unpack_variable(var)
if data is not None:
data = data.reshape(self.shape)
self._data = data
self.dtype = dt
self.datatype = type_name
if hasattr(var, 'enumType') and var.enumType:
self.datatype = var.enumType
self._enum = True | Populate the Variable from an NCStream object. | entailment |
def load_from_stream(self, dim):
"""Load from an NCStream object."""
self.unlimited = dim.isUnlimited
self.private = dim.isPrivate
self.vlen = dim.isVlen
if not self.vlen:
self.size = dim.length | Load from an NCStream object. | entailment |
def _read_header(self):
"""Get the needed header information to initialize dataset."""
self._header = self.cdmrf.fetch_header()
self.load_from_stream(self._header) | Get the needed header information to initialize dataset. | entailment |
def load_from_stream(self, header):
"""Populate the CoverageDataset from the protobuf information."""
self._unpack_attrs(header.atts)
self.name = header.name
self.lon_lat_domain = header.latlonRect
self.proj_domain = header.projRect
self.date_range = header.dateRange
self.type = header.coverageType
for sys in header.coordSys:
self.coord_systems[sys.name] = sys
for trans in header.coordTransforms:
self.transforms[trans.name] = trans
for ax in header.coordAxes:
self.axes[ax.name] = ax
for cov in header.grids:
self.grids[cov.name] = cov | Populate the CoverageDataset from the protobuf information. | entailment |
def request_data(cls, time, site_id, **kwargs):
"""Retrieve upper air observations from Iowa State's archive for a single station.
Parameters
----------
time : datetime
The date and time of the desired observation.
site_id : str
The three letter ICAO identifier of the station for which data should be
downloaded.
kwargs
Arbitrary keyword arguments to use to initialize source
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
endpoint = cls()
df = endpoint._get_data(time, site_id, None, **kwargs)
return df | Retrieve upper air observations from Iowa State's archive for a single station.
Parameters
----------
time : datetime
The date and time of the desired observation.
site_id : str
The three letter ICAO identifier of the station for which data should be
downloaded.
kwargs
Arbitrary keyword arguments to use to initialize source
Returns
-------
:class:`pandas.DataFrame` containing the data | entailment |
def request_all_data(cls, time, pressure=None, **kwargs):
"""Retrieve upper air observations from Iowa State's archive for all stations.
Parameters
----------
time : datetime
The date and time of the desired observation.
pressure : float, optional
The mandatory pressure level at which to request data (in hPa). If none is given,
all the available data in the profiles is returned.
kwargs
Arbitrary keyword arguments to use to initialize source
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
endpoint = cls()
df = endpoint._get_data(time, None, pressure, **kwargs)
return df | Retrieve upper air observations from Iowa State's archive for all stations.
Parameters
----------
time : datetime
The date and time of the desired observation.
pressure : float, optional
The mandatory pressure level at which to request data (in hPa). If none is given,
all the available data in the profiles is returned.
kwargs
Arbitrary keyword arguments to use to initialize source
Returns
-------
:class:`pandas.DataFrame` containing the data | entailment |
def _get_data(self, time, site_id, pressure=None):
"""Download data from Iowa State's upper air archive.
Parameters
----------
time : datetime
Date and time for which data should be downloaded
site_id : str
Site id for which data should be downloaded
pressure : float, optional
Mandatory pressure level at which to request data (in hPa).
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
json_data = self._get_data_raw(time, site_id, pressure)
data = {}
for profile in json_data['profiles']:
for pt in profile['profile']:
for field in ('drct', 'dwpc', 'hght', 'pres', 'sknt', 'tmpc'):
data.setdefault(field, []).append(np.nan if pt[field] is None
else pt[field])
for field in ('station', 'valid'):
data.setdefault(field, []).append(np.nan if profile[field] is None
else profile[field])
# Make sure that the first entry has a valid temperature and dewpoint
idx = np.argmax(~(np.isnan(data['tmpc']) | np.isnan(data['dwpc'])))
# Stuff data into a pandas dataframe
df = pd.DataFrame()
df['pressure'] = ma.masked_invalid(data['pres'][idx:])
df['height'] = ma.masked_invalid(data['hght'][idx:])
df['temperature'] = ma.masked_invalid(data['tmpc'][idx:])
df['dewpoint'] = ma.masked_invalid(data['dwpc'][idx:])
df['direction'] = ma.masked_invalid(data['drct'][idx:])
df['speed'] = ma.masked_invalid(data['sknt'][idx:])
df['station'] = data['station'][idx:]
df['time'] = [datetime.strptime(valid, '%Y-%m-%dT%H:%M:%SZ')
for valid in data['valid'][idx:]]
# Calculate the u and v winds
df['u_wind'], df['v_wind'] = get_wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
# Add unit dictionary
df.units = {'pressure': 'hPa',
'height': 'meter',
'temperature': 'degC',
'dewpoint': 'degC',
'direction': 'degrees',
'speed': 'knot',
'u_wind': 'knot',
'v_wind': 'knot',
'station': None,
'time': None}
return df | Download data from Iowa State's upper air archive.
Parameters
----------
time : datetime
Date and time for which data should be downloaded
site_id : str
Site id for which data should be downloaded
pressure : float, optional
Mandatory pressure level at which to request data (in hPa).
Returns
-------
:class:`pandas.DataFrame` containing the data | entailment |
def _get_data_raw(self, time, site_id, pressure=None):
r"""Download data from the Iowa State's upper air archive.
Parameters
----------
time : datetime
Date and time for which data should be downloaded
site_id : str
Site id for which data should be downloaded
pressure : float, optional
Mandatory pressure level at which to request data (in hPa).
Returns
-------
list of json data
"""
query = {'ts': time.strftime('%Y%m%d%H00')}
if site_id is not None:
query['station'] = site_id
if pressure is not None:
query['pressure'] = pressure
resp = self.get_path('raob.py', query)
json_data = json.loads(resp.text)
# See if the return is valid, but has no data
if not (json_data['profiles'] and json_data['profiles'][0]['profile']):
message = 'No data available '
if time is not None:
message += 'for {time:%Y-%m-%d %HZ} '.format(time=time)
if site_id is not None:
message += 'for station {stid}'.format(stid=site_id)
if pressure is not None:
message += 'for pressure {pres}'.format(pres=pressure)
message = message[:-1] + '.'
raise ValueError(message)
return json_data | r"""Download data from the Iowa State's upper air archive.
Parameters
----------
time : datetime
Date and time for which data should be downloaded
site_id : str
Site id for which data should be downloaded
pressure : float, optional
Mandatory pressure level at which to request data (in hPa).
Returns
-------
list of json data | entailment |
def parse_station_table(root):
"""Parse station list XML file."""
stations = [parse_xml_station(elem) for elem in root.findall('station')]
return {st.id: st for st in stations} | Parse station list XML file. | entailment |
def parse_xml_station(elem):
"""Create a :class:`Station` instance from an XML tag."""
stid = elem.attrib['id']
name = elem.find('name').text
lat = float(elem.find('latitude').text)
lon = float(elem.find('longitude').text)
elev = float(elem.find('elevation').text)
return Station(id=stid, elevation=elev, latitude=lat, longitude=lon, name=name) | Create a :class:`Station` instance from an XML tag. | entailment |
def stations(self, *stns):
"""Specify one or more stations for the query.
This modifies the query in-place, but returns `self` so that multiple
queries can be chained together on one line.
This replaces any existing spatial queries that have been set.
Parameters
----------
stns : one or more strings
One or more names of variables to request
Returns
-------
self : RadarQuery
Returns self for chaining calls
"""
self._set_query(self.spatial_query, stn=stns)
return self | Specify one or more stations for the query.
This modifies the query in-place, but returns `self` so that multiple
queries can be chained together on one line.
This replaces any existing spatial queries that have been set.
Parameters
----------
stns : one or more strings
One or more names of variables to request
Returns
-------
self : RadarQuery
Returns self for chaining calls | entailment |
def validate_query(self, query):
"""Validate a query.
Determines whether `query` is well-formed. This includes checking for all
required parameters, as well as checking parameters for valid values.
Parameters
----------
query : RadarQuery
The query to validate
Returns
-------
valid : bool
Whether `query` is valid.
"""
valid = True
# Make sure all stations are in the table
if 'stn' in query.spatial_query:
valid = valid and all(stid in self.stations
for stid in query.spatial_query['stn'])
if query.var:
valid = valid and all(var in self.variables for var in query.var)
return valid | Validate a query.
Determines whether `query` is well-formed. This includes checking for all
required parameters, as well as checking parameters for valid values.
Parameters
----------
query : RadarQuery
The query to validate
Returns
-------
valid : bool
Whether `query` is valid. | entailment |
def get_catalog(self, query):
"""Fetch a parsed THREDDS catalog from the radar server.
Requests a catalog of radar data files data from the radar server given the
parameters in `query` and returns a :class:`~siphon.catalog.TDSCatalog` instance.
Parameters
----------
query : RadarQuery
The parameters to send to the radar server
Returns
-------
catalog : TDSCatalog
The catalog of matching data files
Raises
------
:class:`~siphon.http_util.BadQueryError`
When the query cannot be handled by the server
See Also
--------
get_catalog_raw
"""
# TODO: Refactor TDSCatalog so we don't need two requests, or to do URL munging
try:
url = self._base[:-1] if self._base[-1] == '/' else self._base
url += '?' + str(query)
return TDSCatalog(url)
except ET.ParseError:
raise BadQueryError(self.get_catalog_raw(query)) | Fetch a parsed THREDDS catalog from the radar server.
Requests a catalog of radar data files data from the radar server given the
parameters in `query` and returns a :class:`~siphon.catalog.TDSCatalog` instance.
Parameters
----------
query : RadarQuery
The parameters to send to the radar server
Returns
-------
catalog : TDSCatalog
The catalog of matching data files
Raises
------
:class:`~siphon.http_util.BadQueryError`
When the query cannot be handled by the server
See Also
--------
get_catalog_raw | entailment |
def acis_request(method, params):
"""Request data from the ACIS Web Services API.
Makes a request from the ACIS Web Services API for data
based on a given method (StnMeta,StnData,MultiStnData,GridData,General)
and parameters string. Information about the parameters can be obtained at:
http://www.rcc-acis.org/docs_webservices.html
If a connection to the API fails, then it will raise an exception. Some bad
calls will also return empty dictionaries.
ACIS Web Services is a distributed system! A call to the main URL can be
delivered to any climate center running a public instance of the service.
This makes the calls efficient, but also occasionaly results in failed
calls when a server you are directed to is having problems. Generally,
reconnecting after waiting a few seconds will resolve a problem. If problems
are persistent, contact ACIS developers at the High Plains Regional Climate
Center or Northeast Regional Climate Center who will look into server
issues.
Parameters
----------
method : str
The Web Services request method (StnMeta, StnData, MultiStnData, GridData, General)
params : dict
A JSON array of parameters (See Web Services API)
Returns
-------
A dictionary of data based on the JSON parameters
Raises
------
:class: `ACIS_API_Exception`
When the API is unable to establish a connection or returns
unparsable data.
"""
base_url = 'http://data.rcc-acis.org/' # ACIS Web API URL
timeout = 300 if method == 'MultiStnData' else 60
try:
response = session_manager.create_session().post(base_url + method, json=params,
timeout=timeout)
return response.json()
except requests.exceptions.Timeout:
raise AcisApiException('Connection Timeout')
except requests.exceptions.TooManyRedirects:
raise AcisApiException('Bad URL. Check your ACIS connection method string.')
except ValueError:
raise AcisApiException('No data returned! The ACIS parameter dictionary'
'may be incorrectly formatted') | Request data from the ACIS Web Services API.
Makes a request from the ACIS Web Services API for data
based on a given method (StnMeta,StnData,MultiStnData,GridData,General)
and parameters string. Information about the parameters can be obtained at:
http://www.rcc-acis.org/docs_webservices.html
If a connection to the API fails, then it will raise an exception. Some bad
calls will also return empty dictionaries.
ACIS Web Services is a distributed system! A call to the main URL can be
delivered to any climate center running a public instance of the service.
This makes the calls efficient, but also occasionaly results in failed
calls when a server you are directed to is having problems. Generally,
reconnecting after waiting a few seconds will resolve a problem. If problems
are persistent, contact ACIS developers at the High Plains Regional Climate
Center or Northeast Regional Climate Center who will look into server
issues.
Parameters
----------
method : str
The Web Services request method (StnMeta, StnData, MultiStnData, GridData, General)
params : dict
A JSON array of parameters (See Web Services API)
Returns
-------
A dictionary of data based on the JSON parameters
Raises
------
:class: `ACIS_API_Exception`
When the API is unable to establish a connection or returns
unparsable data. | entailment |
def parse_xml(data, handle_units):
"""Parse XML data returned by NCSS."""
root = ET.fromstring(data)
return squish(parse_xml_dataset(root, handle_units)) | Parse XML data returned by NCSS. | entailment |
def parse_xml_point(elem):
"""Parse an XML point tag."""
point = {}
units = {}
for data in elem.findall('data'):
name = data.get('name')
unit = data.get('units')
point[name] = float(data.text) if name != 'date' else parse_iso_date(data.text)
if unit:
units[name] = unit
return point, units | Parse an XML point tag. | entailment |
Subsets and Splits