text_prompt (string, 157 – 13.1k chars) | code_prompt (string, 7 – 19.8k chars, may be empty ⌀) |
---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def assert_series_equal(left, right, data_function=None, data_args=None):
""" For unit testing equality of two Series. :param left: first Series :param right: second Series :param data_function: if provided will use this function to assert compare the df.data :param data_args: arguments to pass to the data_function :return: nothing """ |
assert type(left) == type(right)
if data_function:
data_args = {} if not data_args else data_args
data_function(left.data, right.data, **data_args)
else:
assert left.data == right.data
assert left.index == right.index
assert left.data_name == right.data_name
assert left.index_name == right.index_name
assert left.sort == right.sort
if isinstance(left, rc.ViewSeries):
assert left.offset == right.offset
if isinstance(left, rc.Series):
assert left.blist == right.blist |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, timeout=None):
"""Return result from the pipeline.""" |
result = None
for stage in self._output_stages:
result = stage.get(timeout)
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self):
"""Load the lyrics from MetroLyrics.""" |
page = requests.get(self._url)
# Forces utf-8 to prevent character mangling
page.encoding = 'utf-8'
tree = html.fromstring(page.text)
lyric_div = tree.get_element_by_id('lyrics-body-text')
verses = [c.text_content() for c in lyric_div.find_class('verse')]
self._lyrics = '\n\n'.join(verses)
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, verbose=False):
""" Load the list of songs. Note that this only loads a list of songs that this artist was the main artist of. If they were only featured in the song, that song won't be listed here. There is a list on the artist page for that, I just haven't added any parsing code for that, since I don't need it. """ |
self._songs = []
page_num = 1
total_pages = 1
while page_num <= total_pages:
if verbose:
print('retrieving page %d' % page_num)
page = requests.get(ARTIST_URL.format(artist=self.name,
n=page_num))
tree = html.fromstring(page.text)
song_rows_xp = r'//*[@id="popular"]/div/table/tbody/tr'
songlist_pagination_xp = r'//*[@id="main-content"]/div[1]/'\
'div[2]/p/span/a'
rows = tree.xpath(song_rows_xp)
for row in rows:
song_link = row.xpath(r'./td/a[contains(@class,"title")]')
assert len(song_link) == 1
self._songs.append(Song(url=song_link[0].attrib['href']))
total_pages = len(tree.xpath(songlist_pagination_xp))
page_num += 1
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def distance(p0, p1, deg=True, r=r_earth_mean):
""" Return the distance between two points on the surface of the Earth. Parameters p0 : point-like (or array of point-like) [longitude, latitude] objects p1 : point-like (or array of point-like) [longitude, latitude] objects deg : bool, optional (default True) indicates if p0 and p1 are specified in degrees r : float, optional (default r_earth_mean) radius of the sphere Returns ------- d : float Reference --------- http://www.movable-type.co.uk/scripts/latlong.html - Distance Note: Spherical earth model. By default uses radius of 6371.0 km. """ |
single, (p0, p1) = _to_arrays((p0, 2), (p1, 2))
if deg:
p0 = np.radians(p0)
p1 = np.radians(p1)
lon0, lat0 = p0[:,0], p0[:,1]
lon1, lat1 = p1[:,0], p1[:,1]
# h_x used to denote haversine(x): sin^2(x / 2)
h_dlat = sin((lat1 - lat0) / 2.0) ** 2
h_dlon = sin((lon1 - lon0) / 2.0) ** 2
h_angle = h_dlat + cos(lat0) * cos(lat1) * h_dlon
angle = 2.0 * arcsin(sqrt(h_angle))
d = r * angle
if single:
d = d[0]
return d |
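A minimal usage sketch (assuming numpy and the helpers above are in scope; the city coordinates are only illustrative):
london = [-0.1278, 51.5074]   # [lon, lat] in degrees
paris = [2.3522, 48.8566]
d = distance(london, paris)   # scalar result, since single points were passed
print(round(d))               # roughly 344 (km, using the default mean Earth radius)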
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def course(p0, p1, deg=True, bearing=False):
""" Compute the initial bearing along the great circle from p0 to p1 NB: The angle returned by course() is not the traditional definition of bearing. It is definted such that 0 degrees to due East increasing counter-clockwise such that 90 degrees is due North. To obtain the bearing (0 degrees is due North increasing clockwise so that 90 degrees is due East), set the bearing flag input to True. Parameters p0 : point-like (or array of point-like) [lon, lat] objects p1 : point-like (or array of point-like) [lon, lat] objects deg : bool, optional (default True) indicates if p0 and p1 are specified in degrees. The returned angle is returned in the same units as the input. bearing : bool, optional (default False) If True, use the classical definition of bearing where 0 degrees is due North increasing clockwise so that and 90 degrees is due East. Reference --------- http://www.movable-type.co.uk/scripts/latlong.html - Bearing """ |
single, (p0, p1) = _to_arrays((p0, 2), (p1, 2))
if deg:
p0 = np.radians(p0)
p1 = np.radians(p1)
lon0, lat0 = p0[:,0], p0[:,1]
lon1, lat1 = p1[:,0], p1[:,1]
dlon = lon1 - lon0
a = sin(dlon) * cos(lat1)
b = cos(lat0) * sin(lat1) - sin(lat0) * cos(lat1) * cos(dlon)
if bearing:
angle = arctan2(a, b)
else:
angle = arctan2(b, a)
if deg:
angle = np.degrees(angle)
if single:
angle = angle[0]
return angle |
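A hedged usage sketch, reusing the London/Paris points from the distance example above:
brg = course(london, paris, bearing=True)   # 0 deg = North, increasing clockwise
print(round(brg))                           # roughly 148, i.e. towards the south-east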
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def propagate(p0, angle, d, deg=True, bearing=False, r=r_earth_mean):
""" Given an initial point and angle, move distance d along the surface Parameters p0 : point-like (or array of point-like) [lon, lat] objects angle : float (or array of float) bearing. Note that by default, 0 degrees is due East increasing clockwise so that 90 degrees is due North. See the bearing flag to change the meaning of this angle d : float (or array of float) distance to move. The units of d should be consistent with input r deg : bool, optional (default True) Whether both p0 and angle are specified in degrees. The output points will also match the value of this flag. bearing : bool, optional (default False) Indicates whether to interpret the input angle as the classical definition of bearing. r : float, optional (default r_earth_mean) radius of the sphere Reference --------- http://www.movable-type.co.uk/scripts/latlong.html - Destination Note: Spherical earth model. By default uses radius of 6371.0 km. """ |
single, (p0, angle, d) = _to_arrays((p0, 2), (angle, 1), (d, 1))
if deg:
p0 = np.radians(p0)
angle = np.radians(angle)
if not bearing:
angle = np.pi / 2.0 - angle
lon0, lat0 = p0[:,0], p0[:,1]
angd = d / r
lat1 = arcsin(sin(lat0) * cos(angd) + cos(lat0) * sin(angd) * cos(angle))
a = sin(angle) * sin(angd) * cos(lat0)
b = cos(angd) - sin(lat0) * sin(lat1)
lon1 = lon0 + arctan2(a, b)
p1 = np.column_stack([lon1, lat1])
if deg:
p1 = np.degrees(p1)
if single:
p1 = p1[0]
return p1 |
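A sketch of moving a point, under the same assumptions as above:
p1 = propagate(london, 0.0, 100.0, bearing=True)   # move 100 km due North
# p1 is roughly [-0.13, 52.41]: longitude unchanged, latitude up by about 0.9 degrees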
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(self, signature, timestamp, nonce):
"""Validate request signature. :param signature: A string signature parameter sent by weixin. :param timestamp: A int timestamp parameter sent by weixin. :param nonce: A int nonce parameter sent by weixin. """ |
if not self.token:
raise RuntimeError('WEIXIN_TOKEN is missing')
if self.expires_in:
try:
timestamp = int(timestamp)
except (ValueError, TypeError):
# fake timestamp
return False
delta = time.time() - timestamp
if delta < 0:
# this is a fake timestamp
return False
if delta > self.expires_in:
# expired timestamp
return False
values = [self.token, str(timestamp), str(nonce)]
s = ''.join(sorted(values))
hsh = hashlib.sha1(s.encode('utf-8')).hexdigest()
return signature == hsh |
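For reference, a sketch of how the signature itself is formed on the sending side, mirroring the sort-join-sha1 steps in validate() above (the token and nonce values are hypothetical):
import hashlib
import time

token = 'my-weixin-token'                        # hypothetical WEIXIN_TOKEN
timestamp = str(int(time.time()))
nonce = '4735192'
s = ''.join(sorted([token, timestamp, nonce]))   # same ordering validate() uses
signature = hashlib.sha1(s.encode('utf-8')).hexdigest()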
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(self, content):
"""Parse xml body sent by weixin. :param content: A text of xml body. """ |
raw = {}
try:
root = etree.fromstring(content)
except SyntaxError as e:
raise ValueError(*e.args)
for child in root:
raw[child.tag] = child.text
formatted = self.format(raw)
msg_type = formatted['type']
msg_parser = getattr(self, 'parse_%s' % msg_type, None)
if callable(msg_parser):
parsed = msg_parser(raw)
else:
parsed = self.parse_invalid_type(raw)
formatted.update(parsed)
return formatted |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reply(self, username, type='text', sender=None, **kwargs):
"""Create the reply text for weixin. The reply varies per reply type. The acceptable types are `text`, `music`, `news`, `image`, `voice`, `video`. Each type accepts different parameters, but they share some common parameters: * username: the receiver's username * type: the reply type, aka text, music and news * sender: sender is optional if you have a default value Text reply requires an additional parameter of `content`. Music reply requires 4 more parameters: * title: A string for music title * description: A string for music description * music_url: A link of the music * hq_music_url: A link of the high quality music News reply requires an additional parameter of `articles`, which is a list/tuple of articles, each one is a dict: * title: A string for article title * description: A string for article description * picurl: A link for article cover image * url: A link for article url Image and Voice reply requires an additional parameter of `media_id`. Video reply requires 3 more parameters: * media_id: A string for video `media_id` * title: A string for video title * description: A string for video description """ |
sender = sender or self.sender
if not sender:
raise RuntimeError('WEIXIN_SENDER or sender argument is missing')
if type == 'text':
content = kwargs.get('content', '')
return text_reply(username, sender, content)
if type == 'music':
values = {}
for k in ('title', 'description', 'music_url', 'hq_music_url'):
values[k] = kwargs.get(k)
return music_reply(username, sender, **values)
if type == 'news':
items = kwargs.get('articles', [])
return news_reply(username, sender, *items)
if type == 'customer_service':
service_account = kwargs.get('service_account', None)
return transfer_customer_service_reply(username, sender,
service_account)
if type == 'image':
media_id = kwargs.get('media_id', '')
return image_reply(username, sender, media_id)
if type == 'voice':
media_id = kwargs.get('media_id', '')
return voice_reply(username, sender, media_id)
if type == 'video':
values = {}
for k in ('media_id', 'title', 'description'):
values[k] = kwargs.get(k)
return video_reply(username, sender, **values) |
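A hypothetical usage sketch, assuming `weixin` is a configured instance of this class:
xml = weixin.reply('user-openid', type='text', sender='service-account-openid',
                   content='Hello from the bot')
# For a news reply, pass a list of article dicts instead:
# weixin.reply('user-openid', type='news', articles=[{'title': '...', 'url': '...'}])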
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register(self, key=None, func=None, **kwargs):
"""Register a command helper function. You can register the function:: def print_help(**kwargs):
username = kwargs.get('sender') sender = kwargs.get('receiver') return weixin.reply( username, sender=sender, content='text reply' ) weixin.register('help', print_help) It is also accessible as a decorator:: @weixin.register('help') def print_help(*args, **kwargs):
username = kwargs.get('sender') sender = kwargs.get('receiver') return weixin.reply( username, sender=sender, content='text reply' ) """ |
if func:
if key is None:
limitation = frozenset(kwargs.items())
self._registry_without_key.append((func, limitation))
else:
self._registry[key] = func
return func
return self.__call__(key, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def view_func(self):
"""Default view function for Flask app. This is a simple implementation for view func, you can add it to your Flask app:: weixin = Weixin(app) app.add_url_rule('/', view_func=weixin.view_func) """ |
if request is None:
raise RuntimeError('view_func requires Flask to be installed')
signature = request.args.get('signature')
timestamp = request.args.get('timestamp')
nonce = request.args.get('nonce')
if not self.validate(signature, timestamp, nonce):
return 'signature failed', 400
if request.method == 'GET':
echostr = request.args.get('echostr', '')
return echostr
try:
ret = self.parse(request.data)
except ValueError:
return 'invalid', 400
if 'type' not in ret:
# not a valid message
return 'invalid', 400
if ret['type'] == 'text' and ret['content'] in self._registry:
func = self._registry[ret['content']]
else:
ret_set = frozenset(ret.items())
matched_rules = (
_func for _func, _limitation in self._registry_without_key
if _limitation.issubset(ret_set))
func = next(matched_rules, None) # first matched rule
if func is None:
if '*' in self._registry:
func = self._registry['*']
else:
func = 'failed'
if callable(func):
text = func(**ret)
else:
# plain text
text = self.reply(
username=ret['sender'],
sender=ret['receiver'],
content=func,
)
return Response(text, content_type='text/xml; charset=utf-8') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self):
"""Reads the remote file from Gist and save it locally""" |
if self.gist:
content = self.github.read_gist_file(self.gist)
self.local.save(content) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def oauth_only(function):
"""Decorator to restrict some GitHubTools methods to run only with OAuth""" |
def check_for_oauth(self, *args, **kwargs):
"""
Returns False if the GitHubTools instance is not authenticated, or the
result of calling the decorated function if it is.
"""
if not self.is_authenticated:
self.oops("To use putgist you have to set your GETGIST_TOKEN")
self.oops("(see `putgist --help` for details)")
return False
return function(self, *args, **kwargs)
return check_for_oauth |
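A minimal, self-contained sketch of the decorator's behaviour; the class below is a hypothetical stand-in for GitHubTools, not the real implementation:
class FakeGitHubTools:
    is_authenticated = False
    def oops(self, message):
        print(message)
    @oauth_only
    def create_gist(self):
        return 'created'

print(FakeGitHubTools().create_gist())   # prints both warnings, then False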
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, url, params=None, **kwargs):
"""Encapsulte requests.get to use this class instance header""" |
return requests.get(url, params=params, headers=self.add_headers(**kwargs)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def patch(self, url, data=None, **kwargs):
"""Encapsulte requests.patch to use this class instance header""" |
return requests.patch(url, data=data, headers=self.add_headers(**kwargs)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post(self, url, data=None, **kwargs):
"""Encapsulte requests.post to use this class instance header""" |
return requests.post(url, data=data, headers=self.add_headers(**kwargs)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def backup(self):
"""Backups files with the same name of the instance filename""" |
count = 0
name = "{}.bkp".format(self.filename)
backup = os.path.join(self.cwd, name)
while os.path.exists(backup):
count += 1
name = "{}.bkp{}".format(self.filename, count)
backup = os.path.join(self.cwd, name)
self.hey("Moving existing {} to {}".format(self.filename, name))
os.rename(os.path.join(self.cwd, self.filename), backup) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def char_matcher(mode):
"""
a faster way to match characters when generating the token string cache
""" |
def f_raw(inp_str, pos):
return mode if inp_str[pos] == mode else None
def f_collection(inp_str, pos):
ch = inp_str[pos]
for each in mode:
if ch == each:
return ch
return None
if isinstance(mode, str):
return f_raw
if len(mode) == 1:
mode = mode[0]
return f_raw
return f_collection |
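A small usage sketch of the matchers this factory returns:
match_plus = char_matcher('+')            # single-character mode
match_paren = char_matcher(('(', ')'))    # a collection of characters
print(match_plus('1+2', 1))               # '+'
print(match_paren('(x)', 0))              # '('
print(match_paren('x', 0))                # None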
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _request(self, method, resource_uri, **kwargs):
"""Perform a method on a resource. Args: method: requests.`method` resource_uri: resource endpoint Raises: HTTPError Returns: JSON Response """ |
data = kwargs.get('data')
response = method(self.API_BASE_URL + resource_uri,
json=data, headers=self.headers)
response.raise_for_status()
return response.json() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(cls, customer_id, **kwargs):
""" Static method defined to update paystack customer data by id. Args: customer_id: paystack customer id. first_name: customer's first name(optional). last_name: customer's last name(optional). email: customer's email address(optional). phone:customer's phone number(optional). Returns: Json data from paystack API. """ |
return cls().requests.put('customer/{customer_id}'.format(**locals()),
data=kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render(txt):
""" Accepts Slack formatted text and returns HTML. """ |
# Removing links to other channels
txt = re.sub(r'<#[^\|]*\|(.*)>', r'#\g<1>', txt)
# Removing links to other users
txt = re.sub(r'<(@.*)>', r'\g<1>', txt)
# handle named hyperlinks
txt = re.sub(r'<([^\|]*)\|([^\|]*)>', r'<a href="\g<1>" target="blank">\g<2></a>', txt)
# handle unnamed hyperlinks
txt = re.sub(r'<([^a|/a].*)>', r'<a href="\g<1>" target="blank">\g<1></a>', txt)
# handle ordered and unordered lists
for delimiter in LIST_DELIMITERS:
slack_tag = delimiter
class_name = LIST_DELIMITERS[delimiter]
# Wrap any lines that start with the slack_tag in <li></li>
list_regex = r'(?:^|\n){}\s?(.*)'.format(slack_tag)
list_repl = r'<li class="list-item-{}">\g<1></li>'.format(class_name)
txt = re.sub(list_regex, list_repl, txt)
# handle blockquotes
txt = re.sub(r'(^|\n)(?:>){3}\s?(.*)$', r'\g<1><blockquote>\g<2></blockquote>', txt, flags=re.DOTALL)
txt = re.sub(r'(?:^|\n)>\s?(.*)\n?', r'<blockquote>\g<1></blockquote>', txt)
# handle code blocks
txt = re.sub(r'```\n?(.*)```', r'<pre>\g<1></pre>', txt, flags=re.DOTALL)
txt = re.sub(r'\n(</pre>)', r'\g<1>', txt)
# handle bolding, italics, and strikethrough
for wrapper in FORMATTERS:
slack_tag = wrapper
html_tag = FORMATTERS[wrapper]
# Grab all text in formatted characters on the same line unless escaped
regex = r'(?<!\\)\{t}([^\{t}|\n]*)\{t}'.format(t=slack_tag)
repl = r'<{t}>\g<1></{t}>'.format(t=html_tag)
txt = re.sub(regex, repl, txt)
# convert line breaks
txt = txt.replace('\n', '<br />')
# clean up bad HTML
parser = CustomSlackdownHTMLParser(txt)
txt = parser.clean()
# convert multiple spaces
txt = txt.replace('  ', ' &nbsp;')
return txt |
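A hedged example of input and output; the exact tags depend on the FORMATTERS and LIST_DELIMITERS mappings defined elsewhere in the module, so the HTML shown is only indicative:
slack_text = 'Check <http://example.com|this site> and *read* the docs'
html_out = render(slack_text)
# Indicatively, something like:
# '<p>Check <a href="http://example.com" target="blank">this site</a> and <b>read</b> the docs</p>'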
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _open_list(self, list_type):
""" Add an open list tag corresponding to the specification in the parser's LIST_TYPES. """ |
if list_type in LIST_TYPES.keys():
tag = LIST_TYPES[list_type]
else:
raise Exception('CustomSlackdownHTMLParser:_open_list: Not a valid list type.')
html = '<{t} class="list-container-{c}">'.format(
t=tag,
c=list_type
)
self.cleaned_html += html
self.current_parent_element['tag'] = LIST_TYPES[list_type]
self.current_parent_element['attrs'] = {'class': list_type} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _close_list(self):
""" Add an close list tag corresponding to the currently open list found in current_parent_element. """ |
list_type = self.current_parent_element['attrs']['class']
tag = LIST_TYPES[list_type]
html = '</{t}>'.format(
t=tag
)
self.cleaned_html += html
self.current_parent_element['tag'] = ''
self.current_parent_element['attrs'] = {} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_starttag(self, tag, attrs):
""" Called by HTMLParser.feed when a start tag is found. """ |
# Parse the tag attributes
attrs_dict = dict(attrs)
# If the tag is a predefined parent element
if tag in PARENT_ELEMENTS:
# If parser is parsing another parent element
if self.current_parent_element['tag'] != '':
# close the parent element
self.cleaned_html += '</{}>'.format(self.current_parent_element['tag'])
self.current_parent_element['tag'] = tag
self.current_parent_element['attrs'] = {}
self.cleaned_html += '<{}>'.format(tag)
# If the tag is a list item
elif tag == 'li':
self.parsing_li = True
# Parse the class name & subsequent type
class_name = attrs_dict['class']
list_type = class_name[10:]
# Check if parsing a list
if self.current_parent_element['tag'] == 'ul' or self.current_parent_element['tag'] == 'ol':
cur_list_type = self.current_parent_element['attrs']['class']
# Parsing a different list
if cur_list_type != list_type:
# Close that list
self._close_list()
# Open new list
self._open_list(list_type)
# Not parsing a list
else:
# if parsing some other parent
if self.current_parent_element['tag'] != '':
self.cleaned_html += '</{}>'.format(self.current_parent_element['tag'])
# Open new list
self._open_list(list_type)
self.cleaned_html += '<{}>'.format(tag)
# If the tag is a line break
elif tag == 'br':
# If parsing a paragraph, close it
if self.current_parent_element['tag'] == 'p':
self.cleaned_html += '</p>'
self.current_parent_element['tag'] = ''
self.current_parent_element['attrs'] = {}
# If parsing a list, close it
elif self.current_parent_element['tag'] == 'ul' or self.current_parent_element['tag'] == 'ol':
self._close_list()
# If parsing any other parent element, keep it
elif self.current_parent_element['tag'] in PARENT_ELEMENTS:
self.cleaned_html += '<br />'
# If not in any parent element, create an empty paragraph
else:
self.cleaned_html += '<p></p>'
# If the tag is something else, like a <b> or <i> tag
else:
# If not parsing any parent element
if self.current_parent_element['tag'] == '':
self.cleaned_html += '<p>'
self.current_parent_element['tag'] = 'p'
self.cleaned_html += '<{}'.format(tag)
for attr in sorted(attrs_dict.keys()):
self.cleaned_html += ' {k}="{v}"'.format(
k=attr,
v=attrs_dict[attr]
)
self.cleaned_html += '>' |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_endtag(self, tag):
""" Called by HTMLParser.feed when an end tag is found. """ |
if tag in PARENT_ELEMENTS:
self.current_parent_element['tag'] = ''
self.current_parent_element['attrs'] = {}
if tag == 'li':
self.parsing_li = False
if tag != 'br':
self.cleaned_html += '</{}>'.format(tag) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_data(self, data):
""" Called by HTMLParser.feed when text is found. """ |
if self.current_parent_element['tag'] == '':
self.cleaned_html += '<p>'
self.current_parent_element['tag'] = 'p'
self.cleaned_html += data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _remove_pre_formatting(self):
""" Removes formatting tags added to pre elements. """ |
preformatted_wrappers = [
'pre',
'code'
]
for wrapper in preformatted_wrappers:
for formatter in FORMATTERS:
tag = FORMATTERS[formatter]
character = formatter
regex = r'(<{w}>.*)<{t}>(.*)</{t}>(.*</{w}>)'.format(
t=tag,
w=wrapper
)
repl = r'\g<1>{c}\g<2>{c}\g<3>'.format(c=character)
self.cleaned_html = re.sub(regex, repl, self.cleaned_html) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean(self):
""" Goes through the txt input and cleans up any problematic HTML. """ |
# Calls handle_starttag, handle_endtag, and handle_data
self.feed()
# Clean up any parent tags left open
if self.current_parent_element['tag'] != '':
self.cleaned_html += '</{}>'.format(self.current_parent_element['tag'])
# Remove empty <p> added after lists
self.cleaned_html = re.sub(r'(</[u|o]l>)<p></p>', r'\g<1>', self.cleaned_html)
self._remove_pre_formatting()
return self.cleaned_html |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_model(LAB_DIR):
""" Cannon model params """ |
coeffs = np.load("%s/coeffs.npz" %LAB_DIR)['arr_0']
scatters = np.load("%s/scatters.npz" %LAB_DIR)['arr_0']
chisqs = np.load("%s/chisqs.npz" %LAB_DIR)['arr_0']
pivots = np.load("%s/pivots.npz" %LAB_DIR)['arr_0']
return coeffs, scatters, chisqs, pivots |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_normed_spectra():
""" Spectra to compare with models """ |
wl = np.load("%s/wl.npz" %LAB_DIR)['arr_0']
filenames = np.array(
[SPEC_DIR + "/Spectra" + "/" + val for val in lamost_id])
grid, fluxes, ivars, npix, SNRs = lamost.load_spectra(
lamost_id, input_grid=wl)
ds = dataset.Dataset(
wl, lamost_id, fluxes, ivars, [1],
lamost_id[0:2], fluxes[0:2], ivars[0:2])
ds.continuum_normalize_gaussian_smoothing(L=50)
np.savez(SPEC_DIR + "/" + "norm_flux.npz", ds.tr_flux)
np.savez(SPEC_DIR + "/" + "norm_ivar.npz", ds.tr_ivar)
return ds.tr_flux, ds.tr_ivar |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wget_files():
""" Pull the files from the LAMOST archive """ |
for f in lamost_id:
short = (f.split('-')[2]).split('_')[0]
filename = "%s/%s.gz" %(short,f)
DIR = "/Users/annaho/Data/Li_Giants/Spectra_APOKASC"
searchfor = "%s/%s.gz" %(DIR,f)
if glob.glob(searchfor):
print("done")
else:
#print(searchfor)
os.system(
"wget http://dr2.lamost.org/sas/fits/%s" %(filename))
new_filename = filename.split("_")[0] + "_" + filename.split("_")[2]
os.system(
"wget http://dr2.lamost.org/sas/fits/%s" %(new_filename)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cannon_normalize(spec_raw):
""" Normalize according to The Cannon """ |
spec = np.array([spec_raw])
wl = np.arange(0, spec.shape[1])
w = continuum_normalization.gaussian_weight_matrix(wl, L=50)
ivar = np.ones(spec.shape)*0.5
cont = continuum_normalization._find_cont_gaussian_smooth(
wl, spec, ivar, w)
norm_flux, norm_ivar = continuum_normalization._cont_norm(
spec, ivar, cont)
return norm_flux[0] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resample(grid, wl, flux):
""" Resample spectrum onto desired grid """ |
flux_rs = (interpolate.interp1d(wl, flux))(grid)
return flux_rs |
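A worked sketch, assuming numpy and scipy.interpolate are imported as in the surrounding module:
wl = np.array([0.0, 1.0, 2.0])
flux = np.array([1.0, 2.0, 3.0])
grid = np.array([0.5, 1.5])
print(resample(grid, wl, flux))   # [1.5 2.5], linear interpolation onto the new grid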
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_residuals(ds, m):
""" Using the dataset and model object, calculate the residuals and return Parameters ds: dataset object m: model object Return ------ residuals: array of residuals, spec minus model spec """ |
model_spectra = get_model_spectra(ds, m)
resid = ds.test_flux - model_spectra
return resid |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_model():
""" Load the model Parameters direc: directory with all of the model files Returns ------- m: model object """ |
direc = "/home/annaho/TheCannon/code/lamost/mass_age/cn"
m = model.CannonModel(2)
m.coeffs = np.load(direc + "/coeffs.npz")['arr_0'][0:3626,:] # no cols
m.scatters = np.load(direc + "/scatters.npz")['arr_0'][0:3626] # no cols
m.chisqs = np.load(direc + "/chisqs.npz")['arr_0'][0:3626] # no cols
m.pivots = np.load(direc + "/pivots.npz")['arr_0']
return m |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit_gaussian(x, y, yerr, p0):
""" Fit a Gaussian to the data """ |
try:
popt, pcov = curve_fit(gaussian, x, y, sigma=yerr, p0=p0, absolute_sigma=True)
except RuntimeError:
return [0],[0]
return popt, pcov |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def select(yerrs, amps, amp_errs, widths):
""" criteria for keeping an object """ |
keep_1 = np.logical_and(amps < 0, widths > 1)
keep_2 = np.logical_and(np.abs(amps) > 3*yerrs, amp_errs < 3*np.abs(amps))
keep = np.logical_and(keep_1, keep_2)
return keep |
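A worked sketch (assuming numpy as np): the first feature is a broad absorption line detected at more than 3 sigma with a small amplitude error, the second is emission-like, so only the first is kept:
yerrs = np.array([0.01, 0.01])
amps = np.array([-0.05, 0.02])
amp_errs = np.array([0.01, 0.01])
widths = np.array([2.0, 2.0])
print(select(yerrs, amps, amp_errs, widths))   # [ True False]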
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_all():
""" Load the data that we're using to search for Li-rich giants. Store it in dataset and model objects. """ |
DATA_DIR = "/home/annaho/TheCannon/code/apogee_lamost/xcalib_4labels"
dates = os.listdir("/home/share/LAMOST/DR2/DR2_release")
dates = np.array(dates)
dates = np.delete(dates, np.where(dates=='.directory')[0][0])
dates = np.delete(dates, np.where(dates=='all_folders.list')[0][0])
dates = np.delete(dates, np.where(dates=='dr2.lis')[0][0])
for date in dates:
if glob.glob("*%s*.txt" %date):
print("%s done" %date)
else:
print("running %s" %date)
run_one_date(date) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_colors(catalog):
""" Pull colors from catalog Parameters catalog: filename """ |
print("Get Colors")
a = pyfits.open(catalog)
data = a[1].data
a.close()
all_ids = data['LAMOST_ID_1']
all_ids = np.array([val.strip() for val in all_ids])
# G magnitude
gmag = data['gpmag']
gmag_err = data['e_gpmag']
# R magnitude
rmag = data['rpmag']
rmag_err = data['e_rpmag']
# I magnitude
imag = data['ipmag']
imag_err = data['e_ipmag']
# W1
W1 = data['W1mag']
W1_err = data['e_W1mag']
# W2
W2 = data['W2mag']
W2_err = data['e_W2mag']
# J magnitude
Jmag = data['Jmag']
Jmag_err = data['e_Jmag']
# H magnitude
Hmag = data['Hmag']
Hmag_err = data['e_Hmag']
# K magnitude
Kmag = data['Kmag']
Kmag_err = data['e_Kmag']
# Stack
mag = np.vstack((
gmag, rmag, imag, Jmag, Hmag, Kmag, W2, W1)) # 8, nobj
mag_err = np.vstack((
gmag_err, rmag_err, imag_err, Jmag_err,
Hmag_err, Kmag_err, W2_err, W1_err))
# Make g-r, r-i, i-J, etc
col = mag[:-1] - mag[1:]
col_ivar = 1/(mag_err[:-1]**2 + mag_err[1:]**2)
# There's something wrong with the i-band, I think..so the second color r-i
#bad = col[:,1] < 0.0
#col_ivar[bad] = 0.0
return all_ids, col, col_ivar |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def draw_spectra(md, ds):
""" Generate best-fit spectra for all the test objects Parameters md: model The Cannon spectral model ds: Dataset Dataset object Returns ------- best_fluxes: ndarray The best-fit test fluxes best_ivars: The best-fit test inverse variances """ |
coeffs_all, covs, scatters, red_chisqs, pivots, label_vector = md.model
nstars = len(ds.test_SNR)
cannon_flux = np.zeros(ds.test_flux.shape)
cannon_ivar = np.zeros(ds.test_ivar.shape)
for i in range(nstars):
x = label_vector[:,i,:]
spec_fit = np.einsum('ij, ij->i', x, coeffs_all)
cannon_flux[i,:] = spec_fit
bad = ds.test_ivar[i,:] == SMALL**2
cannon_ivar[i,:][~bad] = 1. / scatters[~bad] ** 2
return cannon_flux, cannon_ivar |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars):
""" Find and return continuum pixels given the flux and sigma cut Parameters f_cut: float the upper limit imposed on the quantity (fbar-1) sig_cut: float the upper limit imposed on the quantity (f_sig) wl: numpy ndarray of length npixels rest-frame wavelength vector fluxes: numpy ndarray of shape (nstars, npixels) pixel intensities ivars: numpy ndarray of shape nstars, npixels inverse variances, parallel to fluxes Returns ------- contmask: boolean mask of length npixels True indicates that the pixel is continuum """ |
f_bar = np.median(fluxes, axis=0)
sigma_f = np.var(fluxes, axis=0)
bad = np.logical_and(f_bar==0, sigma_f==0)
cont1 = np.abs(f_bar-1) <= f_cut
cont2 = sigma_f <= sig_cut
contmask = np.logical_and(cont1, cont2)
contmask[bad] = False
return contmask |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_contpix(wl, fluxes, ivars, target_frac):
""" Find continuum pix in spec, meeting a set target fraction Parameters wl: numpy ndarray rest-frame wavelength vector fluxes: numpy ndarray pixel intensities ivars: numpy ndarray inverse variances, parallel to fluxes target_frac: float the fraction of pixels in spectrum desired to be continuum Returns ------- contmask: boolean numpy ndarray True corresponds to continuum pixels """ |
print("Target frac: %s" %(target_frac))
bad1 = np.median(ivars, axis=0) == SMALL
bad2 = np.var(ivars, axis=0) == 0
bad = np.logical_and(bad1, bad2)
npixels = len(wl)-sum(bad)
f_cut = 0.0001
stepsize = 0.0001
sig_cut = 0.0001
contmask = _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars)
if npixels > 0:
frac = sum(contmask)/float(npixels)
else:
frac = 0
while (frac < target_frac):
f_cut += stepsize
sig_cut += stepsize
contmask = _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars)
if npixels > 0:
frac = sum(contmask)/float(npixels)
else:
frac = 0
if frac > 0.10:
print("Warning: Over 10% of pixels identified as continuum.")
print("%s out of %s pixels identified as continuum" %(sum(contmask),
npixels))
print("Cuts: f_cut %s, sig_cut %s" %(f_cut, sig_cut))
return contmask |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_contpix_regions(wl, fluxes, ivars, frac, ranges):
""" Find continuum pix in a spectrum split into chunks Parameters wl: numpy ndarray rest-frame wavelength vector fluxes: numpy ndarray pixel intensities ivars: numpy ndarray inverse variances, parallel to fluxes frac: float fraction of pixels in spectrum to be found as continuum ranges: list, array starts and ends indicating location of chunks in array Returns ------ contmask: numpy ndarray, boolean True indicates continuum pixel """ |
contmask = np.zeros(len(wl), dtype=bool)
for chunk in ranges:
start = chunk[0]
stop = chunk[1]
contmask[start:stop] = _find_contpix(
wl[start:stop], fluxes[:,start:stop], ivars[:,start:stop], frac)
return contmask |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def group_data():
""" Load the reference data, and assign each object a random integer from 0 to 7. Save the IDs. """ |
tr_obj = np.load("%s/ref_id.npz" %direc_ref)['arr_0']
groups = np.random.randint(0, 8, size=len(tr_obj))
np.savez("ref_groups.npz", groups) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def train(ds, ii):
""" Run the training step, given a dataset object. """ |
print("Loading model")
m = model.CannonModel(2)
print("Training...")
m.fit(ds)
np.savez("./ex%s_coeffs.npz" %ii, m.coeffs)
np.savez("./ex%s_scatters.npz" %ii, m.scatters)
np.savez("./ex%s_chisqs.npz" %ii, m.chisqs)
np.savez("./ex%s_pivots.npz" %ii, m.pivots)
fig = m.diagnostics_leading_coeffs(ds)
plt.savefig("ex%s_leading_coeffs.png" %ii)
# m.diagnostics_leading_coeffs_triangle(ds)
# m.diagnostics_plot_chisq(ds)
return m |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def weighted_std(values, weights):
""" Calculate standard deviation weighted by errors """ |
average = np.average(values, weights=weights)
variance = np.average((values-average)**2, weights=weights)
return np.sqrt(variance) |
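A worked check, assuming numpy as np:
vals = np.array([1.0, 3.0])
wts = np.array([3.0, 1.0])
# weighted mean = (3*1 + 1*3) / 4 = 1.5
# weighted variance = (3*(1 - 1.5)**2 + 1*(3 - 1.5)**2) / 4 = 0.75
print(weighted_std(vals, wts))   # ~0.866, i.e. sqrt(0.75)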
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def estimate_noise(fluxes, contmask):
""" Estimate the scatter in a region of the spectrum taken to be continuum """ |
nstars = fluxes.shape[0]
scatter = np.zeros(nstars)
for i,spec in enumerate(fluxes):
cont = spec[contmask]
scatter[i] = stats.funcs.mad_std(cont)
return scatter |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_ref_spectra():
""" Pull out wl, flux, ivar from files of training spectra """ |
data_dir = "/Users/annaho/Data/AAOmega/ref_spectra"
# Load the files & count the number of training objects
ff = glob.glob("%s/*.txt" %data_dir)
nstars = len(ff)
print("We have %s training objects" %nstars)
# Read the first file to get the wavelength array
f = ff[0]
data = Table.read(f, format="ascii.fast_no_header")
wl = data['col1']
npix = len(wl)
print("We have %s pixels" %npix)
tr_flux = np.zeros((nstars,npix))
tr_ivar = np.zeros(tr_flux.shape)
for i,f in enumerate(ff):
data = Table.read(f, format="ascii.fast_no_header")
flux = data['col2']
tr_flux[i,:] = flux
sigma = data['col3']
tr_ivar[i,:] = 1.0 / sigma**2
return np.array(ff), wl, tr_flux, tr_ivar |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_data():
data_dir = "/Users/annaho/Data/AAOmega" out_dir = "%s/%s" %(data_dir, "Run_13_July") """ Use all the above functions to set data up for The Cannon """ |
ff, wl, tr_flux, tr_ivar = load_ref_spectra()
""" pick one that doesn't have extra dead pixels """
skylines = tr_ivar[4,:] # should be the same across all obj
np.savez("%s/skylines.npz" %out_dir, skylines)
contmask = np.load("%s/contmask_regions.npz" %data_dir)['arr_0']
scatter = estimate_noise(tr_flux, contmask)
ids, labels = load_labels()
# Select the objects in the catalog corresponding to the files
inds = []
ff_short = []
for fname in ff:
val = fname.split("/")[-1]
short = (val.split('.')[0] + '.' + val.split('.')[1])
ff_short.append(short)
if short in ids:
ind = np.where(ids==short)[0][0]
inds.append(ind)
# choose the labels
tr_id = ids[inds]
tr_label = labels[inds]
# find the corresponding spectra
ff_short = np.array(ff_short)
inds = np.array([np.where(ff_short==val)[0][0] for val in tr_id])
tr_flux_choose = tr_flux[inds]
tr_ivar_choose = tr_ivar[inds]
scatter_choose = scatter[inds]
np.savez("%s/wl.npz" %out_dir, wl)
np.savez("%s/ref_id_all.npz" %out_dir, tr_id)
np.savez("%s/ref_flux_all.npz" %out_dir, tr_flux_choose)
np.savez("%s/ref_ivar_all.npz" %out_dir, tr_ivar_choose)
np.savez("%s/ref_label_all.npz" %out_dir, tr_label)
np.savez("%s/ref_spec_scat_all.npz" %out_dir, scatter_choose)
# now, the test spectra
test_id, test_flux = load_test_spectra()
scatter = estimate_noise(test_flux, contmask)
np.savez("%s/test_id.npz" %out_dir, test_id)
np.savez("%s/test_flux.npz" %out_dir, test_flux)
np.savez("%s/test_spec_scat.npz" %out_dir, scatter) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_full_ivar():
""" take the scatters and skylines and make final ivars """ |
# skylines come as an ivar
# don't use them for now, because I don't really trust them...
# skylines = np.load("%s/skylines.npz" %DATA_DIR)['arr_0']
ref_flux = np.load("%s/ref_flux_all.npz" %DATA_DIR)['arr_0']
ref_scat = np.load("%s/ref_spec_scat_all.npz" %DATA_DIR)['arr_0']
test_flux = np.load("%s/test_flux.npz" %DATA_DIR)['arr_0']
test_scat = np.load("%s/test_spec_scat.npz" %DATA_DIR)['arr_0']
ref_ivar = np.ones(ref_flux.shape) / ref_scat[:,None]**2
test_ivar = np.ones(test_flux.shape) / test_scat[:,None]**2
# ref_ivar = (ref_ivar_temp * skylines[None,:]) / (ref_ivar_temp + skylines)
# test_ivar = (test_ivar_temp * skylines[None,:]) / (test_ivar_temp + skylines)
ref_bad = np.logical_or(ref_flux <= 0, ref_flux > 1.1)
test_bad = np.logical_or(test_flux <= 0, test_flux > 1.1)
SMALL = 1.0 / 1000000000.0
ref_ivar[ref_bad] = SMALL
test_ivar[test_bad] = SMALL
np.savez("%s/ref_ivar_corr.npz" %DATA_DIR, ref_ivar)
np.savez("%s/test_ivar_corr.npz" %DATA_DIR, test_ivar) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _sinusoid(x, p, L, y):
""" Return the sinusoid cont func evaluated at input x for the continuum. Parameters x: float or np.array data, input to function p: ndarray coefficients of fitting function L: float width of x data y: float or np.array output data corresponding to input x Returns ------- func: float function evaluated for the input x """ |
N = int(len(p)/2)
n = np.linspace(0, N, N+1)
k = n*np.pi/L
func = 0
for n in range(0, N):
func += p[2*n]*np.sin(k[n]*x)+p[2*n+1]*np.cos(k[n]*x)
return func |
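A worked check of the expansion f(x) = sum_n p[2n]*sin(k_n*x) + p[2n+1]*cos(k_n*x) with k_n = n*pi/L (assuming numpy as np; the unused y argument is passed as None):
p = np.ones(4)                             # two sin/cos pairs, unit coefficients
print(_sinusoid(5.0, p, L=10.0, y=None))
# ~2.0: the n=0 term gives cos(0) = 1 and the n=1 term gives sin(pi/2) = 1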
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _weighted_median(values, weights, quantile):
""" Calculate a weighted median for values above a particular quantile cut Used in pseudo continuum normalization Parameters values: np ndarray of floats the values to take the median of weights: np ndarray of floats the weights associated with the values quantile: float the cut applied to the input data Returns ------ the weighted median """ |
sindx = np.argsort(values)
cvalues = 1. * np.cumsum(weights[sindx])
if cvalues[-1] == 0: # means all the values are 0
return values[0]
cvalues = cvalues / cvalues[-1] # div by largest value
foo = sindx[cvalues > quantile]
if len(foo) == 0:
return values[0]
indx = foo[0]
return values[indx] |
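A worked check, assuming numpy as np:
values = np.array([1.0, 2.0, 3.0, 4.0])
weights = np.ones(4)
# normalized cumulative weights are [0.25, 0.5, 0.75, 1.0]; the first entry
# strictly above the 0.5 quantile is at sorted index 2, so 3.0 is returned
print(_weighted_median(values, weights, 0.5))   # 3.0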
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_cont_gaussian_smooth(wl, fluxes, ivars, w):
""" Returns the weighted mean block of spectra Parameters wl: numpy ndarray wavelength vector flux: numpy ndarray block of flux values ivar: numpy ndarray block of ivar values L: float width of Gaussian used to assign weights Returns ------- smoothed_fluxes: numpy ndarray block of smoothed flux values, mean spectra """ |
print("Finding the continuum")
bot = np.dot(ivars, w.T)
top = np.dot(fluxes*ivars, w.T)
bad = bot == 0
cont = np.zeros(top.shape)
cont[~bad] = top[~bad] / bot[~bad]
return cont |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cont_norm_gaussian_smooth(dataset, L):
""" Continuum normalize by dividing by a Gaussian-weighted smoothed spectrum Parameters dataset: Dataset the dataset to continuum normalize L: float the width of the Gaussian used for weighting Returns ------- dataset: Dataset updated dataset """ |
print("Gaussian smoothing the entire dataset...")
w = gaussian_weight_matrix(dataset.wl, L)
print("Gaussian smoothing the training set")
cont = _find_cont_gaussian_smooth(
dataset.wl, dataset.tr_flux, dataset.tr_ivar, w)
norm_tr_flux, norm_tr_ivar = _cont_norm(
dataset.tr_flux, dataset.tr_ivar, cont)
print("Gaussian smoothing the test set")
cont = _find_cont_gaussian_smooth(
dataset.wl, dataset.test_flux, dataset.test_ivar, w)
norm_test_flux, norm_test_ivar = _cont_norm(
dataset.test_flux, dataset.test_ivar, cont)
return norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_cont_fitfunc(fluxes, ivars, contmask, deg, ffunc, n_proc=1):
""" Fit a continuum to a continuum pixels in a segment of spectra Functional form can be either sinusoid or chebyshev, with specified degree Parameters fluxes: numpy ndarray of shape (nstars, npixels) training set or test set pixel intensities ivars: numpy ndarray of shape (nstars, npixels) inverse variances, parallel to fluxes contmask: numpy ndarray of length (npixels) boolean pixel mask, True indicates that pixel is continuum deg: int degree of fitting function ffunc: str type of fitting function, chebyshev or sinusoid Returns ------- cont: numpy ndarray of shape (nstars, npixels) the continuum, parallel to fluxes """ |
nstars = fluxes.shape[0]
npixels = fluxes.shape[1]
cont = np.zeros(fluxes.shape)
if n_proc == 1:
for jj in range(nstars):
flux = fluxes[jj,:]
ivar = ivars[jj,:]
pix = np.arange(0, npixels)
y = flux[contmask]
x = pix[contmask]
yivar = ivar[contmask]
yivar[yivar == 0] = SMALL**2
if ffunc=="sinusoid":
p0 = np.ones(deg*2) # one for cos, one for sin
L = max(x)-min(x)
pcont_func = _partial_func(_sinusoid, L=L, y=flux)
popt, pcov = opt.curve_fit(pcont_func, x, y, p0=p0,
sigma=1./np.sqrt(yivar))
elif ffunc=="chebyshev":
fit = np.polynomial.chebyshev.Chebyshev.fit(x=x,y=y,w=yivar,deg=deg)
for element in pix:
if ffunc=="sinusoid":
cont[jj,element] = _sinusoid(element, popt, L=L, y=flux)
elif ffunc=="chebyshev":
cont[jj,element] = fit(element)
else:
# start mp.Pool
pool = mp.Pool(processes=n_proc)
mp_results = []
for i in range(nstars):
mp_results.append(pool.apply_async(\
_find_cont_fitfunc,
(fluxes[i, :].reshape((1, -1)),
ivars[i, :].reshape((1, -1)),
contmask[:]),
{'deg':deg, 'ffunc':ffunc}))
# close mp.Pool
pool.close()
pool.join()
cont = np.array([mp_results[i].get().flatten() for i in range(nstars)])
return cont |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_cont_fitfunc_regions(fluxes, ivars, contmask, deg, ranges, ffunc, n_proc=1):
""" Run fit_cont, dealing with spectrum in regions or chunks This is useful if a spectrum has gaps. Parameters fluxes: ndarray of shape (nstars, npixels) training set or test set pixel intensities ivars: numpy ndarray of shape (nstars, npixels) inverse variances, parallel to fluxes contmask: numpy ndarray of length (npixels) boolean pixel mask, True indicates that pixel is continuum deg: int degree of fitting function ffunc: str type of fitting function, chebyshev or sinusoid Returns ------- cont: numpy ndarray of shape (nstars, npixels) the continuum, parallel to fluxes """ |
nstars = fluxes.shape[0]
npixels = fluxes.shape[1]
cont = np.zeros(fluxes.shape)
for chunk in ranges:
start = chunk[0]
stop = chunk[1]
if ffunc=="chebyshev":
output = _find_cont_fitfunc(fluxes[:,start:stop],
ivars[:,start:stop],
contmask[start:stop],
deg=deg, ffunc="chebyshev",
n_proc=n_proc)
elif ffunc=="sinusoid":
output = _find_cont_fitfunc(fluxes[:,start:stop],
ivars[:,start:stop],
contmask[start:stop],
deg=deg, ffunc="sinusoid",
n_proc=n_proc)
cont[:, start:stop] = output
return cont |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_cont_running_quantile(wl, fluxes, ivars, q, delta_lambda, verbose=False):
""" Perform continuum normalization using a running quantile Parameters wl: numpy ndarray wavelength vector fluxes: numpy ndarray of shape (nstars, npixels) pixel intensities ivars: numpy ndarray of shape (nstars, npixels) inverse variances, parallel to fluxes q: float the desired quantile cut delta_lambda: int the number of pixels over which the median is calculated Output ------ norm_fluxes: numpy ndarray of shape (nstars, npixels) normalized pixel intensities norm_ivars: numpy ndarray of shape (nstars, npixels) rescaled pixel invariances """ |
cont = np.zeros(fluxes.shape)
nstars = fluxes.shape[0]
for jj in range(nstars):
if verbose:
print("cont_norm_q(): working on star [%s/%s]..." % (jj+1, nstars))
flux = fluxes[jj,:]
ivar = ivars[jj,:]
for ll, lam in enumerate(wl):
indx = (np.where(abs(wl-lam) < delta_lambda))[0]
flux_cut = flux[indx]
ivar_cut = ivar[indx]
cont[jj, ll] = _weighted_median(flux_cut, ivar_cut, q)
return cont |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cont_norm_running_quantile_regions(wl, fluxes, ivars, q, delta_lambda, ranges, verbose=True):
""" Perform continuum normalization using running quantile, for spectrum that comes in chunks """ |
print("contnorm.py: continuum norm using running quantile")
print("Taking spectra in %s chunks" % len(ranges))
nstars = fluxes.shape[0]
norm_fluxes = np.zeros(fluxes.shape)
norm_ivars = np.zeros(ivars.shape)
for chunk in ranges:
start = chunk[0]
stop = chunk[1]
output = _cont_norm_running_quantile(
wl[start:stop], fluxes[:,start:stop],
ivars[:,start:stop], q, delta_lambda)
norm_fluxes[:,start:stop] = output[0]
norm_ivars[:,start:stop] = output[1]
return norm_fluxes, norm_ivars |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cont_norm_running_quantile_regions_mp(wl, fluxes, ivars, q, delta_lambda, ranges, n_proc=2, verbose=False):
""" Perform continuum normalization using running quantile, for spectrum that comes in chunks. The same as _cont_norm_running_quantile_regions(), but using multi-processing. Bo Zhang (NAOC) """ |
print("contnorm.py: continuum norm using running quantile")
print("Taking spectra in %s chunks" % len(ranges))
# nstars = fluxes.shape[0]
nchunks = len(ranges)
norm_fluxes = np.zeros(fluxes.shape)
norm_ivars = np.zeros(ivars.shape)
for i in range(nchunks):
chunk = ranges[i, :]
start = chunk[0]
stop = chunk[1]
if verbose:
print('@Bo Zhang: Going to normalize Chunk [%d/%d], pixel:[%d, %d] ...'
% (i+1, nchunks, start, stop))
output = _cont_norm_running_quantile_mp(
wl[start:stop], fluxes[:, start:stop],
ivars[:, start:stop], q, delta_lambda,
n_proc=n_proc, verbose=verbose)
norm_fluxes[:, start:stop] = output[0]
norm_ivars[:, start:stop] = output[1]
return norm_fluxes, norm_ivars |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cont_norm(fluxes, ivars, cont):
""" Continuum-normalize a continuous segment of spectra. Parameters fluxes: numpy ndarray pixel intensities ivars: numpy ndarray inverse variances, parallel to fluxes contmask: boolean mask True indicates that pixel is continuum Returns ------- norm_fluxes: numpy ndarray normalized pixel intensities norm_ivars: numpy ndarray rescaled inverse variances """ |
bad = cont == 0.
norm_fluxes = np.ones(fluxes.shape)
norm_fluxes[~bad] = fluxes[~bad] / cont[~bad]
norm_ivars = cont**2 * ivars
return norm_fluxes, norm_ivars |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cont_norm_regions(fluxes, ivars, cont, ranges):
""" Perform continuum normalization for spectra in chunks Useful for spectra that have gaps Parameters --------- fluxes: numpy ndarray pixel intensities ivars: numpy ndarray inverse variances, parallel to fluxes cont: numpy ndarray the continuum ranges: list or np ndarray the chunks that the spectrum should be split into Returns ------- norm_fluxes: numpy ndarray normalized pixel intensities norm_ivars: numpy ndarray rescaled inverse variances """ |
nstars = fluxes.shape[0]
norm_fluxes = np.zeros(fluxes.shape)
norm_ivars = np.zeros(ivars.shape)
for chunk in ranges:
start = chunk[0]
stop = chunk[1]
output = _cont_norm(fluxes[:,start:stop],
ivars[:,start:stop],
cont[:,start:stop])
norm_fluxes[:,start:stop] = output[0]
norm_ivars[:,start:stop] = output[1]
for jj in range(nstars):
bad = (norm_ivars[jj,:] == 0.)
norm_fluxes[jj,:][bad] = 1.
return norm_fluxes, norm_ivars |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_contpix(self, x, y, contpix_x, contpix_y, figname):
""" Plot baseline spec with continuum pix overlaid Parameters """ |
fig, axarr = plt.subplots(2, sharex=True)
plt.xlabel(r"Wavelength $\lambda (\AA)$")
plt.xlim(min(x), max(x))
ax = axarr[0]
ax.step(x, y, where='mid', c='k', linewidth=0.3,
label=r'$\theta_0$' + "= the leading fit coefficient")
ax.scatter(contpix_x, contpix_y, s=1, color='r',
label="continuum pixels")
ax.legend(loc='lower right',
prop={'family':'serif', 'size':'small'})
ax.set_title("Baseline Spectrum with Continuum Pixels")
ax.set_ylabel(r'$\theta_0$')
ax = axarr[1]
ax.step(x, y, where='mid', c='k', linewidth=0.3,
label=r'$\theta_0$' + "= the leading fit coefficient")
ax.scatter(contpix_x, contpix_y, s=1, color='r',
label="continuum pixels")
ax.set_title("Baseline Spectrum with Continuum Pixels, Zoomed")
ax.legend(loc='upper right', prop={'family':'serif',
'size':'small'})
ax.set_ylabel(r'$\theta_0$')
ax.set_ylim(0.95, 1.05)
print("Diagnostic plot: fitted 0th order spec w/ cont pix")
print("Saved as %s.png" % (figname))
plt.savefig(figname)
plt.close() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diagnostics_contpix(self, data, nchunks=10, fig = "baseline_spec_with_cont_pix"):
""" Call plot_contpix once for each nth of the spectrum """ |
if data.contmask is None:
print("No contmask set")
else:
coeffs_all = self.coeffs
wl = data.wl
baseline_spec = coeffs_all[:,0]
contmask = data.contmask
contpix_x = wl[contmask]
contpix_y = baseline_spec[contmask]
rem = len(wl)%nchunks
wl_split = np.array(np.split(wl[0:len(wl)-rem],nchunks))
baseline_spec_split = np.array(
np.split(baseline_spec[0:len(wl)-rem],nchunks))
nchunks = wl_split.shape[0]
for i in range(nchunks):
fig_chunk = fig + "_%s" %str(i)
wl_chunk = wl_split[i,:]
baseline_spec_chunk = baseline_spec_split[i,:]
take = np.logical_and(
contpix_x>wl_chunk[0], contpix_x<wl_chunk[-1])
self.plot_contpix(
wl_chunk, baseline_spec_chunk,
contpix_x[take], contpix_y[take], fig_chunk) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diagnostics_plot_chisq(self, ds, figname = "modelfit_chisqs.png"):
""" Produce a set of diagnostic plots for the model Parameters (optional) chisq_dist_plot_name: str Filename of output saved plot """ |
label_names = ds.get_plotting_labels()
lams = ds.wl
pivots = self.pivots
npixels = len(lams)
nlabels = len(pivots)
chisqs = self.chisqs
coeffs = self.coeffs
scatters = self.scatters
# Histogram of the chi squareds of ind. stars
plt.hist(np.sum(chisqs, axis=0), color='lightblue', alpha=0.7,
bins=int(np.sqrt(len(chisqs))))
dof = len(lams) - coeffs.shape[1] # for one star
plt.axvline(x=dof, c='k', linewidth=2, label="DOF")
plt.legend()
plt.title("Distribution of " + r"$\chi^2$" + " of the Model Fit")
plt.ylabel("Count")
plt.xlabel(r"$\chi^2$" + " of Individual Star")
print("Diagnostic plot: histogram of the red chi squareds of the fit")
print("Saved as %s" %figname)
plt.savefig(figname)
plt.close() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_mass(nu_max, delta_nu, teff):
""" asteroseismic scaling relations """ |
NU_MAX = 3140.0 # microHz
DELTA_NU = 135.03 # microHz
TEFF = 5777.0
return (nu_max/NU_MAX)**3 * (delta_nu/DELTA_NU)**(-4) * (teff/TEFF)**1.5 |
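A quick sanity check (illustrative only, not from the source): the solar reference values recover a mass of exactly 1 in solar units, and made-up red-giant seismic parameters come out near a solar mass.
calc_mass(3140.0, 135.03, 5777.0)   # -> 1.0, the Sun by construction
calc_mass(30.0, 3.8, 4800.0)        # illustrative red-giant values, roughly one solar mass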
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_mass_2(mh,cm,nm,teff,logg):
""" Table A2 in Martig 2016 """ |
CplusN = calc_sum(mh,cm,nm)
t = teff/4000.
return (95.8689 - 10.4042*mh - 0.7266*mh**2
+ 41.3642*cm - 5.3242*cm*mh - 46.7792*cm**2
+ 15.0508*nm - 0.9342*nm*mh - 30.5159*nm*cm - 1.6083*nm**2
- 67.6093*CplusN + 7.0486*CplusN*mh + 133.5775*CplusN*cm + 38.9439*CplusN*nm - 88.9948*CplusN**2
- 144.1765*t + 5.1180*t*mh - 73.7690*t*cm - 15.2927*t*nm + 101.7482*t*CplusN + 27.7690*t**2
- 9.4246*logg + 1.5159*logg*mh + 16.0412*logg*cm + 1.3549*logg*nm - 18.6527*logg*CplusN + 28.8015*logg*t - 4.0982*logg**2) |
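An illustrative call (not from the source). calc_mass_2 relies on a calc_sum helper for the combined C+N abundance, assumed to be defined elsewhere in the same module; the input values below are made up.
m = calc_mass_2(mh=0.0, cm=-0.05, nm=0.1, teff=4750.0, logg=2.4)   # mass in solar units per the Martig et al. fit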
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_dist(lamost_point, training_points, coeffs):
""" avg dist from one lamost point to nearest 10 training points """ |
diff2 = (training_points - lamost_point)**2
dist = np.sqrt(np.sum(diff2*coeffs, axis=1))
return np.mean(dist[dist.argsort()][0:10]) |
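A self-contained sketch of how calc_dist might be exercised; the label space (Teff, logg, [M/H]) and the inverse-variance-style weights are assumptions for illustration only.
import numpy as np
np.random.seed(0)
lamost_point = np.array([4800.0, 2.5, 0.0])                                    # one survey star in label space
training_points = lamost_point + np.random.randn(200, 3) * [100.0, 0.3, 0.2]   # mock training set
coeffs = np.array([1 / 100.0**2, 1 / 0.3**2, 1 / 0.2**2])                      # per-label weights (assumed)
calc_dist(lamost_point, training_points, coeffs)                               # mean weighted distance to the 10 nearest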
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def name_suggest(q=None, datasetKey=None, rank=None, limit=100, offset=None, **kwargs):
'''
A quick and simple autocomplete service that returns up to 20 name usages by
doing prefix matching against the scientific name. Results are ordered by relevance.
:param q: [str] Simple search parameter. The value for this parameter can be a
simple word or a phrase. Wildcards can be added to the simple word parameters only,
e.g. ``q=*puma*`` (Required)
:param datasetKey: [str] Filters by the checklist dataset key (a uuid, see examples)
:param rank: [str] A taxonomic rank. One of ``class``, ``cultivar``, ``cultivar_group``, ``domain``, ``family``,
``form``, ``genus``, ``informal``, ``infrageneric_name``, ``infraorder``, ``infraspecific_name``,
``infrasubspecific_name``, ``kingdom``, ``order``, ``phylum``, ``section``, ``series``, ``species``, ``strain``, ``subclass``,
``subfamily``, ``subform``, ``subgenus``, ``subkingdom``, ``suborder``, ``subphylum``, ``subsection``, ``subseries``,
``subspecies``, ``subtribe``, ``subvariety``, ``superclass``, ``superfamily``, ``superorder``, ``superphylum``,
``suprageneric_name``, ``tribe``, ``unranked``, or ``variety``.
:param limit: [fixnum] Number of records to return. Maximum: ``1000``. (optional)
:param offset: [fixnum] Record number to start at. (optional)
:return: A dictionary
References: http://www.gbif.org/developer/species#searching
Usage::
from pygbif import species
species.name_suggest(q='Puma concolor')
x = species.name_suggest(q='Puma')
species.name_suggest(q='Puma', rank="genus")
species.name_suggest(q='Puma', rank="subspecies")
species.name_suggest(q='Puma', rank="species")
species.name_suggest(q='Puma', rank="infraspecific_name")
species.name_suggest(q='Puma', limit=2)
'''
url = gbif_baseurl + 'species/suggest'
args = {'q':q, 'rank':rank, 'offset':offset, 'limit':limit}
return gbif_GET(url, args, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def dataset_metrics(uuid, **kwargs):
'''
Get details on a GBIF dataset.
:param uuid: [str] One or more dataset UUIDs. See examples.
References: http://www.gbif.org/developer/registry#datasetMetrics
Usage::
from pygbif import registry
registry.dataset_metrics(uuid='3f8a1297-3259-4700-91fc-acc4170b27ce')
registry.dataset_metrics(uuid='66dd0960-2d7d-46ee-a491-87b9adcfe7b1')
registry.dataset_metrics(uuid=['3f8a1297-3259-4700-91fc-acc4170b27ce', '66dd0960-2d7d-46ee-a491-87b9adcfe7b1'])
'''
def getdata(x, **kwargs):
url = gbif_baseurl + 'dataset/' + x + '/metrics'
return gbif_GET(url, {}, **kwargs)
if len2(uuid) == 1:
return getdata(uuid)
else:
return [getdata(x) for x in uuid] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def datasets(data = 'all', type = None, uuid = None, query = None, id = None,
limit = 100, offset = None, **kwargs):
'''
Search for datasets and dataset metadata.
:param data: [str] The type of data to get. Default: ``all``
:param type: [str] Type of dataset, options include ``OCCURRENCE``, etc.
:param uuid: [str] UUID of the data node provider. This must be specified if data
is anything other than ``all``.
:param query: [str] Query term(s). Only used when ``data = 'all'``
:param id: [int] A metadata document id.
References http://www.gbif.org/developer/registry#datasets
Usage::
from pygbif import registry
registry.datasets(limit=5)
registry.datasets(type="OCCURRENCE")
registry.datasets(uuid="a6998220-7e3a-485d-9cd6-73076bd85657")
registry.datasets(data='contact', uuid="a6998220-7e3a-485d-9cd6-73076bd85657")
registry.datasets(data='metadata', uuid="a6998220-7e3a-485d-9cd6-73076bd85657")
registry.datasets(data='metadata', uuid="a6998220-7e3a-485d-9cd6-73076bd85657", id=598)
registry.datasets(data=['deleted','duplicate'])
registry.datasets(data=['deleted','duplicate'], limit=1)
'''
args = {'q': query, 'type': type, 'limit': limit, 'offset': offset}
data_choices = ['all', 'organization', 'contact', 'endpoint',
'identifier', 'tag', 'machinetag', 'comment',
'constituents', 'document', 'metadata', 'deleted',
'duplicate', 'subDataset', 'withNoEndpoint']
check_data(data, data_choices)
if len2(data) ==1:
return datasets_fetch(data, uuid, args, **kwargs)
else:
return [datasets_fetch(x, uuid, args, **kwargs) for x in data] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def dataset_suggest(q=None, type=None, keyword=None, owningOrg=None,
publishingOrg=None, hostingOrg=None, publishingCountry=None, decade=None,
limit = 100, offset = None, **kwargs):
'''
Search that returns up to 20 matching datasets. Results are ordered by relevance.
:param q: [str] Query term(s) for full text search. The value for this parameter can be a simple word or a phrase. Wildcards can be added to the simple word parameters only, e.g. ``q=*puma*``
:param type: [str] Type of dataset, options include OCCURRENCE, etc.
:param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which you can search on. The search is done on the merged collection of tags, the dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING ANYMORE AS OF 2016-09-02.
:param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingCountry: [str] Publishing country.
:param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000, etc, and will return datasets wholly contained in the decade as well as those that cover the entire decade or more. Facet by decade to get the break down, e.g. ``/search?facet=DECADE&facet_only=true`` (see example below)
:param limit: [int] Number of results to return. Default: ``300``
:param offset: [int] Record to start at. Default: ``0``
:return: A dictionary
References: http://www.gbif.org/developer/registry#datasetSearch
Usage::
from pygbif import registry
registry.dataset_suggest(q="Amazon", type="OCCURRENCE")
# Suggest datasets tagged with keyword "france".
registry.dataset_suggest(keyword="france")
# Suggest datasets owned by the organization with key
# "07f617d0-c688-11d8-bf62-b8a03c50a862" (UK NBN).
registry.dataset_suggest(owningOrg="07f617d0-c688-11d8-bf62-b8a03c50a862")
# Fulltext search for all datasets having the word "amsterdam" somewhere in
# its metadata (title, description, etc).
registry.dataset_suggest(q="amsterdam")
# Limited search
registry.dataset_suggest(type="OCCURRENCE", limit=2)
registry.dataset_suggest(type="OCCURRENCE", limit=2, offset=10)
# Return just descriptions
registry.dataset_suggest(type="OCCURRENCE", limit = 5, description=True)
# Search by decade
registry.dataset_suggest(decade=1980, limit = 30)
'''
url = gbif_baseurl + 'dataset/suggest'
args = {'q': q, 'type': type, 'keyword': keyword,
'publishingOrg': publishingOrg, 'hostingOrg': hostingOrg,
'owningOrg': owningOrg, 'decade': decade,
'publishingCountry': publishingCountry,
'limit': limit, 'offset': offset}
out = gbif_GET(url, args, **kwargs)
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def dataset_search(q=None, type=None, keyword=None,
owningOrg=None, publishingOrg=None, hostingOrg=None, decade=None,
publishingCountry = None, facet = None, facetMincount=None,
facetMultiselect = None, hl = False, limit = 100, offset = None,
**kwargs):
'''
Full text search across all datasets. Results are ordered by relevance.
:param q: [str] Query term(s) for full text search. The value for this parameter
can be a simple word or a phrase. Wildcards can be added to the simple word
parameters only, e.g. ``q=*puma*``
:param type: [str] Type of dataset, options include OCCURRENCE, etc.
:param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which
you can search on. The search is done on the merged collection of tags, the
dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING
ANYMORE AS OF 2016-09-02.
:param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingCountry: [str] Publishing country.
:param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage
broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000,
etc, and will return datasets wholly contained in the decade as well as those
that cover the entire decade or more. Facet by decade to get the break down,
e.g. ``/search?facet=DECADE&facet_only=true`` (see example below)
:param facet: [str] A list of facet names used to retrieve the 100 most frequent values
for a field. Allowed facets are: type, keyword, publishingOrg, hostingOrg, decade,
and publishingCountry. Additionally subtype and country are legal values but not
yet implemented, so data will not yet be returned for them.
:param facetMincount: [str] Used in combination with the facet parameter. Set
facetMincount={#} to exclude facets with a count less than {#}, e.g.
http://api.gbif.org/v1/dataset/search?facet=type&limit=0&facetMincount=10000
only shows the type value 'OCCURRENCE' because 'CHECKLIST' and 'METADATA' have
counts less than 10000.
:param facetMultiselect: [bool] Used in combination with the facet parameter. Set
facetMultiselect=True to still return counts for values that are not currently
filtered, e.g.
http://api.gbif.org/v1/dataset/search?facet=type&limit=0&type=CHECKLIST&facetMultiselect=true
still shows type values 'OCCURRENCE' and 'METADATA' even though type is being
filtered by type=CHECKLIST
:param hl: [bool] Set ``hl=True`` to highlight terms matching the query when in fulltext
search fields. The highlight will be an emphasis tag of class 'gbifH1' e.g.
http://api.gbif.org/v1/dataset/search?q=plant&hl=true
Fulltext search fields include: title, keyword, country, publishing country,
publishing organization title, hosting organization title, and description. One
additional full text field is searched which includes information from metadata
documents, but the text of this field is not returned in the response.
:param limit: [int] Number of results to return. Default: ``300``
:param offset: [int] Record to start at. Default: ``0``
:note: Note that you can pass in additional faceting parameters on a per field basis.
        For example, if you want to limit the number of facets returned from a field ``foo`` to
3 results, pass in ``foo_facetLimit = 3``. GBIF does not allow all per field parameters,
but does allow some. See also examples.
:return: A dictionary
References: http://www.gbif.org/developer/registry#datasetSearch
Usage::
from pygbif import registry
# Gets all datasets of type "OCCURRENCE".
registry.dataset_search(type="OCCURRENCE", limit = 10)
# Fulltext search for all datasets having the word "amsterdam" somewhere in
# its metadata (title, description, etc).
registry.dataset_search(q="amsterdam", limit = 10)
# Limited search
registry.dataset_search(type="OCCURRENCE", limit=2)
registry.dataset_search(type="OCCURRENCE", limit=2, offset=10)
# Search by decade
registry.dataset_search(decade=1980, limit = 10)
# Faceting
## just facets
registry.dataset_search(facet="decade", facetMincount=10, limit=0)
## data and facets
registry.dataset_search(facet="decade", facetMincount=10, limit=2)
## many facet variables
registry.dataset_search(facet=["decade", "type"], facetMincount=10, limit=0)
## facet vars
### per variable paging
x = registry.dataset_search(
facet = ["decade", "type"],
decade_facetLimit = 3,
type_facetLimit = 3,
limit = 0
)
## highlight
x = registry.dataset_search(q="plant", hl=True, limit = 10)
[ z['description'] for z in x['results'] ]
'''
url = gbif_baseurl + 'dataset/search'
args = {'q': q, 'type': type, 'keyword': keyword,
'owningOrg': owningOrg, 'publishingOrg': publishingOrg,
'hostingOrg': hostingOrg, 'decade': decade,
'publishingCountry': publishingCountry, 'facet': facet,
'facetMincount': facetMincount, 'facetMultiselect': facetMultiselect,
'hl': hl, 'limit': limit, 'offset': offset}
gbif_kwargs = {key: kwargs[key] for key in kwargs if key not in requests_argset}
if gbif_kwargs is not None:
xx = dict(zip( [ re.sub('_', '.', x) for x in gbif_kwargs.keys() ], gbif_kwargs.values() ))
args.update(xx)
kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
out = gbif_GET(url, args, **kwargs)
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def wkt_rewind(x, digits = None):
'''
reverse WKT winding order
:param x: [str] WKT string
:param digits: [int] number of digits after decimal to use for the return string.
by default, we use the mean number of digits in your string.
:return: a string
Usage::
from pygbif import wkt_rewind
x = 'POLYGON((144.6 13.2, 144.6 13.6, 144.9 13.6, 144.9 13.2, 144.6 13.2))'
wkt_rewind(x)
wkt_rewind(x, digits = 0)
wkt_rewind(x, digits = 3)
wkt_rewind(x, digits = 7)
'''
z = wkt.loads(x)
if digits is None:
coords = z['coordinates']
nums = __flatten(coords)
dec_n = [ decimal.Decimal(str(w)).as_tuple().exponent for w in nums ]
digits = abs(statistics.mean(dec_n))
else:
if not isinstance(digits, int):
raise TypeError("'digits' must be an int")
wound = rewind(z)
back_to_wkt = wkt.dumps(wound, decimals = digits)
return back_to_wkt |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def occ_issues_lookup(issue=None, code=None):
'''
Lookup occurrence issue definitions and short codes
:param issue: Full name of issue, e.g, CONTINENT_COUNTRY_MISMATCH
:param code: an issue short code, e.g. ccm
Usage
pygbif.occ_issues_lookup(issue = 'CONTINENT_COUNTRY_MISMATCH')
pygbif.occ_issues_lookup(issue = 'MULTIMEDIA_DATE_INVALID')
pygbif.occ_issues_lookup(issue = 'ZERO_COORDINATE')
pygbif.occ_issues_lookup(code = 'cdiv')
'''
if code is None:
bb = [trymatch(issue, x) for x in gbifissues['issue'] ]
tmp = filter(None, bb)
else:
bb = [trymatch(code, x) for x in gbifissues['code'] ]
tmp = filter(None, bb)
return tmp |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def search(taxonKey=None, repatriated=None,
kingdomKey=None, phylumKey=None, classKey=None, orderKey=None,
familyKey=None, genusKey=None, subgenusKey=None, scientificName=None,
country=None, publishingCountry=None, hasCoordinate=None, typeStatus=None,
recordNumber=None, lastInterpreted=None, continent=None,
geometry=None, recordedBy=None, basisOfRecord=None, datasetKey=None,
eventDate=None, catalogNumber=None, year=None, month=None,
decimalLatitude=None, decimalLongitude=None, elevation=None,
depth=None, institutionCode=None, collectionCode=None,
hasGeospatialIssue=None, issue=None, q=None, spellCheck=None, mediatype=None,
limit=300, offset=0, establishmentMeans=None,
facet=None, facetMincount=None, facetMultiselect=None, **kwargs):
'''
Search GBIF occurrences
:param taxonKey: [int] A GBIF occurrence identifier
:param q: [str] Simple search parameter. The value for this parameter can be a simple word or a phrase.
    :param spellCheck: [bool] If ``True`` ask GBIF to check your spelling of the value passed to the ``q`` parameter.
        IMPORTANT: This only checks the input to the ``q`` parameter, and no others. Default: ``False``
:param repatriated: [str] Searches for records whose publishing country is different to the country where the record was recorded in
:param kingdomKey: [int] Kingdom classification key
:param phylumKey: [int] Phylum classification key
:param classKey: [int] Class classification key
:param orderKey: [int] Order classification key
:param familyKey: [int] Family classification key
:param genusKey: [int] Genus classification key
:param subgenusKey: [int] Subgenus classification key
:param scientificName: [str] A scientific name from the GBIF backbone. All included and synonym taxa are included in the search.
:param datasetKey: [str] The occurrence dataset key (a uuid)
    :param catalogNumber: [str] An identifier of any form assigned by the source within a physical collection or digital dataset for the record which may not be unique, but should be fairly unique in combination with the institution and collection code.
:param recordedBy: [str] The person who recorded the occurrence.
:param collectionCode: [str] An identifier of any form assigned by the source to identify the physical collection or digital dataset uniquely within the text of an institution.
    :param institutionCode: [str] An identifier of any form assigned by the source to identify the institution the record belongs to. Not guaranteed to be unique.
:param country: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. See here http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
:param basisOfRecord: [str] Basis of record, as defined in our BasisOfRecord enum here http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html Acceptable values are:
- ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen.
- ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people.
- ``LITERATURE`` An occurrence record based on literature alone.
- ``LIVING_SPECIMEN`` An occurrence record describing a living specimen, e.g.
- ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine.
- ``OBSERVATION`` An occurrence record describing an observation.
- ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen.
- ``UNKNOWN`` Unknown basis for the record.
:param eventDate: [date] Occurrence date in ISO 8601 format: yyyy, yyyy-MM, yyyy-MM-dd, or
MM-dd. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990``
wouldn't work)
:param year: [int] The 4 digit year. A year of 98 will be interpreted as AD 98. Supports range queries,
smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work)
:param month: [int] The month of the year, starting with 1 for January. Supports range queries,
smaller,larger (e.g., ``1,2``, whereas ``2,1`` wouldn't work)
:param q: [str] Query terms. The value for this parameter can be a simple word or a phrase.
:param decimalLatitude: [float] Latitude in decimals between -90 and 90 based on WGS 84.
Supports range queries, smaller,larger (e.g., ``25,30``, whereas ``30,25`` wouldn't work)
:param decimalLongitude: [float] Longitude in decimals between -180 and 180 based on WGS 84.
Supports range queries (e.g., ``-0.4,-0.2``, whereas ``-0.2,-0.4`` wouldn't work).
:param publishingCountry: [str] The 2-letter country code (as per ISO-3166-1) of the
country in which the occurrence was recorded.
:param elevation: [int/str] Elevation in meters above sea level. Supports range queries, smaller,larger
(e.g., ``5,30``, whereas ``30,5`` wouldn't work)
:param depth: [int/str] Depth in meters relative to elevation. For example 10 meters below a
lake surface with given elevation. Supports range queries, smaller,larger (e.g., ``5,30``,
whereas ``30,5`` wouldn't work)
:param geometry: [str] Searches for occurrences inside a polygon described in Well Known
Text (WKT) format. A WKT shape written as either POINT, LINESTRING, LINEARRING
POLYGON, or MULTIPOLYGON. Example of a polygon: ``((30.1 10.1, 20, 20 40, 40 40, 30.1 10.1))`` would be queried as http://bit.ly/1BzNwDq.
Polygons must have counter-clockwise ordering of points.
:param hasGeospatialIssue: [bool] Includes/excludes occurrence records which contain spatial
issues (as determined in our record interpretation), i.e. ``hasGeospatialIssue=TRUE``
returns only those records with spatial issues while ``hasGeospatialIssue=FALSE`` includes
only records without spatial issues. The absence of this parameter returns any
record with or without spatial issues.
:param issue: [str] One or more of many possible issues with each occurrence record. See
Details. Issues passed to this parameter filter results by the issue.
    :param hasCoordinate: [bool] Return only occurrence records with lat/long data (``true``) or
all records (``false``, default).
:param typeStatus: [str] Type status of the specimen. One of many options. See ?typestatus
:param recordNumber: [int] Number recorded by collector of the data, different from GBIF record
number. See http://rs.tdwg.org/dwc/terms/#recordNumber} for more info
:param lastInterpreted: [date] Date the record was last modified in GBIF, in ISO 8601 format:
yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Supports range queries, smaller,larger (e.g.,
``1990,1991``, whereas ``1991,1990`` wouldn't work)
:param continent: [str] Continent. One of ``africa``, ``antarctica``, ``asia``, ``europe``, ``north_america``
        (North America includes the Caribbean and reaches down and includes Panama), ``oceania``,
or ``south_america``
:param fields: [str] Default (``all``) returns all fields. ``minimal`` returns just taxon name,
key, latitude, and longitude. Or specify each field you want returned by name, e.g.
``fields = c('name','latitude','elevation')``.
:param mediatype: [str] Media type. Default is ``NULL``, so no filtering on mediatype. Options:
``NULL``, ``MovingImage``, ``Sound``, and ``StillImage``
:param limit: [int] Number of results to return. Default: ``300``
:param offset: [int] Record to start at. Default: ``0``
:param facet: [str] a character vector of length 1 or greater
:param establishmentMeans: [str] EstablishmentMeans, possible values include: INTRODUCED,
INVASIVE, MANAGED, NATIVE, NATURALISED, UNCERTAIN
:param facetMincount: [int] minimum number of records to be included in the faceting results
:param facetMultiselect: [bool] Set to ``true`` to still return counts for values that are not currently
filtered. See examples. Default: ``false``
:return: A dictionary
Usage::
from pygbif import occurrences
occurrences.search(taxonKey = 3329049)
# Return 2 results, this is the default by the way
occurrences.search(taxonKey=3329049, limit=2)
# Instead of getting a taxon key first, you can search for a name directly
# However, note that using this approach (with `scientificName="..."`)
        # you are getting synonyms too. The results for using `scientificName` and
# `taxonKey` parameters are the same in this case, but I wouldn't be surprised if for some
# names they return different results
occurrences.search(scientificName = 'Ursus americanus')
from pygbif import species
key = species.name_backbone(name = 'Ursus americanus', rank='species')['usageKey']
occurrences.search(taxonKey = key)
# Search by dataset key
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', limit=20)
# Search by catalog number
occurrences.search(catalogNumber="49366", limit=20)
# occurrences.search(catalogNumber=["49366","Bird.27847588"], limit=20)
# Use paging parameters (limit and offset) to page. Note the different results
# for the two queries below.
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=10, limit=5)
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=20, limit=5)
# Many dataset keys
# occurrences.search(datasetKey=["50c9509d-22c7-4a22-a47d-8c48425ef4a7", "7b5d6a48-f762-11e1-a439-00145eb45e9a"], limit=20)
# Search by collector name
res = occurrences.search(recordedBy="smith", limit=20)
[ x['recordedBy'] for x in res['results'] ]
# Many collector names
# occurrences.search(recordedBy=["smith","BJ Stacey"], limit=20)
# Search for many species
splist = ['Cyanocitta stelleri', 'Junco hyemalis', 'Aix sponsa']
keys = [ species.name_suggest(x)[0]['key'] for x in splist ]
out = [ occurrences.search(taxonKey = x, limit=1) for x in keys ]
[ x['results'][0]['speciesKey'] for x in out ]
# Search - q parameter
occurrences.search(q = "kingfisher", limit=20)
        ## spell check - only works with the `q` parameter
### spelled correctly - same result as above call
occurrences.search(q = "kingfisher", limit=20, spellCheck = True)
### spelled incorrectly - stops with suggested spelling
occurrences.search(q = "kajsdkla", limit=20, spellCheck = True)
### spelled incorrectly - stops with many suggested spellings
### and number of results for each
occurrences.search(q = "helir", limit=20, spellCheck = True)
        # Search on latitude and longitude
occurrences.search(decimalLatitude=50, decimalLongitude=10, limit=2)
# Search on a bounding box
## in well known text format
occurrences.search(geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20)
from pygbif import species
key = species.name_suggest(q='Aesculus hippocastanum')[0]['key']
occurrences.search(taxonKey=key, geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20)
## multipolygon
wkt = 'MULTIPOLYGON(((-123 38, -123 43, -116 43, -116 38, -123 38)),((-97 41, -97 45, -93 45, -93 41, -97 41)))'
occurrences.search(geometry = wkt, limit = 20)
# Search on country
occurrences.search(country='US', limit=20)
occurrences.search(country='FR', limit=20)
occurrences.search(country='DE', limit=20)
# Get only occurrences with lat/long data
occurrences.search(taxonKey=key, hasCoordinate=True, limit=20)
# Get only occurrences that were recorded as living specimens
occurrences.search(taxonKey=key, basisOfRecord="LIVING_SPECIMEN", hasCoordinate=True, limit=20)
# Get occurrences for a particular eventDate
occurrences.search(taxonKey=key, eventDate="2013", limit=20)
occurrences.search(taxonKey=key, year="2013", limit=20)
occurrences.search(taxonKey=key, month="6", limit=20)
# Get occurrences based on depth
key = species.name_backbone(name='Salmo salar', kingdom='animals')['usageKey']
occurrences.search(taxonKey=key, depth="5", limit=20)
# Get occurrences based on elevation
key = species.name_backbone(name='Puma concolor', kingdom='animals')['usageKey']
occurrences.search(taxonKey=key, elevation=50, hasCoordinate=True, limit=20)
# Get occurrences based on institutionCode
occurrences.search(institutionCode="TLMF", limit=20)
# Get occurrences based on collectionCode
occurrences.search(collectionCode="Floristic Databases MV - Higher Plants", limit=20)
# Get only those occurrences with spatial issues
occurrences.search(taxonKey=key, hasGeospatialIssue=True, limit=20)
# Search using a query string
occurrences.search(q="kingfisher", limit=20)
# Range queries
## See Detail for parameters that support range queries
### this is a range depth, with lower/upper limits in character string
occurrences.search(depth='50,100')
## Range search with year
occurrences.search(year='1999,2000', limit=20)
## Range search with latitude
occurrences.search(decimalLatitude='29.59,29.6')
# Search by specimen type status
## Look for possible values of the typeStatus parameter looking at the typestatus dataset
occurrences.search(typeStatus = 'allotype')
# Search by specimen record number
## This is the record number of the person/group that submitted the data, not GBIF's numbers
## You can see that many different groups have record number 1, so not super helpful
occurrences.search(recordNumber = 1)
# Search by last time interpreted: Date the record was last modified in GBIF
## The lastInterpreted parameter accepts ISO 8601 format dates, including
## yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Range queries are accepted for lastInterpreted
occurrences.search(lastInterpreted = '2014-04-01')
# Search by continent
## One of africa, antarctica, asia, europe, north_america, oceania, or south_america
occurrences.search(continent = 'south_america')
occurrences.search(continent = 'africa')
occurrences.search(continent = 'oceania')
occurrences.search(continent = 'antarctica')
# Search for occurrences with images
occurrences.search(mediatype = 'StillImage')
occurrences.search(mediatype = 'MovingImage')
x = occurrences.search(mediatype = 'Sound')
[z['media'] for z in x['results']]
# Query based on issues
occurrences.search(taxonKey=1, issue='DEPTH_UNLIKELY')
occurrences.search(taxonKey=1, issue=['DEPTH_UNLIKELY','COORDINATE_ROUNDED'])
        # Show all records in the Arizona State Lichen Collection that can't be matched to the GBIF
# backbone properly:
occurrences.search(datasetKey='84c0e1a0-f762-11e1-a439-00145eb45e9a', issue=['TAXON_MATCH_NONE','TAXON_MATCH_HIGHERRANK'])
# If you pass in an invalid polygon you get hopefully informative errors
### the WKT string is fine, but GBIF says bad polygon
wkt = 'POLYGON((-178.59375 64.83258989321493,-165.9375 59.24622380205539,
-147.3046875 59.065977905449806,-130.78125 51.04484764446178,-125.859375 36.70806354647625,
-112.1484375 23.367471303759686,-105.1171875 16.093320185359257,-86.8359375 9.23767076398516,
-82.96875 2.9485268155066175,-82.6171875 -14.812060061226388,-74.8828125 -18.849111862023985,
-77.34375 -47.661687803329166,-84.375 -49.975955187343295,174.7265625 -50.649460483096114,
179.296875 -42.19189902447192,-176.8359375 -35.634976650677295,176.8359375 -31.835565983656227,
163.4765625 -6.528187613695323,152.578125 1.894796132058301,135.703125 4.702353722559447,
127.96875 15.077427674847987,127.96875 23.689804541429606,139.921875 32.06861069132688,
149.4140625 42.65416193033991,159.2578125 48.3160811030533,168.3984375 57.019804336633165,
178.2421875 59.95776046458139,-179.6484375 61.16708631440347,-178.59375 64.83258989321493))'
occurrences.search(geometry = wkt)
# Faceting
## return no occurrence records with limit=0
x = occurrences.search(facet = "country", limit = 0)
x['facets']
## also return occurrence records
x = occurrences.search(facet = "establishmentMeans", limit = 10)
x['facets']
x['results']
## multiple facet variables
x = occurrences.search(facet = ["country", "basisOfRecord"], limit = 10)
x['results']
x['facets']
x['facets']['country']
x['facets']['basisOfRecord']
x['facets']['basisOfRecord']['count']
## set a minimum facet count
x = occurrences.search(facet = "country", facetMincount = 30000000L, limit = 0)
x['facets']
## paging per each faceted variable
### do so by passing in variables like "country" + "_facetLimit" = "country_facetLimit"
### or "country" + "_facetOffset" = "country_facetOffset"
x = occurrences.search(
facet = ["country", "basisOfRecord", "hasCoordinate"],
country_facetLimit = 3,
basisOfRecord_facetLimit = 6,
limit = 0
)
x['facets']
# requests package options
## There's an acceptable set of requests options (['timeout', 'cookies', 'auth',
## 'allow_redirects', 'proxies', 'verify', 'stream', 'cert']) you can pass
## in via **kwargs, e.g., set a timeout
x = occurrences.search(timeout = 1)
'''
url = gbif_baseurl + 'occurrence/search'
args = {'taxonKey': taxonKey, 'repatriated': repatriated,
'kingdomKey': kingdomKey, 'phylumKey': phylumKey, 'classKey': classKey,
'orderKey': orderKey, 'familyKey': familyKey, 'genusKey': genusKey,
'subgenusKey': subgenusKey, 'scientificName': scientificName,
'country': country, 'publishingCountry': publishingCountry,
'hasCoordinate': hasCoordinate, 'typeStatus': typeStatus,
'recordNumber': recordNumber, 'lastInterpreted': lastInterpreted,
'continent': continent, 'geometry': geometry, 'recordedBy': recordedBy,
'basisOfRecord': basisOfRecord, 'datasetKey': datasetKey, 'eventDate': eventDate,
'catalogNumber': catalogNumber, 'year': year, 'month': month,
'decimalLatitude': decimalLatitude, 'decimalLongitude': decimalLongitude,
'elevation': elevation, 'depth': depth, 'institutionCode': institutionCode,
'collectionCode': collectionCode, 'hasGeospatialIssue': hasGeospatialIssue,
'issue': issue, 'q': q, 'spellCheck': spellCheck, 'mediatype': mediatype,
'limit': limit, 'offset': offset, 'establishmentMeans': establishmentMeans,
'facetMincount': facetMincount, 'facet': facet,
'facetMultiselect': facetMultiselect}
gbif_kwargs = {key: kwargs[key] for key in kwargs if key not in requests_argset}
if gbif_kwargs is not None:
xx = dict(zip( [ re.sub('_', '.', x) for x in gbif_kwargs.keys() ], gbif_kwargs.values() ))
args.update(xx)
kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
out = gbif_GET(url, args, **kwargs)
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def networks(data = 'all', uuid = None, q = None, identifier = None,
identifierType = None, limit = 100, offset = None, **kwargs):
'''
Networks metadata.
Note: there's only 1 network now, so there's not a lot you can do with this method.
:param data: [str] The type of data to get. Default: ``all``
:param uuid: [str] UUID of the data network provider. This must be specified if data
is anything other than ``all``.
:param q: [str] Query networks. Only used when ``data = 'all'``. Ignored otherwise.
:param identifier: [fixnum] The value for this parameter can be a simple string or integer,
e.g. identifier=120
:param identifierType: [str] Used in combination with the identifier parameter to filter
identifiers by identifier type: ``DOI``, ``FTP``, ``GBIF_NODE``, ``GBIF_PARTICIPANT``,
``GBIF_PORTAL``, ``HANDLER``, ``LSID``, ``UNKNOWN``, ``URI``, ``URL``, ``UUID``
:param limit: [int] Number of results to return. Default: ``100``
:param offset: [int] Record to start at. Default: ``0``
:return: A dictionary
References: http://www.gbif.org/developer/registry#networks
Usage::
from pygbif import registry
registry.networks(limit=1)
registry.networks(uuid='2b7c7b4f-4d4f-40d3-94de-c28b6fa054a6')
'''
args = {'q': q, 'limit': limit, 'offset': offset, 'identifier': identifier,
'identifierType': identifierType}
data_choices = ['all', 'contact', 'endpoint', 'identifier',
'tag', 'machineTag', 'comment', 'constituents']
check_data(data, data_choices)
def getdata(x, uuid, args, **kwargs):
        if x != 'all' and uuid is None:
stop('You must specify a uuid if data does not equal "all"')
if uuid is None:
url = gbif_baseurl + 'network'
else:
            if x == 'all':
url = gbif_baseurl + 'network/' + uuid
else:
url = gbif_baseurl + 'network/' + uuid + '/' + x
res = gbif_GET(url, args, **kwargs)
return {'meta': get_meta(res), 'data': parse_results(res, uuid)}
if len2(data) == 1:
return getdata(data, uuid, args, **kwargs)
else:
return [getdata(x, uuid, args, **kwargs) for x in data] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def map(source = 'density', z = 0, x = 0, y = 0, format = '@1x.png',
srs='EPSG:4326', bin=None, hexPerTile=None, style='classic.point',
taxonKey=None, country=None, publishingCountry=None, publisher=None,
datasetKey=None, year=None, basisOfRecord=None, **kwargs):
'''
GBIF maps API
:param source: [str] Either ``density`` for fast, precalculated tiles,
or ``adhoc`` for any search
:param z: [str] zoom level
:param x: [str] longitude
:param y: [str] latitude
:param format: [str] format of returned data. One of:
- ``.mvt`` - vector tile
- ``@Hx.png`` - 256px raster tile (for legacy clients)
- ``@1x.png`` - 512px raster tile, @2x.png for a 1024px raster tile
- ``@2x.png`` - 1024px raster tile
- ``@3x.png`` - 2048px raster tile
- ``@4x.png`` - 4096px raster tile
:param srs: [str] Spatial reference system. One of:
- ``EPSG:3857`` (Web Mercator)
- ``EPSG:4326`` (WGS84 plate caree)
- ``EPSG:3575`` (Arctic LAEA)
- ``EPSG:3031`` (Antarctic stereographic)
:param bin: [str] square or hex to aggregate occurrence counts into
squares or hexagons. Points by default.
:param hexPerTile: [str] sets the size of the hexagons (the number
horizontally across a tile)
:param squareSize: [str] sets the size of the squares. Choose a factor
        of 4096 so they tessellate correctly: probably from 8, 16, 32, 64,
128, 256, 512.
:param style: [str] for raster tiles, choose from the available styles.
Defaults to classic.point.
:param taxonKey: [int] A GBIF occurrence identifier
:param datasetKey: [str] The occurrence dataset key (a uuid)
:param country: [str] The 2-letter country code (as per ISO-3166-1) of
the country in which the occurrence was recorded. See here
http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
:param basisOfRecord: [str] Basis of record, as defined in the BasisOfRecord enum
http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html
Acceptable values are
- ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen.
- ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people.
- ``LITERATURE`` An occurrence record based on literature alone.
- ``LIVING_SPECIMEN`` An occurrence record describing a living specimen, e.g.
- ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine.
- ``OBSERVATION`` An occurrence record describing an observation.
- ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen.
- ``UNKNOWN`` Unknown basis for the record.
:param year: [int] The 4 digit year. A year of 98 will be interpreted as
AD 98. Supports range queries, smaller,larger (e.g., ``1990,1991``,
whereas ``1991,1990`` wouldn't work)
:param publishingCountry: [str] The 2-letter country code (as per
ISO-3166-1) of the country in which the occurrence was recorded.
:return: An object of class GbifMap
For mvt format, see https://github.com/tilezen/mapbox-vector-tile to
decode, and example below
Usage::
from pygbif import maps
out = maps.map(taxonKey = 2435098)
out.response
out.path
out.img
out.plot()
out = maps.map(taxonKey = 2480498, year = range(2008, 2011+1))
out.response
out.path
out.img
out.plot()
# srs
maps.map(taxonKey = 2480498, year = 2010, srs = "EPSG:3857")
# bin
maps.map(taxonKey = 212, year = 1998, bin = "hex",
hexPerTile = 30, style = "classic-noborder.poly")
# style
maps.map(taxonKey = 2480498, style = "purpleYellow.point").plot()
# basisOfRecord
maps.map(taxonKey = 2480498, year = 2010,
basisOfRecord = "HUMAN_OBSERVATION", bin = "hex",
hexPerTile = 500).plot()
maps.map(taxonKey = 2480498, year = 2010,
basisOfRecord = ["HUMAN_OBSERVATION", "LIVING_SPECIMEN"],
hexPerTile = 500, bin = "hex").plot()
# map vector tiles, gives back raw bytes
from pygbif import maps
x = maps.map(taxonKey = 2480498, year = 2010,
format = ".mvt")
x.response
x.path
x.img # None
import mapbox_vector_tile
mapbox_vector_tile.decode(x.response.content)
'''
if format not in ['.mvt', '@Hx.png', '@1x.png', '@2x.png', '@3x.png', '@4x.png']:
raise ValueError("'format' not in allowed set, see docs")
if source not in ['density', 'adhoc']:
raise ValueError("'source' not in allowed set, see docs")
if srs not in ['EPSG:3857', 'EPSG:4326', 'EPSG:3575', 'EPSG:3031']:
raise ValueError("'srs' not in allowed set, see docs")
if bin is not None:
if bin not in ['square', 'hex']:
raise ValueError("'bin' not in allowed set, see docs")
if style is not None:
if style not in map_styles:
raise ValueError("'style' not in allowed set, see docs")
maps_baseurl = 'https://api.gbif.org'
url = maps_baseurl + '/v2/map/occurrence/%s/%s/%s/%s%s'
url = url % ( source, z, x, y, format )
year = __handle_year(year)
basisOfRecord = __handle_bor(basisOfRecord)
args = {'srs': srs, 'bin': bin, 'hexPerTile': hexPerTile, 'style': style,
'taxonKey': taxonKey, 'country': country,
'publishingCountry': publishingCountry, 'publisher': publisher,
'datasetKey': datasetKey, 'year': year,
'basisOfRecord': basisOfRecord}
kw = {key: kwargs[key] for key in kwargs if key not in requests_argset}
if kw is not None:
xx = dict(zip( [ re.sub('_', '.', x) for x in kw.keys() ], kw.values() ))
args.update(xx)
kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
ctype = 'image/png' if has(format, "png") else 'application/x-protobuf'
out = gbif_GET_map(url, args, ctype, **kwargs)
# return out
return GbifMap(out) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def name_usage(key = None, name = None, data = 'all', language = None,
datasetKey = None, uuid = None, sourceId = None, rank = None, shortname = None,
limit = 100, offset = None, **kwargs):
'''
Lookup details for specific names in all taxonomies in GBIF.
:param key: [fixnum] A GBIF key for a taxon
:param name: [str] Filters by a case insensitive, canonical namestring,
e.g. 'Puma concolor'
:param data: [str] The type of data to get. Default: ``all``. Options: ``all``,
``verbatim``, ``name``, ``parents``, ``children``,
``related``, ``synonyms``, ``descriptions``, ``distributions``, ``media``,
``references``, ``speciesProfiles``, ``vernacularNames``, ``typeSpecimens``,
``root``
:param language: [str] Language, default is english
:param datasetKey: [str] Filters by the dataset's key (a uuid)
:param uuid: [str] A uuid for a dataset. Should give exact same results as datasetKey.
:param sourceId: [fixnum] Filters by the source identifier.
:param rank: [str] Taxonomic rank. Filters by taxonomic rank as one of:
``CLASS``, ``CULTIVAR``, ``CULTIVAR_GROUP``, ``DOMAIN``, ``FAMILY``, ``FORM``, ``GENUS``, ``INFORMAL``,
``INFRAGENERIC_NAME``, ``INFRAORDER``, ``INFRASPECIFIC_NAME``, ``INFRASUBSPECIFIC_NAME``,
``KINGDOM``, ``ORDER``, ``PHYLUM``, ``SECTION``, ``SERIES``, ``SPECIES``, ``STRAIN``, ``SUBCLASS``, ``SUBFAMILY``,
``SUBFORM``, ``SUBGENUS``, ``SUBKINGDOM``, ``SUBORDER``, ``SUBPHYLUM``, ``SUBSECTION``, ``SUBSERIES``,
``SUBSPECIES``, ``SUBTRIBE``, ``SUBVARIETY``, ``SUPERCLASS``, ``SUPERFAMILY``, ``SUPERORDER``,
``SUPERPHYLUM``, ``SUPRAGENERIC_NAME``, ``TRIBE``, ``UNRANKED``, ``VARIETY``
:param shortname: [str] A short name..need more info on this?
:param limit: [fixnum] Number of records to return. Default: ``100``. Maximum: ``1000``. (optional)
:param offset: [fixnum] Record number to start at. (optional)
References: http://www.gbif.org/developer/species#nameUsages
Usage::
from pygbif import species
species.name_usage(key=1)
# Name usage for a taxonomic name
species.name_usage(name='Puma', rank="GENUS")
# All name usages
species.name_usage()
# References for a name usage
species.name_usage(key=2435099, data='references')
# Species profiles, descriptions
species.name_usage(key=3119195, data='speciesProfiles')
species.name_usage(key=3119195, data='descriptions')
species.name_usage(key=2435099, data='children')
# Vernacular names for a name usage
species.name_usage(key=3119195, data='vernacularNames')
# Limit number of results returned
species.name_usage(key=3119195, data='vernacularNames', limit=3)
# Search for names by dataset with datasetKey parameter
species.name_usage(datasetKey="d7dddbf4-2cf0-4f39-9b2a-bb099caae36c")
# Search for a particular language
species.name_usage(key=3119195, language="FRENCH", data='vernacularNames')
'''
args = {'language': language, 'name': name, 'datasetKey': datasetKey,
'rank': rank, 'sourceId': sourceId, 'limit': limit, 'offset': offset}
data_choices = ['all', 'verbatim', 'name', 'parents', 'children',
'related', 'synonyms', 'descriptions',
'distributions', 'media', 'references', 'speciesProfiles',
'vernacularNames', 'typeSpecimens', 'root']
check_data(data, data_choices)
if len2(data) == 1:
return name_usage_fetch(data, key, shortname, uuid, args, **kwargs)
else:
return [name_usage_fetch(x, key, shortname, uuid, args, **kwargs) for x in data] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_environ(variable, value):
"""check if a variable is present in the environmental variables""" |
if is_not_none(value):
return value
else:
value = os.environ.get(variable)
if is_none(value):
stop(''.join([variable,
""" not supplied and no entry in environmental
variables"""]))
else:
return value |
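A small sketch of the fallback behaviour (the account name is a placeholder):
import os
os.environ['GBIF_USER'] = 'jane_doe'            # hypothetical account
_check_environ('GBIF_USER', None)               # value is None -> falls back to the env var -> 'jane_doe'
_check_environ('GBIF_USER', 'explicit_user')    # an explicit value always wins -> 'explicit_user'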
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download(queries, user=None, pwd=None, email=None, pred_type='and'):
""" Spin up a download request for GBIF occurrence data. :param queries: One or more of query arguments to kick of a download job. See Details. :type queries: str or list :param pred_type: (character) One of ``equals`` (``=``), ``and`` (``&``), `or`` (``|``), ``lessThan`` (``<``), ``lessThanOrEquals`` (``<=``), ``greaterThan`` (``>``), ``greaterThanOrEquals`` (``>=``), ``in``, ``within``, ``not`` (``!``), ``like`` :param user: (character) User name within GBIF's website. Required. Set in your env vars with the option ``GBIF_USER`` :param pwd: (character) User password within GBIF's website. Required. Set in your env vars with the option ``GBIF_PWD`` :param email: (character) Email address to recieve download notice done email. Required. Set in your env vars with the option ``GBIF_EMAIL`` Argument passed have to be passed as character (e.g., ``country = US``), with a space between key (``country``), operator (``=``), and value (``US``). See the ``type`` parameter for possible options for the operator. This character string is parsed internally. - taxonKey = ``TAXON_KEY`` - scientificName = ``SCIENTIFIC_NAME`` - country = ``COUNTRY`` - publishingCountry = ``PUBLISHING_COUNTRY`` - hasCoordinate = ``HAS_COORDINATE`` - hasGeospatialIssue = ``HAS_GEOSPATIAL_ISSUE`` - typeStatus = ``TYPE_STATUS`` - recordNumber = ``RECORD_NUMBER`` - lastInterpreted = ``LAST_INTERPRETED`` - continent = ``CONTINENT`` - geometry = ``GEOMETRY`` - basisOfRecord = ``BASIS_OF_RECORD`` - datasetKey = ``DATASET_KEY`` - eventDate = ``EVENT_DATE`` - catalogNumber = ``CATALOG_NUMBER`` - year = ``YEAR`` - month = ``MONTH`` - decimalLatitude = ``DECIMAL_LATITUDE`` - decimalLongitude = ``DECIMAL_LONGITUDE`` - elevation = ``ELEVATION`` - depth = ``DEPTH`` - institutionCode = ``INSTITUTION_CODE`` - collectionCode = ``COLLECTION_CODE`` - issue = ``ISSUE`` - mediatype = ``MEDIA_TYPE`` - recordedBy = ``RECORDED_BY`` - repatriated = ``REPATRIATED`` See the API docs http://www.gbif.org/developer/occurrence#download for more info, and the predicates docs http://www.gbif.org/developer/occurrence#predicates GBIF has a limit of 12,000 characters for download queries - so if you're download request is really, really long and complex, consider breaking it up into multiple requests by one factor or another. :return: A dictionary, of results Usage:: from pygbif import occurrences as occ occ.download('basisOfRecord = LITERATURE') occ.download('taxonKey = 3119195') occ.download('decimalLatitude > 50') occ.download('elevation >= 9000') occ.download('decimalLatitude >= 65') occ.download('country = US') occ.download('institutionCode = TLMF') occ.download('catalogNumber = Bird.27847588') res = occ.download(['taxonKey = 7264332', 'hasCoordinate = TRUE']) # pass output to download_meta for more information occ.download_meta(occ.download('decimalLatitude > 75')) # Multiple queries gg = occ.download(['decimalLatitude >= 65', 'decimalLatitude <= -65'], type='or') gg = occ.download(['depth = 80', 'taxonKey = 2343454'], type='or') # Repratriated data for Costa Rica occ.download(['country = CR', 'repatriated = true']) """ |
user = _check_environ('GBIF_USER', user)
pwd = _check_environ('GBIF_PWD', pwd)
email = _check_environ('GBIF_EMAIL', email)
if isinstance(queries, str):
queries = [queries]
keyval = [_parse_args(z) for z in queries]
# USE GBIFDownload class to set up the predicates
req = GbifDownload(user, email)
req.main_pred_type = pred_type
for predicate in keyval:
req.add_predicate(predicate['key'],
predicate['value'],
predicate['type'])
out = req.post_download(user, pwd)
return out, req.payload |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_list(user=None, pwd=None, limit=20, offset=0):
""" Lists the downloads created by a user. :param user: [str] A user name, look at env var ``GBIF_USER`` first :param pwd: [str] Your password, look at env var ``GBIF_PWD`` first :param limit: [int] Number of records to return. Default: ``20`` :param offset: [int] Record number to start at. Default: ``0`` Usage:: from pygbif import occurrences as occ occ.download_list(user = "sckott") occ.download_list(user = "sckott", limit = 5) occ.download_list(user = "sckott", offset = 21) """ |
user = _check_environ('GBIF_USER', user)
pwd = _check_environ('GBIF_PWD', pwd)
url = 'http://api.gbif.org/v1/occurrence/download/user/' + user
args = {'limit': limit, 'offset': offset}
res = gbif_GET(url, args, auth=(user, pwd))
return {'meta': {'offset': res['offset'],
'limit': res['limit'],
'endofrecords': res['endOfRecords'],
'count': res['count']},
'results': res['results']} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_get(key, path=".", **kwargs):
""" Get a download from GBIF. :param key: [str] A key generated from a request, like that from ``download`` :param path: [str] Path to write zip file to. Default: ``"."``, with a ``.zip`` appended to the end. :param **kwargs**: Further named arguments passed on to ``requests.get`` Downloads the zip file to a directory you specify on your machine. The speed of this function is of course proportional to the size of the file to download, and affected by your internet connection speed. This function only downloads the file. To open and read it, see https://github.com/BelgianBiodiversityPlatform/python-dwca-reader Usage:: from pygbif import occurrences as occ occ.download_get("0000066-140928181241064") occ.download_get("0003983-140910143529206") """ |
meta = pygbif.occurrences.download_meta(key)
if meta['status'] != 'SUCCEEDED':
raise Exception('download "%s" not of status SUCCEEDED' % key)
else:
print('Download file size: %s bytes' % meta['size'])
url = 'http://api.gbif.org/v1/occurrence/download/request/' + key
path = "%s/%s.zip" % (path, key)
gbif_GET_write(url, path, **kwargs)
print("On disk at " + path)
return {'path': path, 'size': meta['size'], 'key': key} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main_pred_type(self, value):
"""set main predicate combination type :param value: (character) One of ``equals`` (``=``), ``and`` (``&``), ``or`` (``|``), ``lessThan`` (``<``), ``lessThanOrEquals`` (``<=``), ``greaterThan`` (``>``), ``greaterThanOrEquals`` (``>=``), ``in``, ``within``, ``not`` (``!``), ``like`` """ |
if value not in operators:
value = operator_lkup.get(value)
if value:
self._main_pred_type = value
self.payload['predicate']['type'] = self._main_pred_type
else:
raise Exception("main predicate combiner not a valid operator") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_predicate(self, key, value, predicate_type='equals'):
""" add key, value, type combination of a predicate :param key: query KEY parameter :param value: the value used in the predicate :param predicate_type: the type of predicate (e.g. ``equals``) """ |
if predicate_type not in operators:
predicate_type = operator_lkup.get(predicate_type)
if predicate_type:
self.predicates.append({'type': predicate_type,
'key': key,
'value': value
})
else:
raise Exception("predicate type not a valid operator") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _extract_values(values_list):
"""extract values from either file or list :param values_list: list or file name (str) with list of values """ |
values = []
# check if file or list of values to iterate
if isinstance(values_list, str):
with open(values_list) as ff:
reading = csv.reader(ff)
for j in reading:
values.append(j[0])
elif isinstance(values_list, list):
values = values_list
else:
raise Exception("input datatype not supported.")
return values |
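A sketch of the two input forms the helper accepts; the CSV written below is a throwaway one-column file of taxon keys (the values are illustrative).
import csv
with open('taxon_keys.csv', 'w', newline='') as ff:   # one key per row, first column only
    writer = csv.writer(ff)
    writer.writerows([['2435098'], ['2480498']])
# either the filename 'taxon_keys.csv' or the equivalent in-memory list
# ['2435098', '2480498'] can then be handed to add_iterative_predicate below,
# which routes it through _extract_values.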
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_iterative_predicate(self, key, values_list):
"""add an iterative predicate with a key and set of values which it can be equal to in and or function. The individual predicates are specified with the type ``equals`` and combined with a type ``or``. The main reason for this addition is the inability of using ``in`` as predicate type wfor multiple taxon_key values (cfr. http://dev.gbif.org/issues/browse/POR-2753) :param key: API key to use for the query. :param values_list: Filename or list containing the taxon keys to be s searched. """ |
values = self._extract_values(values_list)
predicate = {'type': 'equals', 'key': key, 'value': None}
predicates = []
while values:
predicate['value'] = values.pop()
predicates.append(predicate.copy())
self.predicates.append({'type': 'or', 'predicates': predicates}) |
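Continuing the hypothetical request: passing a list of taxon keys appends a single or-of-equals predicate (the inner predicates follow the pop order, so they may be reversed relative to the input).
req.add_iterative_predicate('TAXON_KEY', ['2435098', '2480498'])
req.predicates[-1]
# -> {'type': 'or', 'predicates': [{'type': 'equals', 'key': 'TAXON_KEY', 'value': '2480498'},
#                                  {'type': 'equals', 'key': 'TAXON_KEY', 'value': '2435098'}]}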
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def get(key, **kwargs):
'''
Gets details for a single, interpreted occurrence
:param key: [int] A GBIF occurrence key
:return: A dictionary, of results
Usage::
from pygbif import occurrences
occurrences.get(key = 1258202889)
occurrences.get(key = 1227768771)
occurrences.get(key = 1227769518)
'''
url = gbif_baseurl + 'occurrence/' + str(key)
out = gbif_GET(url, {}, **kwargs)
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def get_verbatim(key, **kwargs):
'''
Gets a verbatim occurrence record without any interpretation
:param key: [int] A GBIF occurrence key
:return: A dictionary, of results
Usage::
from pygbif import occurrences
occurrences.get_verbatim(key = 1258202889)
occurrences.get_verbatim(key = 1227768771)
occurrences.get_verbatim(key = 1227769518)
'''
url = gbif_baseurl + 'occurrence/' + str(key) + '/verbatim'
out = gbif_GET(url, {}, **kwargs)
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def name_backbone(name, rank=None, kingdom=None, phylum=None, clazz=None,
order=None, family=None, genus=None, strict=False, verbose=False,
offset=None, limit=100, **kwargs):
'''
Lookup names in the GBIF backbone taxonomy.
:param name: [str] Full scientific name potentially with authorship (required)
:param rank: [str] The rank given as our rank enum. (optional)
:param kingdom: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param phylum: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param class: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param order: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param family: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param genus: [str] If provided default matching will also try to match against this
if no direct match is found for the name alone. (optional)
:param strict: [bool] If True it (fuzzy) matches only the given name, but never a
taxon in the upper classification (optional)
:param verbose: [bool] If True show alternative matches considered which had been rejected.
:param offset: [int] Record to start at. Default: ``0``
:param limit: [int] Number of results to return. Default: ``100``
    :return: A list for a single taxon with many slots (with ``verbose=False`` - default), or a
list of length two, first element for the suggested taxon match, and a data.frame
with alternative name suggestions resulting from fuzzy matching (with ``verbose=True``).
If you don't get a match GBIF gives back a list of length 3 with slots synonym,
confidence, and ``matchType='NONE'``.
reference: http://www.gbif.org/developer/species#searching
Usage::
from pygbif import species
species.name_backbone(name='Helianthus annuus', kingdom='plants')
species.name_backbone(name='Helianthus', rank='genus', kingdom='plants')
species.name_backbone(name='Poa', rank='genus', family='Poaceae')
# Verbose - gives back alternatives
species.name_backbone(name='Helianthus annuus', kingdom='plants', verbose=True)
# Strictness
species.name_backbone(name='Poa', kingdom='plants', verbose=True, strict=False)
species.name_backbone(name='Helianthus annuus', kingdom='plants', verbose=True, strict=True)
# Non-existent name
species.name_backbone(name='Aso')
# Multiple equal matches
species.name_backbone(name='Oenante')
'''
url = gbif_baseurl + 'species/match'
args = {'name': name, 'rank': rank, 'kingdom': kingdom, 'phylum': phylum,
'class': clazz, 'order': order, 'family': family, 'genus': genus,
'strict': strict, 'verbose': verbose, 'offset': offset, 'limit': limit}
tt = gbif_GET(url, args, **kwargs)
return tt |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def name_parser(name, **kwargs):
'''
Parse taxon names using the GBIF name parser
:param name: [str] A character vector of scientific names. (required)
reference: http://www.gbif.org/developer/species#parser
Usage::
from pygbif import species
species.name_parser('x Agropogon littoralis')
species.name_parser(['Arrhenatherum elatius var. elatius',
'Secale cereale subsp. cereale', 'Secale cereale ssp. cereale',
'Vanessa atalanta (Linnaeus, 1758)'])
'''
url = gbif_baseurl + 'parser/name'
if name.__class__ == str:
name = [name]
return gbif_POST(url, name, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def name_lookup(q=None, rank=None, higherTaxonKey=None, status=None, isExtinct=None,
habitat=None, nameType=None, datasetKey=None, nomenclaturalStatus=None,
limit=100, offset=None, facet=False, facetMincount=None, facetMultiselect=None,
type=None, hl=False, verbose=False, **kwargs):
'''
Lookup names in all taxonomies in GBIF.
This service uses fuzzy lookup so that you can put in partial names and
you should get back those things that match. See examples below.
:param q: [str] Query term(s) for full text search (optional)
:param rank: [str] ``CLASS``, ``CULTIVAR``, ``CULTIVAR_GROUP``, ``DOMAIN``, ``FAMILY``,
``FORM``, ``GENUS``, ``INFORMAL``, ``INFRAGENERIC_NAME``, ``INFRAORDER``, ``INFRASPECIFIC_NAME``,
``INFRASUBSPECIFIC_NAME``, ``KINGDOM``, ``ORDER``, ``PHYLUM``, ``SECTION``, ``SERIES``, ``SPECIES``, ``STRAIN``, ``SUBCLASS``,
``SUBFAMILY``, ``SUBFORM``, ``SUBGENUS``, ``SUBKINGDOM``, ``SUBORDER``, ``SUBPHYLUM``, ``SUBSECTION``, ``SUBSERIES``,
``SUBSPECIES``, ``SUBTRIBE``, ``SUBVARIETY``, ``SUPERCLASS``, ``SUPERFAMILY``, ``SUPERORDER``, ``SUPERPHYLUM``,
``SUPRAGENERIC_NAME``, ``TRIBE``, ``UNRANKED``, ``VARIETY`` (optional)
:param verbose: [bool] If True show alternative matches considered which had been rejected.
:param higherTaxonKey: [str] Filters by any of the higher Linnean rank keys. Note this
is within the respective checklist and not searching nub keys across all checklists (optional)
:param status: [str] (optional) Filters by the taxonomic status as one of:
* ``ACCEPTED``
* ``DETERMINATION_SYNONYM`` Used for unknown child taxa referred to via spec, ssp, ...
* ``DOUBTFUL`` Treated as accepted, but doubtful whether this is correct.
* ``HETEROTYPIC_SYNONYM`` More specific subclass of ``SYNONYM``.
* ``HOMOTYPIC_SYNONYM`` More specific subclass of ``SYNONYM``.
* ``INTERMEDIATE_RANK_SYNONYM`` Used in nub only.
* ``MISAPPLIED`` More specific subclass of ``SYNONYM``.
* ``PROPARTE_SYNONYM`` More specific subclass of ``SYNONYM``.
* ``SYNONYM`` A general synonym, the exact type is unknown.
:param isExtinct: [bool] Filters by extinction status (e.g. ``isExtinct=True``)
:param habitat: [str] Filters by habitat. One of: ``marine``, ``freshwater``, or
``terrestrial`` (optional)
:param nameType: [str] (optional) Filters by the name type as one of:
* ``BLACKLISTED`` surely not a scientific name.
* ``CANDIDATUS`` Candidatus is a component of the taxonomic name for a bacterium that cannot be maintained in a Bacteriology Culture Collection.
* ``CULTIVAR`` a cultivated plant name.
* ``DOUBTFUL`` doubtful whether this is a scientific name at all.
* ``HYBRID`` a hybrid formula (not a hybrid name).
* ``INFORMAL`` a scientific name with some informal addition like "cf." or indetermined like Abies spec.
* ``SCINAME`` a scientific name which is not well formed.
* ``VIRUS`` a virus name.
* ``WELLFORMED`` a well formed scientific name according to present nomenclatural rules.
:param datasetKey: [str] Filters by the dataset's key (a uuid) (optional)
:param nomenclaturalStatus: [str] Not yet implemented, but will eventually allow for
filtering by a nomenclatural status enum
:param limit: [fixnum] Number of records to return. Maximum: ``1000``. (optional)
:param offset: [fixnum] Record number to start at. (optional)
:param facet: [str] A list of facet names used to retrieve the 100 most frequent values
for a field. Allowed facets are: ``datasetKey``, ``higherTaxonKey``, ``rank``, ``status``,
``isExtinct``, ``habitat``, and ``nameType``. Additionally ``threat`` and ``nomenclaturalStatus``
are legal values but not yet implemented, so data will not yet be returned for them. (optional)
:param facetMincount: [str] Used in combination with the facet parameter. Set
``facetMincount={#}`` to exclude facets with a count less than {#}, e.g.
http://bit.ly/1bMdByP only shows the type value ``ACCEPTED`` because the other
statuses have counts less than 7,000,000 (optional)
:param facetMultiselect: [bool] Used in combination with the facet parameter. Set
``facetMultiselect=True`` to still return counts for values that are not currently
filtered, e.g. http://bit.ly/19YLXPO still shows all status values even though
status is being filtered by ``status=ACCEPTED`` (optional)
:param type: [str] Type of name. One of ``occurrence``, ``checklist``, or ``metadata``. (optional)
:param hl: [bool] Set ``hl=True`` to highlight terms matching the query when in fulltext
    search fields. The highlight will be an emphasis tag of class ``gbifHl`` e.g.
``q='plant', hl=True``. Fulltext search fields include: ``title``, ``keyword``, ``country``,
``publishing country``, ``publishing organization title``, ``hosting organization title``, and
``description``. One additional full text field is searched which includes information from
metadata documents, but the text of this field is not returned in the response. (optional)
:return: A dictionary
:references: http://www.gbif.org/developer/species#searching
Usage::
from pygbif import species
# Look up names like mammalia
species.name_lookup(q='mammalia')
# Paging
species.name_lookup(q='mammalia', limit=1)
species.name_lookup(q='mammalia', limit=1, offset=2)
# large requests, use offset parameter
first = species.name_lookup(q='mammalia', limit=1000)
second = species.name_lookup(q='mammalia', limit=1000, offset=1000)
# Get all data and parse it, removing descriptions which can be quite long
species.name_lookup('Helianthus annuus', rank="species", verbose=True)
# Get all data and parse it, removing descriptions field which can be quite long
out = species.name_lookup('Helianthus annuus', rank="species")
res = out['results']
[ z.pop('descriptions', None) for z in res ]
res
# Fuzzy searching
species.name_lookup(q='Heli', rank="genus")
# Limit records to certain number
species.name_lookup('Helianthus annuus', rank="species", limit=2)
# Query by habitat
species.name_lookup(habitat = "terrestrial", limit=2)
species.name_lookup(habitat = "marine", limit=2)
species.name_lookup(habitat = "freshwater", limit=2)
# Using faceting
species.name_lookup(facet='status', limit=0, facetMincount='70000')
species.name_lookup(facet=['status', 'higherTaxonKey'], limit=0, facetMincount='700000')
species.name_lookup(facet='nameType', limit=0)
species.name_lookup(facet='habitat', limit=0)
species.name_lookup(facet='datasetKey', limit=0)
species.name_lookup(facet='rank', limit=0)
species.name_lookup(facet='isExtinct', limit=0)
# text highlighting
species.name_lookup(q='plant', hl=True, limit=30)
# Lookup by datasetKey
species.name_lookup(datasetKey='3f8a1297-3259-4700-91fc-acc4170b27ce')
'''
args = {'q': q, 'rank': rank, 'higherTaxonKey': higherTaxonKey,
'status': status, 'isExtinct': isExtinct, 'habitat': habitat,
'nameType': nameType, 'datasetKey': datasetKey,
'nomenclaturalStatus': nomenclaturalStatus, 'limit': limit, 'offset': offset,
'facet': bn(facet), 'facetMincount': facetMincount, 'facetMultiselect': facetMultiselect,
'hl': bn(hl), 'verbose': bn(verbose), 'type': type}
gbif_kwargs = {key: kwargs[key] for key in kwargs if key not in requests_argset}
if gbif_kwargs is not None:
xx = dict(zip( [ re.sub('_', '.', x) for x in gbif_kwargs.keys() ], gbif_kwargs.values() ))
args.update(xx)
kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
return gbif_GET(gbif_baseurl + 'species/search', args, **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def count(taxonKey=None, basisOfRecord=None, country=None, isGeoreferenced=None,
datasetKey=None, publishingCountry=None, typeStatus=None,
issue=None, year=None, **kwargs):
'''
Returns occurrence counts for a predefined set of dimensions
    :param taxonKey: [int] A GBIF taxon key
    :param basisOfRecord: [str] A basis of record, e.g. ``OBSERVATION``
    :param country: [str] A 2-letter ISO country code
    :param isGeoreferenced: [bool] Restrict counts to georeferenced (``True``) or non-georeferenced (``False``) records
    :param datasetKey: [str] A GBIF dataset key (UUID)
    :param publishingCountry: [str] A 2-letter ISO code for the country publishing the data
    :param typeStatus: [str] A specimen type status, e.g. ``HOLOTYPE``
    :param issue: [str] A GBIF occurrence issue, e.g. ``COORDINATE_ROUNDED``
    :param year: [int] A year, e.g. ``1995``
:return: dict
Usage::
from pygbif import occurrences
occurrences.count(taxonKey = 3329049)
occurrences.count(country = 'CA')
occurrences.count(isGeoreferenced = True)
occurrences.count(basisOfRecord = 'OBSERVATION')
'''
url = gbif_baseurl + 'occurrence/count'
out = gbif_GET(url, {'taxonKey': taxonKey, 'basisOfRecord': basisOfRecord, 'country': country,
'isGeoreferenced': isGeoreferenced, 'datasetKey': datasetKey,
'publishingCountry': publishingCountry, 'typeStatus': typeStatus,
'issue': issue, 'year': year}, **kwargs)
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def count_year(year, **kwargs):
'''
Lists occurrence counts by year
    :param year: [int] year range, e.g., ``1990,2000``. Does not support open-ended ranges like ``*,2010``
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_year(year = '1990,2000')
'''
url = gbif_baseurl + 'occurrence/counts/year'
out = gbif_GET(url, {'year': year}, **kwargs)
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def count_datasets(taxonKey = None, country = None, **kwargs):
'''
Lists occurrence counts for datasets that cover a given taxon or country
:param taxonKey: [int] Taxon key
:param country: [str] A country, two letter code
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_datasets(country = "DE")
'''
url = gbif_baseurl + 'occurrence/counts/datasets'
out = gbif_GET(url, {'taxonKey': taxonKey, 'country': country}, **kwargs)
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def count_countries(publishingCountry, **kwargs):
'''
Lists occurrence counts for all countries covered by the data published by the given country
:param publishingCountry: [str] A two letter country code
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_countries(publishingCountry = "DE")
'''
url = gbif_baseurl + 'occurrence/counts/countries'
out = gbif_GET(url, {'publishingCountry': publishingCountry}, **kwargs)
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def count_publishingcountries(country, **kwargs):
'''
Lists occurrence counts for all countries that publish data about the given country
:param country: [str] A country, two letter code
:return: dict
Usage::
from pygbif import occurrences
occurrences.count_publishingcountries(country = "DE")
'''
url = gbif_baseurl + 'occurrence/counts/publishingCountries'
out = gbif_GET(url, {"country": country}, **kwargs)
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _detect_notebook() -> bool: """Detect if code is running in a Jupyter Notebook. This isn't 100% correct but seems good enough. Returns ------- bool True if it detects this is a notebook, otherwise False. """ |
try:
from IPython import get_ipython
from ipykernel import zmqshell
except ImportError:
return False
kernel = get_ipython()
try:
from spyder.utils.ipython.spyder_kernel import SpyderKernel
if isinstance(kernel.kernel, SpyderKernel):
return False
except (ImportError, AttributeError):
pass
return isinstance(kernel, zmqshell.ZMQInteractiveShell) |
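
A plausible way to act on the detection result; this pairing with ``plotly.offline`` is an assumption for illustration, not necessarily how the surrounding library uses it.

import plotly.offline as pyo

if _detect_notebook():
    pyo.init_notebook_mode()
    show = pyo.iplot   # render figures inline in the notebook
else:
    show = pyo.plot    # write figures to an HTML file and open it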
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _merge_layout(x: go.Layout, y: go.Layout) -> go.Layout: """Merge attributes from two layouts.""" |
xjson = x.to_plotly_json()
yjson = y.to_plotly_json()
if 'shapes' in yjson and 'shapes' in xjson:
xjson['shapes'] += yjson['shapes']
yjson.update(xjson)
return go.Layout(yjson) |
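
A minimal sketch of the merge behaviour, assuming a plotly version where ``go.Layout`` exposes ``to_plotly_json`` and accepts a plain dict: attributes set on ``x`` win over ``y``, except ``shapes``, which are concatenated.

import plotly.graph_objs as go

x = go.Layout(title='price',
              shapes=[{'type': 'line', 'x0': 0, 'x1': 1, 'y0': 0, 'y1': 1}])
y = go.Layout(title='ignored', xaxis={'title': 'date'},
              shapes=[{'type': 'rect', 'x0': 0, 'x1': 2, 'y0': 0, 'y1': 2}])
merged = _merge_layout(x, y)
# merged keeps x's title, picks up the x-axis title from y,
# and merged.shapes contains both the line and the rect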
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _try_pydatetime(x):
"""Try to convert to pandas objects to datetimes. Plotly doesn't know how to handle them. """ |
try:
# for datetimeindex
x = [y.isoformat() for y in x.to_pydatetime()]
except AttributeError:
pass
try:
# for generic series
x = [y.isoformat() for y in x.dt.to_pydatetime()]
except AttributeError:
pass
return x |
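
A quick sketch of the three paths through the try/except blocks: a DatetimeIndex handled by the first block, a datetime64 Series handled via the ``.dt`` accessor in the second, and a non-datetime input that falls through unchanged.

import pandas as pd

idx = pd.date_range('2020-01-01', periods=2, freq='D')   # DatetimeIndex
_try_pydatetime(idx)          # ['2020-01-01T00:00:00', '2020-01-02T00:00:00']
ser = pd.Series(idx)          # datetime64 Series; the first block raises AttributeError
_try_pydatetime(ser)          # same ISO strings, via ser.dt.to_pydatetime()
_try_pydatetime([1, 2, 3])    # non-datetime input is returned as-is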