text_prompt
stringlengths 157
13.1k
| code_prompt
stringlengths 7
19.8k
⌀ |
---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def article_id_list(soup):
    """return a list of article-id data"""
    id_list = []
    for article_id_tag in raw_parser.article_id(soup):
        id_details = OrderedDict()
        # only truthy values are stored, in this fixed key order
        for key, value in [
                ("type", article_id_tag.get("pub-id-type")),
                ("value", article_id_tag.text),
                ("assigning-authority", article_id_tag.get("assigning-authority"))]:
            set_if_value(id_details, key, value)
        id_list.append(id_details)
    return id_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copyright_holder_json(soup):
    """for json output add a full stop if ends in et al"""
    permissions_tag = raw_parser.article_permissions(soup)
    if not permissions_tag:
        return None
    holder = node_text(raw_parser.copyright_holder(permissions_tag))
    if holder is not None and holder.endswith('et al'):
        holder += '.'
    return holder
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def subject_area(soup):
    """ Find the subject areas from article-categories subject tags """
    return [node_text(tag) for tag in raw_parser.subject_area(soup)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def display_channel(soup):
    """ Find the subject areas of type display-channel """
    return [node_text(tag) for tag in raw_parser.display_channel(soup)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def category(soup):
    """ Find the category from subject areas """
    return [node_text(tag) for tag in raw_parser.category(soup)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ymd(soup):
    """ Get the year, month and day from child tags """
    # tuple order is (day, month, year)
    return tuple(node_text(parse(soup))
                 for parse in (raw_parser.day, raw_parser.month, raw_parser.year))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pub_date(soup):
    """ Return the publishing date in struct format pub_date_date, pub_date_day,
    pub_date_month, pub_date_year, pub_date_timestamp Default date_type is pub """
    # try the preferred date-type first, then the fallback
    for date_type in ("pub", "publication"):
        date_tag = first(raw_parser.pub_date(soup, date_type=date_type))
        if date_tag is not None:
            (day, month, year) = ymd(date_tag)
            return date_struct(year, month, day)
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pub_dates(soup):
    """ return a list of all the pub dates """
    date_list = []
    for tag in raw_parser.pub_date(soup):
        date_entry = OrderedDict()
        for attr_name in ('publication-format', 'date-type', 'pub-type'):
            copy_attribute(tag.attrs, attr_name, date_entry)
        # only dates that declare a type carry day/month/year values
        if any(attr_name in tag.attrs for attr_name in ("date-type", "pub-type")):
            (day, month, year) = ymd(tag)
            date_entry['day'] = day
            date_entry['month'] = month
            date_entry['year'] = year
            date_entry['date'] = date_struct_nn(year, month, day)
        date_list.append(date_entry)
    return date_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collection_year(soup):
    """ Pub date of type collection will hold a year element for VOR articles """
    # look for the collection date by pub-type first, then by date-type
    pub_date_tag = (first(raw_parser.pub_date(soup, pub_type="collection"))
                    or first(raw_parser.pub_date(soup, date_type="collection")))
    if not pub_date_tag:
        return None
    year_tag = raw_parser.year(pub_date_tag)
    return int(node_text(year_tag)) if year_tag else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def abstracts(soup):
    """ Find the article abstract and format it """
    formatted_abstracts = []
    for tag in raw_parser.abstract(soup):
        abstract = {}
        abstract["abstract_type"] = tag.get("abstract-type")
        title_tag = raw_parser.title(tag)
        if title_tag:
            abstract["title"] = node_text(title_tag)
        abstract["content"] = None
        if raw_parser.paragraph(tag):
            good_paragraphs = remove_doi_paragraph(raw_parser.paragraph(tag))
            # Plain text content, paragraphs joined by a single space
            abstract["content"] = " ".join(
                node_text(p_tag) for p_tag in good_paragraphs)
            # Content including markup tags
            # When more than one paragraph, wrap each in a <p> tag
            abstract["full_content"] = "".join(
                '<p>' + node_contents_str(p_tag) + '</p>'
                for p_tag in good_paragraphs)
        formatted_abstracts.append(abstract)
    return formatted_abstracts
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def component_doi(soup):
    """ Look for all object-id of pub-type-id = doi, these are the component DOI tags """
    # Get components too for later type matching
    component_list = components(soup)
    doi_list = []
    object_id_tags = raw_parser.object_id(soup, pub_id_type="doi")
    for position, tag in enumerate(object_id_tags, start=1):
        component_object = {}
        component_object["doi"] = doi_uri_to_doi(tag.text)
        component_object["position"] = position
        # Try to find the type of component by matching its DOI
        for component in component_list:
            if "doi" in component and component["doi"] == component_object["doi"]:
                component_object["type"] = component["type"]
        doi_list.append(component_object)
    return doi_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tag_details(tag, nodenames):
    """ Used in media and graphics to extract data from their parent tags """
    details = {'type': tag.name, 'ordinal': tag_ordinal(tag)}
    # Ordinal value among siblings, when available
    sibling_ordinal = tag_details_sibling_ordinal(tag)
    if sibling_ordinal:
        details['sibling_ordinal'] = sibling_ordinal
    # Asset name, when available
    asset = tag_details_asset(tag)
    if asset:
        details['asset'] = asset
    # only look up the component DOI when an object-id doi tag is present
    if first(raw_parser.object_id(tag, pub_id_type="doi")):
        details['component_doi'] = extract_component_doi(tag, nodenames)
    return details
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inline_graphics(soup):
    """ inline-graphic tags """
    graphics = []
    for position, tag in enumerate(raw_parser.inline_graphic(soup), start=1):
        item = {}
        copy_attribute(tag.attrs, 'xlink:href', item, 'xlink_href')
        # Get the tag type
        details = tag_details(tag, ["sub-article"])
        copy_attribute(details, 'type', item)
        item['position'] = position
        # Ordinal should be the same as position in this case but set it anyway
        item['ordinal'] = tag_ordinal(tag)
        graphics.append(item)
    return graphics
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def self_uri(soup):
    """ self-uri tags """
    uri_list = []
    for position, tag in enumerate(raw_parser.self_uri(soup), start=1):
        item = {}
        copy_attribute(tag.attrs, 'xlink:href', item, 'xlink_href')
        copy_attribute(tag.attrs, 'content-type', item)
        # Get the tag type
        details = tag_details(tag, ["sub-article"])
        copy_attribute(details, 'type', item)
        item['position'] = position
        # Ordinal should be the same as position in this case but set it anyway
        item['ordinal'] = tag_ordinal(tag)
        uri_list.append(item)
    return uri_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def supplementary_material(soup):
    """ supplementary-material tags """
    items = []
    for position, tag in enumerate(raw_parser.supplementary_material(soup), start=1):
        item = {}
        copy_attribute(tag.attrs, 'id', item)
        # Get the tag type and related details
        details = tag_details(tag, ["supplementary-material"])
        for detail_key in ('type', 'asset', 'component_doi', 'sibling_ordinal'):
            copy_attribute(details, detail_key, item)
        label_tag = raw_parser.label(tag)
        if label_tag:
            item['label'] = node_text(label_tag)
            item['full_label'] = node_contents_str(label_tag)
        item['position'] = position
        # Ordinal should be the same as position in this case but set it anyway
        item['ordinal'] = tag_ordinal(tag)
        items.append(item)
    return items
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contrib_email(contrib_tag):
    """ Given a contrib tag, look for an email tag, and
    only return the value if it is not inside an aff tag """
    emails = [email_tag.text
              for email_tag in extract_nodes(contrib_tag, "email")
              if email_tag.parent.name != "aff"]
    # empty list becomes None
    return emails or None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contrib_phone(contrib_tag):
    """ Given a contrib tag, look for an phone tag """
    phone_tags = raw_parser.phone(contrib_tag)
    return first(phone_tags).text if phone_tags else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contrib_inline_aff(contrib_tag):
    """ Given a contrib tag, look for an aff tag directly inside it """
    # NavigableString children have no usable name and are skipped
    return [child_tag for child_tag in contrib_tag
            if child_tag and child_tag.name and child_tag.name == "aff"]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def contrib_xref(contrib_tag, ref_type):
    """ Given a contrib tag, look for an xref tag of
    type ref_type directly inside the contrib tag """
    matches = []
    for child_tag in contrib_tag:
        # skip non-tag children and tags that are not xref
        if not (child_tag and child_tag.name and child_tag.name == "xref"):
            continue
        ref_attr = child_tag.get('ref-type')
        if ref_attr and ref_attr == ref_type:
            matches.append(child_tag)
    return matches
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def all_contributors(soup, detail="brief"):
    """find all contributors, not constrained to only the ones in article meta"""
    return format_authors(soup, raw_parser.contributors(soup), detail)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def authors_non_byline(soup, detail="full"):
    """Non-byline authors for group author members"""
    # Filter the contributors down to the non-byline group author members,
    # in order to get their group-author-id
    non_byline_authors = [
        author for author in contributors(soup, detail)
        if author.get('type', None) == "author non-byline"]
    # Then renumber their position attribute starting from 1
    for position, author in enumerate(non_byline_authors, start=1):
        author["position"] = position
    return non_byline_authors
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_component_doi(tag, nodenames):
    """ Used to get component DOI from a tag and confirm it is actually for that tag
    and it is not for one of its children in the list of nodenames

    :param tag: tag to extract the DOI from
    :param nodenames: list of tag names used when walking up to the first parent
    :returns: the DOI string, or None when no DOI belongs to this tag
    """
    component_doi = None
    if(tag.name == "sub-article"):
        # sub-articles carry their DOI in an article-id tag
        component_doi = doi_uri_to_doi(node_text(first(raw_parser.article_id(tag, pub_id_type= "doi"))))
    else:
        object_id_tag = first(raw_parser.object_id(tag, pub_id_type= "doi"))
        # Tweak: if it is media and has no object_id_tag then it is not a "component"
        if tag.name == "media" and not object_id_tag:
            component_doi = None
        else:
            # Check the object id is for this tag and not one of its children
            # This happens for example when boxed text has a child figure,
            # the boxed text does not have a DOI, the figure does have one
            if object_id_tag and first_parent(object_id_tag, nodenames).name == tag.name:
                component_doi = doi_uri_to_doi(node_text(object_id_tag))
    return component_doi
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def correspondence(soup):
    """ Find the corresp tags included in author-notes for primary correspondence """
    author_notes_nodes = raw_parser.author_notes(soup)
    if not author_notes_nodes:
        return []
    return [tag.text for tag in raw_parser.corresp(author_notes_nodes)]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def award_group_award_id(tag):
    """ Find the award group award id, one for each item found in the get_funding_group section """
    return [award_id_tag.text for award_id_tag in extract_nodes(tag, "award-id")]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def award_group_principal_award_recipient(tag):
    """ Find the award group principal award recipient, one for each
    item found in the get_funding_group section """
    recipients = []
    for recipient_tag in extract_nodes(tag, "principal-award-recipient"):
        institution = node_text(first(extract_nodes(recipient_tag, "institution")))
        surname = node_text(first(extract_nodes(recipient_tag, "surname")))
        given_names = node_text(first(extract_nodes(recipient_tag, "given-names")))
        string_name = node_text(first(raw_parser.string_name(recipient_tag)))
        # Concatenate name and institution values if found
        # while filtering out excess whitespace; a space follows given names
        recipient_text = given_names + " " if given_names else ""
        for value in (surname, institution, string_name):
            if value:
                recipient_text += value
        recipients.append(recipient_text)
    return recipients
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def object_id_doi(tag, parent_tag_name=None):
    """DOI in an object-id tag found inside the tag

    :param tag: tag to search inside for an object-id of pub-id-type doi
    :param parent_tag_name: when supplied, only accept an object-id whose
        direct parent tag has this name
    :returns: the DOI string, or None when not found
    """
    doi = None
    object_ids = raw_parser.object_id(tag, "doi")
    # take the first object-id found; the original built a pointless list
    # copy via a comprehension before selecting the first element
    object_id = first(object_ids) if object_ids else None
    if parent_tag_name and object_id and object_id.parent.name != parent_tag_name:
        object_id = None
    if object_id:
        doi = node_contents_str(object_id)
    return doi
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def title_tag_inspected(tag, parent_tag_name=None, p_parent_tag_name=None, direct_sibling_only=False):
    """Extract the title tag and sometimes inspect its parents

    :param tag: tag to search inside
    :param parent_tag_name: expected name of the title tag's parent
    :param p_parent_tag_name: expected name of the title tag's grandparent
    :param direct_sibling_only: when True, only consider direct children of tag
    :returns: the title tag found, otherwise None
    """
    title_tag = None
    if direct_sibling_only is True:
        # NOTE: keeps the LAST direct child named title when several exist
        for sibling_tag in tag:
            if sibling_tag.name and sibling_tag.name == "title":
                title_tag = sibling_tag
    else:
        title_tag = raw_parser.title(tag)
    # the parent check only applies when both ancestor names are supplied
    if parent_tag_name and p_parent_tag_name:
        if (title_tag and title_tag.parent.name and title_tag.parent.parent.name
            and title_tag.parent.name == parent_tag_name
            and title_tag.parent.parent.name == p_parent_tag_name):
            pass
        else:
            title_tag = None
    return title_tag
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def title_text(tag, parent_tag_name=None, p_parent_tag_name=None, direct_sibling_only=False):
    """Extract the text of a title tag and sometimes inspect its parents"""
    title_tag = title_tag_inspected(
        tag, parent_tag_name, p_parent_tag_name, direct_sibling_only)
    return node_contents_str(title_tag) if title_tag else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def boxed_text_to_image_block(tag):
    """convert boxed-text to an image block containing an inline-graphic"""
    tag_block = OrderedDict()
    tag_block["type"] = "image"
    set_if_value(tag_block, "doi", doi_uri_to_doi(object_id_doi(tag, tag.name)))
    set_if_value(tag_block, "id", tag.get("id"))
    set_if_value(tag_block, "image",
                 body_block_image_content(first(raw_parser.inline_graphic(tag))))
    # render paragraphs into a caption, skipping any that hold an inline-graphic
    caption_content = [body_block_content(p_tag)
                       for p_tag in raw_parser.paragraph(tag)
                       if not raw_parser.inline_graphic(p_tag)]
    set_if_value(tag_block, "caption", caption_content)
    return tag_block
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def body_json(soup, base_url=None):
    """ Get body json and then alter it with section wrapping and removing boxed-text """
    body_content = body(soup, remove_key_info_box=True, base_url=base_url)
    # Wrap in a section if the first block is not a section
    needs_wrapper = (body_content and len(body_content) > 0
                     and "type" in body_content[0]
                     and body_content[0]["type"] != "section")
    if needs_wrapper:
        wrapper_section = OrderedDict([
            ("type", "section"),
            ("id", "s0"),
            ("title", "Main text"),
            ("content", list(body_content)),
        ])
        body_content = [wrapper_section]
    return elifetools.json_rewrite.rewrite_json("body_json", soup, body_content)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def body_block_content_render(tag, recursive=False, base_url=None):
    """ Render the tag as body content and call recursively if the tag has child tags

    :param tag: BeautifulSoup tag to render
    :param recursive: True when called from within this function on a child tag
    :param base_url: base url passed through to the rendering helpers
    :returns: a list of content blocks
    """
    block_content_list = []
    tag_content = OrderedDict()
    if tag.name == "p":
        # paragraphs are rendered by the paragraph helper; keep non-empty blocks
        for block_content in body_block_paragraph_render(tag, base_url=base_url):
            if block_content != {}:
                block_content_list.append(block_content)
    else:
        tag_content = body_block_content(tag, base_url=base_url)
    nodenames = body_block_nodenames()
    tag_content_content = []
    # Collect the content of the tag but only for some tags
    if tag.name not in ["p", "fig", "table-wrap", "list", "media", "disp-quote", "code"]:
        for child_tag in tag:
            # skip NavigableString children; only tags carry a name attribute
            if not(hasattr(child_tag, 'name')):
                continue
            if child_tag.name == "p":
                # Ignore paragraphs that start with DOI:
                if node_text(child_tag) and len(remove_doi_paragraph([child_tag])) <= 0:
                    continue
                for block_content in body_block_paragraph_render(child_tag, base_url=base_url):
                    if block_content != {}:
                        tag_content_content.append(block_content)
            elif child_tag.name == "fig" and tag.name == "fig-group":
                # Do not fig inside fig-group a second time
                pass
            elif child_tag.name == "media" and tag.name == "fig-group":
                # Do not include a media video inside fig-group a second time
                if child_tag.get("mimetype") == "video":
                    pass
            else:
                # recurse into any other child tag
                for block_content in body_block_content_render(child_tag, recursive=True, base_url=base_url):
                    if block_content != {}:
                        tag_content_content.append(block_content)
    if len(tag_content_content) > 0:
        if tag.name in nodenames or recursive is False:
            # wrap collected child content inside this tag's own block
            tag_content["content"] = []
            for block_content in tag_content_content:
                tag_content["content"].append(block_content)
            block_content_list.append(tag_content)
        else:
            # Not a block tag, e.g. a caption tag, let the content pass through
            block_content_list = tag_content_content
    else:
        # no child content collected; callers filter out empty {} blocks
        block_content_list.append(tag_content)
    return block_content_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def body_block_paragraph_render(p_tag, html_flag=True, base_url=None):
    """ paragraphs may wrap some other body block content
    this is separated out so it can be called from more than one place

    :param p_tag: BeautifulSoup paragraph tag
    :param html_flag: whether XML content is converted to HTML
    :param base_url: base url passed through to conversion and rendering
    :returns: a list of content blocks
    """
    # Configure the XML to HTML conversion preference for shorthand use below
    convert = lambda xml_string: xml_to_html(html_flag, xml_string, base_url)
    block_content_list = []
    tag_content_content = []
    nodenames = body_block_nodenames()
    paragraph_content = u''
    for child_tag in p_tag:
        # strings and tags that render to nothing are accumulated as plain text
        if child_tag.name is None or body_block_content(child_tag) == {}:
            paragraph_content = paragraph_content + unicode_value(child_tag)
        else:
            # Add previous paragraph content first
            if paragraph_content.strip() != '':
                tag_content_content.append(body_block_paragraph_content(convert(paragraph_content)))
                paragraph_content = u''
            if child_tag.name is not None and body_block_content(child_tag) != {}:
                for block_content in body_block_content_render(child_tag, base_url=base_url):
                    if block_content != {}:
                        tag_content_content.append(block_content)
    # finish up: flush any remaining accumulated text
    if paragraph_content.strip() != '':
        tag_content_content.append(body_block_paragraph_content(convert(paragraph_content)))
    if len(tag_content_content) > 0:
        for block_content in tag_content_content:
            block_content_list.append(block_content)
    return block_content_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def body_block_caption_render(caption_tags, base_url=None):
    """fig and media tag captions are similar so use this common function"""
    caption_content = []
    supplementary_material_tags = []
    for block_tag in remove_doi_paragraph(caption_tags):
        # Note then skip p tags with supplementary-material inside
        supp_tags = raw_parser.supplementary_material(block_tag)
        if supp_tags:
            supplementary_material_tags.extend(supp_tags)
            continue
        caption_content.extend(
            block_content
            for block_content in body_block_content_render(block_tag, base_url=base_url)
            if block_content != {})
    return caption_content, supplementary_material_tags
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def body_block_supplementary_material_render(supp_tags, base_url=None):
    """fig and media tag caption may have supplementary material"""
    source_data = []
    for supp_tag in supp_tags:
        for rendered in body_block_content_render(supp_tag, base_url=base_url):
            if rendered == {}:
                continue
            # the nested content of the supplementary material is not wanted
            rendered.pop("content", None)
            source_data.append(rendered)
    return source_data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def body_block_paragraph_content(text):
    """for formatting of simple paragraphs of text only,
    and check if it is all whitespace"""
    tag_content = OrderedDict()
    # empty string or None produces an empty block
    if text:
        tag_content["type"] = "paragraph"
        tag_content["text"] = clean_whitespace(text)
    return tag_content
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def body_block_image_content(tag):
    """format a graphic or inline-graphic into a body block json format"""
    image_content = OrderedDict()
    if not tag:
        return image_content
    copy_attribute(tag.attrs, 'xlink:href', image_content, 'uri')
    if "uri" in image_content:
        # todo!! alt
        set_if_value(image_content, "alt", "")
    return image_content
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def body_block_title_label_caption(tag_content, title_value, label_value, caption_content, set_caption=True, prefer_title=False, prefer_label=False):
    """set the title, label and caption values in a consistent way

    set_caption: insert a "caption" field
    prefer_title: when only one value is available, set title rather than label.
    If False, set label rather than title

    Mutates tag_content in place; returns None.
    """
    # values are only set when truthy; label has trailing punctuation stripped
    set_if_value(tag_content, "label", rstrip_punctuation(label_value))
    set_if_value(tag_content, "title", title_value)
    if set_caption is True and caption_content and len(caption_content) > 0:
        tag_content["caption"] = caption_content
    if prefer_title:
        if "title" not in tag_content and label_value:
            # move the label value into the title slot
            set_if_value(tag_content, "title", label_value)
            # NOTE(review): assumes "label" was set above; if rstrip_punctuation()
            # stripped the label to an empty value this del raises KeyError — confirm
            del(tag_content["label"])
    if prefer_label:
        if "label" not in tag_content and title_value:
            # move the title value into the label slot
            set_if_value(tag_content, "label", rstrip_punctuation(title_value))
            del(tag_content["title"])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def body_block_attribution(tag):
    """extract the attribution content for figures, tables, videos"""
    attributions = [node_contents_str(attrib_tag)
                    for attrib_tag in raw_parser.attrib(tag) or []]
    if raw_parser.permissions(tag):
        # concatenate content from from the permissions tag
        for permissions_tag in raw_parser.permissions(tag):
            # add the copyright statement if found
            attrib_string = join_sentences(
                '',
                node_contents_str(raw_parser.copyright_statement(permissions_tag)),
                '.')
            # add the license paragraphs
            for licence_p_tag in raw_parser.licence_p(permissions_tag) or []:
                attrib_string = join_sentences(
                    attrib_string, node_contents_str(licence_p_tag), '.')
            if attrib_string != '':
                attributions.append(attrib_string)
    return attributions
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def abstract_json(soup):
    """abstract in article json format"""
    abstract_json = None
    # the last abstract tag with no abstract-type attribute wins
    for tag in raw_parser.abstract(soup):
        if tag.get("abstract-type") is None:
            abstract_json = render_abstract_json(tag)
    return abstract_json
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def digest_json(soup):
    """digest in article json format"""
    digest_content = None
    # the last executive-summary abstract found wins
    for tag in raw_parser.abstract(soup, abstract_type="executive-summary"):
        digest_content = render_abstract_json(tag)
    return digest_content
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def author_affiliations(author, html_flag=True):
    """compile author affiliations for json output"""
    # Configure the XML to HTML conversion preference for shorthand use below
    convert = lambda xml_string: xml_to_html(html_flag, xml_string)

    def _has_value(affiliation, key):
        # attribute present and not only whitespace
        return affiliation.get(key) and affiliation.get(key).strip() != ''

    affiliations_json = []
    for affiliation in author.get("affiliations") or []:
        affiliation_json = OrderedDict()
        affiliation_json["name"] = []
        if affiliation.get("dept"):
            affiliation_json["name"].append(convert(affiliation.get("dept")))
        if _has_value(affiliation, "institution"):
            affiliation_json["name"].append(convert(affiliation.get("institution")))
        # Remove if empty
        if affiliation_json["name"] == []:
            del affiliation_json["name"]
        if _has_value(affiliation, "city") or _has_value(affiliation, "country"):
            affiliation_address = OrderedDict()
            affiliation_address["formatted"] = []
            affiliation_address["components"] = OrderedDict()
            if _has_value(affiliation, "city"):
                affiliation_address["formatted"].append(affiliation.get("city"))
                affiliation_address["components"]["locality"] = [affiliation.get("city")]
            if _has_value(affiliation, "country"):
                affiliation_address["formatted"].append(affiliation.get("country"))
                affiliation_address["components"]["country"] = affiliation.get("country")
            # Add if not empty
            if affiliation_address != {}:
                affiliation_json["address"] = affiliation_address
        # Add if not empty
        if affiliation_json != {}:
            affiliations_json.append(affiliation_json)
    return affiliations_json if affiliations_json else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def author_json_details(author, author_json, contributions, correspondence,
        competing_interests, equal_contributions_map, present_address_data,
        foot_notes_data, html_flag=True):
    """add more author json

    Decorates author_json with affiliations, footnotes, contact details,
    contributions, competing interests, equal contribution groups and
    postal addresses, and returns the augmented author_json.
    """
    # Configure the XML to HTML conversion preference for shorthand use below
    # (bug fix: this lambda had been dropped from the body, leaving convert()
    # undefined and raising NameError where it is called below)
    convert = lambda xml_string: xml_to_html(html_flag, xml_string)
    if author_affiliations(author):
        author_json["affiliations"] = author_affiliations(author)
    # foot notes or additionalInformation
    if author_foot_notes(author, foot_notes_data):
        author_json["additionalInformation"] = author_foot_notes(author, foot_notes_data)
    # email
    if author_email_addresses(author, correspondence):
        author_json["emailAddresses"] = author_email_addresses(author, correspondence)
    # phone
    if author_phone_numbers(author, correspondence):
        # NOTE(review): checks author_phone_numbers() but stores the
        # author_phone_numbers_json() value — confirm this asymmetry is intended
        author_json["phoneNumbers"] = author_phone_numbers_json(author, correspondence)
    # contributions
    if author_contribution(author, contributions):
        author_json["contribution"] = convert(author_contribution(author, contributions))
    # competing interests
    if author_competing_interests(author, competing_interests):
        author_json["competingInterests"] = convert(
            author_competing_interests(author, competing_interests))
    # equal-contributions
    if author_equal_contribution(author, equal_contributions_map):
        author_json["equalContributionGroups"] = author_equal_contribution(author, equal_contributions_map)
    # postalAddress
    if author_present_address(author, present_address_data):
        author_json["postalAddresses"] = author_present_address(author, present_address_data)
    return author_json
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collab_to_group_author_key_map(authors):
    """Map each author "collab" value to its "group-author-key"."""
    # authors without a collab value are skipped
    return {
        author.get("collab"): author.get("group-author-key")
        for author in authors
        if author.get("collab")
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def map_equal_contributions(contributors):
    """Assign 1-based ordinal values to each unique equal-contrib id."""
    keys = set()
    for contributor in contributors:
        references = contributor.get("references") or {}
        if "equal-contrib" in references:
            keys.update(references["equal-contrib"])
    # deterministic numbering via a basic sort of the collected ids
    return {key: position + 1 for position, key in enumerate(sorted(keys))}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def authors_json(soup):
    """Build the authors list in article JSON format.

    Gathers contributor records plus contribution, competing-interest,
    correspondence, present-address and footnote data from the parsed XML,
    then assembles person, group and on-behalf-of author entries in two
    passes, finally applying any per-article JSON rewrites.
    """
    authors_json_data = []
    contributors_data = contributors(soup, "full")
    author_contributions_data = author_contributions(soup, None)
    author_competing_interests_data = competing_interests(soup, None)
    author_correspondence_data = full_correspondence(soup)
    authors_non_byline_data = authors_non_byline(soup)  # NOTE(review): not referenced below — confirm it is still needed
    equal_contributions_map = map_equal_contributions(contributors_data)
    present_address_data = present_addresses(soup)
    foot_notes_data = other_foot_notes(soup)
    # First pass: build the basic structure for group, on-behalf-of and
    # individual (non-group-member) authors
    for contributor in contributors_data:
        author_json = None
        if contributor["type"] == "author" and contributor.get("collab"):
            author_json = author_group(contributor, author_contributions_data,
                author_correspondence_data, author_competing_interests_data,
                equal_contributions_map, present_address_data,
                foot_notes_data)
        elif contributor.get("on-behalf-of"):
            author_json = author_on_behalf_of(contributor)
        elif contributor["type"] == "author" and not contributor.get("group-author-key"):
            author_json = author_person(contributor, author_contributions_data,
                author_correspondence_data, author_competing_interests_data,
                equal_contributions_map, present_address_data, foot_notes_data)
        if author_json:
            authors_json_data.append(author_json)
    # Second pass: attach non-byline group members to their group author,
    # matching on the collab name -> group-author-key mapping
    collab_map = collab_to_group_author_key_map(contributors_data)
    for contributor in [elem for elem in contributors_data if elem.get("group-author-key") and not elem.get("collab")]:
        for group_author in [elem for elem in authors_json_data if elem.get('type') == 'group']:
            group_author_key = None
            if group_author["name"] in collab_map:
                group_author_key = collab_map[group_author["name"]]
            if contributor.get("group-author-key") == group_author_key:
                author_json = author_person(contributor, author_contributions_data,
                    author_correspondence_data, author_competing_interests_data,
                    equal_contributions_map, present_address_data, foot_notes_data)
                if contributor.get("sub-group"):
                    # members with a sub-group are nested one level deeper
                    if "groups" not in group_author:
                        group_author["groups"] = OrderedDict()
                    if contributor.get("sub-group") not in group_author["groups"]:
                        group_author["groups"][contributor.get("sub-group")] = []
                    group_author["groups"][contributor.get("sub-group")].append(author_json)
                else:
                    if "people" not in group_author:
                        group_author["people"] = []
                    group_author["people"].append(author_json)
    # apply article-specific JSON rewrites before returning
    authors_json_data_rewritten = elifetools.json_rewrite.rewrite_json("authors_json", soup, authors_json_data)
    return authors_json_data_rewritten
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def author_line(soup):
    """Assemble the preferred-name author line from the authors JSON."""
    names = extract_author_line_names(authors_json(soup))
    if not names:
        return None
    return format_author_line(names)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_author_line(author_names):
    """Format the author line: one or two names joined, three or more
    collapsed to "<first> et al."."""
    if not author_names:
        return None
    if len(author_names) > 2:
        return author_names[0] + " et al."
    return ", ".join(author_names)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def references_date(year=None):
    """Parse a reference year value, handling edge cases.

    :returns: (date, discriminator, in_press) — e.g. "2012a" yields
        ("2012", "a", None), "in press" yields (None, None, True).
    """
    date = None
    discriminator = None
    in_press = None
    if not year:
        return (date, discriminator, in_press)
    if "in press" in year.lower().strip():
        in_press = True
    elif re.match("^[0-9]+$", year):
        date = year
    else:
        # try to split a trailing lowercase discriminator off the year
        match = re.match("^([0-9]+?)([a-z]+?)$", year)
        if match:
            date, discriminator = match.group(1), match.group(2)
        else:
            date = year
    return (date, discriminator, in_press)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def references_json_authors(ref_authors, ref_content):
    """Populate the author fields of a references-json entry.

    Kept as a standalone helper for testability.
    """
    all_authors = references_authors(ref_authors)
    if all_authors == {}:
        return ref_content
    ref_type = ref_content.get("type")
    if ref_type in ["conference-proceeding", "journal", "other",
                    "periodical", "preprint", "report", "web"]:
        for key in ["authors", "authorsEtAl"]:
            set_if_value(ref_content, key, all_authors.get(key))
    elif ref_type in ["book", "book-chapter"]:
        for key in ["authors", "authorsEtAl", "editors", "editorsEtAl"]:
            set_if_value(ref_content, key, all_authors.get(key))
    elif ref_type == "clinical-trial":
        # Use the first populated group as "authors" and record which
        # group it was in authorsType; later groups are then skipped
        for group in ["authors", "collaborators", "sponsors"]:
            if "authorsType" not in ref_content and all_authors.get(group):
                set_if_value(ref_content, "authors", all_authors.get(group))
                set_if_value(ref_content, "authorsEtAl", all_authors.get(group + "EtAl"))
                ref_content["authorsType"] = group
    elif ref_type in ["data", "software"]:
        for key in ["authors", "authorsEtAl",
                    "compilers", "compilersEtAl", "curators", "curatorsEtAl"]:
            set_if_value(ref_content, key, all_authors.get(key))
    elif ref_type == "patent":
        for key in ["inventors", "inventorsEtAl", "assignees", "assigneesEtAl"]:
            set_if_value(ref_content, key, all_authors.get(key))
    elif ref_type == "thesis":
        # a thesis takes a single author value, not a list
        if all_authors.get("authors"):
            ref_content["author"] = all_authors.get("authors")[0]
    return ref_content
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_references_json(ref_content, soup=None):
    """Downgrade references that would fail schema validation to "unknown"."""
    # Fields each reference type must carry to pass validation
    required_fields = {
        "journal": ("articleTitle", "pages", "journal"),
        "book": ("publisher", "bookTitle"),
        "book-chapter": ("editors", "pages", "publisher"),
        "report": ("publisher",),
        "thesis": ("publisher",),
        "software": ("publisher",),
        "data": ("source",),
        "conference-proceeding": ("conference",),
    }
    ref_type = ref_content.get("type")
    missing_required = any(
        field not in ref_content
        for field in required_fields.get(ref_type, ()))
    if ref_type == "other" or missing_required:
        ref_content = references_json_to_unknown(ref_content, soup)
    return ref_content
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def references_json_unknown_details(ref_content, soup=None):
    """Build the "details" string for a reference of type unknown.

    Joins page information with any leftover citation text recovered from
    the original XML <ref> element.
    :param ref_content: reference dict (may contain "pages" and "id")
    :param soup: parsed article soup used to look up the original <ref> tag
    :returns: the details string, or None when nothing could be collected
    """
    details = ""
    # Try adding pages values first
    if "pages" in ref_content:
        if "range" in ref_content["pages"]:
            # NOTE(review): when "pages" is a string this is a substring
            # test, not a key test — presumably "pages" is a dict here
            details += ref_content["pages"]["range"]
        else:
            details += ref_content["pages"]
    if soup:
        # Attempt to find the XML element by id, and convert it to details
        if "id" in ref_content:
            ref_tag = first(soup.select("ref#" + ref_content["id"]))
            if ref_tag:
                # Now remove tags that would be already part of the unknown reference by now
                for remove_tag in ["person-group", "year", "article-title",
                    "elocation-id", "fpage", "lpage"]:
                    ref_tag = remove_tag_from_tag(ref_tag, remove_tag)
                # Add the remaining tag content comma separated
                for tag in first(raw_parser.element_citation(ref_tag)):
                    if node_text(tag) is not None:
                        if details != "":
                            details += ", "
                        details += node_text(tag)
    if details == "":
        return None
    else:
        return details
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unwrap_appendix_box(json_content):
    """Remove an unwanted wrapping boxed-content block from appendix JSON."""
    content = json_content.get("content")
    if not content:
        return json_content
    first_block = content[0]
    if first_block.get("type") == "box" and first_block.get("content"):
        # hoist the box's DOI onto the appendix if the appendix lacks one
        if first_block.get("doi") and not json_content.get("doi"):
            json_content["doi"] = first_block.get("doi")
        # replace the box wrapper with its inner content
        json_content["content"] = first_block["content"]
    return json_content
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_schemas_from_file(source_path):
    """Extract view schemas from the python file at *source_path*.

    :returns: a list of ViewSchema objects on success, None when the file
        cannot be read or parsed.
    """
    logging.info("Extracting schemas from %s", source_path)
    try:
        with open(source_path, 'r') as source_file:
            source = source_file.read()
    except (FileNotFoundError, PermissionError) as exc:
        logging.error("Cannot extract schemas: %s", exc.strerror)
        return None
    try:
        schemas = extract_schemas_from_source(source, source_path)
    except SyntaxError as exc:
        logging.error("Cannot extract schemas: %s", str(exc))
        return None
    logging.info(
        "Extracted %d %s", len(schemas),
        "schema" if len(schemas) == 1 else "schemas")
    return schemas
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_simple_assignments(tree):
"""Get simple assignments from node tree.""" |
result = {}
for node in ast.walk(tree):
if isinstance(node, ast.Assign):
for target in node.targets:
if isinstance(target, ast.Name):
result[target.id] = node.value
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_value(value):
    """Render *value* as python source text, sorting nested dicts by key."""
    if isinstance(value, list):
        inner = ', '.join(render_value(item) for item in value)
        return '[' + inner + ']'
    if isinstance(value, dict):
        pairs = ('{k!r}: {v}'.format(k=key, v=render_value(val))
                 for key, val in sorted(value.items()))
        return '{' + ', '.join(pairs) + '}'
    # scalars fall back to their repr
    return repr(value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_service_double_file(target_root, service_name, rendered):
    """Write rendered service-double python source under *target_root*."""
    target_path = os.path.join(
        target_root, 'snapstore_schemas', 'service_doubles',
        '%s.py' % service_name)
    with open(target_path, 'w') as target_file:
        target_file.write(rendered)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean_docstring(docstring):
    """Dedent *docstring*, treating an unindented first line specially."""
    docstring = docstring.strip()
    if '\n' not in docstring:
        # single-line docstring: nothing left to dedent
        return docstring
    if docstring[0].isspace():
        # whole docstring is indented uniformly
        return textwrap.dedent(docstring)
    # first line flush left; dedent only the remainder
    first_line, _, remainder = docstring.partition('\n')
    return first_line + '\n' + textwrap.dedent(remainder)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _sort_schema(schema):
"""Recursively sorts a JSON schema by dict key.""" |
if isinstance(schema, dict):
for k, v in sorted(schema.items()):
if isinstance(v, dict):
yield k, OrderedDict(_sort_schema(v))
elif isinstance(v, list):
yield k, list(_sort_schema(v))
else:
yield k, v
elif isinstance(schema, list):
for v in schema:
if isinstance(v, dict):
yield OrderedDict(_sort_schema(v))
elif isinstance(v, list):
yield list(_sort_schema(v))
else:
yield v
else:
yield d |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_field_schema(name, field):
    """Return a JSON Schema fragment describing a Django form *field*."""
    schema = {'type': 'string'}
    if field.label:
        schema['title'] = str(field.label)  # str() forces lazy translation
    if field.help_text:
        schema['description'] = str(field.help_text)  # str() forces lazy translation
    # Order matters below: subclasses must be matched before their parents
    if isinstance(field, (fields.URLField, fields.FileField)):
        schema['format'] = 'uri'
    elif isinstance(field, fields.EmailField):
        schema['format'] = 'email'
    elif isinstance(field, fields.DateTimeField):
        schema['format'] = 'date-time'
    elif isinstance(field, fields.DateField):
        schema['format'] = 'date'
    elif isinstance(field, (fields.DecimalField, fields.FloatField)):
        schema['type'] = 'number'
    elif isinstance(field, fields.IntegerField):
        schema['type'] = 'integer'
    elif isinstance(field, fields.NullBooleanField):
        schema['type'] = 'boolean'
    elif isinstance(field.widget, widgets.CheckboxInput):
        schema['type'] = 'boolean'
    if getattr(field, 'choices', []):
        schema['enum'] = sorted(choice[0] for choice in field.choices)
    # Multi-select widgets become arrays of the scalar type
    if isinstance(field.widget, (widgets.Select, widgets.ChoiceWidget)):
        if field.widget.allow_multiple_selected:
            schema['items'] = {'type': schema['type']}
            if 'enum' in schema:
                schema['items']['enum'] = schema.pop('enum')
            schema['type'] = 'array'
    return schema
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_form_schema(form):
    """Return a JSON Schema object describing a Django Form."""
    properties = {}
    required = []
    for field_name, field in form.base_fields.items():
        properties[field_name] = get_field_schema(field_name, field)
        if field.required:
            required.append(field_name)
    schema = {'type': 'object', 'properties': properties}
    # only emit "required" when at least one field demands it
    if required:
        schema['required'] = required
    return schema
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def xml_to_html(html_flag, xml_string, base_url=None):
    """Convert a JATS XML fragment into HTML-friendly markup for JSON output.

    :param html_flag: conversion only runs when this is exactly True
    :param xml_string: XML fragment to convert; returned untouched when falsy
    :param base_url: base URL used when rewriting inline-graphic sources
    :returns: the converted HTML string, or the input unchanged
    """
    # `not html_flag is True` means any non-True value disables conversion
    if not xml_string or not html_flag is True:
        return xml_string
    html_string = xml_string
    html_string = remove_comment_tags(html_string)
    # Escape unmatched angle brackets
    if '<' in html_string or '>' in html_string:
        html_string = escape_html(html_string)
    # Rewrite JATS-specific tags; each step operates on the previous output
    html_string = replace_xref_tags(html_string)
    html_string = replace_ext_link_tags(html_string)
    html_string = replace_email_tags(html_string)
    html_string = replace_inline_graphic_tags(html_string, base_url)
    html_string = replace_named_content_tags(html_string)
    html_string = replace_mathml_tags(html_string)
    html_string = replace_table_style_author_callout(html_string)
    # Simple one-to-one tag conversions
    html_string = replace_simple_tags(html_string, 'italic', 'i')
    html_string = replace_simple_tags(html_string, 'bold', 'b')
    html_string = replace_simple_tags(html_string, 'underline', 'span', '<span class="underline">')
    html_string = replace_simple_tags(html_string, 'sc', 'span', '<span class="small-caps">')
    html_string = replace_simple_tags(html_string, 'monospace', 'span', '<span class="monospace">')
    html_string = replace_simple_tags(html_string, 'inline-formula', None)
    html_string = replace_simple_tags(html_string, 'break', 'br')
    return html_string
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_body(schema):
    """Decorator validating the JSON body of incoming requests.

    Request bodies routed to the decorated flask view are matched against
    *schema*; a mismatch raises DataValidationError (whose error_list
    attribute holds every validation failure).
    """
    location = get_callsite_location()

    def decorator(view_fn):
        validate_schema(schema)
        wrapped = wrap_request(view_fn, schema)
        # keep the schema discoverable for documentation tooling
        record_schemas(
            view_fn, wrapped, location, request_schema=sort_schema(schema))
        return wrapped

    return decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def record_schemas(
        fn, wrapper, location, request_schema=None, response_schema=None):
    """Attach request/response schemas to *wrapper* for later extraction.

    When *fn* was already decorated by an acceptable api call, the schemas
    are mirrored onto its metadata object as well.
    """
    # have we already been decorated by an acceptable api call?
    has_acceptable = hasattr(fn, '_acceptable_metadata')
    if request_schema is not None:
        # preserve schema for later use
        # (fixed: was a redundant chained self-assignment)
        wrapper._request_schema = request_schema
        wrapper._request_schema_location = location
        if has_acceptable:
            fn._acceptable_metadata._request_schema = request_schema
            fn._acceptable_metadata._request_schema_location = location
    if response_schema is not None:
        # preserve schema for later use
        wrapper._response_schema = response_schema
        wrapper._response_schema_location = location
        if has_acceptable:
            fn._acceptable_metadata._response_schema = response_schema
            fn._acceptable_metadata._response_schema_location = location
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_output(schema):
    """Decorator validating a flask view's response body against *schema*.

    The decorated view returns a plain list/dict; any response that does
    not comply with the schema raises DataValidationError.
    """
    location = get_callsite_location()

    def decorator(view_fn):
        validate_schema(schema)
        wrapped = wrap_response(view_fn, schema)
        # keep the schema discoverable for documentation tooling
        record_schemas(
            view_fn, wrapped, location, response_schema=sort_schema(schema))
        return wrapped

    return decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(payload, schema):
    """Validate *payload* against *schema*, returning a list of error strings.

    Each entry combines the jsonschema message with a /-separated path to
    the failing element.
    """
    validator = jsonschema.Draft4Validator(
        schema, format_checker=jsonschema.FormatChecker())
    return [
        error.message + ' at /' + '/'.join(str(part) for part in error.absolute_path)
        for error in validator.iter_errors(payload)
    ]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect(url, max_retries=None, **kwargs):
    """Connect to a Phoenix query server.

    :param url: URL of the Phoenix query server, e.g. ``http://localhost:8765/``
    :param max_retries: maximum retry count on connection errors
    :param kwargs: forwarded to the Connection constructor
        (autocommit, readonly, cursor_factory, ...)
    :returns: an open Connection object
    """
    avatica_client = AvaticaClient(url, max_retries=max_retries)
    avatica_client.connect()
    return Connection(avatica_client, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect(self):
    """Open an HTTP connection to the RPC server.

    :raises InterfaceError: when the service cannot be reached.
    """
    logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
    try:
        # the connection object is assigned first, matching prior behaviour
        self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
        self.connection.connect()
    except (httplib.HTTPException, socket.error) as exc:
        raise errors.InterfaceError('Unable to connect to the specified service', exc)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    """Close the HTTP connection to the RPC server, if one is open."""
    if self.connection is None:
        return
    logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
    try:
        self.connection.close()
    except httplib.HTTPException:
        # closing is best-effort: log and continue
        logger.warning("Error while closing connection", exc_info=True)
    self.connection = None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connection_sync(self, connection_id, connProps=None):
    """Synchronize connection properties with the server.

    :param connection_id: ID of the current connection.
    :param connProps: dict of properties to change (may be None).
    :returns: a ``common_pb2.ConnectionProperties`` object.
    """
    props = connProps or {}
    request = requests_pb2.ConnectionSyncRequest()
    request.connection_id = connection_id
    request.conn_props.auto_commit = props.get('autoCommit', False)
    request.conn_props.has_auto_commit = True
    request.conn_props.read_only = props.get('readOnly', False)
    request.conn_props.has_read_only = True
    request.conn_props.transaction_isolation = props.get('transactionIsolation', 0)
    request.conn_props.catalog = props.get('catalog', '')
    request.conn_props.schema = props.get('schema', '')
    response = responses_pb2.ConnectionSyncResponse()
    response.ParseFromString(self._apply(request))
    return response.conn_props
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def open_connection(self, connection_id, info=None):
    """Open a new connection on the server.

    :param connection_id: ID to assign to the new connection.
    :param info: optional dict of connection properties.
    """
    request = requests_pb2.OpenConnectionRequest()
    request.connection_id = connection_id
    if info is not None:
        # protobuf map field: entries must be assigned one by one,
        # assigning a dict directly fails
        for key, value in info.items():
            request.info[key] = value
    # parse the response to surface protocol errors; payload is unused
    response = responses_pb2.OpenConnectionResponse()
    response.ParseFromString(self._apply(request))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close_connection(self, connection_id):
    """Close the server-side connection identified by *connection_id*."""
    request = requests_pb2.CloseConnectionRequest()
    request.connection_id = connection_id
    # the response carries no useful payload
    self._apply(request)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_statement(self, connection_id):
    """Create a new server-side statement.

    :param connection_id: ID of the current connection.
    :returns: the new statement ID.
    """
    request = requests_pb2.CreateStatementRequest()
    request.connection_id = connection_id
    response = responses_pb2.CreateStatementResponse()
    response.ParseFromString(self._apply(request))
    return response.statement_id
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close_statement(self, connection_id, statement_id):
    """Close a server-side statement.

    :param connection_id: ID of the current connection.
    :param statement_id: ID of the statement to close.
    """
    request = requests_pb2.CloseStatementRequest()
    request.connection_id = connection_id
    request.statement_id = statement_id
    # the response carries no useful payload
    self._apply(request)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
    """Prepare and immediately execute *sql* on an existing statement.

    :param connection_id: ID of the current connection.
    :param statement_id: ID of the statement to prepare.
    :param sql: SQL query text.
    :param max_rows_total: optional cap on the total number of rows.
    :param first_frame_max_size: optional cap on rows in the first frame.
    :returns: result sets with the prepared signature and first frame data.
    """
    request = requests_pb2.PrepareAndExecuteRequest()
    request.connection_id = connection_id
    request.statement_id = statement_id
    request.sql = sql
    # optional limits are only set when explicitly provided
    if max_rows_total is not None:
        request.max_rows_total = max_rows_total
    if first_frame_max_size is not None:
        request.first_frame_max_size = first_frame_max_size
    response = responses_pb2.ExecuteResponse()
    response.ParseFromString(self._apply(request, 'ExecuteResponse'))
    return response.results
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepare(self, connection_id, sql, max_rows_total=None):
    """Prepare *sql* as a server-side statement.

    :param connection_id: ID of the current connection.
    :param sql: SQL query text.
    :param max_rows_total: optional cap on the number of rows.
    :returns: the signature of the prepared statement.
    """
    request = requests_pb2.PrepareRequest()
    request.connection_id = connection_id
    request.sql = sql
    if max_rows_total is not None:
        request.max_rows_total = max_rows_total
    response = responses_pb2.PrepareResponse()
    response.ParseFromString(self._apply(request))
    return response.statement
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    """Close the cursor; no further operations are allowed afterwards.

    Automatically invoked at the end of a ``with`` block.
    :raises ProgrammingError: when the cursor is already closed.
    """
    if self._closed:
        raise ProgrammingError('the cursor is already closed')
    if self._id is not None:
        # release the server-side statement before dropping local state
        self._connection._client.close_statement(self._connection._id, self._id)
    self._id = None
    self._signature = None
    self._column_data_types = []
    self._frame = None
    self._pos = None
    self._closed = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _transform_row(self, row):
"""Transforms a Row into Python values. :param row: A ``common_pb2.Row`` object. :returns: A list of values casted into the correct Python types. :raises: NotImplementedError """ |
tmp_row = []
for i, column in enumerate(row.value):
if column.has_array_value:
raise NotImplementedError('array types are not supported')
elif column.scalar_value.null:
tmp_row.append(None)
else:
field_name, rep, mutate_to, cast_from = self._column_data_types[i]
# get the value from the field_name
value = getattr(column.scalar_value, field_name)
# cast the value
if cast_from is not None:
value = cast_from(value)
tmp_row.append(value)
return tmp_row |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rownumber(self):
    """Current 0-based index of the cursor in the result set.

    Returns None when the index cannot be determined; the next fetch
    returns the row at this index.
    """
    if self._frame is None or self._pos is None:
        return self._pos
    # absolute position = frame offset + position within the frame
    return self._frame.offset + self._pos
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    """Close the connection and every cursor still open on it.

    Automatically invoked at the end of a ``with`` block.
    :raises ProgrammingError: when the connection is already closed.
    """
    if self._closed:
        raise ProgrammingError('the connection is already closed')
    # close surviving cursors first (held via weak references)
    for cursor_ref in self._cursors:
        live_cursor = cursor_ref()
        if live_cursor is not None and not live_cursor._closed:
            live_cursor.close()
    self._client.close_connection(self._id)
    self._client.close()
    self._closed = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cursor(self, cursor_factory=None):
    """Create and register a new cursor on this connection.

    :param cursor_factory:
        Optional callable producing the cursor (e.g. a Cursor subclass);
        when omitted, the connection's :attr:`cursor_factory` is used.
    :returns: the newly created cursor object.
    :raises ProgrammingError: if the connection is already closed.
    """
    if self._closed:
        raise ProgrammingError('the connection is already closed')
    factory = cursor_factory or self.cursor_factory
    new_cursor = factory(self)
    # Track the cursor weakly so garbage-collected cursors drop out of
    # the registry automatically via the remove callback.
    self._cursors.append(weakref.ref(new_cursor, self._cursors.remove))
    return new_cursor
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_session(self, autocommit=None, readonly=None):
    """Update one or more session parameters on this connection.

    :param autocommit:
        Enable/disable autocommit mode; ``None`` leaves it unchanged.
    :param readonly:
        Enable/disable read-only mode; ``None`` leaves it unchanged.
    """
    requested = {}
    if autocommit is not None:
        requested['autoCommit'] = bool(autocommit)
    if readonly is not None:
        requested['readOnly'] = bool(readonly)
    # The server echoes back the effective connection properties;
    # mirror them into the local state.
    effective = self._client.connection_sync(self._id, requested)
    self._autocommit = effective.auto_commit
    self._readonly = effective.read_only
    self._transactionisolation = effective.transaction_isolation
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def predict(self, X):
    """Predict target values for X by kernel-weighted averaging.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    y : array of shape = [n_samples]
        The predicted target value for each sample.
    """
    # Similarity of every training point to every query point.
    weights = pairwise_kernels(self.X, X, metric=self.kernel, gamma=self.gamma)
    # Nadaraya-Watson estimate: kernel-weighted mean of training targets.
    weighted_targets = (weights * self.y[:, None]).sum(axis=0)
    return weighted_targets / weights.sum(axis=0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_hidden_activations(self, X):
"""Compute hidden activations given X""" |
self._compute_input_activations(X)
acts = self.input_activations_
if (callable(self.activation_func)):
args_dict = self.activation_args if (self.activation_args) else {}
X_new = self.activation_func(acts, **args_dict)
else:
func_name = self.activation_func
func = self._internal_activation_funcs[func_name]
X_new = func(acts, **self._extra_args)
return X_new |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transform(self, X, y=None):
    """Generate the random hidden layer's activations for X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Data to transform.
    y : ignored; placeholder for Pipeline compatibility.

    Returns
    -------
    X_new : numpy array of shape [n_samples, n_components]

    Raises
    ------
    ValueError
        If the layer has not been fitted (no components generated).
    """
    if self.components_ is not None:
        return self._compute_hidden_activations(X)
    raise ValueError('No components initialized')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_radii(self):
"""Generate RBF radii""" |
# use supplied radii if present
radii = self._get_user_components('radii')
# compute radii
if (radii is None):
centers = self.components_['centers']
n_centers = centers.shape[0]
max_dist = np.max(pairwise_distances(centers))
radii = np.ones(n_centers) * max_dist/sqrt(2.0 * n_centers)
self.components_['radii'] = radii |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_centers(self, X, sparse, rs):
"""Generate RBF centers""" |
# use supplied centers if present
centers = self._get_user_components('centers')
# use points taken uniformly from the bounding
# hyperrectangle
if (centers is None):
n_features = X.shape[1]
if (sparse):
fxr = range(n_features)
cols = [X.getcol(i) for i in fxr]
min_dtype = X.dtype.type(1.0e10)
sp_min = lambda col: np.minimum(min_dtype, np.min(col.data))
min_Xs = np.array(map(sp_min, cols))
max_dtype = X.dtype.type(-1.0e10)
sp_max = lambda col: np.maximum(max_dtype, np.max(col.data))
max_Xs = np.array(map(sp_max, cols))
else:
min_Xs = X.min(axis=0)
max_Xs = X.max(axis=0)
spans = max_Xs - min_Xs
ctrs_size = (self.n_hidden, n_features)
centers = min_Xs + spans * rs.uniform(0.0, 1.0, ctrs_size)
self.components_['centers'] = centers |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_biases(self, rs):
"""Generate MLP biases""" |
# use supplied biases if present
biases = self._get_user_components('biases')
if (biases is None):
b_size = self.n_hidden
biases = rs.normal(size=b_size)
self.components_['biases'] = biases |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_weights(self, X, rs):
"""Generate MLP weights""" |
# use supplied weights if present
weights = self._get_user_components('weights')
if (weights is None):
n_features = X.shape[1]
hw_size = (n_features, self.n_hidden)
weights = rs.normal(size=hw_size)
self.components_['weights'] = weights |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _generate_components(self, X):
    """Generate all hidden-layer components (MLP and/or RBF) from X."""
    rs = check_random_state(self.random_state)
    if self._use_mlp_input:
        self._compute_biases(rs)
        self._compute_weights(X, rs)
    if self._use_rbf_input:
        is_sparse = sp.issparse(X)
        self._compute_centers(X, is_sparse, rs)
        self._compute_radii()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_input_activations(self, X):
"""Compute input activations given X""" |
n_samples = X.shape[0]
mlp_acts = np.zeros((n_samples, self.n_hidden))
if (self._use_mlp_input):
b = self.components_['biases']
w = self.components_['weights']
mlp_acts = self.alpha * (safe_sparse_dot(X, w) + b)
rbf_acts = np.zeros((n_samples, self.n_hidden))
if (self._use_rbf_input):
radii = self.components_['radii']
centers = self.components_['centers']
scale = self.rbf_width * (1.0 - self.alpha)
rbf_acts = scale * cdist(X, centers)/radii
self.input_activations_ = mlp_acts + rbf_acts |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compute_centers(self, X, sparse, rs):
    """Generate centers via the parent class, then derive tau values.

    dF is each center's distance to its farthest neighbour, dN its
    nearest-neighbour distance scaled down by 100; tau follows the GRBF
    formula tau = log(log(lambda) / log(1 - lambda)) / log(dF / dN).
    """
    super(GRBFRandomLayer, self)._compute_centers(X, sparse, rs)
    centers = self.components_['centers']
    # Row-sorted pairwise distances: column 0 is the self-distance (0),
    # column 1 the nearest neighbour, the last column the farthest.
    dists = np.sort(squareform(pdist(centers)))
    self.dF_vals = dists[:, -1]
    self.dN_vals = dists[:, 1] / 100.0
    numerator = np.log(np.log(self.grbf_lambda) /
                       np.log(1.0 - self.grbf_lambda))
    denominator = np.log(self.dF_vals / self.dN_vals)
    self.tau_vals = numerator / denominator
    self._extra_args['taus'] = self.tau_vals
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fit_regression(self, y):
""" fit regression using pseudo-inverse or supplied regressor """ |
if self.regressor is None:
self.coefs_ = safe_sparse_dot(pinv2(self.hidden_activations_), y)
else:
self.regressor.fit(self.hidden_activations_, y)
self.fitted_ = True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_random_layer(self):
    """Build a RandomLayer mirroring this estimator's init parameters."""
    layer_params = dict(n_hidden=self.n_hidden,
                        alpha=self.alpha,
                        random_state=self.random_state,
                        activation_func=self.activation_func,
                        activation_args=self.activation_args,
                        user_components=self.user_components,
                        rbf_width=self.rbf_width)
    return RandomLayer(**layer_params)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def score(self, X, y):
    """Return classification accuracy on (X, y).

    Defined explicitly because this class does not inherit from
    ClassifierMixin, which would normally provide it.
    """
    # Imported locally, matching the original lazy-import behavior.
    from sklearn.metrics import accuracy_score
    predictions = self.predict(X)
    return accuracy_score(y, predictions)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compat_serializer_attr(serializer, obj):
    """Compatibility shim for DRF 3.1, which does not expose dynamically
    added attributes on ``obj`` inside a serializer: look ``obj`` up by
    id among the serializer's instances so the decorated copy is used.
    A quick workaround that works without breaking anything.

    NOTE(review): under DRF 3.1, if no instance matches ``obj.id`` this
    implicitly returns ``None`` — presumably callers guard with
    try/except; confirm before relying on it.
    """
    if DRFVLIST[0] == 3 and DRFVLIST[1] == 1:
        return next((inst for inst in serializer.instance
                     if inst.id == obj.id), None)
    return obj
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compat_get_paginated_response(view, page):
    """Compatibility shim: ``get_paginated_response`` is unknown to
    DRF 3.0, so fall back to the old pagination-serializer API there."""
    if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
        # Imported lazily to avoid a circular import.
        from rest_messaging.serializers import ComplexMessageSerializer
        serializer = ComplexMessageSerializer(page, many=True)
        return view.get_paginated_response(serializer.data)
    # DRF 3.0 path: build the response from the pagination serializer.
    serializer = view.get_pagination_serializer(page)
    return Response(serializer.data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_participants(self, obj):
    """Serialize the thread's participants.

    Without a callback, returns the participant ids only (many-to-many
    detail is reserved for retrieve requests); with a callback, the
    serialization of user information is delegated entirely to it.
    """
    if self.callback is not None:
        return self.callback(obj)
    return [participant.id for participant in obj.participants.all()]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_is_notification(self, obj):
    """Tell whether the message should trigger a notification.

    Falls back to ``False`` whenever the decorated attribute cannot be
    resolved (missing attribute, compat lookup failure, etc.).
    """
    try:
        decorated = compat_serializer_attr(self, obj)
        return decorated.is_notification
    except Exception:
        return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_readers(self, obj):
    """Return the ids of the people who read the message instance.

    Falls back to an empty list whenever the decorated attribute cannot
    be resolved (missing attribute, compat lookup failure, etc.).
    """
    try:
        decorated = compat_serializer_attr(self, obj)
        return decorated.readers
    except Exception:
        return []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_threads_where_participant_is_active(self, participant_id):
    """Get all the threads in which the given participant is involved,
    excluding threads the participant has already left."""
    active_participations = Participation.objects \
        .filter(participant__id=participant_id) \
        .exclude(date_left__lte=now()) \
        .distinct() \
        .select_related('thread')
    thread_ids = [participation.thread.id
                  for participation in active_participations]
    return Thread.objects.filter(id__in=thread_ids).distinct()
Subsets and Splits