<SYSTEM_TASK:>
Used to get component DOI from a tag and confirm it is actually for that tag
<END_TASK>
<USER_TASK:>
Description:
def extract_component_doi(tag, nodenames):
    """
    Used to get component DOI from a tag and confirm it is actually for that tag
    and it is not for one of its children in the list of nodenames
    """ |
    component_doi = None
    if tag.name == "sub-article":
        component_doi = doi_uri_to_doi(node_text(first(raw_parser.article_id(tag, pub_id_type="doi"))))
    else:
        object_id_tag = first(raw_parser.object_id(tag, pub_id_type="doi"))
        # Tweak: if it is media and has no object_id_tag then it is not a "component"
        if tag.name == "media" and not object_id_tag:
            component_doi = None
        else:
            # Check the object id is for this tag and not one of its children
            # This happens for example when boxed text has a child figure;
            # the boxed text does not have a DOI, the figure does have one
            if object_id_tag and first_parent(object_id_tag, nodenames).name == tag.name:
                component_doi = doi_uri_to_doi(node_text(object_id_tag))
    return component_doi |
<SYSTEM_TASK:>
Find the corresp tags included in author-notes
<END_TASK>
<USER_TASK:>
Description:
def correspondence(soup):
    """
    Find the corresp tags included in author-notes
    for primary correspondence
    """ |
    correspondence = []
    author_notes_nodes = raw_parser.author_notes(soup)
    if author_notes_nodes:
        corresp_nodes = raw_parser.corresp(author_notes_nodes)
        for tag in corresp_nodes:
            correspondence.append(tag.text)
    return correspondence |
<SYSTEM_TASK:>
Find the award group award id, one for each
<END_TASK>
<USER_TASK:>
Description:
def award_group_award_id(tag):
    """
    Find the award group award id, one for each
    item found in the get_funding_group section
    """ |
    award_group_award_id = []
    award_id_tags = extract_nodes(tag, "award-id")
    for t in award_id_tags:
        award_group_award_id.append(t.text)
    return award_group_award_id |
<SYSTEM_TASK:>
Find the award group principal award recipient, one for each
<END_TASK>
<USER_TASK:>
Description:
def award_group_principal_award_recipient(tag):
    """
    Find the award group principal award recipient, one for each
    item found in the get_funding_group section
    """ |
    award_group_principal_award_recipient = []
    principal_award_recipients = extract_nodes(tag, "principal-award-recipient")
    for t in principal_award_recipients:
        principal_award_recipient_text = ""
        institution = node_text(first(extract_nodes(t, "institution")))
        surname = node_text(first(extract_nodes(t, "surname")))
        given_names = node_text(first(extract_nodes(t, "given-names")))
        string_name = node_text(first(raw_parser.string_name(t)))
        # Concatenate name and institution values if found,
        # while filtering out excess whitespace
        if given_names:
            principal_award_recipient_text += given_names
        if principal_award_recipient_text != "":
            principal_award_recipient_text += " "
        if surname:
            principal_award_recipient_text += surname
        if institution:
            principal_award_recipient_text += institution
        if string_name:
            principal_award_recipient_text += string_name
        award_group_principal_award_recipient.append(principal_award_recipient_text)
    return award_group_principal_award_recipient |
<SYSTEM_TASK:>
DOI in an object-id tag found inside the tag
<END_TASK>
<USER_TASK:>
Description:
def object_id_doi(tag, parent_tag_name=None):
    """DOI in an object-id tag found inside the tag""" |
    doi = None
    object_id = None
    object_ids = raw_parser.object_id(tag, "doi")
    if object_ids:
        object_id = first([id_ for id_ in object_ids])
    if parent_tag_name and object_id and object_id.parent.name != parent_tag_name:
        object_id = None
    if object_id:
        doi = node_contents_str(object_id)
    return doi |
<SYSTEM_TASK:>
Extract the title tag and sometimes inspect its parents
<END_TASK>
<USER_TASK:>
Description:
def title_tag_inspected(tag, parent_tag_name=None, p_parent_tag_name=None, direct_sibling_only=False):
    """Extract the title tag and sometimes inspect its parents""" |
    title_tag = None
    if direct_sibling_only is True:
        for sibling_tag in tag:
            if sibling_tag.name and sibling_tag.name == "title":
                title_tag = sibling_tag
    else:
        title_tag = raw_parser.title(tag)
        if parent_tag_name and p_parent_tag_name:
            if (title_tag and title_tag.parent.name and title_tag.parent.parent.name
                    and title_tag.parent.name == parent_tag_name
                    and title_tag.parent.parent.name == p_parent_tag_name):
                pass
            else:
                title_tag = None
    return title_tag |
<SYSTEM_TASK:>
Extract the text of a title tag and sometimes inspect its parents
<END_TASK>
<USER_TASK:>
Description:
def title_text(tag, parent_tag_name=None, p_parent_tag_name=None, direct_sibling_only=False):
    """Extract the text of a title tag and sometimes inspect its parents""" |
    title = None
    title_tag = title_tag_inspected(tag, parent_tag_name, p_parent_tag_name, direct_sibling_only)
    if title_tag:
        title = node_contents_str(title_tag)
    return title |
<SYSTEM_TASK:>
Get body json and then alter it with section wrapping and removing boxed-text
<END_TASK>
<USER_TASK:>
Description:
def body_json(soup, base_url=None):
    """Get body json and then alter it with section wrapping and removing boxed-text""" |
    body_content = body(soup, remove_key_info_box=True, base_url=base_url)
    # Wrap in a section if the first block is not a section
    if (body_content and len(body_content) > 0 and "type" in body_content[0]
            and body_content[0]["type"] != "section"):
        # Wrap this one
        new_body_section = OrderedDict()
        new_body_section["type"] = "section"
        new_body_section["id"] = "s0"
        new_body_section["title"] = "Main text"
        new_body_section["content"] = []
        for body_block in body_content:
            new_body_section["content"].append(body_block)
        new_body = []
        new_body.append(new_body_section)
        body_content = new_body
    body_content_rewritten = elifetools.json_rewrite.rewrite_json("body_json", soup, body_content)
    return body_content_rewritten |
<SYSTEM_TASK:>
Render the tag as body content and call recursively if
<END_TASK>
<USER_TASK:>
Description:
def body_block_content_render(tag, recursive=False, base_url=None):
    """
    Render the tag as body content and call recursively if
    the tag has child tags
    """ |
    block_content_list = []
    tag_content = OrderedDict()
    if tag.name == "p":
        for block_content in body_block_paragraph_render(tag, base_url=base_url):
            if block_content != {}:
                block_content_list.append(block_content)
    else:
        tag_content = body_block_content(tag, base_url=base_url)
    nodenames = body_block_nodenames()
    tag_content_content = []
    # Collect the content of the tag, but only for some tags
    if tag.name not in ["p", "fig", "table-wrap", "list", "media", "disp-quote", "code"]:
        for child_tag in tag:
            if not hasattr(child_tag, 'name'):
                continue
            if child_tag.name == "p":
                # Ignore paragraphs that start with DOI:
                if node_text(child_tag) and len(remove_doi_paragraph([child_tag])) <= 0:
                    continue
                for block_content in body_block_paragraph_render(child_tag, base_url=base_url):
                    if block_content != {}:
                        tag_content_content.append(block_content)
            elif child_tag.name == "fig" and tag.name == "fig-group":
                # Do not render a fig inside fig-group a second time
                pass
            elif child_tag.name == "media" and tag.name == "fig-group":
                # Do not include a media video inside fig-group a second time
                if child_tag.get("mimetype") == "video":
                    pass
            else:
                for block_content in body_block_content_render(child_tag, recursive=True, base_url=base_url):
                    if block_content != {}:
                        tag_content_content.append(block_content)
    if len(tag_content_content) > 0:
        if tag.name in nodenames or recursive is False:
            tag_content["content"] = []
            for block_content in tag_content_content:
                tag_content["content"].append(block_content)
            block_content_list.append(tag_content)
        else:
            # Not a block tag, e.g. a caption tag, so let the content pass through
            block_content_list = tag_content_content
    else:
        block_content_list.append(tag_content)
    return block_content_list |
<SYSTEM_TASK:>
paragraphs may wrap some other body block content
<END_TASK>
<USER_TASK:>
Description:
def body_block_paragraph_render(p_tag, html_flag=True, base_url=None):
    """
    Paragraphs may wrap some other body block content;
    this is separated out so it can be called from more than one place
    """ |
    # Configure the XML to HTML conversion preference for shorthand use below
    convert = lambda xml_string: xml_to_html(html_flag, xml_string, base_url)
    block_content_list = []
    tag_content_content = []
    nodenames = body_block_nodenames()
    paragraph_content = u''
    for child_tag in p_tag:
        if child_tag.name is None or body_block_content(child_tag) == {}:
            paragraph_content = paragraph_content + unicode_value(child_tag)
        else:
            # Add previous paragraph content first
            if paragraph_content.strip() != '':
                tag_content_content.append(body_block_paragraph_content(convert(paragraph_content)))
                paragraph_content = u''
        if child_tag.name is not None and body_block_content(child_tag) != {}:
            for block_content in body_block_content_render(child_tag, base_url=base_url):
                if block_content != {}:
                    tag_content_content.append(block_content)
    # finish up
    if paragraph_content.strip() != '':
        tag_content_content.append(body_block_paragraph_content(convert(paragraph_content)))
    if len(tag_content_content) > 0:
        for block_content in tag_content_content:
            block_content_list.append(block_content)
    return block_content_list |
<SYSTEM_TASK:>
fig and media tag captions are similar so use this common function
<END_TASK>
<USER_TASK:>
Description:
def body_block_caption_render(caption_tags, base_url=None):
    """fig and media tag captions are similar so use this common function""" |
    caption_content = []
    supplementary_material_tags = []
    for block_tag in remove_doi_paragraph(caption_tags):
        # Note then skip p tags with supplementary-material inside
        if raw_parser.supplementary_material(block_tag):
            for supp_tag in raw_parser.supplementary_material(block_tag):
                supplementary_material_tags.append(supp_tag)
            continue
        for block_content in body_block_content_render(block_tag, base_url=base_url):
            if block_content != {}:
                caption_content.append(block_content)
    return caption_content, supplementary_material_tags |
<SYSTEM_TASK:>
fig and media tag caption may have supplementary material
<END_TASK>
<USER_TASK:>
Description:
def body_block_supplementary_material_render(supp_tags, base_url=None):
    """fig and media tag captions may have supplementary material""" |
    source_data = []
    for supp_tag in supp_tags:
        for block_content in body_block_content_render(supp_tag, base_url=base_url):
            if block_content != {}:
                if "content" in block_content:
                    del block_content["content"]
                source_data.append(block_content)
    return source_data |
<SYSTEM_TASK:>
set the title, label and caption values in a consistent way
<END_TASK>
<USER_TASK:>
Description:
def body_block_title_label_caption(tag_content, title_value, label_value,
                                   caption_content, set_caption=True, prefer_title=False, prefer_label=False):
    """set the title, label and caption values in a consistent way

    set_caption: insert a "caption" field
    prefer_title: when only one value is available, set title rather than label. If False, set label rather than title""" |
    set_if_value(tag_content, "label", rstrip_punctuation(label_value))
    set_if_value(tag_content, "title", title_value)
    if set_caption is True and caption_content and len(caption_content) > 0:
        tag_content["caption"] = caption_content
    if prefer_title:
        if "title" not in tag_content and label_value:
            set_if_value(tag_content, "title", label_value)
            del tag_content["label"]
    if prefer_label:
        if "label" not in tag_content and title_value:
            set_if_value(tag_content, "label", rstrip_punctuation(title_value))
            del tag_content["title"] |
<SYSTEM_TASK:>
compile author affiliations for json output
<END_TASK>
<USER_TASK:>
Description:
def author_affiliations(author, html_flag=True):
    """compile author affiliations for json output""" |
    # Configure the XML to HTML conversion preference for shorthand use below
    convert = lambda xml_string: xml_to_html(html_flag, xml_string)
    affiliations = []
    if author.get("affiliations"):
        for affiliation in author.get("affiliations"):
            affiliation_json = OrderedDict()
            affiliation_json["name"] = []
            if affiliation.get("dept"):
                affiliation_json["name"].append(convert(affiliation.get("dept")))
            if affiliation.get("institution") and affiliation.get("institution").strip() != '':
                affiliation_json["name"].append(convert(affiliation.get("institution")))
            # Remove if empty
            if affiliation_json["name"] == []:
                del affiliation_json["name"]
            if ((affiliation.get("city") and affiliation.get("city").strip() != '')
                    or (affiliation.get("country") and affiliation.get("country").strip() != '')):
                affiliation_address = OrderedDict()
                affiliation_address["formatted"] = []
                affiliation_address["components"] = OrderedDict()
                if affiliation.get("city") and affiliation.get("city").strip() != '':
                    affiliation_address["formatted"].append(affiliation.get("city"))
                    affiliation_address["components"]["locality"] = []
                    affiliation_address["components"]["locality"].append(affiliation.get("city"))
                if affiliation.get("country") and affiliation.get("country").strip() != '':
                    affiliation_address["formatted"].append(affiliation.get("country"))
                    affiliation_address["components"]["country"] = affiliation.get("country")
                # Add if not empty
                if affiliation_address != {}:
                    affiliation_json["address"] = affiliation_address
            # Add if not empty
            if affiliation_json != {}:
                affiliations.append(affiliation_json)
    if affiliations != []:
        return affiliations
    else:
        return None |
<SYSTEM_TASK:>
add more author json
<END_TASK>
<USER_TASK:>
Description:
def author_json_details(author, author_json, contributions, correspondence,
                        competing_interests, equal_contributions_map, present_address_data,
                        foot_notes_data, html_flag=True):
    """add more author json""" |
    # Configure the XML to HTML conversion preference for shorthand use below
    convert = lambda xml_string: xml_to_html(html_flag, xml_string)
    if author_affiliations(author):
        author_json["affiliations"] = author_affiliations(author)
    # foot notes or additionalInformation
    if author_foot_notes(author, foot_notes_data):
        author_json["additionalInformation"] = author_foot_notes(author, foot_notes_data)
    # email
    if author_email_addresses(author, correspondence):
        author_json["emailAddresses"] = author_email_addresses(author, correspondence)
    # phone
    if author_phone_numbers(author, correspondence):
        author_json["phoneNumbers"] = author_phone_numbers_json(author, correspondence)
    # contributions
    if author_contribution(author, contributions):
        author_json["contribution"] = convert(author_contribution(author, contributions))
    # competing interests
    if author_competing_interests(author, competing_interests):
        author_json["competingInterests"] = convert(
            author_competing_interests(author, competing_interests))
    # equal-contributions
    if author_equal_contribution(author, equal_contributions_map):
        author_json["equalContributionGroups"] = author_equal_contribution(author, equal_contributions_map)
    # postalAddress
    if author_present_address(author, present_address_data):
        author_json["postalAddresses"] = author_present_address(author, present_address_data)
    return author_json |
<SYSTEM_TASK:>
compile a map of author collab to group-author-key
<END_TASK>
<USER_TASK:>
Description:
def collab_to_group_author_key_map(authors):
    """compile a map of author collab to group-author-key""" |
    collab_map = {}
    for author in authors:
        if author.get("collab"):
            collab_map[author.get("collab")] = author.get("group-author-key")
    return collab_map |
<SYSTEM_TASK:>
assign numeric values to each unique equal-contrib id
<END_TASK>
<USER_TASK:>
Description:
def map_equal_contributions(contributors):
    """assign numeric values to each unique equal-contrib id""" |
    equal_contribution_map = {}
    equal_contribution_keys = []
    for contributor in contributors:
        if contributor.get("references") and "equal-contrib" in contributor.get("references"):
            for key in contributor["references"]["equal-contrib"]:
                if key not in equal_contribution_keys:
                    equal_contribution_keys.append(key)
    # Do a basic sort
    equal_contribution_keys = sorted(equal_contribution_keys)
    # Assign keys based on sorted values
    for i, equal_contribution_key in enumerate(equal_contribution_keys):
        equal_contribution_map[equal_contribution_key] = i + 1
    return equal_contribution_map |
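A quick illustrative check of the mapping; the contributor dicts here are made up, shaped the way the lookups above expect:

contributors = [
    {"references": {"equal-contrib": ["equal-contrib2"]}},
    {"references": {"equal-contrib": ["equal-contrib1", "equal-contrib2"]}},
]
# unique keys are sorted, then numbered starting from 1
assert map_equal_contributions(contributors) == {
    "equal-contrib1": 1, "equal-contrib2": 2}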
<SYSTEM_TASK:>
authors list in article json format
<END_TASK>
<USER_TASK:>
Description:
def authors_json(soup):
    """authors list in article json format""" |
    authors_json_data = []
    contributors_data = contributors(soup, "full")
    author_contributions_data = author_contributions(soup, None)
    author_competing_interests_data = competing_interests(soup, None)
    author_correspondence_data = full_correspondence(soup)
    authors_non_byline_data = authors_non_byline(soup)
    equal_contributions_map = map_equal_contributions(contributors_data)
    present_address_data = present_addresses(soup)
    foot_notes_data = other_foot_notes(soup)
    # First pass builds the basic structure for each author
    for contributor in contributors_data:
        author_json = None
        if contributor["type"] == "author" and contributor.get("collab"):
            author_json = author_group(contributor, author_contributions_data,
                                       author_correspondence_data, author_competing_interests_data,
                                       equal_contributions_map, present_address_data,
                                       foot_notes_data)
        elif contributor.get("on-behalf-of"):
            author_json = author_on_behalf_of(contributor)
        elif contributor["type"] == "author" and not contributor.get("group-author-key"):
            author_json = author_person(contributor, author_contributions_data,
                                        author_correspondence_data, author_competing_interests_data,
                                        equal_contributions_map, present_address_data, foot_notes_data)
        if author_json:
            authors_json_data.append(author_json)
    # Second, add byline author data
    collab_map = collab_to_group_author_key_map(contributors_data)
    for contributor in [elem for elem in contributors_data if elem.get("group-author-key") and not elem.get("collab")]:
        for group_author in [elem for elem in authors_json_data if elem.get('type') == 'group']:
            group_author_key = None
            if group_author["name"] in collab_map:
                group_author_key = collab_map[group_author["name"]]
            if contributor.get("group-author-key") == group_author_key:
                author_json = author_person(contributor, author_contributions_data,
                                            author_correspondence_data, author_competing_interests_data,
                                            equal_contributions_map, present_address_data, foot_notes_data)
                if contributor.get("sub-group"):
                    if "groups" not in group_author:
                        group_author["groups"] = OrderedDict()
                    if contributor.get("sub-group") not in group_author["groups"]:
                        group_author["groups"][contributor.get("sub-group")] = []
                    group_author["groups"][contributor.get("sub-group")].append(author_json)
                else:
                    if "people" not in group_author:
                        group_author["people"] = []
                    group_author["people"].append(author_json)
    authors_json_data_rewritten = elifetools.json_rewrite.rewrite_json("authors_json", soup, authors_json_data)
    return authors_json_data_rewritten |
<SYSTEM_TASK:>
take preferred names from authors json and format them into an author line
<END_TASK>
<USER_TASK:>
Description:
def author_line(soup):
    """take preferred names from authors json and format them into an author line""" |
    author_line = None
    authors_json_data = authors_json(soup)
    author_names = extract_author_line_names(authors_json_data)
    if len(author_names) > 0:
        author_line = format_author_line(author_names)
    return author_line |
<SYSTEM_TASK:>
authorLine format depends on if there is 1, 2 or more than 2 authors
<END_TASK>
<USER_TASK:>
Description:
def format_author_line(author_names):
    """authorLine format depends on whether there are 1, 2 or more than 2 authors""" |
    author_line = None
    if not author_names:
        return author_line
    if len(author_names) <= 2:
        author_line = ", ".join(author_names)
    elif len(author_names) > 2:
        author_line = author_names[0] + " et al."
    return author_line |
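For illustration, the three branches behave like this (the names are made up):

assert format_author_line(["Jane Doe"]) == "Jane Doe"
assert format_author_line(["Jane Doe", "John Doe"]) == "Jane Doe, John Doe"
assert format_author_line(["Jane Doe", "John Doe", "Jo Soap"]) == "Jane Doe et al."
assert format_author_line([]) is None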
<SYSTEM_TASK:>
for use in removing unwanted boxed-content from appendices json
<END_TASK>
<USER_TASK:>
Description:
def unwrap_appendix_box(json_content):
    """for use in removing unwanted boxed-content from appendices json""" |
    if json_content.get("content") and len(json_content["content"]) > 0:
        first_block = json_content["content"][0]
        if (first_block.get("type")
                and first_block.get("type") == "box"
                and first_block.get("content")):
            if first_block.get("doi") and not json_content.get("doi"):
                json_content["doi"] = first_block.get("doi")
            json_content["content"] = first_block["content"]
    return json_content |
<SYSTEM_TASK:>
Get simple assignments from node tree.
<END_TASK>
<USER_TASK:>
Description:
def _get_simple_assignments(tree):
    """Get simple assignments from node tree.""" |
    result = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Assign):
            for target in node.targets:
                if isinstance(target, ast.Name):
                    result[target.id] = node.value
    return result |
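A small sketch of what gets captured: only assignments whose target is a plain name are recorded, keyed by that name (the parsed source is made up):

import ast

tree = ast.parse("x = 1\ny, z = 2, 3")
result = _get_simple_assignments(tree)
print(list(result))  # ['x'] -- the tuple assignment is skipped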
<SYSTEM_TASK:>
Render a value, ensuring that any nested dicts are sorted by key.
<END_TASK>
<USER_TASK:>
Description:
def render_value(value):
    """Render a value, ensuring that any nested dicts are sorted by key.""" |
    if isinstance(value, list):
        return '[' + ', '.join(render_value(v) for v in value) + ']'
    elif isinstance(value, dict):
        return (
            '{' +
            ', '.join('{k!r}: {v}'.format(
                k=k, v=render_value(v)) for k, v in sorted(value.items())) +
            '}')
    else:
        return repr(value) |
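Sketch of the behaviour: lists recurse element-wise, dicts come out key-sorted, and everything else falls back to repr():

assert render_value([1, 'a']) == "[1, 'a']"
assert render_value({'b': 2, 'a': 1}) == "{'a': 1, 'b': 2}"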
<SYSTEM_TASK:>
Render syntactically valid python service double code.
<END_TASK>
<USER_TASK:>
Description:
def write_service_double_file(target_root, service_name, rendered):
    """Write rendered python service double code to its target file.""" |
    target_path = os.path.join(
        target_root,
        'snapstore_schemas', 'service_doubles', '%s.py' % service_name
    )
    with open(target_path, 'w') as target_file:
        target_file.write(rendered) |
<SYSTEM_TASK:>
Recursively sorts a JSON schema by dict key.
<END_TASK>
<USER_TASK:>
Description:
def _sort_schema(schema):
    """Recursively sorts a JSON schema by dict key.""" |
    if isinstance(schema, dict):
        for k, v in sorted(schema.items()):
            if isinstance(v, dict):
                yield k, OrderedDict(_sort_schema(v))
            elif isinstance(v, list):
                yield k, list(_sort_schema(v))
            else:
                yield k, v
    elif isinstance(schema, list):
        for v in schema:
            if isinstance(v, dict):
                yield OrderedDict(_sort_schema(v))
            elif isinstance(v, list):
                yield list(_sort_schema(v))
            else:
                yield v
    else:
        yield schema |
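Callers wrap the generator back into a container; a quick sketch with a made-up schema:

from collections import OrderedDict

schema = {'b': 1, 'a': {'d': 2, 'c': 3}}
print(OrderedDict(_sort_schema(schema)))
# OrderedDict([('a', OrderedDict([('c', 3), ('d', 2)])), ('b', 1)])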
<SYSTEM_TASK:>
Returns a JSON Schema representation of a form field.
<END_TASK>
<USER_TASK:>
Description:
def get_field_schema(name, field):
    """Returns a JSON Schema representation of a form field.""" |
    field_schema = {
        'type': 'string',
    }
    if field.label:
        field_schema['title'] = str(field.label)  # force translation
    if field.help_text:
        field_schema['description'] = str(field.help_text)  # force translation
    if isinstance(field, (fields.URLField, fields.FileField)):
        field_schema['format'] = 'uri'
    elif isinstance(field, fields.EmailField):
        field_schema['format'] = 'email'
    elif isinstance(field, fields.DateTimeField):
        field_schema['format'] = 'date-time'
    elif isinstance(field, fields.DateField):
        field_schema['format'] = 'date'
    elif isinstance(field, (fields.DecimalField, fields.FloatField)):
        field_schema['type'] = 'number'
    elif isinstance(field, fields.IntegerField):
        field_schema['type'] = 'integer'
    elif isinstance(field, fields.NullBooleanField):
        field_schema['type'] = 'boolean'
    elif isinstance(field.widget, widgets.CheckboxInput):
        field_schema['type'] = 'boolean'
    if getattr(field, 'choices', []):
        field_schema['enum'] = sorted([choice[0] for choice in field.choices])
    # check for multiple values
    if isinstance(field.widget, (widgets.Select, widgets.ChoiceWidget)):
        if field.widget.allow_multiple_selected:
            # promote to array of <type>, move details into the items field
            field_schema['items'] = {
                'type': field_schema['type'],
            }
            if 'enum' in field_schema:
                field_schema['items']['enum'] = field_schema.pop('enum')
            field_schema['type'] = 'array'
    return field_schema |
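A hedged sketch of the mapping, assuming the fields/widgets names above come from django.forms (the form and field names here are illustrative):

from django import forms

class SignupForm(forms.Form):
    email = forms.EmailField(label='Email')
    age = forms.IntegerField(help_text='Years')

form = SignupForm()
print(get_field_schema('email', form.fields['email']))
# {'type': 'string', 'title': 'Email', 'format': 'email'}
print(get_field_schema('age', form.fields['age']))
# {'type': 'integer', 'description': 'Years'}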
<SYSTEM_TASK:>
Validate the body of incoming requests for a flask view.
<END_TASK>
<USER_TASK:>
Description:
def validate_body(schema):
    """Validate the body of incoming requests for a flask view.

    An example usage might look like this::

        from snapstore_schemas import validate_body

        @validate_body({
            'type': 'array',
            'items': {
                'type': 'object',
                'properties': {
                    'snap_id': {'type': 'string'},
                    'series': {'type': 'string'},
                    'name': {'type': 'string'},
                    'title': {'type': 'string'},
                    'keywords': {
                        'type': 'array',
                        'items': {'type': 'string'}
                    },
                    'summary': {'type': 'string'},
                    'description': {'type': 'string'},
                    'created_at': {'type': 'string'},
                },
                'required': ['snap_id', 'series'],
                'additionalProperties': False
            }
        })
        def my_flask_view():
            # view code here
            return "Hello World", 200

    All incoming requests that have been routed to this view will be matched
    against the specified schema. If the request body does not match the schema
    an instance of `DataValidationError` will be raised.

    By default this will cause the flask application to return a 500 response,
    but this can be customised by telling flask how to handle these exceptions.
    The exception instance has an 'error_list' attribute that contains a list
    of all the errors encountered while processing the request body.
    """ |
    location = get_callsite_location()

    def decorator(fn):
        validate_schema(schema)
        wrapper = wrap_request(fn, schema)
        record_schemas(
            fn, wrapper, location, request_schema=sort_schema(schema))
        return wrapper

    return decorator |
<SYSTEM_TASK:>
Support extracting the schema from the decorated function.
<END_TASK>
<USER_TASK:>
Description:
def record_schemas(
        fn, wrapper, location, request_schema=None, response_schema=None):
    """Support extracting the schema from the decorated function.""" |
    # have we already been decorated by an acceptable api call?
    has_acceptable = hasattr(fn, '_acceptable_metadata')
    if request_schema is not None:
        # preserve schema for later use
        wrapper._request_schema = request_schema
        wrapper._request_schema_location = location
        if has_acceptable:
            fn._acceptable_metadata._request_schema = request_schema
            fn._acceptable_metadata._request_schema_location = location
    if response_schema is not None:
        # preserve schema for later use
        wrapper._response_schema = response_schema
        wrapper._response_schema_location = location
        if has_acceptable:
            fn._acceptable_metadata._response_schema = response_schema
            fn._acceptable_metadata._response_schema_location = location |
<SYSTEM_TASK:>
Validate the body of a response from a flask view.
<END_TASK>
<USER_TASK:>
Description:
def validate_output(schema):
    """Validate the body of a response from a flask view.

    Like `validate_body`, this function compares a json document to a
    jsonschema specification. However, this function applies the schema to the
    view response.

    Instead of the view returning a flask response object, it should instead
    return a Python list or dictionary. For example::

        from snapstore_schemas import validate_output

        @validate_output({
            'type': 'object',
            'properties': {
                'ok': {'type': 'boolean'},
            },
            'required': ['ok'],
            'additionalProperties': False
        })
        def my_flask_view():
            # view code here
            return {'ok': True}

    Every view response will be evaluated against the schema. Any that do not
    comply with the schema will cause DataValidationError to be raised.
    """ |
    location = get_callsite_location()

    def decorator(fn):
        validate_schema(schema)
        wrapper = wrap_response(fn, schema)
        record_schemas(
            fn, wrapper, location, response_schema=sort_schema(schema))
        return wrapper

    return decorator |
<SYSTEM_TASK:>
Validate `payload` against `schema`, returning an error list.
<END_TASK>
<USER_TASK:>
Description:
def validate(payload, schema):
    """Validate `payload` against `schema`, returning an error list.

    jsonschema provides lots of information in its errors, but it can be a bit
    of work to extract all the information.
    """ |
    v = jsonschema.Draft4Validator(
        schema, format_checker=jsonschema.FormatChecker())
    error_list = []
    for error in v.iter_errors(payload):
        message = error.message
        location = '/' + '/'.join([str(c) for c in error.absolute_path])
        error_list.append(message + ' at ' + location)
    return error_list |
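For example, a type mismatch comes back as a message plus a path-like location (payload and schema here are made up):

schema = {
    'type': 'object',
    'properties': {'ok': {'type': 'boolean'}},
    'required': ['ok'],
}
print(validate({'ok': 'yes'}, schema))
# ["'yes' is not of type 'boolean' at /ok"]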
<SYSTEM_TASK:>
Connects to a Phoenix query server.
<END_TASK>
<USER_TASK:>
Description:
def connect(url, max_retries=None, **kwargs):
    """Connects to a Phoenix query server.

    :param url:
        URL to the Phoenix query server, e.g. ``http://localhost:8765/``
    :param autocommit:
        Switch the connection to autocommit mode.
    :param readonly:
        Switch the connection to readonly mode.
    :param max_retries:
        The maximum number of retries in case there is a connection error.
    :param cursor_factory:
        If specified, the connection's :attr:`~phoenixdb.connection.Connection.cursor_factory` is set to it.
    :returns:
        :class:`~phoenixdb.connection.Connection` object.
    """ |
    client = AvaticaClient(url, max_retries=max_retries)
    client.connect()
    return Connection(client, **kwargs) |
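Typical DB-API usage, assuming a Phoenix query server running on localhost and an existing table (the table name is illustrative):

import phoenixdb

conn = phoenixdb.connect('http://localhost:8765/', autocommit=True)
cursor = conn.cursor()
cursor.execute("SELECT * FROM users")
print(cursor.fetchall())
conn.close()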
<SYSTEM_TASK:>
Opens a HTTP connection to the RPC server.
<END_TASK>
<USER_TASK:>
Description:
def connect(self):
    """Opens an HTTP connection to the RPC server.""" |
    logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
    try:
        self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
        self.connection.connect()
    except (httplib.HTTPException, socket.error) as e:
        raise errors.InterfaceError('Unable to connect to the specified service', e) |
<SYSTEM_TASK:>
Closes the HTTP connection to the RPC server.
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    """Closes the HTTP connection to the RPC server.""" |
    if self.connection is not None:
        logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
        try:
            self.connection.close()
        except httplib.HTTPException:
            logger.warning("Error while closing connection", exc_info=True)
        self.connection = None |
<SYSTEM_TASK:>
Synchronizes connection properties with the server.
<END_TASK>
<USER_TASK:>
Description:
def connection_sync(self, connection_id, connProps=None):
    """Synchronizes connection properties with the server.

    :param connection_id:
        ID of the current connection.
    :param connProps:
        Dictionary with the properties that should be changed.
    :returns:
        A ``common_pb2.ConnectionProperties`` object.
    """ |
    if connProps is None:
        connProps = {}
    request = requests_pb2.ConnectionSyncRequest()
    request.connection_id = connection_id
    request.conn_props.auto_commit = connProps.get('autoCommit', False)
    request.conn_props.has_auto_commit = True
    request.conn_props.read_only = connProps.get('readOnly', False)
    request.conn_props.has_read_only = True
    request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
    request.conn_props.catalog = connProps.get('catalog', '')
    request.conn_props.schema = connProps.get('schema', '')
    response_data = self._apply(request)
    response = responses_pb2.ConnectionSyncResponse()
    response.ParseFromString(response_data)
    return response.conn_props |
<SYSTEM_TASK:>
Opens a new connection.
<END_TASK>
<USER_TASK:>
Description:
def open_connection(self, connection_id, info=None):
    """Opens a new connection.

    :param connection_id:
        ID of the connection to open.
    """ |
    request = requests_pb2.OpenConnectionRequest()
    request.connection_id = connection_id
    if info is not None:
        # Info is a list of repeated pairs, setting a dict directly fails
        for k, v in info.items():
            request.info[k] = v
    response_data = self._apply(request)
    response = responses_pb2.OpenConnectionResponse()
    response.ParseFromString(response_data) |
<SYSTEM_TASK:>
Closes a connection.
<END_TASK>
<USER_TASK:>
Description:
def close_connection(self, connection_id):
    """Closes a connection.

    :param connection_id:
        ID of the connection to close.
    """ |
    request = requests_pb2.CloseConnectionRequest()
    request.connection_id = connection_id
    self._apply(request) |
<SYSTEM_TASK:>
Closes a statement.
<END_TASK>
<USER_TASK:>
Description:
def close_statement(self, connection_id, statement_id):
    """Closes a statement.

    :param connection_id:
        ID of the current connection.
    :param statement_id:
        ID of the statement to close.
    """ |
    request = requests_pb2.CloseStatementRequest()
    request.connection_id = connection_id
    request.statement_id = statement_id
    self._apply(request) |
<SYSTEM_TASK:>
Prepares and immediately executes a statement.
<END_TASK>
<USER_TASK:>
Description:
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
    """Prepares and immediately executes a statement.

    :param connection_id:
        ID of the current connection.
    :param statement_id:
        ID of the statement to prepare.
    :param sql:
        SQL query.
    :param max_rows_total:
        The maximum number of rows that will be allowed for this query.
    :param first_frame_max_size:
        The maximum number of rows that will be returned in the first Frame returned for this query.
    :returns:
        Result set with the signature of the prepared statement and the first frame data.
    """ |
    request = requests_pb2.PrepareAndExecuteRequest()
    request.connection_id = connection_id
    request.statement_id = statement_id
    request.sql = sql
    if max_rows_total is not None:
        request.max_rows_total = max_rows_total
    if first_frame_max_size is not None:
        request.first_frame_max_size = first_frame_max_size
    response_data = self._apply(request, 'ExecuteResponse')
    response = responses_pb2.ExecuteResponse()
    response.ParseFromString(response_data)
    return response.results |
<SYSTEM_TASK:>
Prepares a statement.
<END_TASK>
<USER_TASK:>
Description:
def prepare(self, connection_id, sql, max_rows_total=None):
    """Prepares a statement.

    :param connection_id:
        ID of the current connection.
    :param sql:
        SQL query.
    :param max_rows_total:
        The maximum number of rows that will be allowed for this query.
    :returns:
        Signature of the prepared statement.
    """ |
    request = requests_pb2.PrepareRequest()
    request.connection_id = connection_id
    request.sql = sql
    if max_rows_total is not None:
        request.max_rows_total = max_rows_total
    response_data = self._apply(request)
    response = responses_pb2.PrepareResponse()
    response.ParseFromString(response_data)
    return response.statement |
<SYSTEM_TASK:>
Closes the cursor.
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    """Closes the cursor.

    No further operations are allowed once the cursor is closed.

    If the cursor is used in a ``with`` statement, this method will
    be automatically called at the end of the ``with`` block.
    """ |
    if self._closed:
        raise ProgrammingError('the cursor is already closed')
    if self._id is not None:
        self._connection._client.close_statement(self._connection._id, self._id)
        self._id = None
    self._signature = None
    self._column_data_types = []
    self._frame = None
    self._pos = None
    self._closed = True |
<SYSTEM_TASK:>
Transforms a Row into Python values.
<END_TASK>
<USER_TASK:>
Description:
def _transform_row(self, row):
    """Transforms a Row into Python values.

    :param row:
        A ``common_pb2.Row`` object.
    :returns:
        A list of values casted into the correct Python types.
    :raises:
        NotImplementedError
    """ |
    tmp_row = []
    for i, column in enumerate(row.value):
        if column.has_array_value:
            raise NotImplementedError('array types are not supported')
        elif column.scalar_value.null:
            tmp_row.append(None)
        else:
            field_name, rep, mutate_to, cast_from = self._column_data_types[i]
            # get the value from the field_name
            value = getattr(column.scalar_value, field_name)
            # cast the value
            if cast_from is not None:
                value = cast_from(value)
            tmp_row.append(value)
    return tmp_row |
<SYSTEM_TASK:>
Read-only attribute providing the current 0-based index of the
<END_TASK>
<USER_TASK:>
Description:
def rownumber(self):
    """Read-only attribute providing the current 0-based index of the
    cursor in the result set or ``None`` if the index cannot be
    determined.

    The index can be seen as index of the cursor in a sequence
    (the result set). The next fetch operation will fetch the
    row indexed by :attr:`rownumber` in that sequence.
    """ |
    if self._frame is not None and self._pos is not None:
        return self._frame.offset + self._pos
    return self._pos |
<SYSTEM_TASK:>
Closes the connection.
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    """Closes the connection.

    No further operations are allowed, either on the connection or any
    of its cursors, once the connection is closed.

    If the connection is used in a ``with`` statement, this method will
    be automatically called at the end of the ``with`` block.
    """ |
    if self._closed:
        raise ProgrammingError('the connection is already closed')
    for cursor_ref in self._cursors:
        cursor = cursor_ref()
        if cursor is not None and not cursor._closed:
            cursor.close()
    self._client.close_connection(self._id)
    self._client.close()
    self._closed = True |
<SYSTEM_TASK:>
Creates a new cursor.
<END_TASK>
<USER_TASK:>
Description:
def cursor(self, cursor_factory=None):
    """Creates a new cursor.

    :param cursor_factory:
        This argument can be used to create non-standard cursors.
        The class returned must be a subclass of
        :class:`~phoenixdb.cursor.Cursor` (for example :class:`~phoenixdb.cursor.DictCursor`).
        A default factory for the connection can also be specified using the
        :attr:`cursor_factory` attribute.
    :returns:
        A :class:`~phoenixdb.cursor.Cursor` object.
    """ |
    if self._closed:
        raise ProgrammingError('the connection is already closed')
    cursor = (cursor_factory or self.cursor_factory)(self)
    self._cursors.append(weakref.ref(cursor, self._cursors.remove))
    return cursor |
<SYSTEM_TASK:>
Sets one or more parameters in the current connection.
<END_TASK>
<USER_TASK:>
Description:
def set_session(self, autocommit=None, readonly=None):
    """Sets one or more parameters in the current connection.

    :param autocommit:
        Switch the connection to autocommit mode. With the current
        version, you need to always enable this, because
        :meth:`commit` is not implemented.
    :param readonly:
        Switch the connection to read-only mode.
    """ |
    props = {}
    if autocommit is not None:
        props['autoCommit'] = bool(autocommit)
    if readonly is not None:
        props['readOnly'] = bool(readonly)
    props = self._client.connection_sync(self._id, props)
    self._autocommit = props.auto_commit
    self._readonly = props.read_only
    self._transactionisolation = props.transaction_isolation |
<SYSTEM_TASK:>
Predict target values for X.
<END_TASK>
<USER_TASK:>
Description:
def predict(self, X):
    """Predict target values for X.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    y : array of shape = [n_samples]
        The predicted target value.
    """ |
    K = pairwise_kernels(self.X, X, metric=self.kernel, gamma=self.gamma)
    return (K * self.y[:, None]).sum(axis=0) / K.sum(axis=0) |
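This is the Nadaraya-Watson estimator: each prediction is a kernel-weighted average of the training targets, y_hat(x) = sum_i K(x_i, x) * y_i / sum_i K(x_i, x). A standalone sketch of the same computation, with made-up training data:

import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels

X_train = np.array([[0.0], [1.0], [2.0]])
y_train = np.array([0.0, 1.0, 4.0])
K = pairwise_kernels(X_train, np.array([[1.5]]), metric='rbf', gamma=1.0)
y_hat = (K * y_train[:, None]).sum(axis=0) / K.sum(axis=0)  # weighted average near x=1.5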
<SYSTEM_TASK:>
Generate the random hidden layer's activations given X as input.
<END_TASK>
<USER_TASK:>
Description:
def transform(self, X, y=None):
    """Generate the random hidden layer's activations given X as input.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape [n_samples, n_features]
        Data to transform
    y : is not used: placeholder to allow for usage in a Pipeline.

    Returns
    -------
    X_new : numpy array of shape [n_samples, n_components]
    """ |
    if self.components_ is None:
        raise ValueError('No components initialized')
    return self._compute_hidden_activations(X) |
<SYSTEM_TASK:>
Generate MLP weights
<END_TASK>
<USER_TASK:>
Description:
def _compute_weights(self, X, rs):
    """Generate MLP weights""" |
    # use supplied weights if present
    weights = self._get_user_components('weights')
    if weights is None:
        n_features = X.shape[1]
        hw_size = (n_features, self.n_hidden)
        weights = rs.normal(size=hw_size)
    self.components_['weights'] = weights |
<SYSTEM_TASK:>
Generate components of hidden layer given X
<END_TASK>
<USER_TASK:>
Description:
def _generate_components(self, X):
    """Generate components of hidden layer given X""" |
    rs = check_random_state(self.random_state)
    if self._use_mlp_input:
        self._compute_biases(rs)
        self._compute_weights(X, rs)
    if self._use_rbf_input:
        self._compute_centers(X, sp.issparse(X), rs)
        self._compute_radii() |
<SYSTEM_TASK:>
Compute input activations given X
<END_TASK>
<USER_TASK:>
Description:
def _compute_input_activations(self, X):
    """Compute input activations given X""" |
    n_samples = X.shape[0]
    mlp_acts = np.zeros((n_samples, self.n_hidden))
    if self._use_mlp_input:
        b = self.components_['biases']
        w = self.components_['weights']
        mlp_acts = self.alpha * (safe_sparse_dot(X, w) + b)
    rbf_acts = np.zeros((n_samples, self.n_hidden))
    if self._use_rbf_input:
        radii = self.components_['radii']
        centers = self.components_['centers']
        scale = self.rbf_width * (1.0 - self.alpha)
        rbf_acts = scale * cdist(X, centers) / radii
    self.input_activations_ = mlp_acts + rbf_acts |
<SYSTEM_TASK:>
Generate centers, then compute tau, dF and dN vals
<END_TASK>
<USER_TASK:>
Description:
def _compute_centers(self, X, sparse, rs):
    """Generate centers, then compute tau, dF and dN vals""" |
    super(GRBFRandomLayer, self)._compute_centers(X, sparse, rs)
    centers = self.components_['centers']
    sorted_distances = np.sort(squareform(pdist(centers)))
    self.dF_vals = sorted_distances[:, -1]
    self.dN_vals = sorted_distances[:, 1] / 100.0
    # self.dN_vals = 0.0002 * np.ones(self.dF_vals.shape)
    tauNum = np.log(np.log(self.grbf_lambda) /
                    np.log(1.0 - self.grbf_lambda))
    tauDenom = np.log(self.dF_vals / self.dN_vals)
    self.tau_vals = tauNum / tauDenom
    self._extra_args['taus'] = self.tau_vals |
<SYSTEM_TASK:>
fit regression using pseudo-inverse
<END_TASK>
<USER_TASK:>
Description:
def _fit_regression(self, y):
    """
    fit regression using pseudo-inverse
    or supplied regressor
    """ |
    if self.regressor is None:
        self.coefs_ = safe_sparse_dot(pinv2(self.hidden_activations_), y)
    else:
        self.regressor.fit(self.hidden_activations_, y)
    self.fitted_ = True |
<SYSTEM_TASK:>
Force use of accuracy score since we don't inherit
<END_TASK>
<USER_TASK:>
Description:
def score(self, X, y):
    """Force use of accuracy score since we don't inherit
    from ClassifierMixin""" |
    from sklearn.metrics import accuracy_score
    return accuracy_score(y, self.predict(X)) |
<SYSTEM_TASK:>
Required only for DRF 3.1, which does not make dynamically added attribute available in obj in serializer.
<END_TASK>
<USER_TASK:>
Description:
def compat_serializer_attr(serializer, obj):
    """
    Required only for DRF 3.1, which does not make dynamically added attributes available in obj in serializer.
    This is a quick solution but works without breaking anything.
    """ |
    if DRFVLIST[0] == 3 and DRFVLIST[1] == 1:
        for i in serializer.instance:
            if i.id == obj.id:
                return i
    else:
        return obj |
<SYSTEM_TASK:>
get_paginated_response is unknown to DRF 3.0
<END_TASK>
<USER_TASK:>
Description:
def compat_get_paginated_response(view, page):
    """ get_paginated_response is unknown to DRF 3.0 """ |
    if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
        from rest_messaging.serializers import ComplexMessageSerializer  # circular import
        serializer = ComplexMessageSerializer(page, many=True)
        return view.get_paginated_response(serializer.data)
    else:
        serializer = view.get_pagination_serializer(page)
        return Response(serializer.data) |
<SYSTEM_TASK:>
Allows to define a callback for serializing information about the user.
<END_TASK>
<USER_TASK:>
Description:
def get_participants(self, obj):
    """ Allows defining a callback for serializing information about the user. """ |
    # we set the many to many serialization to False, because we only want it with retrieve requests
    if self.callback is None:
        return [participant.id for participant in obj.participants.all()]
    else:
        # we do not want user information
        return self.callback(obj) |
<SYSTEM_TASK:>
We say if the message should trigger a notification
<END_TASK>
<USER_TASK:>
Description:
def get_is_notification(self, obj):
    """ We say if the message should trigger a notification """ |
    try:
        o = compat_serializer_attr(self, obj)
        return o.is_notification
    except Exception:
        return False |
<SYSTEM_TASK:>
Return the ids of the people who read the message instance.
<END_TASK>
<USER_TASK:>
Description:
def get_readers(self, obj):
    """ Return the ids of the people who read the message instance. """ |
    try:
        o = compat_serializer_attr(self, obj)
        return o.readers
    except Exception:
        return [] |
<SYSTEM_TASK:>
Gets all the threads in which the current participant is involved. The method excludes threads where the participant has left.
<END_TASK>
<USER_TASK:>
Description:
def get_threads_where_participant_is_active(self, participant_id):
    """ Gets all the threads in which the current participant is involved. The method excludes threads where the participant has left. """ |
    participations = Participation.objects.\
        filter(participant__id=participant_id).\
        exclude(date_left__lte=now()).\
        distinct().\
        select_related('thread')
    return Thread.objects.\
        filter(id__in=[p.thread.id for p in participations]).\
        distinct() |
<SYSTEM_TASK:>
Gets the threads where the specified participants are active and no one has left.
<END_TASK>
<USER_TASK:>
Description:
def get_active_threads_involving_all_participants(self, *participant_ids):
    """ Gets the threads where the specified participants are active and no one has left. """ |
    query = Thread.objects.\
        exclude(participation__date_left__lte=now()).\
        annotate(count_participants=Count('participants')).\
        filter(count_participants=len(participant_ids))
    for participant_id in participant_ids:
        query = query.filter(participants__id=participant_id)
    return query.distinct() |
<SYSTEM_TASK:>
When a Participant posts a message to other participants without specifying an existing Thread,
<END_TASK>
<USER_TASK:>
Description:
def get_or_create_thread(self, request, name=None, *participant_ids):
    """
    When a Participant posts a message to other participants without specifying an existing Thread,
    we must
    1. Create a new Thread if they have not yet opened the discussion.
    2. If they have already opened the discussion and multiple Threads are not allowed for the same users, we must
       re-attach this message to the existing thread.
    3. If they have already opened the discussion and multiple Threads are allowed, we simply create a new one.
    """ |
    # we get the current participant
    # or create him if he does not exist
    participant_ids = list(participant_ids)
    if request.rest_messaging_participant.id not in participant_ids:
        participant_ids.append(request.rest_messaging_participant.id)
    # we need at least one other participant
    if len(participant_ids) < 2:
        raise Exception('At least two participants are required.')
    if getattr(settings, "REST_MESSAGING_THREAD_UNIQUE_FOR_ACTIVE_RECIPIENTS", True) is True:
        # if we limit the number of threads by active participants,
        # we ensure a thread is not already running
        existing_threads = self.get_active_threads_involving_all_participants(*participant_ids)
        if len(list(existing_threads)) > 0:
            return existing_threads[0]
    # we have no existing Thread or multiple Thread instances are allowed
    thread = Thread.objects.create(name=name)
    # we add the participants
    thread.add_participants(request, *participant_ids)
    # we send a signal to say the thread with participants is created
    post_save.send(Thread, instance=thread, created=True, created_and_add_participants=True, request_participant_id=request.rest_messaging_participant.id)
    return thread |
<SYSTEM_TASK:>
Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits
<END_TASK>
<USER_TASK:>
Description:
def return_daily_messages_count(self, sender):
    """ Returns the number of messages sent in the last 24 hours so we can ensure the user does not exceed his messaging limits """ |
    h24 = now() - timedelta(days=1)
    return Message.objects.filter(sender=sender, sent_at__gte=h24).count() |
<SYSTEM_TASK:>
Check who read each message.
<END_TASK>
<USER_TASK:>
Description:
def check_who_read(self, messages):
    """ Check who read each message. """ |
    # we get the corresponding Participation objects
    for m in messages:
        readers = []
        for p in m.thread.participation_set.all():
            if p.date_last_check is None:
                pass
            elif p.date_last_check > m.sent_at:
                # the message has been read
                readers.append(p.participant.id)
        setattr(m, "readers", readers)
    return messages |
<SYSTEM_TASK:>
Check if each message requires a notification for the specified participant.
<END_TASK>
<USER_TASK:>
Description:
def check_is_notification(self, participant_id, messages):
    """ Check if each message requires a notification for the specified participant. """ |
    try:
        # we get the last check
        last_check = NotificationCheck.objects.filter(participant__id=participant_id).latest('id').date_check
    except Exception:
        # we have no notification check
        # all the messages are considered as new
        for m in messages:
            m.is_notification = True
        return messages
    for m in messages:
        if m.sent_at > last_check and m.sender.id != participant_id:
            setattr(m, "is_notification", True)
        else:
            setattr(m, "is_notification", False)
    return messages |
<SYSTEM_TASK:>
Returns the last message in each thread
<END_TASK>
<USER_TASK:>
Description:
def get_lasts_messages_of_threads(self, participant_id, check_who_read=True, check_is_notification=True):
    """ Returns the last message in each thread """ |
    # we get the last message for each thread
    # we must query the messages using two queries because only Postgres supports .order_by('thread', '-sent_at').distinct('thread')
    threads = Thread.managers.\
        get_threads_where_participant_is_active(participant_id).\
        annotate(last_message_id=Max('message__id'))
    messages = Message.objects.filter(id__in=[thread.last_message_id for thread in threads]).\
        order_by('-id').\
        distinct().\
        select_related('thread', 'sender')
    if check_who_read is True:
        messages = messages.prefetch_related('thread__participation_set', 'thread__participation_set__participant')
        messages = self.check_who_read(messages)
    else:
        messages = messages.prefetch_related('thread__participants')
    if check_is_notification is True:
        messages = self.check_is_notification(participant_id, messages)
    return messages |
<SYSTEM_TASK:>
Returns all the messages in a thread.
<END_TASK>
<USER_TASK:>
Description:
def get_all_messages_in_thread(self, participant_id, thread_id, check_who_read=True):
    """ Returns all the messages in a thread. """ |
    try:
        messages = Message.objects.filter(thread__id=thread_id).\
            order_by('-id').\
            select_related('thread').\
            prefetch_related('thread__participation_set', 'thread__participation_set__participant')
    except Exception:
        return Message.objects.none()
    messages = self.check_who_read(messages)
    return messages |
<SYSTEM_TASK:>
We ensure the Thread only involves eligible participants.
<END_TASK>
<USER_TASK:>
Description:
def create(self, request, *args, **kwargs):
    """ We ensure the Thread only involves eligible participants. """ |
    serializer = self.get_serializer(data=compat_get_request_data(request))
    compat_serializer_check_is_valid(serializer)
    self.perform_create(request, serializer)
    headers = self.get_success_headers(serializer.data)
    return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers) |
<SYSTEM_TASK:>
Pk is the pk of the Thread to which the messages belong.
<END_TASK>
<USER_TASK:>
Description:
def mark_thread_as_read(self, request, pk=None):
    """ Pk is the pk of the Thread to which the messages belong. """ |
    # we get the thread and check for permission
    thread = Thread.objects.get(id=pk)
    self.check_object_permissions(request, thread)
    # we save the date
    try:
        participation = Participation.objects.get(thread=thread, participant=request.rest_messaging_participant)
        participation.date_last_check = now()
        participation.save()
        # we return the thread
        serializer = self.get_serializer(thread)
        return Response(serializer.data)
    except Exception:
        return Response(status=status.HTTP_400_BAD_REQUEST) |
<SYSTEM_TASK:>
Extract next stops along the journey.
<END_TASK>
<USER_TASK:>
Description:
def _pass_list(self) -> List[Dict[str, Any]]:
    """Extract next stops along the journey.""" |
    stops: List[Dict[str, Any]] = []
    for stop in self.journey.PassList.BasicStop:
        index = stop.get("index")
        station = stop.Location.Station.HafasName.Text.text
        station_id = stop.Location.Station.ExternalId.text
        stops.append({"index": index, "stationId": station_id, "station": station})
    return stops |
<SYSTEM_TASK:>
Check `style` against pyout.styling.schema.
<END_TASK>
<USER_TASK:>
Description:
def validate(style):
    """Check `style` against pyout.styling.schema.

    Parameters
    ----------
    style : dict
        Style object to validate.

    Raises
    ------
    StyleValidationError if `style` is not valid.
    """ |
    try:
        import jsonschema
    except ImportError:
        return
    try:
        jsonschema.validate(style, schema)
    except jsonschema.ValidationError as exc:
        new_exc = StyleValidationError(exc)
        # Don't dump the original jsonschema exception because it is already
        # included in the StyleValidationError's message.
        new_exc.__cause__ = None
        raise new_exc |
<SYSTEM_TASK:>
Classify `value` of bold, color, and underline keys.
<END_TASK>
<USER_TASK:>
Description:
def value_type(value):
    """Classify `value` of bold, color, and underline keys.

    Parameters
    ----------
    value : style value

    Returns
    -------
    str, {"simple", "lookup", "re_lookup", "interval"}
    """ |
    try:
        keys = list(value.keys())
    except AttributeError:
        return "simple"
    if keys in [["lookup"], ["re_lookup"], ["interval"]]:
        return keys[0]
    raise ValueError("Type of `value` could not be determined") |
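Quick illustration of the classification (the style values are made up):

assert value_type("red") == "simple"
assert value_type({"lookup": {"FAILED": "red"}}) == "lookup"
assert value_type({"interval": [[0, 50, "red"]]}) == "interval"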
<SYSTEM_TASK:>
store an audio file to storage dir
<END_TASK>
<USER_TASK:>
Description:
def add(self, src):
    """ store an audio file to storage dir

    :param src: audio file path
    :return: checksum value
    """ |
    if not audio.get_type(src):
        raise TypeError('The type of this file is not supported.')
    return super().add(src) |
<SYSTEM_TASK:>
Merge command with arguments.
<END_TASK>
<USER_TASK:>
Description:
def _get_cmd(command, arguments):
    """Merge command with arguments.""" |
    if arguments is None:
        arguments = []
    if command.endswith(".py") or command.endswith(".pyw"):
        return [sys.executable, command] + list(arguments)
    else:
        return [command] + list(arguments) |
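A sketch of both branches (command names are illustrative); python scripts are re-launched through the current interpreter:

import sys

assert _get_cmd("tool.py", ["--verbose"]) == [sys.executable, "tool.py", "--verbose"]
assert _get_cmd("ls", None) == ["ls"]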
<SYSTEM_TASK:>
A command line argument parser.
<END_TASK>
<USER_TASK:>
Description:
def argparse(argv, parser, arguments):
    """ A command line argument parser.

    Parses arguments coming from the argv Observable and outputs them as
    Argument items in the output observable.

    Parameters
    ----------
    argv : Observable
        An Observable of strings.
    parser : Observable
        An Observable containing one Parser item.
    arguments : Observable
        An Observable containing ArgumentDef items.

    Returns
    -------
    Observable
        An Observable of Argument items.
    """ |
    def add_arg(parser, arg_spec):
        parser.add_argument(arg_spec.name, help=arg_spec.help)
        return parser

    parse_request = parser \
        .map(lambda i: ArgumentParser(description=i.description)) \
        .combine_latest(arguments, lambda parser, arg_def: add_arg(parser, arg_def)) \
        .last() \
        .combine_latest(argv.to_list(), lambda parser, args: (parser, args))

    def subscribe(observer):
        def on_next(value):
            parser, args = value
            try:
                args = parser.parse_args(args)
                for key, value in vars(args).items():
                    observer.on_next(Argument(key=key, value=value))
            except NameError as exc:
                observer.on_error("{}\n{}".format(exc, parser.format_help()))
        return parse_request.subscribe(on_next, observer.on_error, observer.on_completed)

    return AnonymousObservable(subscribe) |
<SYSTEM_TASK:>
Calculate the complex flow vector `Q_n`.
<END_TASK>
<USER_TASK:>
Description:
def qn(phi, *n):
    """
    Calculate the complex flow vector `Q_n`.

    :param array-like phi: Azimuthal angles.
    :param int n: One or more harmonics to calculate.
    :returns:
        A single complex number if only one ``n`` was given or a complex array
        for multiple ``n``.
    """ |
    phi = np.ravel(phi)
    n = np.asarray(n)
    i_n_phi = np.zeros((n.size, phi.size), dtype=complex)
    np.outer(n, phi, out=i_n_phi.imag)
    qn = np.exp(i_n_phi, out=i_n_phi).sum(axis=1)
    if qn.size == 1:
        qn = qn[0]
    return qn |
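Concretely, Q_n = sum_j exp(i*n*phi_j); a quick check against the direct formula with random angles:

import numpy as np

phi = np.random.uniform(-np.pi, np.pi, 1000)
assert np.isclose(qn(phi, 2), np.exp(2j * phi).sum())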
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def correlation(self, n, k, error=False):
    r"""
    Calculate `\langle k \rangle_n`,
    the `k`-particle correlation function for `n`\ th-order anisotropy.

    :param int n: Anisotropy order.
    :param int k: Correlation order.
    :param bool error:
        Whether to calculate statistical error
        (for `\langle 2 \rangle` only).
        If true, return a tuple ``(corr, corr_error)``.
    """ |
    self._calculate_corr(n, k)
    corr_nk = self._corr[n][k]
    if error:
        self._calculate_corr_err(n, k)
        return corr_nk, self._corr_err[n][k]
    else:
        return corr_nk |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def sample(self, multiplicity):
r"""
Randomly sample azimuthal angles `\phi`.
:param int multiplicity: Number to sample.
:returns: Array of sampled angles.
""" |
if self._n is None:
return self._uniform_phi(multiplicity)
# Since the flow PDF does not have an analytic inverse CDF, I use a
# simple accept-reject sampling algorithm. This is reasonably
# efficient since for normal-sized vn, the PDF is close to flat. Now
# due to the overhead of Python functions, it's desirable to minimize
# the number of calls to the random number generator. Therefore I
# sample numbers in chunks; most of the time only one or two chunks
# should be needed. Eventually, I might rewrite this with Cython, but
# it's fast enough for now.
N = 0 # number of phi that have been sampled
phi = np.empty(multiplicity) # allocate array for phi
pdf_max = 1 + 2*self._vn.sum() # sampling efficiency ~ 1/pdf_max
while N < multiplicity:
n_remaining = multiplicity - N
n_to_sample = int(1.03*pdf_max*n_remaining)
phi_chunk = self._uniform_phi(n_to_sample)
phi_chunk = phi_chunk[self._pdf(phi_chunk) >
np.random.uniform(0, pdf_max, n_to_sample)]
K = min(phi_chunk.size, n_remaining) # number of phi to take
phi[N:N+K] = phi_chunk[:K]
N += K
return phi |
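The same accept-reject idea in isolation, for any vectorized PDF bounded above by pdf_max on an interval (an illustrative helper, not part of the class):

import numpy as np

def accept_reject(pdf, pdf_max, a, b, size):
    """Sample `size` values from a vectorized pdf via accept-reject."""
    out = np.empty(0)
    while out.size < size:
        x = np.random.uniform(a, b, size)
        # keep each candidate with probability pdf(x) / pdf_max
        out = np.concatenate([out, x[pdf(x) > np.random.uniform(0, pdf_max, size)]])
    return out[:size]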
<SYSTEM_TASK:>
Formats a response from a view to handle any RDF graphs
<END_TASK>
<USER_TASK:>
Description:
def output(self, response, accepts):
""" Formats a response from a view to handle any RDF graphs
If a view function returns an RDF graph, serialize it based on the Accept header.
If it's not an RDF graph, return it without any special handling
""" |
graph = self.get_graph(response)
if graph is not None:
# decide the format
mimetype, format = self.format_selector.decide(accepts, graph.context_aware)
# no serialization format matched the Accept header
if mimetype is None:
return self.make_406_response()
# explicitly mark text mimetypes as utf-8
if 'text' in mimetype:
mimetype = mimetype + '; charset=utf-8'
# format the new response
serialized = graph.serialize(format=format)
response = self.make_new_response(response, mimetype, serialized)
return response
else:
return response |
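In isolation, the graph branch boils down to rdflib serialization; a sketch of what serialize produces for a turtle request (rdflib returns str in version 6+, bytes in earlier versions):

from rdflib import Graph, Literal, URIRef
from rdflib.namespace import RDFS

g = Graph()
g.add((URIRef("http://example.org/x"), RDFS.label, Literal("x")))
print(g.serialize(format="turtle"))  # what output() sends for text/turtle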
<SYSTEM_TASK:>
Wraps a view function to return formatted RDF graphs
<END_TASK>
<USER_TASK:>
Description:
def decorate(self, view):
""" Wraps a view function to return formatted RDF graphs
Uses content negotiation to serialize the graph to the client-preferred format
Passes other content through unmodified
""" |
from functools import wraps
@wraps(view)
def decorated(*args, **kwargs):
response = view(*args, **kwargs)
accept = self.get_accept()
return self.output(response, accept)
return decorated |
<SYSTEM_TASK:>
Return a value from configuration.
<END_TASK>
<USER_TASK:>
Description:
def get(self, var, default=None):
"""Return a value from configuration.
Safe version which always returns a default value if the value is not
found.
""" |
try:
return self.__get(var)
except (KeyError, IndexError):
return default |
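Usage sketch: the KeyError/IndexError handling suggests __get resolves dotted paths, possibly with list indices; names here are illustrative assumptions:

port = config.get("server.port", 8080)           # 8080 if the key is missing
first_host = config.get("hosts.0", "localhost")  # assumes list indexing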
<SYSTEM_TASK:>
Insert at the index.
<END_TASK>
<USER_TASK:>
Description:
def insert(self, var, value, index=None):
"""Insert at the index.
If the index is not provided, the value is appended to the end of the list.
""" |
current = self.__get(var)
if not isinstance(current, list):
raise KeyError("%s: is not a list" % var)
if index is None:
current.append(value)
else:
current.insert(index, value)
if self.auto_save:
self.save() |
<SYSTEM_TASK:>
Return a merged set of top level keys from all configurations.
<END_TASK>
<USER_TASK:>
Description:
def keys(self):
"""Return a merged set of top level keys from all configurations.""" |
s = set()
for config in self.__configs:
s |= config.keys()
return s |
<SYSTEM_TASK:>
Split the string s using shell-like syntax.
<END_TASK>
<USER_TASK:>
Description:
def split(s, posix=True):
"""Split the string s using shell-like syntax.
Args:
s (str): String to split
posix (bool): Use POSIX splitting rules
Returns:
list of str: List of string parts
""" |
if isinstance(s, six.binary_type):
s = s.decode("utf-8")
return shlex.split(s, posix=posix) |
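Example: quoted segments survive as single parts under POSIX rules.

split('convert -resize 50% "my photo.png" out.png')
# -> ['convert', '-resize', '50%', 'my photo.png', 'out.png']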
<SYSTEM_TASK:>
Recursive search function.
<END_TASK>
<USER_TASK:>
Description:
def search(path, matcher="*", dirs=False, files=True):
"""Recursive search function.
Args:
path (str): Path to search recursively
matcher (str or callable): String pattern to search for or function
that returns True/False for a file argument
dirs (bool): if True returns directories that match the pattern
files (bool): if True returns files that match the pattern
Yields:
str: Found files and directories
""" |
if callable(matcher):
def fnmatcher(items):
return list(filter(matcher, items))
else:
def fnmatcher(items):
return fnmatch.filter(items, matcher)
for root, directories, filenames in os.walk(os.path.abspath(path)):
to_match = []
if dirs:
to_match.extend(directories)
if files:
to_match.extend(filenames)
for item in fnmatcher(to_match):
yield os.path.join(root, item) |
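Since this is a generator, results can be consumed lazily; both pattern and callable matchers operate on bare file names:

py_files = list(search(".", matcher="*.py"))
logs = list(search("/var/log", matcher=lambda name: name.endswith(".log")))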
<SYSTEM_TASK:>
Change the current working directory.
<END_TASK>
<USER_TASK:>
Description:
def chdir(directory):
"""Change the current working directory.
Args:
directory (str): Directory to go to.
""" |
directory = os.path.abspath(directory)
logger.info("chdir -> %s" % directory)
try:
if not os.path.isdir(directory):
logger.error(
"chdir -> %s failed! Directory does not exist!", directory
)
return False
os.chdir(directory)
return True
except Exception as e:
logger.error("chdir -> %s failed! %s" % (directory, e))
return False |
<SYSTEM_TASK:>
Context object for changing directory.
<END_TASK>
<USER_TASK:>
Description:
def goto(directory, create=False):
"""Context object for changing directory.
Args:
directory (str): Directory to go to.
create (bool): Create directory if it doesn't exist.
Usage::
>>> with goto(directory) as ok:
... if not ok:
... print 'Error'
... else:
... print 'All OK'
""" |
current = os.getcwd()
directory = os.path.abspath(directory)
if os.path.isdir(directory) or (create and mkdir(directory)):
logger.info("goto -> %s", directory)
os.chdir(directory)
try:
yield True
finally:
logger.info("goto <- %s", directory)
os.chdir(current)
else:
logger.info(
"goto(%s) - directory does not exist, or cannot be " "created.",
directory,
)
yield False |
<SYSTEM_TASK:>
Delete a file or directory.
<END_TASK>
<USER_TASK:>
Description:
def remove(path):
"""Delete a file or directory.
Args:
path (str): Path to the file or directory that needs to be deleted.
Returns:
bool: True if the operation is successful, False otherwise.
""" |
if os.path.isdir(path):
return __rmtree(path)
else:
return __rmfile(path) |
<SYSTEM_TASK:>
Read the content of the file.
<END_TASK>
<USER_TASK:>
Description:
def read(path, encoding="utf-8"):
"""Read the content of the file.
Args:
path (str): Path to the file
encoding (str): File encoding. Default: utf-8
Returns:
str: File content or empty string if there was an error
""" |
try:
with io.open(path, encoding=encoding) as f:
return f.read()
except Exception as e:
logger.error("read: %s failed. Error: %s", path, e)
return "" |
<SYSTEM_TASK:>
Create a file at the given path if it does not already exist.
<END_TASK>
<USER_TASK:>
Description:
def touch(path, content="", encoding="utf-8", overwrite=False):
"""Create a file at the given path if it does not already exists.
Args:
path (str): Path to the file.
content (str): Optional content that will be written in the file.
encoding (str): Encoding in which to write the content.
Default: ``utf-8``
overwrite (bool): Overwrite the file if exists.
Returns:
bool: True if the operation is successful, False otherwise.
""" |
path = os.path.abspath(path)
if not overwrite and os.path.exists(path):
logger.warning('touch: "%s" already exists', path)
return False
try:
logger.info("touch: %s", path)
with io.open(path, "wb") as f:
if not isinstance(content, six.binary_type):
content = content.encode(encoding)
f.write(content)
return True
except Exception as e:
logger.error("touch: %s failed. Error: %s", path, e)
return False |
<SYSTEM_TASK:>
Return an object from a dot path.
<END_TASK>
<USER_TASK:>
Description:
def get_object(path="", obj=None):
"""Return an object from a dot path.
Path can either be a full path, in which case the `get_object` function
will try to import the modules in the path and follow it to the final
object. Or it can be a path relative to the object passed in as the second
argument.
Args:
path (str): Full or relative dot path to the desired object
obj (object): Starting object. Dot path is calculated relative to
this object.
Returns:
Object at the end of the path, or list of non-hidden objects if we use
the star query.
Example for full paths::
>>> get_object('os.path.join')
<function join at 0x1002d9ed8>
>>> get_object('tea.process')
<module 'tea.process' from 'tea/process/__init__.pyc'>
Example for relative paths when an object is passed in::
>>> import os
>>> get_object('path.join', os)
<function join at 0x1002d9ed8>
Example for a star query (star query can be used only as the last element
of the path)::
>>> get_object('tea.dsa.*')
[]
>>> get_object('tea.dsa.singleton.*')
[<class 'tea.dsa.singleton.Singleton'>,
<class 'tea.dsa.singleton.SingletonMetaclass'>,
<module 'six' from '...'>]
>>> get_object('tea.dsa.*')
[<module 'tea.dsa.singleton' from '...'>] # Since we imported it
""" |
if not path:
return obj
path = path.split(".")
if obj is None:
obj = importlib.import_module(path[0])
path = path[1:]
for item in path:
if item == "*":
# This is the star query; return non-hidden objects
return [
getattr(obj, name)
for name in dir(obj)
if not name.startswith("__")
]
if isinstance(obj, types.ModuleType):
submodule = "{}.{}".format(_package(obj), item)
try:
obj = importlib.import_module(submodule)
except Exception as import_error:
try:
obj = getattr(obj, item)
except Exception:
# FIXME: I know I should probably merge the errors, but
# it's easier just to throw the import error since
# it's most probably the one the user wants to see.
# Create a new LoadingError and throw a combination
# of the import error and attribute error.
raise import_error
else:
obj = getattr(obj, item)
return obj |
<SYSTEM_TASK:>
Recursively load all subclasses from a module.
<END_TASK>
<USER_TASK:>
Description:
def load_subclasses(klass, modules=None):
"""Load recursively all all subclasses from a module.
Args:
klass (str or list of str): Class whose subclasses we want to load.
modules: List of additional modules or module names that should be
recursively imported in order to find all the subclasses of the
desired class. Default: None
FIXME: This function is kept only for backward compatibility reasons, it
should not be used. Deprecation warning should be raised and it should
be replaces by the ``Loader`` class.
""" |
if modules:
if isinstance(modules, six.string_types):
modules = [modules]
loader = Loader()
loader.load(*modules)
return klass.__subclasses__() |
<SYSTEM_TASK:>
Return full formatted traceback as a string.
<END_TASK>
<USER_TASK:>
Description:
def get_exception():
"""Return full formatted traceback as a string.""" |
trace = ""
exception = ""
exc_list = traceback.format_exception_only(
sys.exc_info()[0], sys.exc_info()[1]
)
for entry in exc_list:
exception += entry
tb_list = traceback.format_tb(sys.exc_info()[2])
for entry in tb_list:
trace += entry
return "%s\n%s" % (exception, trace) |
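Because it reads sys.exc_info(), this must be called from inside an except block:

try:
    1 / 0
except ZeroDivisionError:
    print(get_exception())  # "ZeroDivisionError: division by zero" plus traceback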
<SYSTEM_TASK:>
Load one or more modules.
<END_TASK>
<USER_TASK:>
Description:
def load(self, *modules):
"""Load one or more modules.
Args:
modules: Either a string full path to a module or an actual module
object.
""" |
for module in modules:
if isinstance(module, six.string_types):
try:
module = get_object(module)
except Exception as e:
self.errors[module] = e
continue
self.modules[module.__package__] = module
for (loader, module_name, is_pkg) in pkgutil.walk_packages(
module.__path__
):
full_name = "{}.{}".format(_package(module), module_name)
try:
self.modules[full_name] = get_object(full_name)
if is_pkg:
self.load(self.modules[full_name])
except Exception as e:
self.errors[full_name] = e |
<SYSTEM_TASK:>
Calculate the product filter.
<END_TASK>
<USER_TASK:>
Description:
def _product_filter(products) -> str:
"""Calculate the product filter.""" |
_filter = 0
for product in {PRODUCTS[p] for p in products}:
_filter += product
return format(_filter, "b")[::-1] |
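A worked example with an assumed PRODUCTS table (the real values live elsewhere in the module; the bit arithmetic implies they are powers of two):

PRODUCTS = {"suburban": 2, "regional": 4}  # assumed bit values

print(_product_filter(["suburban", "regional"]))  # 2 + 4 = 6 -> "110" -> "011"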
<SYSTEM_TASK:>
Forbid multi-line headers, to prevent header injection.
<END_TASK>
<USER_TASK:>
Description:
def forbid_multi_line_headers(name, val):
"""Forbid multi-line headers, to prevent header injection.""" |
val = smart_text(val)
if "\n" in val or "\r" in val:
raise BadHeaderError(
"Header values can't contain newlines "
"(got %r for header %r)" % (val, name)
)
try:
val = val.encode("ascii")
except UnicodeEncodeError:
if name.lower() in ("to", "from", "cc"):
result = []
for item in val.split(", "):
nm, addr = parseaddr(item)
nm = str(Header(nm, DEFAULT_CHARSET))
result.append(formataddr((nm, str(addr))))
val = ", ".join(result)
else:
val = Header(val, DEFAULT_CHARSET)
else:
if name.lower() == "subject":
val = Header(val)
return name, val |
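Usage sketch: a CR/LF smuggled into a recipient header raises the module's BadHeaderError before anything reaches the SMTP layer.

try:
    forbid_multi_line_headers("To", "a@example.com\r\nBcc: evil@example.com")
except BadHeaderError:
    print("header injection attempt blocked")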
<SYSTEM_TASK:>
Close the connection to the email server.
<END_TASK>
<USER_TASK:>
Description:
def close(self):
"""Close the connection to the email server.""" |
try:
try:
self.connection.quit()
except socket.sslerror:
# This happens when calling quit() on a TLS connection
# sometimes.
self.connection.close()
except Exception as e:
logger.error(
"Error trying to close connection to server " "%s:%s: %s",
self.host,
self.port,
e,
)
if self.fail_silently:
return
raise
finally:
self.connection = None |
<SYSTEM_TASK:>
Send an email.
<END_TASK>
<USER_TASK:>
Description:
def _send(self, message):
"""Send an email.
Helper method that does the actual sending.
""" |
if not message.recipients():
return False
try:
self.connection.sendmail(
message.sender,
message.recipients(),
message.message().as_string(),
)
except Exception as e:
logger.error(
"Error sending a message to server %s:%s: %s",
self.host,
self.port,
e,
)
if not self.fail_silently:
raise
return False
return True |
<SYSTEM_TASK:>
Attach a file from the filesystem.
<END_TASK>
<USER_TASK:>
Description:
def attach_file(self, path, mimetype=None):
"""Attache a file from the filesystem.""" |
filename = os.path.basename(path)
content = open(path, "rb").read()
self.attach(filename, content, mimetype) |
<SYSTEM_TASK:>
Convert the filename, content, mimetype triple to attachment.
<END_TASK>
<USER_TASK:>
Description:
def _create_attachment(self, filename, content, mimetype=None):
"""Convert the filename, content, mimetype triple to attachment.""" |
if mimetype is None:
mimetype, _ = mimetypes.guess_type(filename)
if mimetype is None:
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
basetype, subtype = mimetype.split("/", 1)
if basetype == "text":
attachment = SafeMIMEText(
smart_bytes(content, DEFAULT_CHARSET), subtype, DEFAULT_CHARSET
)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
encode_base64(attachment)
if filename:
attachment.add_header(
"Content-Disposition", "attachment", filename=filename
)
return attachment |
<SYSTEM_TASK:>
Attach an alternative content representation.
<END_TASK>
<USER_TASK:>
Description:
def attach_alternative(self, content, mimetype=None):
"""Attach an alternative content representation.""" |
self.attach(content=content, mimetype=mimetype) |