id (int32: 0-252k) | repo (stringlengths: 7-55) | path (stringlengths: 4-127) | func_name (stringlengths: 1-88) | original_string (stringlengths: 75-19.8k) | language (stringclasses: 1 value) | code (stringlengths: 51-19.8k) | code_tokens (sequence) | docstring (stringlengths: 3-17.3k) | docstring_tokens (sequence) | sha (stringlengths: 40) | url (stringlengths: 87-242) |
---|---|---|---|---|---|---|---|---|---|---|---|
248,400 | junzis/pyModeS | pyModeS/extra/aero.py | tas2mach | def tas2mach(Vtas, H):
"""True Airspeed to Mach number"""
a = vsound(H)
Mach = Vtas/a
return Mach | python | def tas2mach(Vtas, H):
a = vsound(H)
Mach = Vtas/a
return Mach | [
"def",
"tas2mach",
"(",
"Vtas",
",",
"H",
")",
":",
"a",
"=",
"vsound",
"(",
"H",
")",
"Mach",
"=",
"Vtas",
"/",
"a",
"return",
"Mach"
] | True Airspeed to Mach number | [
"True",
"Airspeed",
"to",
"Mach",
"number"
] | 8cd5655a04b08171a9ad5f1ffd232b7e0178ea53 | https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/extra/aero.py#L129-L133 |
248,401 | junzis/pyModeS | pyModeS/extra/aero.py | mach2tas | def mach2tas(Mach, H):
"""Mach number to True Airspeed"""
a = vsound(H)
Vtas = Mach*a
return Vtas | python | def mach2tas(Mach, H):
a = vsound(H)
Vtas = Mach*a
return Vtas | [
"def",
"mach2tas",
"(",
"Mach",
",",
"H",
")",
":",
"a",
"=",
"vsound",
"(",
"H",
")",
"Vtas",
"=",
"Mach",
"*",
"a",
"return",
"Vtas"
] | Mach number to True Airspeed | [
"Mach",
"number",
"to",
"True",
"Airspeed"
] | 8cd5655a04b08171a9ad5f1ffd232b7e0178ea53 | https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/extra/aero.py#L136-L140 |
248,402 | junzis/pyModeS | pyModeS/extra/aero.py | tas2eas | def tas2eas(Vtas, H):
"""True Airspeed to Equivalent Airspeed"""
rho = density(H)
Veas = Vtas * np.sqrt(rho/rho0)
return Veas | python | def tas2eas(Vtas, H):
rho = density(H)
Veas = Vtas * np.sqrt(rho/rho0)
return Veas | [
"def",
"tas2eas",
"(",
"Vtas",
",",
"H",
")",
":",
"rho",
"=",
"density",
"(",
"H",
")",
"Veas",
"=",
"Vtas",
"*",
"np",
".",
"sqrt",
"(",
"rho",
"/",
"rho0",
")",
"return",
"Veas"
] | True Airspeed to Equivalent Airspeed | [
"True",
"Airspeed",
"to",
"Equivalent",
"Airspeed"
] | 8cd5655a04b08171a9ad5f1ffd232b7e0178ea53 | https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/extra/aero.py#L150-L154 |
248,403 | junzis/pyModeS | pyModeS/extra/aero.py | cas2tas | def cas2tas(Vcas, H):
"""Calibrated Airspeed to True Airspeed"""
p, rho, T = atmos(H)
qdyn = p0*((1.+rho0*Vcas*Vcas/(7.*p0))**3.5-1.)
Vtas = np.sqrt(7.*p/rho*((1.+qdyn/p)**(2./7.)-1.))
return Vtas | python | def cas2tas(Vcas, H):
p, rho, T = atmos(H)
qdyn = p0*((1.+rho0*Vcas*Vcas/(7.*p0))**3.5-1.)
Vtas = np.sqrt(7.*p/rho*((1.+qdyn/p)**(2./7.)-1.))
return Vtas | [
"def",
"cas2tas",
"(",
"Vcas",
",",
"H",
")",
":",
"p",
",",
"rho",
",",
"T",
"=",
"atmos",
"(",
"H",
")",
"qdyn",
"=",
"p0",
"*",
"(",
"(",
"1.",
"+",
"rho0",
"*",
"Vcas",
"*",
"Vcas",
"/",
"(",
"7.",
"*",
"p0",
")",
")",
"**",
"3.5",
"-",
"1.",
")",
"Vtas",
"=",
"np",
".",
"sqrt",
"(",
"7.",
"*",
"p",
"/",
"rho",
"*",
"(",
"(",
"1.",
"+",
"qdyn",
"/",
"p",
")",
"**",
"(",
"2.",
"/",
"7.",
")",
"-",
"1.",
")",
")",
"return",
"Vtas"
] | Calibrated Airspeed to True Airspeed | [
"Calibrated",
"Airspeed",
"to",
"True",
"Airspeed"
] | 8cd5655a04b08171a9ad5f1ffd232b7e0178ea53 | https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/extra/aero.py#L157-L162 |
248,404 | junzis/pyModeS | pyModeS/extra/aero.py | mach2cas | def mach2cas(Mach, H):
"""Mach number to Calibrated Airspeed"""
Vtas = mach2tas(Mach, H)
Vcas = tas2cas(Vtas, H)
return Vcas | python | def mach2cas(Mach, H):
Vtas = mach2tas(Mach, H)
Vcas = tas2cas(Vtas, H)
return Vcas | [
"def",
"mach2cas",
"(",
"Mach",
",",
"H",
")",
":",
"Vtas",
"=",
"mach2tas",
"(",
"Mach",
",",
"H",
")",
"Vcas",
"=",
"tas2cas",
"(",
"Vtas",
",",
"H",
")",
"return",
"Vcas"
] | Mach number to Calibrated Airspeed | [
"Mach",
"number",
"to",
"Calibrated",
"Airspeed"
] | 8cd5655a04b08171a9ad5f1ffd232b7e0178ea53 | https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/extra/aero.py#L173-L177 |
248,405 | junzis/pyModeS | pyModeS/extra/aero.py | cas2mach | def cas2mach(Vcas, H):
"""Calibrated Airspeed to Mach number"""
Vtas = cas2tas(Vcas, H)
Mach = tas2mach(Vtas, H)
return Mach | python | def cas2mach(Vcas, H):
Vtas = cas2tas(Vcas, H)
Mach = tas2mach(Vtas, H)
return Mach | [
"def",
"cas2mach",
"(",
"Vcas",
",",
"H",
")",
":",
"Vtas",
"=",
"cas2tas",
"(",
"Vcas",
",",
"H",
")",
"Mach",
"=",
"tas2mach",
"(",
"Vtas",
",",
"H",
")",
"return",
"Mach"
] | Calibrated Airspeed to Mach number | [
"Calibrated",
"Airspeed",
"to",
"Mach",
"number"
] | 8cd5655a04b08171a9ad5f1ffd232b7e0178ea53 | https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/extra/aero.py#L180-L184 |
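The six airspeed conversions above (rows 248,400 to 248,405) compose into round trips. A minimal sketch, assuming `pyModeS.extra.aero` exposes these helpers at module level and works in SI units (speeds in m/s, altitude `H` in metres); both the import path and the units are assumptions inferred from the module's `vsound`, `density`, and `atmos` helpers:

```python
from pyModeS.extra import aero

H = 10000.0   # altitude, assumed metres
Vtas = 230.0  # true airspeed, assumed m/s

mach = aero.tas2mach(Vtas, H)  # TAS -> Mach via the local speed of sound
Vcas = aero.tas2cas(Vtas, H)   # TAS -> CAS (tas2cas is referenced by mach2cas above)

# Each converter pair should round-trip back to the original TAS.
assert abs(aero.mach2tas(mach, H) - Vtas) < 1e-6
assert abs(aero.cas2tas(Vcas, H) - Vtas) < 1e-6
```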
248,406 | agusmakmun/django-markdown-editor | martor/views.py | markdown_search_user | def markdown_search_user(request):
"""
Json usernames of the users registered & active.
url(method=get):
/martor/search-user/?username={username}
Response:
error:
- `status` is status code (204)
- `error` is error message.
success:
- `status` is status code (200)
- `data` is list dict of usernames.
{ 'status': 200,
'data': [
{'username': 'john'},
{'username': 'albert'}]
}
"""
data = {}
username = request.GET.get('username')
if username is not None \
and username != '' \
and ' ' not in username:
users = User.objects.filter(
Q(username__icontains=username)
).filter(is_active=True)
if users.exists():
data.update({
'status': 200,
'data': [{'username': u.username} for u in users]
})
return HttpResponse(
json.dumps(data, cls=LazyEncoder),
content_type='application/json')
data.update({
'status': 204,
'error': _('No users registered as `%(username)s` '
'or user is unactived.') % {'username': username}
})
else:
data.update({
'status': 204,
'error': _('Validation Failed for field `username`')
})
return HttpResponse(
json.dumps(data, cls=LazyEncoder),
content_type='application/json') | python | def markdown_search_user(request):
data = {}
username = request.GET.get('username')
if username is not None \
and username != '' \
and ' ' not in username:
users = User.objects.filter(
Q(username__icontains=username)
).filter(is_active=True)
if users.exists():
data.update({
'status': 200,
'data': [{'username': u.username} for u in users]
})
return HttpResponse(
json.dumps(data, cls=LazyEncoder),
content_type='application/json')
data.update({
'status': 204,
'error': _('No users registered as `%(username)s` '
'or user is unactived.') % {'username': username}
})
else:
data.update({
'status': 204,
'error': _('Validation Failed for field `username`')
})
return HttpResponse(
json.dumps(data, cls=LazyEncoder),
content_type='application/json') | [
"def",
"markdown_search_user",
"(",
"request",
")",
":",
"data",
"=",
"{",
"}",
"username",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'username'",
")",
"if",
"username",
"is",
"not",
"None",
"and",
"username",
"!=",
"''",
"and",
"' '",
"not",
"in",
"username",
":",
"users",
"=",
"User",
".",
"objects",
".",
"filter",
"(",
"Q",
"(",
"username__icontains",
"=",
"username",
")",
")",
".",
"filter",
"(",
"is_active",
"=",
"True",
")",
"if",
"users",
".",
"exists",
"(",
")",
":",
"data",
".",
"update",
"(",
"{",
"'status'",
":",
"200",
",",
"'data'",
":",
"[",
"{",
"'username'",
":",
"u",
".",
"username",
"}",
"for",
"u",
"in",
"users",
"]",
"}",
")",
"return",
"HttpResponse",
"(",
"json",
".",
"dumps",
"(",
"data",
",",
"cls",
"=",
"LazyEncoder",
")",
",",
"content_type",
"=",
"'application/json'",
")",
"data",
".",
"update",
"(",
"{",
"'status'",
":",
"204",
",",
"'error'",
":",
"_",
"(",
"'No users registered as `%(username)s` '",
"'or user is unactived.'",
")",
"%",
"{",
"'username'",
":",
"username",
"}",
"}",
")",
"else",
":",
"data",
".",
"update",
"(",
"{",
"'status'",
":",
"204",
",",
"'error'",
":",
"_",
"(",
"'Validation Failed for field `username`'",
")",
"}",
")",
"return",
"HttpResponse",
"(",
"json",
".",
"dumps",
"(",
"data",
",",
"cls",
"=",
"LazyEncoder",
")",
",",
"content_type",
"=",
"'application/json'",
")"
] | Json usernames of the users registered & active.
url(method=get):
/martor/search-user/?username={username}
Response:
error:
- `status` is status code (204)
- `error` is error message.
success:
- `status` is status code (200)
- `data` is list dict of usernames.
{ 'status': 200,
'data': [
{'username': 'john'},
{'username': 'albert'}]
} | [
"Json",
"usernames",
"of",
"the",
"users",
"registered",
"&",
"actived",
"."
] | 5ef9b947d247d05dfd1d14dff44c5a6e83372985 | https://github.com/agusmakmun/django-markdown-editor/blob/5ef9b947d247d05dfd1d14dff44c5a6e83372985/martor/views.py#L37-L85 |
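The view in row 248,406 answers GET requests with JSON. A sketch of exercising it from a Django test (e.g. inside a `django.test.TestCase` method, where a test database is available); the `/martor/search-user/` path comes from the docstring above, while the fixture user is hypothetical:

```python
import json

from django.contrib.auth.models import User
from django.test import Client

# Hypothetical fixture: one active user whose name matches the query.
User.objects.create_user(username="john", password="secret")

response = Client().get("/martor/search-user/", {"username": "jo"})
payload = json.loads(response.content)
assert payload == {"status": 200, "data": [{"username": "john"}]}
```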
248,407 | agusmakmun/django-markdown-editor | martor/extensions/mention.py | MentionPattern.handleMatch | def handleMatch(self, m):
username = self.unescape(m.group(2))
"""Makesure `username` is registered and actived."""
if MARTOR_ENABLE_CONFIGS['mention'] == 'true':
if username in [u.username for u in User.objects.exclude(is_active=False)]:
url = '{0}{1}/'.format(MARTOR_MARKDOWN_BASE_MENTION_URL, username)
el = markdown.util.etree.Element('a')
el.set('href', url)
el.set('class', 'direct-mention-link')
el.text = markdown.util.AtomicString('@' + username)
return el | python | def handleMatch(self, m):
username = self.unescape(m.group(2))
if MARTOR_ENABLE_CONFIGS['mention'] == 'true':
if username in [u.username for u in User.objects.exclude(is_active=False)]:
url = '{0}{1}/'.format(MARTOR_MARKDOWN_BASE_MENTION_URL, username)
el = markdown.util.etree.Element('a')
el.set('href', url)
el.set('class', 'direct-mention-link')
el.text = markdown.util.AtomicString('@' + username)
return el | [
"def",
"handleMatch",
"(",
"self",
",",
"m",
")",
":",
"username",
"=",
"self",
".",
"unescape",
"(",
"m",
".",
"group",
"(",
"2",
")",
")",
"if",
"MARTOR_ENABLE_CONFIGS",
"[",
"'mention'",
"]",
"==",
"'true'",
":",
"if",
"username",
"in",
"[",
"u",
".",
"username",
"for",
"u",
"in",
"User",
".",
"objects",
".",
"exclude",
"(",
"is_active",
"=",
"False",
")",
"]",
":",
"url",
"=",
"'{0}{1}/'",
".",
"format",
"(",
"MARTOR_MARKDOWN_BASE_MENTION_URL",
",",
"username",
")",
"el",
"=",
"markdown",
".",
"util",
".",
"etree",
".",
"Element",
"(",
"'a'",
")",
"el",
".",
"set",
"(",
"'href'",
",",
"url",
")",
"el",
".",
"set",
"(",
"'class'",
",",
"'direct-mention-link'",
")",
"el",
".",
"text",
"=",
"markdown",
".",
"util",
".",
"AtomicString",
"(",
"'@'",
"+",
"username",
")",
"return",
"el"
] | Make sure `username` is registered and active. | [
"Makesure",
"username",
"is",
"registered",
"and",
"actived",
"."
] | 5ef9b947d247d05dfd1d14dff44c5a6e83372985 | https://github.com/agusmakmun/django-markdown-editor/blob/5ef9b947d247d05dfd1d14dff44c5a6e83372985/martor/extensions/mention.py#L24-L35 |
248,408 | agusmakmun/django-markdown-editor | martor/utils.py | markdownify | def markdownify(markdown_content):
"""
Render the markdown content to HTML.
Basic:
>>> from martor.utils import markdownify
>>> content = ""
>>> markdownify(content)
'<p><img alt="awesome" src="http://i.imgur.com/hvguiSn.jpg" /></p>'
>>>
"""
try:
return markdown.markdown(
markdown_content,
safe_mode=MARTOR_MARKDOWN_SAFE_MODE,
extensions=MARTOR_MARKDOWN_EXTENSIONS,
extension_configs=MARTOR_MARKDOWN_EXTENSION_CONFIGS
)
except Exception:
raise VersionNotCompatible("The markdown isn't compatible, please reinstall "
"your python markdown into Markdown>=3.0") | python | def markdownify(markdown_content):
try:
return markdown.markdown(
markdown_content,
safe_mode=MARTOR_MARKDOWN_SAFE_MODE,
extensions=MARTOR_MARKDOWN_EXTENSIONS,
extension_configs=MARTOR_MARKDOWN_EXTENSION_CONFIGS
)
except Exception:
raise VersionNotCompatible("The markdown isn't compatible, please reinstall "
"your python markdown into Markdown>=3.0") | [
"def",
"markdownify",
"(",
"markdown_content",
")",
":",
"try",
":",
"return",
"markdown",
".",
"markdown",
"(",
"markdown_content",
",",
"safe_mode",
"=",
"MARTOR_MARKDOWN_SAFE_MODE",
",",
"extensions",
"=",
"MARTOR_MARKDOWN_EXTENSIONS",
",",
"extension_configs",
"=",
"MARTOR_MARKDOWN_EXTENSION_CONFIGS",
")",
"except",
"Exception",
":",
"raise",
"VersionNotCompatible",
"(",
"\"The markdown isn't compatible, please reinstall \"",
"\"your python markdown into Markdown>=3.0\"",
")"
] | Render the markdown content to HTML.
Basic:
>>> from martor.utils import markdownify
>>> content = ""
>>> markdownify(content)
'<p><img alt="awesome" src="http://i.imgur.com/hvguiSn.jpg" /></p>'
>>> | [
"Render",
"the",
"markdown",
"content",
"to",
"HTML",
"."
] | 5ef9b947d247d05dfd1d14dff44c5a6e83372985 | https://github.com/agusmakmun/django-markdown-editor/blob/5ef9b947d247d05dfd1d14dff44c5a6e83372985/martor/utils.py#L17-L37 |
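A short usage sketch for `markdownify`; the rendered HTML depends on the configured `MARTOR_MARKDOWN_EXTENSIONS`, so the output shown in the comment is indicative only:

```python
from martor.utils import markdownify

html = markdownify("**bold** and [a link](https://example.com)")
# With typical extension settings, roughly:
# '<p><strong>bold</strong> and <a href="https://example.com">a link</a></p>'
print(html)
```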
248,409 | APSL/puput | puput/urls.py | get_entry_url | def get_entry_url(entry, blog_page, root_page):
"""
Get the entry url given an entry page and a blog page instance.
It will use one url or another depending on whether blog_page is the root page.
"""
if root_page == blog_page:
return reverse('entry_page_serve', kwargs={
'year': entry.date.strftime('%Y'),
'month': entry.date.strftime('%m'),
'day': entry.date.strftime('%d'),
'slug': entry.slug
})
else:
# The method get_url_parts provides a tuple with a custom URL routing
# scheme. In the last position it finds the subdomain of the blog, which
# it is used to construct the entry url.
# Using the stripped subdomain it allows Puput to generate the urls for
# every sitemap level
blog_path = strip_prefix_and_ending_slash(blog_page.specific.last_url_part)
return reverse('entry_page_serve_slug', kwargs={
'blog_path': blog_path,
'year': entry.date.strftime('%Y'),
'month': entry.date.strftime('%m'),
'day': entry.date.strftime('%d'),
'slug': entry.slug
}) | python | def get_entry_url(entry, blog_page, root_page):
if root_page == blog_page:
return reverse('entry_page_serve', kwargs={
'year': entry.date.strftime('%Y'),
'month': entry.date.strftime('%m'),
'day': entry.date.strftime('%d'),
'slug': entry.slug
})
else:
# The method get_url_parts provides a tuple with a custom URL routing
# scheme. In the last position it finds the subdomain of the blog, which
# is used to construct the entry url.
# Using the stripped subdomain it allows Puput to generate the urls for
# every sitemap level
blog_path = strip_prefix_and_ending_slash(blog_page.specific.last_url_part)
return reverse('entry_page_serve_slug', kwargs={
'blog_path': blog_path,
'year': entry.date.strftime('%Y'),
'month': entry.date.strftime('%m'),
'day': entry.date.strftime('%d'),
'slug': entry.slug
}) | [
"def",
"get_entry_url",
"(",
"entry",
",",
"blog_page",
",",
"root_page",
")",
":",
"if",
"root_page",
"==",
"blog_page",
":",
"return",
"reverse",
"(",
"'entry_page_serve'",
",",
"kwargs",
"=",
"{",
"'year'",
":",
"entry",
".",
"date",
".",
"strftime",
"(",
"'%Y'",
")",
",",
"'month'",
":",
"entry",
".",
"date",
".",
"strftime",
"(",
"'%m'",
")",
",",
"'day'",
":",
"entry",
".",
"date",
".",
"strftime",
"(",
"'%d'",
")",
",",
"'slug'",
":",
"entry",
".",
"slug",
"}",
")",
"else",
":",
"# The method get_url_parts provides a tuple with a custom URL routing",
"# scheme. In the last position it finds the subdomain of the blog, which",
"# it is used to construct the entry url.",
"# Using the stripped subdomain it allows Puput to generate the urls for",
"# every sitemap level",
"blog_path",
"=",
"strip_prefix_and_ending_slash",
"(",
"blog_page",
".",
"specific",
".",
"last_url_part",
")",
"return",
"reverse",
"(",
"'entry_page_serve_slug'",
",",
"kwargs",
"=",
"{",
"'blog_path'",
":",
"blog_path",
",",
"'year'",
":",
"entry",
".",
"date",
".",
"strftime",
"(",
"'%Y'",
")",
",",
"'month'",
":",
"entry",
".",
"date",
".",
"strftime",
"(",
"'%m'",
")",
",",
"'day'",
":",
"entry",
".",
"date",
".",
"strftime",
"(",
"'%d'",
")",
",",
"'slug'",
":",
"entry",
".",
"slug",
"}",
")"
] | Get the entry url given an entry page and a blog page instance.
It will use one url or another depending on whether blog_page is the root page. | [
"Get",
"the",
"entry",
"url",
"given",
"and",
"entry",
"page",
"a",
"blog",
"page",
"instances",
".",
"It",
"will",
"use",
"an",
"url",
"or",
"another",
"depending",
"if",
"blog_page",
"is",
"the",
"root",
"page",
"."
] | c3294f6bb0dd784f881ce9e3089cbf40d0528e47 | https://github.com/APSL/puput/blob/c3294f6bb0dd784f881ce9e3089cbf40d0528e47/puput/urls.py#L63-L88 |
248,410 | APSL/puput | puput/urls.py | get_feeds_url | def get_feeds_url(blog_page, root_page):
"""
Get the feeds urls given a blog page instance.
It will use one url or another depending on whether blog_page is the root page.
"""
if root_page == blog_page:
return reverse('blog_page_feed')
else:
blog_path = strip_prefix_and_ending_slash(blog_page.specific.last_url_part)
return reverse('blog_page_feed_slug', kwargs={'blog_path': blog_path}) | python | def get_feeds_url(blog_page, root_page):
if root_page == blog_page:
return reverse('blog_page_feed')
else:
blog_path = strip_prefix_and_ending_slash(blog_page.specific.last_url_part)
return reverse('blog_page_feed_slug', kwargs={'blog_path': blog_path}) | [
"def",
"get_feeds_url",
"(",
"blog_page",
",",
"root_page",
")",
":",
"if",
"root_page",
"==",
"blog_page",
":",
"return",
"reverse",
"(",
"'blog_page_feed'",
")",
"else",
":",
"blog_path",
"=",
"strip_prefix_and_ending_slash",
"(",
"blog_page",
".",
"specific",
".",
"last_url_part",
")",
"return",
"reverse",
"(",
"'blog_page_feed_slug'",
",",
"kwargs",
"=",
"{",
"'blog_path'",
":",
"blog_path",
"}",
")"
] | Get the feeds urls given a blog page instance.
It will use one url or another depending on whether blog_page is the root page. | [
"Get",
"the",
"feeds",
"urls",
"a",
"blog",
"page",
"instance",
".",
"It",
"will",
"use",
"an",
"url",
"or",
"another",
"depending",
"if",
"blog_page",
"is",
"the",
"root",
"page",
"."
] | c3294f6bb0dd784f881ce9e3089cbf40d0528e47 | https://github.com/APSL/puput/blob/c3294f6bb0dd784f881ce9e3089cbf40d0528e47/puput/urls.py#L91-L100 |
248,411 | cs50/check50 | check50/__main__.py | install_dependencies | def install_dependencies(dependencies, verbose=False):
"""Install all packages in dependency list via pip."""
if not dependencies:
return
stdout = stderr = None if verbose else subprocess.DEVNULL
with tempfile.TemporaryDirectory() as req_dir:
req_file = Path(req_dir) / "requirements.txt"
with open(req_file, "w") as f:
for dependency in dependencies:
f.write(f"{dependency}\n")
pip = ["python3", "-m", "pip", "install", "-r", req_file]
# Unless we are in a virtualenv, we need --user
if sys.base_prefix == sys.prefix and not hasattr(sys, "real_prefix"):
pip.append("--user")
try:
subprocess.check_call(pip, stdout=stdout, stderr=stderr)
except subprocess.CalledProcessError:
raise Error(_("failed to install dependencies"))
# Reload sys.path, to find recently installed packages
importlib.reload(site) | python | def install_dependencies(dependencies, verbose=False):
if not dependencies:
return
stdout = stderr = None if verbose else subprocess.DEVNULL
with tempfile.TemporaryDirectory() as req_dir:
req_file = Path(req_dir) / "requirements.txt"
with open(req_file, "w") as f:
for dependency in dependencies:
f.write(f"{dependency}\n")
pip = ["python3", "-m", "pip", "install", "-r", req_file]
# Unless we are in a virtualenv, we need --user
if sys.base_prefix == sys.prefix and not hasattr(sys, "real_prefix"):
pip.append("--user")
try:
subprocess.check_call(pip, stdout=stdout, stderr=stderr)
except subprocess.CalledProcessError:
raise Error(_("failed to install dependencies"))
# Reload sys.path, to find recently installed packages
importlib.reload(site) | [
"def",
"install_dependencies",
"(",
"dependencies",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"not",
"dependencies",
":",
"return",
"stdout",
"=",
"stderr",
"=",
"None",
"if",
"verbose",
"else",
"subprocess",
".",
"DEVNULL",
"with",
"tempfile",
".",
"TemporaryDirectory",
"(",
")",
"as",
"req_dir",
":",
"req_file",
"=",
"Path",
"(",
"req_dir",
")",
"/",
"\"requirements.txt\"",
"with",
"open",
"(",
"req_file",
",",
"\"w\"",
")",
"as",
"f",
":",
"for",
"dependency",
"in",
"dependencies",
":",
"f",
".",
"write",
"(",
"f\"{dependency}\\n\"",
")",
"pip",
"=",
"[",
"\"python3\"",
",",
"\"-m\"",
",",
"\"pip\"",
",",
"\"install\"",
",",
"\"-r\"",
",",
"req_file",
"]",
"# Unless we are in a virtualenv, we need --user",
"if",
"sys",
".",
"base_prefix",
"==",
"sys",
".",
"prefix",
"and",
"not",
"hasattr",
"(",
"sys",
",",
"\"real_prefix\"",
")",
":",
"pip",
".",
"append",
"(",
"\"--user\"",
")",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"pip",
",",
"stdout",
"=",
"stdout",
",",
"stderr",
"=",
"stderr",
")",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"raise",
"Error",
"(",
"_",
"(",
"\"failed to install dependencies\"",
")",
")",
"# Reload sys.path, to find recently installed packages",
"importlib",
".",
"reload",
"(",
"site",
")"
] | Install all packages in dependency list via pip. | [
"Install",
"all",
"packages",
"in",
"dependency",
"list",
"via",
"pip",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/__main__.py#L108-L132 |
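A sketch of calling `install_dependencies` directly; the dependency list is hypothetical and would normally come from a check's configuration:

```python
# No-op: an empty (or falsy) dependency list returns immediately.
install_dependencies([])

# Hypothetical list: writes a temporary requirements.txt, then runs
# `python3 -m pip install -r ...`, appending --user outside a virtualenv.
install_dependencies(["requests>=2.0", "beautifulsoup4"], verbose=True)
```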
248,412 | cs50/check50 | check50/__main__.py | install_translations | def install_translations(config):
"""Add check translations according to ``config`` as a fallback to existing translations"""
if not config:
return
from . import _translation
checks_translation = gettext.translation(domain=config["domain"],
localedir=internal.check_dir / config["localedir"],
fallback=True)
_translation.add_fallback(checks_translation) | python | def install_translations(config):
if not config:
return
from . import _translation
checks_translation = gettext.translation(domain=config["domain"],
localedir=internal.check_dir / config["localedir"],
fallback=True)
_translation.add_fallback(checks_translation) | [
"def",
"install_translations",
"(",
"config",
")",
":",
"if",
"not",
"config",
":",
"return",
"from",
".",
"import",
"_translation",
"checks_translation",
"=",
"gettext",
".",
"translation",
"(",
"domain",
"=",
"config",
"[",
"\"domain\"",
"]",
",",
"localedir",
"=",
"internal",
".",
"check_dir",
"/",
"config",
"[",
"\"localedir\"",
"]",
",",
"fallback",
"=",
"True",
")",
"_translation",
".",
"add_fallback",
"(",
"checks_translation",
")"
] | Add check translations according to ``config`` as a fallback to existing translations | [
"Add",
"check",
"translations",
"according",
"to",
"config",
"as",
"a",
"fallback",
"to",
"existing",
"translations"
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/__main__.py#L134-L144 |
248,413 | cs50/check50 | check50/api.py | hash | def hash(file):
"""
Hashes file using SHA-256.
:param file: name of file to be hashed
:type file: str
:rtype: str
:raises check50.Failure: if ``file`` does not exist
"""
exists(file)
log(_("hashing {}...").format(file))
# https://stackoverflow.com/a/22058673
with open(file, "rb") as f:
sha256 = hashlib.sha256()
for block in iter(lambda: f.read(65536), b""):
sha256.update(block)
return sha256.hexdigest() | python | def hash(file):
exists(file)
log(_("hashing {}...").format(file))
# https://stackoverflow.com/a/22058673
with open(file, "rb") as f:
sha256 = hashlib.sha256()
for block in iter(lambda: f.read(65536), b""):
sha256.update(block)
return sha256.hexdigest() | [
"def",
"hash",
"(",
"file",
")",
":",
"exists",
"(",
"file",
")",
"log",
"(",
"_",
"(",
"\"hashing {}...\"",
")",
".",
"format",
"(",
"file",
")",
")",
"# https://stackoverflow.com/a/22058673",
"with",
"open",
"(",
"file",
",",
"\"rb\"",
")",
"as",
"f",
":",
"sha256",
"=",
"hashlib",
".",
"sha256",
"(",
")",
"for",
"block",
"in",
"iter",
"(",
"lambda",
":",
"f",
".",
"read",
"(",
"65536",
")",
",",
"b\"\"",
")",
":",
"sha256",
".",
"update",
"(",
"block",
")",
"return",
"sha256",
".",
"hexdigest",
"(",
")"
] | Hashes file using SHA-256.
:param file: name of file to be hashed
:type file: str
:rtype: str
:raises check50.Failure: if ``file`` does not exist | [
"Hashes",
"file",
"using",
"SHA",
"-",
"256",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/api.py#L67-L86 |
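A sketch of `check50.hash` pinning a file's contents inside a check; `@check50.check()` is check50's usual check decorator, and the digest shown is a placeholder, not a real value:

```python
import check50

EXPECTED = "placeholder-sha256-digest"  # a real check would pin the actual digest


@check50.check()
def data_unmodified():
    """data.csv has not been modified"""
    if check50.hash("data.csv") != EXPECTED:
        raise check50.Failure("data.csv has been modified")
```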
248,414 | cs50/check50 | check50/api.py | exists | def exists(*paths):
"""
Assert that all given paths exist.
:param paths: files/directories to be checked for existence
:raises check50.Failure: if any ``path in paths`` does not exist
Example usage::
check50.exists("foo.c", "foo.h")
"""
for path in paths:
log(_("checking that {} exists...").format(path))
if not os.path.exists(path):
raise Failure(_("{} not found").format(path)) | python | def exists(*paths):
for path in paths:
log(_("checking that {} exists...").format(path))
if not os.path.exists(path):
raise Failure(_("{} not found").format(path)) | [
"def",
"exists",
"(",
"*",
"paths",
")",
":",
"for",
"path",
"in",
"paths",
":",
"log",
"(",
"_",
"(",
"\"checking that {} exists...\"",
")",
".",
"format",
"(",
"path",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"raise",
"Failure",
"(",
"_",
"(",
"\"{} not found\"",
")",
".",
"format",
"(",
"path",
")",
")"
] | Assert that all given paths exist.
:param paths: files/directories to be checked for existence
:raises check50.Failure: if any ``path in paths`` does not exist
Example usage::
check50.exists("foo.c", "foo.h") | [
"Assert",
"that",
"all",
"given",
"paths",
"exist",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/api.py#L89-L104 |
248,415 | cs50/check50 | check50/api.py | import_checks | def import_checks(path):
"""
Import checks module given relative path.
:param path: relative path from which to import checks module
:type path: str
:returns: the imported module
:raises FileNotFoundError: if ``path / .check50.yaml`` does not exist
:raises yaml.YAMLError: if ``path / .check50.yaml`` is not a valid YAML file
This function is particularly useful when a set of checks logically extends
another, as is often the case in CS50's own problems that have a "less comfy"
and "more comfy" version. The "more comfy" version can include all of the
"less comfy" checks like so::
less = check50.import_checks("../less")
from less import *
.. note::
the ``__name__`` of the imported module is given by the basename
of the specified path (``less`` in the above example).
"""
dir = internal.check_dir / path
file = internal.load_config(dir)["checks"]
mod = internal.import_file(dir.name, (dir / file).resolve())
sys.modules[dir.name] = mod
return mod | python | def import_checks(path):
dir = internal.check_dir / path
file = internal.load_config(dir)["checks"]
mod = internal.import_file(dir.name, (dir / file).resolve())
sys.modules[dir.name] = mod
return mod | [
"def",
"import_checks",
"(",
"path",
")",
":",
"dir",
"=",
"internal",
".",
"check_dir",
"/",
"path",
"file",
"=",
"internal",
".",
"load_config",
"(",
"dir",
")",
"[",
"\"checks\"",
"]",
"mod",
"=",
"internal",
".",
"import_file",
"(",
"dir",
".",
"name",
",",
"(",
"dir",
"/",
"file",
")",
".",
"resolve",
"(",
")",
")",
"sys",
".",
"modules",
"[",
"dir",
".",
"name",
"]",
"=",
"mod",
"return",
"mod"
] | Import checks module given relative path.
:param path: relative path from which to import checks module
:type path: str
:returns: the imported module
:raises FileNotFoundError: if ``path / .check50.yaml`` does not exist
:raises yaml.YAMLError: if ``path / .check50.yaml`` is not a valid YAML file
This function is particularly useful when a set of checks logically extends
another, as is often the case in CS50's own problems that have a "less comfy"
and "more comfy" version. The "more comfy" version can include all of the
"less comfy" checks like so::
less = check50.import_checks("../less")
from less import *
.. note::
the ``__name__`` of the imported module is given by the basename
of the specified path (``less`` in the above example). | [
"Import",
"checks",
"module",
"given",
"relative",
"path",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/api.py#L107-L134 |
248,416 | cs50/check50 | check50/api.py | _raw | def _raw(s):
"""Get raw representation of s, truncating if too long."""
if isinstance(s, list):
s = "\n".join(_raw(item) for item in s)
if s == EOF:
return "EOF"
s = repr(s) # Get raw representation of string
s = s[1:-1] # Strip away quotation marks
if len(s) > 15:
s = s[:15] + "..." # Truncate if too long
return s | python | def _raw(s):
if isinstance(s, list):
s = "\n".join(_raw(item) for item in s)
if s == EOF:
return "EOF"
s = repr(s) # Get raw representation of string
s = s[1:-1] # Strip away quotation marks
if len(s) > 15:
s = s[:15] + "..." # Truncate if too long
return s | [
"def",
"_raw",
"(",
"s",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"list",
")",
":",
"s",
"=",
"\"\\n\"",
".",
"join",
"(",
"_raw",
"(",
"item",
")",
"for",
"item",
"in",
"s",
")",
"if",
"s",
"==",
"EOF",
":",
"return",
"\"EOF\"",
"s",
"=",
"repr",
"(",
"s",
")",
"# Get raw representation of string",
"s",
"=",
"s",
"[",
"1",
":",
"-",
"1",
"]",
"# Strip away quotation marks",
"if",
"len",
"(",
"s",
")",
">",
"15",
":",
"s",
"=",
"s",
"[",
":",
"15",
"]",
"+",
"\"...\"",
"# Truncate if too long",
"return",
"s"
] | Get raw representation of s, truncating if too long. | [
"Get",
"raw",
"representation",
"of",
"s",
"truncating",
"if",
"too",
"long",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/api.py#L400-L413 |
248,417 | cs50/check50 | check50/api.py | _copy | def _copy(src, dst):
"""Copy src to dst, copying recursively if src is a directory."""
try:
shutil.copy(src, dst)
except IsADirectoryError:
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
shutil.copytree(src, dst) | python | def _copy(src, dst):
try:
shutil.copy(src, dst)
except IsADirectoryError:
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
shutil.copytree(src, dst) | [
"def",
"_copy",
"(",
"src",
",",
"dst",
")",
":",
"try",
":",
"shutil",
".",
"copy",
"(",
"src",
",",
"dst",
")",
"except",
"IsADirectoryError",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"dst",
")",
":",
"dst",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dst",
",",
"os",
".",
"path",
".",
"basename",
"(",
"src",
")",
")",
"shutil",
".",
"copytree",
"(",
"src",
",",
"dst",
")"
] | Copy src to dst, copying recursively if src is a directory. | [
"Copy",
"src",
"to",
"dst",
"copying",
"recursively",
"if",
"src",
"is",
"a",
"directory",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/api.py#L416-L423 |
248,418 | cs50/check50 | check50/api.py | run.stdin | def stdin(self, line, prompt=True, timeout=3):
"""
Send line to stdin, optionally expect a prompt.
:param line: line to be sent to stdin
:type line: str
:param prompt: boolean indicating whether a prompt is expected, if True absorbs \
all of stdout before inserting line into stdin and raises \
:class:`check50.Failure` if stdout is empty
:type prompt: bool
:param timeout: maximum number of seconds to wait for prompt
:type timeout: int / float
:raises check50.Failure: if ``prompt`` is set to True and no prompt is given
"""
if line == EOF:
log("sending EOF...")
else:
log(_("sending input {}...").format(line))
if prompt:
try:
self.process.expect(".+", timeout=timeout)
except (TIMEOUT, EOF):
raise Failure(_("expected prompt for input, found none"))
except UnicodeDecodeError:
raise Failure(_("output not valid ASCII text"))
try:
if line == EOF:
self.process.sendeof()
else:
self.process.sendline(line)
except OSError:
pass
return self | python | def stdin(self, line, prompt=True, timeout=3):
if line == EOF:
log("sending EOF...")
else:
log(_("sending input {}...").format(line))
if prompt:
try:
self.process.expect(".+", timeout=timeout)
except (TIMEOUT, EOF):
raise Failure(_("expected prompt for input, found none"))
except UnicodeDecodeError:
raise Failure(_("output not valid ASCII text"))
try:
if line == EOF:
self.process.sendeof()
else:
self.process.sendline(line)
except OSError:
pass
return self | [
"def",
"stdin",
"(",
"self",
",",
"line",
",",
"prompt",
"=",
"True",
",",
"timeout",
"=",
"3",
")",
":",
"if",
"line",
"==",
"EOF",
":",
"log",
"(",
"\"sending EOF...\"",
")",
"else",
":",
"log",
"(",
"_",
"(",
"\"sending input {}...\"",
")",
".",
"format",
"(",
"line",
")",
")",
"if",
"prompt",
":",
"try",
":",
"self",
".",
"process",
".",
"expect",
"(",
"\".+\"",
",",
"timeout",
"=",
"timeout",
")",
"except",
"(",
"TIMEOUT",
",",
"EOF",
")",
":",
"raise",
"Failure",
"(",
"_",
"(",
"\"expected prompt for input, found none\"",
")",
")",
"except",
"UnicodeDecodeError",
":",
"raise",
"Failure",
"(",
"_",
"(",
"\"output not valid ASCII text\"",
")",
")",
"try",
":",
"if",
"line",
"==",
"EOF",
":",
"self",
".",
"process",
".",
"sendeof",
"(",
")",
"else",
":",
"self",
".",
"process",
".",
"sendline",
"(",
"line",
")",
"except",
"OSError",
":",
"pass",
"return",
"self"
] | Send line to stdin, optionally expect a prompt.
:param line: line to be sent to stdin
:type line: str
:param prompt: boolean indicating whether a prompt is expected, if True absorbs \
all of stdout before inserting line into stdin and raises \
:class:`check50.Failure` if stdout is empty
:type prompt: bool
:param timeout: maximum number of seconds to wait for prompt
:type timeout: int / float
:raises check50.Failure: if ``prompt`` is set to True and no prompt is given | [
"Send",
"line",
"to",
"stdin",
"optionally",
"expect",
"a",
"prompt",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/api.py#L165-L199 |
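Because `stdin` returns `self`, interactions chain in the fluent style check50 uses elsewhere in this file (compare the `valgrind` docstring further down). A sketch against a hypothetical `./adder` program that reads two numbers and prints their sum:

```python
import check50

check50.run("./adder").stdin("2").stdin("3").stdout("5").exit(0)
```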
248,419 | cs50/check50 | check50/api.py | run.reject | def reject(self, timeout=1):
"""
Check that the process survives for timeout. Useful for checking whether program is waiting on input.
:param timeout: number of seconds to wait
:type timeout: int / float
:raises check50.Failure: if process ends before ``timeout``
"""
log(_("checking that input was rejected..."))
try:
self._wait(timeout)
except Failure as e:
if not isinstance(e.__cause__, TIMEOUT):
raise
else:
raise Failure(_("expected program to reject input, but it did not"))
return self | python | def reject(self, timeout=1):
log(_("checking that input was rejected..."))
try:
self._wait(timeout)
except Failure as e:
if not isinstance(e.__cause__, TIMEOUT):
raise
else:
raise Failure(_("expected program to reject input, but it did not"))
return self | [
"def",
"reject",
"(",
"self",
",",
"timeout",
"=",
"1",
")",
":",
"log",
"(",
"_",
"(",
"\"checking that input was rejected...\"",
")",
")",
"try",
":",
"self",
".",
"_wait",
"(",
"timeout",
")",
"except",
"Failure",
"as",
"e",
":",
"if",
"not",
"isinstance",
"(",
"e",
".",
"__cause__",
",",
"TIMEOUT",
")",
":",
"raise",
"else",
":",
"raise",
"Failure",
"(",
"_",
"(",
"\"expected program to reject input, but it did not\"",
")",
")",
"return",
"self"
] | Check that the process survives for timeout. Useful for checking whether program is waiting on input.
:param timeout: number of seconds to wait
:type timeout: int / float
:raises check50.Failure: if process ends before ``timeout`` | [
"Check",
"that",
"the",
"process",
"survives",
"for",
"timeout",
".",
"Useful",
"for",
"checking",
"whether",
"program",
"is",
"waiting",
"on",
"input",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/api.py#L269-L286 |
248,420 | cs50/check50 | check50/internal.py | import_file | def import_file(name, path):
"""
Import a file given a raw file path.
:param name: Name of module to be imported
:type name: str
:param path: Path to Python file
:type path: str / Path
"""
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod | python | def import_file(name, path):
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod | [
"def",
"import_file",
"(",
"name",
",",
"path",
")",
":",
"spec",
"=",
"importlib",
".",
"util",
".",
"spec_from_file_location",
"(",
"name",
",",
"path",
")",
"mod",
"=",
"importlib",
".",
"util",
".",
"module_from_spec",
"(",
"spec",
")",
"spec",
".",
"loader",
".",
"exec_module",
"(",
"mod",
")",
"return",
"mod"
] | Import a file given a raw file path.
:param name: Name of module to be imported
:type name: str
:param path: Path to Python file
:type path: str / Path | [
"Import",
"a",
"file",
"given",
"a",
"raw",
"file",
"path",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/internal.py#L108-L120 |
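A minimal sketch of `import_file`; the module name, path, and attribute are hypothetical:

```python
# Load a file as a module without it needing to be on sys.path.
helpers = import_file("helpers", "/tmp/helpers.py")  # hypothetical path
result = helpers.lookup("example")  # use it like any imported module
```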
248,421 | cs50/check50 | check50/c.py | compile | def compile(*files, exe_name=None, cc=CC, **cflags):
"""
Compile C source files.
:param files: filenames to be compiled
:param exe_name: name of resulting executable
:param cc: compiler to use (:data:`check50.c.CC` by default)
:param cflags: additional flags to pass to the compiler
:raises check50.Failure: if compilation failed (i.e., if the compiler returns a non-zero exit status).
:raises RuntimeError: if no filenames are specified
If ``exe_name`` is None, :func:`check50.c.compile` will default to the first
file specified sans the ``.c`` extension::
check50.c.compile("foo.c", "bar.c") # clang foo.c bar.c -o foo -std=c11 -ggdb -lm
Additional CFLAGS may be passed as keyword arguments like so::
check50.c.compile("foo.c", "bar.c", lcs50=True) # clang foo.c bar.c -o foo -std=c11 -ggdb -lm -lcs50
In the same vein, the default CFLAGS may be overriden via keyword arguments::
check50.c.compile("foo.c", "bar.c", std="c99", lm=False) # clang foo.c bar.c -o foo -std=c99 -ggdb
"""
if not files:
raise RuntimeError(_("compile requires at least one file"))
if exe_name is None and files[0].endswith(".c"):
exe_name = Path(files[0]).stem
files = " ".join(files)
flags = CFLAGS.copy()
flags.update(cflags)
flags = " ".join((f"-{flag}" + (f"={value}" if value is not True else "")).replace("_", "-")
for flag, value in flags.items() if value)
out_flag = f" -o {exe_name} " if exe_name is not None else " "
run(f"{cc} {files}{out_flag}{flags}").exit(0) | python | def compile(*files, exe_name=None, cc=CC, **cflags):
if not files:
raise RuntimeError(_("compile requires at least one file"))
if exe_name is None and files[0].endswith(".c"):
exe_name = Path(files[0]).stem
files = " ".join(files)
flags = CFLAGS.copy()
flags.update(cflags)
flags = " ".join((f"-{flag}" + (f"={value}" if value is not True else "")).replace("_", "-")
for flag, value in flags.items() if value)
out_flag = f" -o {exe_name} " if exe_name is not None else " "
run(f"{cc} {files}{out_flag}{flags}").exit(0) | [
"def",
"compile",
"(",
"*",
"files",
",",
"exe_name",
"=",
"None",
",",
"cc",
"=",
"CC",
",",
"*",
"*",
"cflags",
")",
":",
"if",
"not",
"files",
":",
"raise",
"RuntimeError",
"(",
"_",
"(",
"\"compile requires at least one file\"",
")",
")",
"if",
"exe_name",
"is",
"None",
"and",
"files",
"[",
"0",
"]",
".",
"endswith",
"(",
"\".c\"",
")",
":",
"exe_name",
"=",
"Path",
"(",
"files",
"[",
"0",
"]",
")",
".",
"stem",
"files",
"=",
"\" \"",
".",
"join",
"(",
"files",
")",
"flags",
"=",
"CFLAGS",
".",
"copy",
"(",
")",
"flags",
".",
"update",
"(",
"cflags",
")",
"flags",
"=",
"\" \"",
".",
"join",
"(",
"(",
"f\"-{flag}\"",
"+",
"(",
"f\"={value}\"",
"if",
"value",
"is",
"not",
"True",
"else",
"\"\"",
")",
")",
".",
"replace",
"(",
"\"_\"",
",",
"\"-\"",
")",
"for",
"flag",
",",
"value",
"in",
"flags",
".",
"items",
"(",
")",
"if",
"value",
")",
"out_flag",
"=",
"f\" -o {exe_name} \"",
"if",
"exe_name",
"is",
"not",
"None",
"else",
"\" \"",
"run",
"(",
"f\"{cc} {files}{out_flag}{flags}\"",
")",
".",
"exit",
"(",
"0",
")"
] | Compile C source files.
:param files: filenames to be compiled
:param exe_name: name of resulting executable
:param cc: compiler to use (:data:`check50.c.CC` by default)
:param cflags: additional flags to pass to the compiler
:raises check50.Failure: if compilation failed (i.e., if the compiler returns a non-zero exit status).
:raises RuntimeError: if no filenames are specified
If ``exe_name`` is None, :func:`check50.c.compile` will default to the first
file specified sans the ``.c`` extension::
check50.c.compile("foo.c", "bar.c") # clang foo.c bar.c -o foo -std=c11 -ggdb -lm
Additional CFLAGS may be passed as keyword arguments like so::
check50.c.compile("foo.c", "bar.c", lcs50=True) # clang foo.c bar.c -o foo -std=c11 -ggdb -lm -lcs50
In the same vein, the default CFLAGS may be overridden via keyword arguments::
check50.c.compile("foo.c", "bar.c", std="c99", lm=False) # clang foo.c bar.c -o foo -std=c99 -ggdb | [
"Compile",
"C",
"source",
"files",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/c.py#L16-L57 |
248,422 | cs50/check50 | check50/c.py | valgrind | def valgrind(command, env={}):
"""Run a command with valgrind.
:param command: command to be run
:type command: str
:param env: environment in which to run command
:type env: str
:raises check50.Failure: if, at the end of the check, valgrind reports any errors
This function works exactly like :func:`check50.run`, with the additional effect that ``command`` is run through
``valgrind`` and ``valgrind``'s output is automatically reviewed at the end of the check for memory leaks and other
bugs. If ``valgrind`` reports any issues, the check is failed and student-friendly messages are printed to the log.
Example usage::
check50.c.valgrind("./leaky").stdin("foo").stdout("bar").exit(0)
.. note::
It is recommended that the student's code is compiled with the `-ggdb`
flag so that additional information, such as the file and line number at which
the issue was detected can be included in the log as well.
"""
xml_file = tempfile.NamedTemporaryFile()
internal.register.after_check(lambda: _check_valgrind(xml_file))
# Ideally we'd like for this whole command not to be logged.
return run(f"valgrind --show-leak-kinds=all --xml=yes --xml-file={xml_file.name} -- {command}", env=env) | python | def valgrind(command, env={}):
xml_file = tempfile.NamedTemporaryFile()
internal.register.after_check(lambda: _check_valgrind(xml_file))
# Ideally we'd like for this whole command not to be logged.
return run(f"valgrind --show-leak-kinds=all --xml=yes --xml-file={xml_file.name} -- {command}", env=env) | [
"def",
"valgrind",
"(",
"command",
",",
"env",
"=",
"{",
"}",
")",
":",
"xml_file",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
")",
"internal",
".",
"register",
".",
"after_check",
"(",
"lambda",
":",
"_check_valgrind",
"(",
"xml_file",
")",
")",
"# Ideally we'd like for this whole command not to be logged.",
"return",
"run",
"(",
"f\"valgrind --show-leak-kinds=all --xml=yes --xml-file={xml_file.name} -- {command}\"",
",",
"env",
"=",
"env",
")"
] | Run a command with valgrind.
:param command: command to be run
:type command: str
:param env: environment in which to run command
:type env: str
:raises check50.Failure: if, at the end of the check, valgrind reports any errors
This function works exactly like :func:`check50.run`, with the additional effect that ``command`` is run through
``valgrind`` and ``valgrind``'s output is automatically reviewed at the end of the check for memory leaks and other
bugs. If ``valgrind`` reports any issues, the check is failed and student-friendly messages are printed to the log.
Example usage::
check50.c.valgrind("./leaky").stdin("foo").stdout("bar").exit(0)
.. note::
It is recommended that the student's code is compiled with the `-ggdb`
flag so that additional information, such as the file and line number at which
the issue was detected can be included in the log as well. | [
"Run",
"a",
"command",
"with",
"valgrind",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/c.py#L60-L86 |
248,423 | cs50/check50 | check50/c.py | _check_valgrind | def _check_valgrind(xml_file):
"""Log and report any errors encountered by valgrind."""
log(_("checking for valgrind errors..."))
# Load XML file created by valgrind
xml = ET.ElementTree(file=xml_file)
# Ensure that we don't get duplicate error messages.
reported = set()
for error in xml.iterfind("error"):
# Type of error valgrind encountered
kind = error.find("kind").text
# Valgrind's error message
what = error.find("xwhat/text" if kind.startswith("Leak_") else "what").text
# Error message that we will report
msg = ["\t", what]
# Find first stack frame within student's code.
for frame in error.iterfind("stack/frame"):
obj = frame.find("obj")
if obj is not None and internal.run_dir in Path(obj.text).parents:
file, line = frame.find("file"), frame.find("line")
if file is not None and line is not None:
msg.append(f": ({_('file')}: {file.text}, {_('line')}: {line.text})")
break
msg = "".join(msg)
if msg not in reported:
log(msg)
reported.add(msg)
# Only raise exception if we encountered errors.
if reported:
raise Failure(_("valgrind tests failed; rerun with --log for more information.")) | python | def _check_valgrind(xml_file):
log(_("checking for valgrind errors..."))
# Load XML file created by valgrind
xml = ET.ElementTree(file=xml_file)
# Ensure that we don't get duplicate error messages.
reported = set()
for error in xml.iterfind("error"):
# Type of error valgrind encountered
kind = error.find("kind").text
# Valgrind's error message
what = error.find("xwhat/text" if kind.startswith("Leak_") else "what").text
# Error message that we will report
msg = ["\t", what]
# Find first stack frame within student's code.
for frame in error.iterfind("stack/frame"):
obj = frame.find("obj")
if obj is not None and internal.run_dir in Path(obj.text).parents:
file, line = frame.find("file"), frame.find("line")
if file is not None and line is not None:
msg.append(f": ({_('file')}: {file.text}, {_('line')}: {line.text})")
break
msg = "".join(msg)
if msg not in reported:
log(msg)
reported.add(msg)
# Only raise exception if we encountered errors.
if reported:
raise Failure(_("valgrind tests failed; rerun with --log for more information.")) | [
"def",
"_check_valgrind",
"(",
"xml_file",
")",
":",
"log",
"(",
"_",
"(",
"\"checking for valgrind errors...\"",
")",
")",
"# Load XML file created by valgrind",
"xml",
"=",
"ET",
".",
"ElementTree",
"(",
"file",
"=",
"xml_file",
")",
"# Ensure that we don't get duplicate error messages.",
"reported",
"=",
"set",
"(",
")",
"for",
"error",
"in",
"xml",
".",
"iterfind",
"(",
"\"error\"",
")",
":",
"# Type of error valgrind encountered",
"kind",
"=",
"error",
".",
"find",
"(",
"\"kind\"",
")",
".",
"text",
"# Valgrind's error message",
"what",
"=",
"error",
".",
"find",
"(",
"\"xwhat/text\"",
"if",
"kind",
".",
"startswith",
"(",
"\"Leak_\"",
")",
"else",
"\"what\"",
")",
".",
"text",
"# Error message that we will report",
"msg",
"=",
"[",
"\"\\t\"",
",",
"what",
"]",
"# Find first stack frame within student's code.",
"for",
"frame",
"in",
"error",
".",
"iterfind",
"(",
"\"stack/frame\"",
")",
":",
"obj",
"=",
"frame",
".",
"find",
"(",
"\"obj\"",
")",
"if",
"obj",
"is",
"not",
"None",
"and",
"internal",
".",
"run_dir",
"in",
"Path",
"(",
"obj",
".",
"text",
")",
".",
"parents",
":",
"file",
",",
"line",
"=",
"frame",
".",
"find",
"(",
"\"file\"",
")",
",",
"frame",
".",
"find",
"(",
"\"line\"",
")",
"if",
"file",
"is",
"not",
"None",
"and",
"line",
"is",
"not",
"None",
":",
"msg",
".",
"append",
"(",
"f\": ({_('file')}: {file.text}, {_('line')}: {line.text})\"",
")",
"break",
"msg",
"=",
"\"\"",
".",
"join",
"(",
"msg",
")",
"if",
"msg",
"not",
"in",
"reported",
":",
"log",
"(",
"msg",
")",
"reported",
".",
"add",
"(",
"msg",
")",
"# Only raise exception if we encountered errors.",
"if",
"reported",
":",
"raise",
"Failure",
"(",
"_",
"(",
"\"valgrind tests failed; rerun with --log for more information.\"",
")",
")"
] | Log and report any errors encountered by valgrind. | [
"Log",
"and",
"report",
"any",
"errors",
"encountered",
"by",
"valgrind",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/c.py#L89-L124 |
248,424 | cs50/check50 | check50/runner.py | _timeout | def _timeout(seconds):
"""Context manager that runs code block until timeout is reached.
Example usage::
try:
with _timeout(10):
do_stuff()
except Timeout:
print("do_stuff timed out")
"""
def _handle_timeout(*args):
raise Timeout(seconds)
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
yield
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, signal.SIG_DFL) | python | def _timeout(seconds):
def _handle_timeout(*args):
raise Timeout(seconds)
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
yield
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, signal.SIG_DFL) | [
"def",
"_timeout",
"(",
"seconds",
")",
":",
"def",
"_handle_timeout",
"(",
"*",
"args",
")",
":",
"raise",
"Timeout",
"(",
"seconds",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGALRM",
",",
"_handle_timeout",
")",
"signal",
".",
"alarm",
"(",
"seconds",
")",
"try",
":",
"yield",
"finally",
":",
"signal",
".",
"alarm",
"(",
"0",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGALRM",
",",
"signal",
".",
"SIG_DFL",
")"
] | Context manager that runs code block until timeout is reached.
Example usage::
try:
with _timeout(10):
do_stuff()
except Timeout:
print("do_stuff timed out") | [
"Context",
"manager",
"that",
"runs",
"code",
"block",
"until",
"timeout",
"is",
"reached",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/runner.py#L52-L73 |
248,425 | cs50/check50 | check50/runner.py | CheckRunner.run | def run(self, files, working_area):
"""
Run checks concurrently.
Returns a list of CheckResults ordered by declaration order of the checks in the imported module
"""
# Ensure that dictionary is ordered by check declaration order (via self.check_names)
# NOTE: Requires CPython 3.6. If we need to support older versions of Python, replace with OrderedDict.
results = {name: None for name in self.check_names}
checks_root = working_area.parent
with futures.ProcessPoolExecutor() as executor:
# Start all checks that have no dependencies
not_done = set(executor.submit(run_check(name, self.checks_spec, checks_root))
for name, _ in self.child_map[None])
not_passed = []
while not_done:
done, not_done = futures.wait(not_done, return_when=futures.FIRST_COMPLETED)
for future in done:
# Get result from completed check
result, state = future.result()
results[result.name] = result
if result.passed:
# Dispatch dependent checks
for child_name, _ in self.child_map[result.name]:
not_done.add(executor.submit(
run_check(child_name, self.checks_spec, checks_root, state)))
else:
not_passed.append(result.name)
for name in not_passed:
self._skip_children(name, results)
return results.values() | python | def run(self, files, working_area):
# Ensure that dictionary is ordered by check declaration order (via self.check_names)
# NOTE: Requires CPython 3.6. If we need to support older versions of Python, replace with OrderedDict.
results = {name: None for name in self.check_names}
checks_root = working_area.parent
with futures.ProcessPoolExecutor() as executor:
# Start all checks that have no dependencies
not_done = set(executor.submit(run_check(name, self.checks_spec, checks_root))
for name, _ in self.child_map[None])
not_passed = []
while not_done:
done, not_done = futures.wait(not_done, return_when=futures.FIRST_COMPLETED)
for future in done:
# Get result from completed check
result, state = future.result()
results[result.name] = result
if result.passed:
# Dispatch dependent checks
for child_name, _ in self.child_map[result.name]:
not_done.add(executor.submit(
run_check(child_name, self.checks_spec, checks_root, state)))
else:
not_passed.append(result.name)
for name in not_passed:
self._skip_children(name, results)
return results.values() | [
"def",
"run",
"(",
"self",
",",
"files",
",",
"working_area",
")",
":",
"# Ensure that dictionary is ordered by check declaration order (via self.check_names)",
"# NOTE: Requires CPython 3.6. If we need to support older versions of Python, replace with OrderedDict.",
"results",
"=",
"{",
"name",
":",
"None",
"for",
"name",
"in",
"self",
".",
"check_names",
"}",
"checks_root",
"=",
"working_area",
".",
"parent",
"with",
"futures",
".",
"ProcessPoolExecutor",
"(",
")",
"as",
"executor",
":",
"# Start all checks that have no dependencies",
"not_done",
"=",
"set",
"(",
"executor",
".",
"submit",
"(",
"run_check",
"(",
"name",
",",
"self",
".",
"checks_spec",
",",
"checks_root",
")",
")",
"for",
"name",
",",
"_",
"in",
"self",
".",
"child_map",
"[",
"None",
"]",
")",
"not_passed",
"=",
"[",
"]",
"while",
"not_done",
":",
"done",
",",
"not_done",
"=",
"futures",
".",
"wait",
"(",
"not_done",
",",
"return_when",
"=",
"futures",
".",
"FIRST_COMPLETED",
")",
"for",
"future",
"in",
"done",
":",
"# Get result from completed check",
"result",
",",
"state",
"=",
"future",
".",
"result",
"(",
")",
"results",
"[",
"result",
".",
"name",
"]",
"=",
"result",
"if",
"result",
".",
"passed",
":",
"# Dispatch dependent checks",
"for",
"child_name",
",",
"_",
"in",
"self",
".",
"child_map",
"[",
"result",
".",
"name",
"]",
":",
"not_done",
".",
"add",
"(",
"executor",
".",
"submit",
"(",
"run_check",
"(",
"child_name",
",",
"self",
".",
"checks_spec",
",",
"checks_root",
",",
"state",
")",
")",
")",
"else",
":",
"not_passed",
".",
"append",
"(",
"result",
".",
"name",
")",
"for",
"name",
"in",
"not_passed",
":",
"self",
".",
"_skip_children",
"(",
"name",
",",
"results",
")",
"return",
"results",
".",
"values",
"(",
")"
] | Run checks concurrently.
Returns a list of CheckResults ordered by declaration order of the checks in the imported module | [
"Run",
"checks",
"concurrently",
".",
"Returns",
"a",
"list",
"of",
"CheckResults",
"ordered",
"by",
"declaration",
"order",
"of",
"the",
"checks",
"in",
"the",
"imported",
"module"
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/runner.py#L178-L212 |
248,426 | cs50/check50 | check50/py.py | append_code | def append_code(original, codefile):
"""Append the contents of one file to another.
:param original: name of file that will be appended to
:type original: str
:param codefile: name of file that will be appended
:type codefile: str
This function is particularly useful when one wants to replace a function
in student code with their own implementation of one. If two functions are
defined with the same name in Python, the latter definition is taken so overwriting
a function is as simple as writing it to a file and then appending it to the
student's code.
Example usage::
# Include a file containing our own implementation of a lookup function.
check50.include("lookup.py")
# Overwrite the lookup function in helpers.py with our own implementation.
check50.py.append_code("helpers.py", "lookup.py")
"""
with open(codefile) as code, open(original, "a") as o:
o.write("\n")
o.writelines(code) | python | def append_code(original, codefile):
with open(codefile) as code, open(original, "a") as o:
o.write("\n")
o.writelines(code) | [
"def",
"append_code",
"(",
"original",
",",
"codefile",
")",
":",
"with",
"open",
"(",
"codefile",
")",
"as",
"code",
",",
"open",
"(",
"original",
",",
"\"a\"",
")",
"as",
"o",
":",
"o",
".",
"write",
"(",
"\"\\n\"",
")",
"o",
".",
"writelines",
"(",
"code",
")"
] | Append the contents of one file to another.
:param original: name of file that will be appended to
:type original: str
:param codefile: name of file that will be appended
:type codefile: str
This function is particularly useful when one wants to replace a function
in student code with their own implementation of one. If two functions are
defined with the same name in Python, the latter definition is taken so overwriting
a function is as simple as writing it to a file and then appending it to the
student's code.
Example usage::
# Include a file containing our own implementation of a lookup function.
check50.include("lookup.py")
# Overwrite the lookup function in helpers.py with our own implementation.
check50.py.append_code("helpers.py", "lookup.py") | [
"Append",
"the",
"contents",
"of",
"one",
"file",
"to",
"another",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/py.py#L10-L34 |
248,427 | cs50/check50 | check50/py.py | import_ | def import_(path):
"""Import a Python program given a raw file path
:param path: path to python file to be imported
:type path: str
:raises check50.Failure: if ``path`` doesn't exist, or if the Python file at ``path`` throws an exception when imported.
"""
exists(path)
log(_("importing {}...").format(path))
name = Path(path).stem
try:
return internal.import_file(name, path)
except Exception as e:
raise Failure(str(e)) | python | def import_(path):
exists(path)
log(_("importing {}...").format(path))
name = Path(path).stem
try:
return internal.import_file(name, path)
except Exception as e:
raise Failure(str(e)) | [
"def",
"import_",
"(",
"path",
")",
":",
"exists",
"(",
"path",
")",
"log",
"(",
"_",
"(",
"\"importing {}...\"",
")",
".",
"format",
"(",
"path",
")",
")",
"name",
"=",
"Path",
"(",
"path",
")",
".",
"stem",
"try",
":",
"return",
"internal",
".",
"import_file",
"(",
"name",
",",
"path",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Failure",
"(",
"str",
"(",
"e",
")",
")"
] | Import a Python program given a raw file path
:param path: path to python file to be imported
:type path: str
:raises check50.Failure: if ``path`` doesn't exist, or if the Python file at ``path`` throws an exception when imported. | [
"Import",
"a",
"Python",
"program",
"given",
"a",
"raw",
"file",
"path"
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/py.py#L37-L50 |
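`internal.import_file` is not shown in this record; a plausible standard-library equivalent (the real helper may differ)::

    import importlib.util

    def import_file(name, path):
        # Build a module spec from the raw file path, then execute the module.
        spec = importlib.util.spec_from_file_location(name, path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return module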
248,428 | cs50/check50 | check50/py.py | compile | def compile(file):
"""
Compile a Python program into byte code
:param file: file to be compiled
:raises check50.Failure: if compilation fails e.g. if there is a SyntaxError
"""
log(_("compiling {} into byte code...").format(file))
try:
py_compile.compile(file, doraise=True)
except py_compile.PyCompileError as e:
log(_("Exception raised: "))
for line in e.msg.splitlines():
log(line)
raise Failure(_("{} raised while compiling {} (rerun with --log for more details)").format(e.exc_type_name, file)) | python | def compile(file):
log(_("compiling {} into byte code...").format(file))
try:
py_compile.compile(file, doraise=True)
except py_compile.PyCompileError as e:
log(_("Exception raised: "))
for line in e.msg.splitlines():
log(line)
raise Failure(_("{} raised while compiling {} (rerun with --log for more details)").format(e.exc_type_name, file)) | [
"def",
"compile",
"(",
"file",
")",
":",
"log",
"(",
"_",
"(",
"\"compiling {} into byte code...\"",
")",
".",
"format",
"(",
"file",
")",
")",
"try",
":",
"py_compile",
".",
"compile",
"(",
"file",
",",
"doraise",
"=",
"True",
")",
"except",
"py_compile",
".",
"PyCompileError",
"as",
"e",
":",
"log",
"(",
"_",
"(",
"\"Exception raised: \"",
")",
")",
"for",
"line",
"in",
"e",
".",
"msg",
".",
"splitlines",
"(",
")",
":",
"log",
"(",
"line",
")",
"raise",
"Failure",
"(",
"_",
"(",
"\"{} raised while compiling {} (rerun with --log for more details)\"",
")",
".",
"format",
"(",
"e",
".",
"exc_type_name",
",",
"file",
")",
")"
] | Compile a Python program into byte code
:param file: file to be compiled
:raises check50.Failure: if compilation fails e.g. if there is a SyntaxError | [
"Compile",
"a",
"Python",
"program",
"into",
"byte",
"code"
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/py.py#L52-L68 |
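A minimal, runnable demonstration of the `py_compile` behaviour wrapped above; the broken source file is invented for the demo::

    import pathlib
    import py_compile
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        bad = pathlib.Path(tmp) / "bad.py"
        bad.write_text("def broken(:\n")  # deliberate SyntaxError
        try:
            py_compile.compile(str(bad), doraise=True)
        except py_compile.PyCompileError as e:
            print(e.exc_type_name)  # prints "SyntaxError"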
248,429 | cs50/check50 | check50/flask.py | app.get | def get(self, route, data=None, params=None, follow_redirects=True):
"""Send GET request to app.
:param route: route to send request to
:type route: str
:param data: form data to include in request
:type data: dict
:param params: URL parameters to include in request
:param follow_redirects: enable redirection (defaults to ``True``)
:type follow_redirects: bool
:returns: ``self``
:raises check50.Failure: if Flask application throws an uncaught exception
Example usage::
check50.flask.app("application.py").get("/buy", params={"q": "02138"}).content()
"""
return self._send("GET", route, data, params, follow_redirects=follow_redirects) | python | def get(self, route, data=None, params=None, follow_redirects=True):
return self._send("GET", route, data, params, follow_redirects=follow_redirects) | [
"def",
"get",
"(",
"self",
",",
"route",
",",
"data",
"=",
"None",
",",
"params",
"=",
"None",
",",
"follow_redirects",
"=",
"True",
")",
":",
"return",
"self",
".",
"_send",
"(",
"\"GET\"",
",",
"route",
",",
"data",
",",
"params",
",",
"follow_redirects",
"=",
"follow_redirects",
")"
] | Send GET request to app.
:param route: route to send request to
:type route: str
:param data: form data to include in request
:type data: dict
:param params: URL parameters to include in request
:param follow_redirects: enable redirection (defaults to ``True``)
:type follow_redirects: bool
:returns: ``self``
:raises check50.Failure: if Flask application throws an uncaught exception
Example usage::
check50.flask.app("application.py").get("/buy", params={"q": "02138"}).content() | [
"Send",
"GET",
"request",
"to",
"app",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/flask.py#L52-L69 |
248,430 | cs50/check50 | check50/flask.py | app.post | def post(self, route, data=None, params=None, follow_redirects=True):
"""Send POST request to app.
:param route: route to send request to
:type route: str
:param data: form data to include in request
:type data: dict
:param params: URL parameters to include in request
:param follow_redirects: enable redirection (defaults to ``True``)
:type follow_redirects: bool
:raises check50.Failure: if Flask application throws an uncaught exception
Example usage::
check50.flask.app("application.py").post("/buy", data={"symbol": "GOOG", "shares": 10}).status(200)
"""
return self._send("POST", route, data, params, follow_redirects=follow_redirects) | python | def post(self, route, data=None, params=None, follow_redirects=True):
return self._send("POST", route, data, params, follow_redirects=follow_redirects) | [
"def",
"post",
"(",
"self",
",",
"route",
",",
"data",
"=",
"None",
",",
"params",
"=",
"None",
",",
"follow_redirects",
"=",
"True",
")",
":",
"return",
"self",
".",
"_send",
"(",
"\"POST\"",
",",
"route",
",",
"data",
",",
"params",
",",
"follow_redirects",
"=",
"follow_redirects",
")"
] | Send POST request to app.
:param route: route to send request to
:type route: str
:param data: form data to include in request
:type data: dict
:param params: URL parameters to include in request
:param follow_redirects: enable redirection (defaults to ``True``)
:type follow_redirects: bool
:raises check50.Failure: if Flask application throws an uncaught exception
Example usage::
check50.flask.app("application.py").post("/buy", data={"symbol": "GOOG", "shares": 10}).status(200) | [
"Send",
"POST",
"request",
"to",
"app",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/flask.py#L71-L88 |
248,431 | cs50/check50 | check50/flask.py | app.status | def status(self, code=None):
"""Check status code in response returned by application.
If ``code`` is not None, assert that ``code`` is returned by application,
else simply return the status code.
:param code: ``code`` to assert that application returns
:type code: int
Example usage::
check50.flask.app("application.py").status(200)
status = check50.flask.app("application.py").get("/").status()
if status != 200:
raise check50.Failure(f"expected status code 200, but got {status}")
"""
if code is None:
return self.response.status_code
log(_("checking that status code {} is returned...").format(code))
if code != self.response.status_code:
raise Failure(_("expected status code {}, but got {}").format(
code, self.response.status_code))
return self | python | def status(self, code=None):
if code is None:
return self.response.status_code
log(_("checking that status code {} is returned...").format(code))
if code != self.response.status_code:
raise Failure(_("expected status code {}, but got {}").format(
code, self.response.status_code))
return self | [
"def",
"status",
"(",
"self",
",",
"code",
"=",
"None",
")",
":",
"if",
"code",
"is",
"None",
":",
"return",
"self",
".",
"response",
".",
"status_code",
"log",
"(",
"_",
"(",
"\"checking that status code {} is returned...\"",
")",
".",
"format",
"(",
"code",
")",
")",
"if",
"code",
"!=",
"self",
".",
"response",
".",
"status_code",
":",
"raise",
"Failure",
"(",
"_",
"(",
"\"expected status code {}, but got {}\"",
")",
".",
"format",
"(",
"code",
",",
"self",
".",
"response",
".",
"status_code",
")",
")",
"return",
"self"
] | Check status code in response returned by application.
If ``code`` is not None, assert that ``code`` is returned by application,
else simply return the status code.
:param code: ``code`` to assert that application returns
:type code: int
Example usage::
check50.flask.app("application.py").status(200)
status = check50.flask.app("application.py").get("/").status()
if status != 200:
raise check50.Failure(f"expected status code 200, but got {status}") | [
"Check",
"status",
"code",
"in",
"response",
"returned",
"by",
"application",
".",
"If",
"code",
"is",
"not",
"None",
"assert",
"that",
"code",
"is",
"returned",
"by",
"application",
"else",
"simply",
"return",
"the",
"status",
"code",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/flask.py#L90-L114 |
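The flask helpers above wrap Werkzeug's test client. A hedged, self-contained sketch of the same request/assert cycle against an invented app::

    import flask

    app = flask.Flask(__name__)

    @app.route("/")
    def index():
        return "hello"

    # The test client issues requests without running a server.
    response = app.test_client().get("/", follow_redirects=True)
    assert response.status_code == 200
    assert b"hello" in response.data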
248,432 | cs50/check50 | check50/flask.py | app.raw_content | def raw_content(self, output=None, str_output=None):
"""Searches for `output` regex match within content of page, regardless of mimetype."""
return self._search_page(output, str_output, self.response.data, lambda regex, content: regex.search(content.decode())) | python | def raw_content(self, output=None, str_output=None):
return self._search_page(output, str_output, self.response.data, lambda regex, content: regex.search(content.decode())) | [
"def",
"raw_content",
"(",
"self",
",",
"output",
"=",
"None",
",",
"str_output",
"=",
"None",
")",
":",
"return",
"self",
".",
"_search_page",
"(",
"output",
",",
"str_output",
",",
"self",
".",
"response",
".",
"data",
",",
"lambda",
"regex",
",",
"content",
":",
"regex",
".",
"search",
"(",
"content",
".",
"decode",
"(",
")",
")",
")"
] | Searches for `output` regex match within content of page, regardless of mimetype. | [
"Searches",
"for",
"output",
"regex",
"match",
"within",
"content",
"of",
"page",
"regardless",
"of",
"mimetype",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/flask.py#L116-L118 |
248,433 | cs50/check50 | check50/flask.py | app.content | def content(self, output=None, str_output=None, **kwargs):
"""Searches for `output` regex within HTML page. kwargs are passed to BeautifulSoup's find function to filter for tags."""
if self.response.mimetype != "text/html":
raise Failure(_("expected request to return HTML, but it returned {}").format(
self.response.mimetype))
# TODO: Remove once beautiful soup updates to accommodate python 3.7
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
content = BeautifulSoup(self.response.data, "html.parser")
return self._search_page(
output,
str_output,
content,
lambda regex, content: any(regex.search(str(tag)) for tag in content.find_all(**kwargs))) | python | def content(self, output=None, str_output=None, **kwargs):
if self.response.mimetype != "text/html":
raise Failure(_("expected request to return HTML, but it returned {}").format(
self.response.mimetype))
# TODO: Remove once beautiful soup updates to accommodate python 3.7
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
content = BeautifulSoup(self.response.data, "html.parser")
return self._search_page(
output,
str_output,
content,
lambda regex, content: any(regex.search(str(tag)) for tag in content.find_all(**kwargs))) | [
"def",
"content",
"(",
"self",
",",
"output",
"=",
"None",
",",
"str_output",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"response",
".",
"mimetype",
"!=",
"\"text/html\"",
":",
"raise",
"Failure",
"(",
"_",
"(",
"\"expected request to return HTML, but it returned {}\"",
")",
".",
"format",
"(",
"self",
".",
"response",
".",
"mimetype",
")",
")",
"# TODO: Remove once beautiful soup updates to accomodate python 3.7",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"filterwarnings",
"(",
"\"ignore\"",
",",
"category",
"=",
"DeprecationWarning",
")",
"content",
"=",
"BeautifulSoup",
"(",
"self",
".",
"response",
".",
"data",
",",
"\"html.parser\"",
")",
"return",
"self",
".",
"_search_page",
"(",
"output",
",",
"str_output",
",",
"content",
",",
"lambda",
"regex",
",",
"content",
":",
"any",
"(",
"regex",
".",
"search",
"(",
"str",
"(",
"tag",
")",
")",
"for",
"tag",
"in",
"content",
".",
"find_all",
"(",
"*",
"*",
"kwargs",
")",
")",
")"
] | Searches for `output` regex within HTML page. kwargs are passed to BeautifulSoup's find function to filter for tags. | [
"Searches",
"for",
"output",
"regex",
"within",
"HTML",
"page",
".",
"kwargs",
"are",
"passed",
"to",
"BeautifulSoup",
"s",
"find",
"function",
"to",
"filter",
"for",
"tags",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/flask.py#L120-L135 |
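A standalone sketch of the tag-filtered regex search that `content` performs; the HTML snippet and pattern are invented::

    import re

    from bs4 import BeautifulSoup

    html = "<form><input name='q'><input name='other'></form>"
    soup = BeautifulSoup(html, "html.parser")
    regex = re.compile(r'name="q"')
    # find_all(**kwargs) narrows the tags; the regex runs on each tag's markup.
    assert any(regex.search(str(tag)) for tag in soup.find_all("input"))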
248,434 | cs50/check50 | check50/flask.py | app._send | def _send(self, method, route, data, params, **kwargs):
"""Send request of type `method` to `route`."""
route = self._fmt_route(route, params)
log(_("sending {} request to {}").format(method.upper(), route))
try:
self.response = getattr(self._client, method.lower())(route, data=data, **kwargs)
except BaseException as e: # Catch all exceptions thrown by app
log(_("exception raised in application: {}: {}").format(type(e).__name__, e))
raise Failure(_("application raised an exception (rerun with --log for more details)"))
return self | python | def _send(self, method, route, data, params, **kwargs):
route = self._fmt_route(route, params)
log(_("sending {} request to {}").format(method.upper(), route))
try:
self.response = getattr(self._client, method.lower())(route, data=data, **kwargs)
except BaseException as e: # Catch all exceptions thrown by app
log(_("exception raised in application: {}: {}").format(type(e).__name__, e))
raise Failure(_("application raised an exception (rerun with --log for more details)"))
return self | [
"def",
"_send",
"(",
"self",
",",
"method",
",",
"route",
",",
"data",
",",
"params",
",",
"*",
"*",
"kwargs",
")",
":",
"route",
"=",
"self",
".",
"_fmt_route",
"(",
"route",
",",
"params",
")",
"log",
"(",
"_",
"(",
"\"sending {} request to {}\"",
")",
".",
"format",
"(",
"method",
".",
"upper",
"(",
")",
",",
"route",
")",
")",
"try",
":",
"self",
".",
"response",
"=",
"getattr",
"(",
"self",
".",
"_client",
",",
"method",
".",
"lower",
"(",
")",
")",
"(",
"route",
",",
"data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")",
"except",
"BaseException",
"as",
"e",
":",
"# Catch all exceptions thrown by app",
"log",
"(",
"_",
"(",
"\"exception raised in application: {}: {}\"",
")",
".",
"format",
"(",
"type",
"(",
"e",
")",
".",
"__name__",
",",
"e",
")",
")",
"raise",
"Failure",
"(",
"_",
"(",
"\"application raised an exception (rerun with --log for more details)\"",
")",
")",
"return",
"self"
] | Send request of type `method` to `route`. | [
"Send",
"request",
"of",
"type",
"method",
"to",
"route",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/flask.py#L137-L146 |
248,435 | cs50/check50 | check50/simple.py | compile | def compile(checks):
"""Returns compiled check50 checks from simple YAML checks in path."""
out = ["import check50"]
for name, check in checks.items():
out.append(_compile_check(name, check))
return "\n\n".join(out) | python | def compile(checks):
out = ["import check50"]
for name, check in checks.items():
out.append(_compile_check(name, check))
return "\n\n".join(out) | [
"def",
"compile",
"(",
"checks",
")",
":",
"out",
"=",
"[",
"\"import check50\"",
"]",
"for",
"name",
",",
"check",
"in",
"checks",
".",
"items",
"(",
")",
":",
"out",
".",
"append",
"(",
"_compile_check",
"(",
"name",
",",
"check",
")",
")",
"return",
"\"\\n\\n\"",
".",
"join",
"(",
"out",
")"
] | Returns compiled check50 checks from simple YAML checks in path. | [
"Returns",
"compiled",
"check50",
"checks",
"from",
"simple",
"YAML",
"checks",
"in",
"path",
"."
] | 42c1f0c36baa6a24f69742d74551a9ea7a5ceb33 | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/simple.py#L8-L16 |
248,436 | quantopian/trading_calendars | trading_calendars/utils/pandas_utils.py | days_at_time | def days_at_time(days, t, tz, day_offset=0):
"""
Create an index of days at time ``t``, interpreted in timezone ``tz``.
The returned index is localized to UTC.
Parameters
----------
days : DatetimeIndex
An index of dates (represented as midnight).
t : datetime.time
The time to apply as an offset to each day in ``days``.
tz : pytz.timezone
The timezone to use to interpret ``t``.
day_offset : int
The number of days by which to offset ``days``
Examples
--------
In the example below, the times switch from 13:45 to 12:45 UTC because
March 13th is the daylight savings transition for US/Eastern. All the
times are still 8:45 when interpreted in US/Eastern.
>>> import pandas as pd; import datetime; import pprint
>>> dts = pd.date_range('2016-03-12', '2016-03-14')
>>> dts_at_845 = days_at_time(dts, datetime.time(8, 45), 'US/Eastern')
>>> pprint.pprint([str(dt) for dt in dts_at_845])
['2016-03-12 13:45:00+00:00',
'2016-03-13 12:45:00+00:00',
'2016-03-14 12:45:00+00:00']
"""
days = pd.DatetimeIndex(days).tz_localize(None)
if len(days) == 0:
return days.tz_localize(UTC)
# Offset days without tz to avoid timezone issues.
delta = pd.Timedelta(
days=day_offset,
hours=t.hour,
minutes=t.minute,
seconds=t.second,
)
return (days + delta).tz_localize(tz).tz_convert(UTC) | python | def days_at_time(days, t, tz, day_offset=0):
days = pd.DatetimeIndex(days).tz_localize(None)
if len(days) == 0:
return days.tz_localize(UTC)
# Offset days without tz to avoid timezone issues.
delta = pd.Timedelta(
days=day_offset,
hours=t.hour,
minutes=t.minute,
seconds=t.second,
)
return (days + delta).tz_localize(tz).tz_convert(UTC) | [
"def",
"days_at_time",
"(",
"days",
",",
"t",
",",
"tz",
",",
"day_offset",
"=",
"0",
")",
":",
"days",
"=",
"pd",
".",
"DatetimeIndex",
"(",
"days",
")",
".",
"tz_localize",
"(",
"None",
")",
"if",
"len",
"(",
"days",
")",
"==",
"0",
":",
"return",
"days",
".",
"tz_localize",
"(",
"UTC",
")",
"# Offset days without tz to avoid timezone issues.",
"delta",
"=",
"pd",
".",
"Timedelta",
"(",
"days",
"=",
"day_offset",
",",
"hours",
"=",
"t",
".",
"hour",
",",
"minutes",
"=",
"t",
".",
"minute",
",",
"seconds",
"=",
"t",
".",
"second",
",",
")",
"return",
"(",
"days",
"+",
"delta",
")",
".",
"tz_localize",
"(",
"tz",
")",
".",
"tz_convert",
"(",
"UTC",
")"
] | Create an index of days at time ``t``, interpreted in timezone ``tz``.
The returned index is localized to UTC.
Parameters
----------
days : DatetimeIndex
An index of dates (represented as midnight).
t : datetime.time
The time to apply as an offset to each day in ``days``.
tz : pytz.timezone
The timezone to use to interpret ``t``.
day_offset : int
The number of days by which to offset ``days``
Examples
--------
In the example below, the times switch from 13:45 to 12:45 UTC because
March 13th is the daylight savings transition for US/Eastern. All the
times are still 8:45 when interpreted in US/Eastern.
>>> import pandas as pd; import datetime; import pprint
>>> dts = pd.date_range('2016-03-12', '2016-03-14')
>>> dts_at_845 = days_at_time(dts, datetime.time(8, 45), 'US/Eastern')
>>> pprint.pprint([str(dt) for dt in dts_at_845])
['2016-03-12 13:45:00+00:00',
'2016-03-13 12:45:00+00:00',
'2016-03-14 12:45:00+00:00'] | [
"Create",
"an",
"index",
"of",
"days",
"at",
"time",
"t",
"interpreted",
"in",
"timezone",
"tz",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/utils/pandas_utils.py#L6-L48 |
248,437 | quantopian/trading_calendars | trading_calendars/common_holidays.py | weekend_boxing_day | def weekend_boxing_day(start_date=None, end_date=None, observance=None):
"""
If Boxing Day falls on a Saturday, then Monday the 28th is a holiday.
If Boxing Day falls on a Sunday, then Tuesday the 28th is a holiday.
"""
return Holiday(
"Weekend Boxing Day",
month=12,
day=28,
days_of_week=(MONDAY, TUESDAY),
start_date=start_date,
end_date=end_date,
observance=observance,
) | python | def weekend_boxing_day(start_date=None, end_date=None, observance=None):
return Holiday(
"Weekend Boxing Day",
month=12,
day=28,
days_of_week=(MONDAY, TUESDAY),
start_date=start_date,
end_date=end_date,
observance=observance,
) | [
"def",
"weekend_boxing_day",
"(",
"start_date",
"=",
"None",
",",
"end_date",
"=",
"None",
",",
"observance",
"=",
"None",
")",
":",
"return",
"Holiday",
"(",
"\"Weekend Boxing Day\"",
",",
"month",
"=",
"12",
",",
"day",
"=",
"28",
",",
"days_of_week",
"=",
"(",
"MONDAY",
",",
"TUESDAY",
")",
",",
"start_date",
"=",
"start_date",
",",
"end_date",
"=",
"end_date",
",",
"observance",
"=",
"observance",
",",
")"
] | If Boxing Day falls on a Saturday, then Monday the 28th is a holiday.
If Boxing Day falls on a Sunday, then Tuesday the 28th is a holiday. | [
"If",
"boxing",
"day",
"is",
"saturday",
"then",
"Monday",
"28th",
"is",
"a",
"holiday",
"If",
"boxing",
"day",
"is",
"sunday",
"then",
"Tuesday",
"28th",
"is",
"a",
"holiday"
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/common_holidays.py#L261-L274 |
248,438 | quantopian/trading_calendars | trading_calendars/xtks_holidays.py | is_holiday_or_weekend | def is_holiday_or_weekend(holidays, dt):
"""
Given a list of holidays, return whether dt is a holiday
or it is on a weekend.
"""
one_day = timedelta(days=1)
for h in holidays:
if dt in h.dates(dt - one_day, dt + one_day) or \
dt.weekday() in WEEKENDS:
return True
return False | python | def is_holiday_or_weekend(holidays, dt):
one_day = timedelta(days=1)
for h in holidays:
if dt in h.dates(dt - one_day, dt + one_day) or \
dt.weekday() in WEEKENDS:
return True
return False | [
"def",
"is_holiday_or_weekend",
"(",
"holidays",
",",
"dt",
")",
":",
"one_day",
"=",
"timedelta",
"(",
"days",
"=",
"1",
")",
"for",
"h",
"in",
"holidays",
":",
"if",
"dt",
"in",
"h",
".",
"dates",
"(",
"dt",
"-",
"one_day",
",",
"dt",
"+",
"one_day",
")",
"or",
"dt",
".",
"weekday",
"(",
")",
"in",
"WEEKENDS",
":",
"return",
"True",
"return",
"False"
] | Given a list of holidays, return whether dt is a holiday
or it is on a weekend. | [
"Given",
"a",
"list",
"of",
"holidays",
"return",
"whether",
"dt",
"is",
"a",
"holiday",
"or",
"it",
"is",
"on",
"a",
"weekend",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/xtks_holidays.py#L18-L30 |
248,439 | quantopian/trading_calendars | trading_calendars/xtks_holidays.py | next_non_holiday_weekday | def next_non_holiday_weekday(holidays, dt):
"""
If a holiday falls on a Sunday, observe it on the next non-holiday weekday.
Parameters
----------
holidays : list[pd.tseries.holiday.Holiday]
list of holidays
dt : pd.Timestamp
date of holiday.
"""
day_of_week = dt.weekday()
if day_of_week == SUNDAY:
while is_holiday_or_weekend(holidays, dt):
dt += timedelta(1)
return dt | python | def next_non_holiday_weekday(holidays, dt):
day_of_week = dt.weekday()
if day_of_week == SUNDAY:
while is_holiday_or_weekend(holidays, dt):
dt += timedelta(1)
return dt | [
"def",
"next_non_holiday_weekday",
"(",
"holidays",
",",
"dt",
")",
":",
"day_of_week",
"=",
"dt",
".",
"weekday",
"(",
")",
"if",
"day_of_week",
"==",
"SUNDAY",
":",
"while",
"is_holiday_or_weekend",
"(",
"holidays",
",",
"dt",
")",
":",
"dt",
"+=",
"timedelta",
"(",
"1",
")",
"return",
"dt"
] | If a holiday falls on a Sunday, observe it on the next non-holiday weekday.
Parameters
----------
holidays : list[pd.tseries.holiday.Holiday]
list of holidays
dt : pd.Timestamp
date of holiday. | [
"If",
"a",
"holiday",
"falls",
"on",
"a",
"Sunday",
"observe",
"it",
"on",
"the",
"next",
"non",
"-",
"holiday",
"weekday",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/xtks_holidays.py#L33-L50 |
248,440 | quantopian/trading_calendars | trading_calendars/calendar_helpers.py | compute_all_minutes | def compute_all_minutes(opens_in_ns, closes_in_ns):
"""
Given arrays of opens and closes, both in nanoseconds,
return an array of each minute between the opens and closes.
"""
deltas = closes_in_ns - opens_in_ns
# + 1 because we want 390 mins per standard day, not 389
daily_sizes = (deltas // NANOSECONDS_PER_MINUTE) + 1
num_minutes = daily_sizes.sum()
# One allocation for the entire thing. This assumes that each day
# represents a contiguous block of minutes.
pieces = []
for open_, size in zip(opens_in_ns, daily_sizes):
pieces.append(
np.arange(open_,
open_ + size * NANOSECONDS_PER_MINUTE,
NANOSECONDS_PER_MINUTE)
)
out = np.concatenate(pieces).view('datetime64[ns]')
assert len(out) == num_minutes
return out | python | def compute_all_minutes(opens_in_ns, closes_in_ns):
deltas = closes_in_ns - opens_in_ns
# + 1 because we want 390 mins per standard day, not 389
daily_sizes = (deltas // NANOSECONDS_PER_MINUTE) + 1
num_minutes = daily_sizes.sum()
# One allocation for the entire thing. This assumes that each day
# represents a contiguous block of minutes.
pieces = []
for open_, size in zip(opens_in_ns, daily_sizes):
pieces.append(
np.arange(open_,
open_ + size * NANOSECONDS_PER_MINUTE,
NANOSECONDS_PER_MINUTE)
)
out = np.concatenate(pieces).view('datetime64[ns]')
assert len(out) == num_minutes
return out | [
"def",
"compute_all_minutes",
"(",
"opens_in_ns",
",",
"closes_in_ns",
")",
":",
"deltas",
"=",
"closes_in_ns",
"-",
"opens_in_ns",
"# + 1 because we want 390 mins per standard day, not 389",
"daily_sizes",
"=",
"(",
"deltas",
"//",
"NANOSECONDS_PER_MINUTE",
")",
"+",
"1",
"num_minutes",
"=",
"daily_sizes",
".",
"sum",
"(",
")",
"# One allocation for the entire thing. This assumes that each day",
"# represents a contiguous block of minutes.",
"pieces",
"=",
"[",
"]",
"for",
"open_",
",",
"size",
"in",
"zip",
"(",
"opens_in_ns",
",",
"daily_sizes",
")",
":",
"pieces",
".",
"append",
"(",
"np",
".",
"arange",
"(",
"open_",
",",
"open_",
"+",
"size",
"*",
"NANOSECONDS_PER_MINUTE",
",",
"NANOSECONDS_PER_MINUTE",
")",
")",
"out",
"=",
"np",
".",
"concatenate",
"(",
"pieces",
")",
".",
"view",
"(",
"'datetime64[ns]'",
")",
"assert",
"len",
"(",
"out",
")",
"==",
"num_minutes",
"return",
"out"
] | Given arrays of opens and closes, both in nanoseconds,
return an array of each minute between the opens and closes. | [
"Given",
"arrays",
"of",
"opens",
"and",
"closes",
"both",
"in",
"nanoseconds",
"return",
"an",
"array",
"of",
"each",
"minute",
"between",
"the",
"opens",
"and",
"closes",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/calendar_helpers.py#L47-L71 |
248,441 | quantopian/trading_calendars | trading_calendars/calendar_utils.py | TradingCalendarDispatcher.get_calendar | def get_calendar(self, name):
"""
Retrieves an instance of a TradingCalendar whose name is given.
Parameters
----------
name : str
The name of the TradingCalendar to be retrieved.
Returns
-------
calendar : calendars.TradingCalendar
The desired calendar.
"""
canonical_name = self.resolve_alias(name)
try:
return self._calendars[canonical_name]
except KeyError:
# We haven't loaded this calendar yet, so make a new one.
pass
try:
factory = self._calendar_factories[canonical_name]
except KeyError:
# We don't have a factory registered for this name. Barf.
raise InvalidCalendarName(calendar_name=name)
# Cache the calendar for future use.
calendar = self._calendars[canonical_name] = factory()
return calendar | python | def get_calendar(self, name):
canonical_name = self.resolve_alias(name)
try:
return self._calendars[canonical_name]
except KeyError:
# We haven't loaded this calendar yet, so make a new one.
pass
try:
factory = self._calendar_factories[canonical_name]
except KeyError:
# We don't have a factory registered for this name. Barf.
raise InvalidCalendarName(calendar_name=name)
# Cache the calendar for future use.
calendar = self._calendars[canonical_name] = factory()
return calendar | [
"def",
"get_calendar",
"(",
"self",
",",
"name",
")",
":",
"canonical_name",
"=",
"self",
".",
"resolve_alias",
"(",
"name",
")",
"try",
":",
"return",
"self",
".",
"_calendars",
"[",
"canonical_name",
"]",
"except",
"KeyError",
":",
"# We haven't loaded this calendar yet, so make a new one.",
"pass",
"try",
":",
"factory",
"=",
"self",
".",
"_calendar_factories",
"[",
"canonical_name",
"]",
"except",
"KeyError",
":",
"# We don't have a factory registered for this name. Barf.",
"raise",
"InvalidCalendarName",
"(",
"calendar_name",
"=",
"name",
")",
"# Cache the calendar for future use.",
"calendar",
"=",
"self",
".",
"_calendars",
"[",
"canonical_name",
"]",
"=",
"factory",
"(",
")",
"return",
"calendar"
] | Retrieves an instance of a TradingCalendar whose name is given.
Parameters
----------
name : str
The name of the TradingCalendar to be retrieved.
Returns
-------
calendar : calendars.TradingCalendar
The desired calendar. | [
"Retrieves",
"an",
"instance",
"of",
"an",
"TradingCalendar",
"whose",
"name",
"is",
"given",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/calendar_utils.py#L118-L148 |
248,442 | quantopian/trading_calendars | trading_calendars/calendar_utils.py | TradingCalendarDispatcher.register_calendar | def register_calendar(self, name, calendar, force=False):
"""
Registers a calendar for retrieval by the get_calendar method.
Parameters
----------
name: str
The key with which to register this calendar.
calendar: TradingCalendar
The calendar to be registered for retrieval.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
Raises
------
CalendarNameCollision
If a calendar is already registered with the given calendar's name.
"""
if force:
self.deregister_calendar(name)
if self.has_calendar(name):
raise CalendarNameCollision(calendar_name=name)
self._calendars[name] = calendar | python | def register_calendar(self, name, calendar, force=False):
if force:
self.deregister_calendar(name)
if self.has_calendar(name):
raise CalendarNameCollision(calendar_name=name)
self._calendars[name] = calendar | [
"def",
"register_calendar",
"(",
"self",
",",
"name",
",",
"calendar",
",",
"force",
"=",
"False",
")",
":",
"if",
"force",
":",
"self",
".",
"deregister_calendar",
"(",
"name",
")",
"if",
"self",
".",
"has_calendar",
"(",
"name",
")",
":",
"raise",
"CalendarNameCollision",
"(",
"calendar_name",
"=",
"name",
")",
"self",
".",
"_calendars",
"[",
"name",
"]",
"=",
"calendar"
] | Registers a calendar for retrieval by the get_calendar method.
Parameters
----------
name: str
The key with which to register this calendar.
calendar: TradingCalendar
The calendar to be registered for retrieval.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
Raises
------
CalendarNameCollision
If a calendar is already registered with the given calendar's name. | [
"Registers",
"a",
"calendar",
"for",
"retrieval",
"by",
"the",
"get_calendar",
"method",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/calendar_utils.py#L160-L186 |
248,443 | quantopian/trading_calendars | trading_calendars/calendar_utils.py | TradingCalendarDispatcher.register_calendar_type | def register_calendar_type(self, name, calendar_type, force=False):
"""
Registers a calendar by type.
This is useful for registering a new calendar to be lazily instantiated
at some future point in time.
Parameters
----------
name: str
The key with which to register this calendar.
calendar_type: type
The type of the calendar to register.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
Raises
------
CalendarNameCollision
If a calendar is already registered with the given calendar's name.
"""
if force:
self.deregister_calendar(name)
if self.has_calendar(name):
raise CalendarNameCollision(calendar_name=name)
self._calendar_factories[name] = calendar_type | python | def register_calendar_type(self, name, calendar_type, force=False):
if force:
self.deregister_calendar(name)
if self.has_calendar(name):
raise CalendarNameCollision(calendar_name=name)
self._calendar_factories[name] = calendar_type | [
"def",
"register_calendar_type",
"(",
"self",
",",
"name",
",",
"calendar_type",
",",
"force",
"=",
"False",
")",
":",
"if",
"force",
":",
"self",
".",
"deregister_calendar",
"(",
"name",
")",
"if",
"self",
".",
"has_calendar",
"(",
"name",
")",
":",
"raise",
"CalendarNameCollision",
"(",
"calendar_name",
"=",
"name",
")",
"self",
".",
"_calendar_factories",
"[",
"name",
"]",
"=",
"calendar_type"
] | Registers a calendar by type.
This is useful for registering a new calendar to be lazily instantiated
at some future point in time.
Parameters
----------
name: str
The key with which to register this calendar.
calendar_type: type
The type of the calendar to register.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
Raises
------
CalendarNameCollision
If a calendar is already registered with the given calendar's name. | [
"Registers",
"a",
"calendar",
"by",
"type",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/calendar_utils.py#L188-L217 |
248,444 | quantopian/trading_calendars | trading_calendars/calendar_utils.py | TradingCalendarDispatcher.register_calendar_alias | def register_calendar_alias(self, alias, real_name, force=False):
"""
Register an alias for a calendar.
This is useful when multiple exchanges should share a calendar, or when
there are multiple ways to refer to the same exchange.
After calling ``register_calendar_alias('alias', 'real_name')``, subsequent
calls to ``get_calendar('alias')`` will return the same result as
``get_calendar('real_name')``.
Parameters
----------
alias : str
The name to be used to refer to a calendar.
real_name : str
The canonical name of the registered calendar.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
"""
if force:
self.deregister_calendar(alias)
if self.has_calendar(alias):
raise CalendarNameCollision(calendar_name=alias)
self._aliases[alias] = real_name
# Ensure that the new alias doesn't create a cycle, and back it out if
# we did.
try:
self.resolve_alias(alias)
except CyclicCalendarAlias:
del self._aliases[alias]
raise | python | def register_calendar_alias(self, alias, real_name, force=False):
if force:
self.deregister_calendar(alias)
if self.has_calendar(alias):
raise CalendarNameCollision(calendar_name=alias)
self._aliases[alias] = real_name
# Ensure that the new alias doesn't create a cycle, and back it out if
# we did.
try:
self.resolve_alias(alias)
except CyclicCalendarAlias:
del self._aliases[alias]
raise | [
"def",
"register_calendar_alias",
"(",
"self",
",",
"alias",
",",
"real_name",
",",
"force",
"=",
"False",
")",
":",
"if",
"force",
":",
"self",
".",
"deregister_calendar",
"(",
"alias",
")",
"if",
"self",
".",
"has_calendar",
"(",
"alias",
")",
":",
"raise",
"CalendarNameCollision",
"(",
"calendar_name",
"=",
"alias",
")",
"self",
".",
"_aliases",
"[",
"alias",
"]",
"=",
"real_name",
"# Ensure that the new alias doesn't create a cycle, and back it out if",
"# we did.",
"try",
":",
"self",
".",
"resolve_alias",
"(",
"alias",
")",
"except",
"CyclicCalendarAlias",
":",
"del",
"self",
".",
"_aliases",
"[",
"alias",
"]",
"raise"
] | Register an alias for a calendar.
This is useful when multiple exchanges should share a calendar, or when
there are multiple ways to refer to the same exchange.
After calling ``register_calendar_alias('alias', 'real_name')``, subsequent
calls to ``get_calendar('alias')`` will return the same result as
``get_calendar('real_name')``.
Parameters
----------
alias : str
The name to be used to refer to a calendar.
real_name : str
The canonical name of the registered calendar.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False. | [
"Register",
"an",
"alias",
"for",
"a",
"calendar",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/calendar_utils.py#L219-L255 |
248,445 | quantopian/trading_calendars | trading_calendars/calendar_utils.py | TradingCalendarDispatcher.resolve_alias | def resolve_alias(self, name):
"""
Resolve a calendar alias for retrieval.
Parameters
----------
name : str
The name of the requested calendar.
Returns
-------
canonical_name : str
The real name of the calendar to create/return.
"""
seen = []
while name in self._aliases:
seen.append(name)
name = self._aliases[name]
# This is O(N ** 2), but if there's an alias chain longer than 2,
# something strange has happened.
if name in seen:
seen.append(name)
raise CyclicCalendarAlias(
cycle=" -> ".join(repr(k) for k in seen)
)
return name | python | def resolve_alias(self, name):
seen = []
while name in self._aliases:
seen.append(name)
name = self._aliases[name]
# This is O(N ** 2), but if there's an alias chain longer than 2,
# something strange has happened.
if name in seen:
seen.append(name)
raise CyclicCalendarAlias(
cycle=" -> ".join(repr(k) for k in seen)
)
return name | [
"def",
"resolve_alias",
"(",
"self",
",",
"name",
")",
":",
"seen",
"=",
"[",
"]",
"while",
"name",
"in",
"self",
".",
"_aliases",
":",
"seen",
".",
"append",
"(",
"name",
")",
"name",
"=",
"self",
".",
"_aliases",
"[",
"name",
"]",
"# This is O(N ** 2), but if there's an alias chain longer than 2,",
"# something strange has happened.",
"if",
"name",
"in",
"seen",
":",
"seen",
".",
"append",
"(",
"name",
")",
"raise",
"CyclicCalendarAlias",
"(",
"cycle",
"=",
"\" -> \"",
".",
"join",
"(",
"repr",
"(",
"k",
")",
"for",
"k",
"in",
"seen",
")",
")",
"return",
"name"
] | Resolve a calendar alias for retrieval.
Parameters
----------
name : str
The name of the requested calendar.
Returns
-------
canonical_name : str
The real name of the calendar to create/return. | [
"Resolve",
"a",
"calendar",
"alias",
"for",
"retrieval",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/calendar_utils.py#L257-L285 |
248,446 | quantopian/trading_calendars | trading_calendars/calendar_utils.py | TradingCalendarDispatcher.deregister_calendar | def deregister_calendar(self, name):
"""
If a calendar is registered with the given name, it is de-registered.
Parameters
----------
name : str
The name of the calendar to be deregistered.
"""
self._calendars.pop(name, None)
self._calendar_factories.pop(name, None)
self._aliases.pop(name, None) | python | def deregister_calendar(self, name):
self._calendars.pop(name, None)
self._calendar_factories.pop(name, None)
self._aliases.pop(name, None) | [
"def",
"deregister_calendar",
"(",
"self",
",",
"name",
")",
":",
"self",
".",
"_calendars",
".",
"pop",
"(",
"name",
",",
"None",
")",
"self",
".",
"_calendar_factories",
".",
"pop",
"(",
"name",
",",
"None",
")",
"self",
".",
"_aliases",
".",
"pop",
"(",
"name",
",",
"None",
")"
] | If a calendar is registered with the given name, it is de-registered.
Parameters
----------
name : str
The name of the calendar to be deregistered. | [
"If",
"a",
"calendar",
"is",
"registered",
"with",
"the",
"given",
"name",
"it",
"is",
"de",
"-",
"registered",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/calendar_utils.py#L287-L298 |
248,447 | quantopian/trading_calendars | trading_calendars/calendar_utils.py | TradingCalendarDispatcher.clear_calendars | def clear_calendars(self):
"""
Deregisters all current registered calendars
"""
self._calendars.clear()
self._calendar_factories.clear()
self._aliases.clear() | python | def clear_calendars(self):
self._calendars.clear()
self._calendar_factories.clear()
self._aliases.clear() | [
"def",
"clear_calendars",
"(",
"self",
")",
":",
"self",
".",
"_calendars",
".",
"clear",
"(",
")",
"self",
".",
"_calendar_factories",
".",
"clear",
"(",
")",
"self",
".",
"_aliases",
".",
"clear",
"(",
")"
] | Deregisters all current registered calendars | [
"Deregisters",
"all",
"current",
"registered",
"calendars"
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/calendar_utils.py#L300-L306 |
248,448 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | _overwrite_special_dates | def _overwrite_special_dates(midnight_utcs,
opens_or_closes,
special_opens_or_closes):
"""
Overwrite dates in open_or_closes with corresponding dates in
special_opens_or_closes, using midnight_utcs for alignment.
"""
# Short circuit when nothing to apply.
if not len(special_opens_or_closes):
return
len_m, len_oc = len(midnight_utcs), len(opens_or_closes)
if len_m != len_oc:
raise ValueError(
"Found misaligned dates while building calendar.\n"
"Expected midnight_utcs to be the same length as open_or_closes,\n"
"but len(midnight_utcs)=%d, len(open_or_closes)=%d" % len_m, len_oc
)
# Find the array indices corresponding to each special date.
indexer = midnight_utcs.get_indexer(special_opens_or_closes.index)
# -1 indicates that no corresponding entry was found. If any -1s are
# present, then we have special dates that doesn't correspond to any
# trading day.
if -1 in indexer:
bad_dates = list(special_opens_or_closes[indexer == -1])
raise ValueError("Special dates %s are not trading days." % bad_dates)
# NOTE: This is a slightly dirty hack. We're in-place overwriting the
# internal data of an Index, which is conceptually immutable. Since we're
# maintaining sorting, this should be ok, but this is a good place to
# sanity check if things start going haywire with calendar computations.
opens_or_closes.values[indexer] = special_opens_or_closes.values | python | def _overwrite_special_dates(midnight_utcs,
opens_or_closes,
special_opens_or_closes):
# Short circuit when nothing to apply.
if not len(special_opens_or_closes):
return
len_m, len_oc = len(midnight_utcs), len(opens_or_closes)
if len_m != len_oc:
raise ValueError(
"Found misaligned dates while building calendar.\n"
"Expected midnight_utcs to be the same length as open_or_closes,\n"
"but len(midnight_utcs)=%d, len(open_or_closes)=%d" % len_m, len_oc
)
# Find the array indices corresponding to each special date.
indexer = midnight_utcs.get_indexer(special_opens_or_closes.index)
# -1 indicates that no corresponding entry was found. If any -1s are
# present, then we have special dates that doesn't correspond to any
# trading day.
if -1 in indexer:
bad_dates = list(special_opens_or_closes[indexer == -1])
raise ValueError("Special dates %s are not trading days." % bad_dates)
# NOTE: This is a slightly dirty hack. We're in-place overwriting the
# internal data of an Index, which is conceptually immutable. Since we're
# maintaining sorting, this should be ok, but this is a good place to
# sanity check if things start going haywire with calendar computations.
opens_or_closes.values[indexer] = special_opens_or_closes.values | [
"def",
"_overwrite_special_dates",
"(",
"midnight_utcs",
",",
"opens_or_closes",
",",
"special_opens_or_closes",
")",
":",
"# Short circuit when nothing to apply.",
"if",
"not",
"len",
"(",
"special_opens_or_closes",
")",
":",
"return",
"len_m",
",",
"len_oc",
"=",
"len",
"(",
"midnight_utcs",
")",
",",
"len",
"(",
"opens_or_closes",
")",
"if",
"len_m",
"!=",
"len_oc",
":",
"raise",
"ValueError",
"(",
"\"Found misaligned dates while building calendar.\\n\"",
"\"Expected midnight_utcs to be the same length as open_or_closes,\\n\"",
"\"but len(midnight_utcs)=%d, len(open_or_closes)=%d\"",
"%",
"len_m",
",",
"len_oc",
")",
"# Find the array indices corresponding to each special date.",
"indexer",
"=",
"midnight_utcs",
".",
"get_indexer",
"(",
"special_opens_or_closes",
".",
"index",
")",
"# -1 indicates that no corresponding entry was found. If any -1s are",
"# present, then we have special dates that doesn't correspond to any",
"# trading day.",
"if",
"-",
"1",
"in",
"indexer",
":",
"bad_dates",
"=",
"list",
"(",
"special_opens_or_closes",
"[",
"indexer",
"==",
"-",
"1",
"]",
")",
"raise",
"ValueError",
"(",
"\"Special dates %s are not trading days.\"",
"%",
"bad_dates",
")",
"# NOTE: This is a slightly dirty hack. We're in-place overwriting the",
"# internal data of an Index, which is conceptually immutable. Since we're",
"# maintaining sorting, this should be ok, but this is a good place to",
"# sanity check if things start going haywire with calendar computations.",
"opens_or_closes",
".",
"values",
"[",
"indexer",
"]",
"=",
"special_opens_or_closes",
".",
"values"
] | Overwrite dates in open_or_closes with corresponding dates in
special_opens_or_closes, using midnight_utcs for alignment. | [
"Overwrite",
"dates",
"in",
"open_or_closes",
"with",
"corresponding",
"dates",
"in",
"special_opens_or_closes",
"using",
"midnight_utcs",
"for",
"alignment",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L1016-L1049 |
248,449 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.is_open_on_minute | def is_open_on_minute(self, dt):
"""
Given a dt, return whether this exchange is open at the given dt.
Parameters
----------
dt: pd.Timestamp
The dt for which to check if this exchange is open.
Returns
-------
bool
Whether the exchange is open on this dt.
"""
return is_open(self.market_opens_nanos, self.market_closes_nanos,
dt.value) | python | def is_open_on_minute(self, dt):
return is_open(self.market_opens_nanos, self.market_closes_nanos,
dt.value) | [
"def",
"is_open_on_minute",
"(",
"self",
",",
"dt",
")",
":",
"return",
"is_open",
"(",
"self",
".",
"market_opens_nanos",
",",
"self",
".",
"market_closes_nanos",
",",
"dt",
".",
"value",
")"
] | Given a dt, return whether this exchange is open at the given dt.
Parameters
----------
dt: pd.Timestamp
The dt for which to check if this exchange is open.
Returns
-------
bool
Whether the exchange is open on this dt. | [
"Given",
"a",
"dt",
"return",
"whether",
"this",
"exchange",
"is",
"open",
"at",
"the",
"given",
"dt",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L339-L354 |
248,450 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.next_open | def next_open(self, dt):
"""
Given a dt, returns the next open.
If the given dt happens to be a session open, the next session's open
will be returned.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next open.
Returns
-------
pd.Timestamp
The UTC timestamp of the next open.
"""
idx = next_divider_idx(self.market_opens_nanos, dt.value)
return pd.Timestamp(self.market_opens_nanos[idx], tz=UTC) | python | def next_open(self, dt):
idx = next_divider_idx(self.market_opens_nanos, dt.value)
return pd.Timestamp(self.market_opens_nanos[idx], tz=UTC) | [
"def",
"next_open",
"(",
"self",
",",
"dt",
")",
":",
"idx",
"=",
"next_divider_idx",
"(",
"self",
".",
"market_opens_nanos",
",",
"dt",
".",
"value",
")",
"return",
"pd",
".",
"Timestamp",
"(",
"self",
".",
"market_opens_nanos",
"[",
"idx",
"]",
",",
"tz",
"=",
"UTC",
")"
] | Given a dt, returns the next open.
If the given dt happens to be a session open, the next session's open
will be returned.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next open.
Returns
-------
pd.Timestamp
The UTC timestamp of the next open. | [
"Given",
"a",
"dt",
"returns",
"the",
"next",
"open",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L356-L374 |
248,451 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.next_close | def next_close(self, dt):
"""
Given a dt, returns the next close.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next close.
Returns
-------
pd.Timestamp
The UTC timestamp of the next close.
"""
idx = next_divider_idx(self.market_closes_nanos, dt.value)
return pd.Timestamp(self.market_closes_nanos[idx], tz=UTC) | python | def next_close(self, dt):
idx = next_divider_idx(self.market_closes_nanos, dt.value)
return pd.Timestamp(self.market_closes_nanos[idx], tz=UTC) | [
"def",
"next_close",
"(",
"self",
",",
"dt",
")",
":",
"idx",
"=",
"next_divider_idx",
"(",
"self",
".",
"market_closes_nanos",
",",
"dt",
".",
"value",
")",
"return",
"pd",
".",
"Timestamp",
"(",
"self",
".",
"market_closes_nanos",
"[",
"idx",
"]",
",",
"tz",
"=",
"UTC",
")"
] | Given a dt, returns the next close.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next close.
Returns
-------
pd.Timestamp
The UTC timestamp of the next close. | [
"Given",
"a",
"dt",
"returns",
"the",
"next",
"close",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L376-L391 |
248,452 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.previous_open | def previous_open(self, dt):
"""
Given a dt, returns the previous open.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous open.
Returns
-------
pd.Timestamp
The UTC timestamp of the previous open.
"""
idx = previous_divider_idx(self.market_opens_nanos, dt.value)
return pd.Timestamp(self.market_opens_nanos[idx], tz=UTC) | python | def previous_open(self, dt):
idx = previous_divider_idx(self.market_opens_nanos, dt.value)
return pd.Timestamp(self.market_opens_nanos[idx], tz=UTC) | [
"def",
"previous_open",
"(",
"self",
",",
"dt",
")",
":",
"idx",
"=",
"previous_divider_idx",
"(",
"self",
".",
"market_opens_nanos",
",",
"dt",
".",
"value",
")",
"return",
"pd",
".",
"Timestamp",
"(",
"self",
".",
"market_opens_nanos",
"[",
"idx",
"]",
",",
"tz",
"=",
"UTC",
")"
] | Given a dt, returns the previous open.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous open.
Returns
-------
pd.Timestamp
The UTC timestamp of the previous open. | [
"Given",
"a",
"dt",
"returns",
"the",
"previous",
"open",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L393-L408 |
248,453 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.previous_close | def previous_close(self, dt):
"""
Given a dt, returns the previous close.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous close.
Returns
-------
pd.Timestamp
The UTC timestamp of the previous close.
"""
idx = previous_divider_idx(self.market_closes_nanos, dt.value)
return pd.Timestamp(self.market_closes_nanos[idx], tz=UTC) | python | def previous_close(self, dt):
idx = previous_divider_idx(self.market_closes_nanos, dt.value)
return pd.Timestamp(self.market_closes_nanos[idx], tz=UTC) | [
"def",
"previous_close",
"(",
"self",
",",
"dt",
")",
":",
"idx",
"=",
"previous_divider_idx",
"(",
"self",
".",
"market_closes_nanos",
",",
"dt",
".",
"value",
")",
"return",
"pd",
".",
"Timestamp",
"(",
"self",
".",
"market_closes_nanos",
"[",
"idx",
"]",
",",
"tz",
"=",
"UTC",
")"
] | Given a dt, returns the previous close.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous close.
Returns
-------
pd.Timestamp
The UTC timestamp of the previous close. | [
"Given",
"a",
"dt",
"returns",
"the",
"previous",
"close",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L410-L425 |
248,454 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.next_minute | def next_minute(self, dt):
"""
Given a dt, return the next exchange minute. If the given dt is not
an exchange minute, returns the next exchange open.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next exchange minute.
Returns
-------
pd.Timestamp
The next exchange minute.
"""
idx = next_divider_idx(self._trading_minutes_nanos, dt.value)
return self.all_minutes[idx] | python | def next_minute(self, dt):
idx = next_divider_idx(self._trading_minutes_nanos, dt.value)
return self.all_minutes[idx] | [
"def",
"next_minute",
"(",
"self",
",",
"dt",
")",
":",
"idx",
"=",
"next_divider_idx",
"(",
"self",
".",
"_trading_minutes_nanos",
",",
"dt",
".",
"value",
")",
"return",
"self",
".",
"all_minutes",
"[",
"idx",
"]"
] | Given a dt, return the next exchange minute. If the given dt is not
an exchange minute, returns the next exchange open.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next exchange minute.
Returns
-------
pd.Timestamp
The next exchange minute. | [
"Given",
"a",
"dt",
"return",
"the",
"next",
"exchange",
"minute",
".",
"If",
"the",
"given",
"dt",
"is",
"not",
"an",
"exchange",
"minute",
"returns",
"the",
"next",
"exchange",
"open",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L427-L443 |
248,455 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.previous_minute | def previous_minute(self, dt):
"""
Given a dt, return the previous exchange minute.
Raises KeyError if the given timestamp is not an exchange minute.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous exchange minute.
Returns
-------
pd.Timestamp
The previous exchange minute.
"""
idx = previous_divider_idx(self._trading_minutes_nanos, dt.value)
return self.all_minutes[idx] | python | def previous_minute(self, dt):
idx = previous_divider_idx(self._trading_minutes_nanos, dt.value)
return self.all_minutes[idx] | [
"def",
"previous_minute",
"(",
"self",
",",
"dt",
")",
":",
"idx",
"=",
"previous_divider_idx",
"(",
"self",
".",
"_trading_minutes_nanos",
",",
"dt",
".",
"value",
")",
"return",
"self",
".",
"all_minutes",
"[",
"idx",
"]"
] | Given a dt, return the previous exchange minute.
Raises KeyError if the given timestamp is not an exchange minute.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous exchange minute.
Returns
-------
pd.Timestamp
The previous exchange minute. | [
"Given",
"a",
"dt",
"return",
"the",
"previous",
"exchange",
"minute",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L445-L463 |
248,456 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.next_session_label | def next_session_label(self, session_label):
"""
Given a session label, returns the label of the next session.
Parameters
----------
session_label: pd.Timestamp
A session whose next session is desired.
Returns
-------
pd.Timestamp
The next session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the last session in this
calendar.
"""
idx = self.schedule.index.get_loc(session_label)
try:
return self.schedule.index[idx + 1]
except IndexError:
if idx == len(self.schedule.index) - 1:
raise ValueError("There is no next session as this is the end"
" of the exchange calendar.")
else:
raise | python | def next_session_label(self, session_label):
idx = self.schedule.index.get_loc(session_label)
try:
return self.schedule.index[idx + 1]
except IndexError:
if idx == len(self.schedule.index) - 1:
raise ValueError("There is no next session as this is the end"
" of the exchange calendar.")
else:
raise | [
"def",
"next_session_label",
"(",
"self",
",",
"session_label",
")",
":",
"idx",
"=",
"self",
".",
"schedule",
".",
"index",
".",
"get_loc",
"(",
"session_label",
")",
"try",
":",
"return",
"self",
".",
"schedule",
".",
"index",
"[",
"idx",
"+",
"1",
"]",
"except",
"IndexError",
":",
"if",
"idx",
"==",
"len",
"(",
"self",
".",
"schedule",
".",
"index",
")",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"There is no next session as this is the end\"",
"\" of the exchange calendar.\"",
")",
"else",
":",
"raise"
] | Given a session label, returns the label of the next session.
Parameters
----------
session_label: pd.Timestamp
A session whose next session is desired.
Returns
-------
pd.Timestamp
The next session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the last session in this
calendar. | [
"Given",
"a",
"session",
"label",
"returns",
"the",
"label",
"of",
"the",
"next",
"session",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L465-L492 |
248,457 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.previous_session_label | def previous_session_label(self, session_label):
"""
Given a session label, returns the label of the previous session.
Parameters
----------
session_label: pd.Timestamp
A session whose previous session is desired.
Returns
-------
pd.Timestamp
The previous session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the first session in this
calendar.
"""
idx = self.schedule.index.get_loc(session_label)
if idx == 0:
raise ValueError("There is no previous session as this is the"
" beginning of the exchange calendar.")
return self.schedule.index[idx - 1] | python | def previous_session_label(self, session_label):
idx = self.schedule.index.get_loc(session_label)
if idx == 0:
raise ValueError("There is no previous session as this is the"
" beginning of the exchange calendar.")
return self.schedule.index[idx - 1] | [
"def",
"previous_session_label",
"(",
"self",
",",
"session_label",
")",
":",
"idx",
"=",
"self",
".",
"schedule",
".",
"index",
".",
"get_loc",
"(",
"session_label",
")",
"if",
"idx",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"There is no previous session as this is the\"",
"\" beginning of the exchange calendar.\"",
")",
"return",
"self",
".",
"schedule",
".",
"index",
"[",
"idx",
"-",
"1",
"]"
] | Given a session label, returns the label of the previous session.
Parameters
----------
session_label: pd.Timestamp
A session whose previous session is desired.
Returns
-------
pd.Timestamp
The previous session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the first session in this
calendar. | [
"Given",
"a",
"session",
"label",
"returns",
"the",
"label",
"of",
"the",
"previous",
"session",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L494-L518 |
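A hedged sketch of the session-stepping pair defined in this record and the one before it, reusing the same assumed `"XNYS"` calendar:

```python
import pandas as pd
from trading_calendars import get_calendar

cal = get_calendar("XNYS")  # assumed exchange code

session = pd.Timestamp("2019-01-07", tz="UTC")  # a Monday session label
print(cal.next_session_label(session))          # 2019-01-08
print(cal.previous_session_label(session))      # 2019-01-04, skipping the weekend

# Both helpers raise ValueError at the edges of the calendar:
try:
    cal.previous_session_label(cal.all_sessions[0])
except ValueError as exc:
    print(exc)
```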
248,458 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.minutes_for_session | def minutes_for_session(self, session_label):
"""
Given a session label, return the minutes for that session.
Parameters
----------
session_label: pd.Timestamp (midnight UTC)
A session label whose session's minutes are desired.
Returns
-------
pd.DateTimeIndex
All the minutes for the given session.
"""
return self.minutes_in_range(
start_minute=self.schedule.at[session_label, 'market_open'],
end_minute=self.schedule.at[session_label, 'market_close'],
) | python | def minutes_for_session(self, session_label):
return self.minutes_in_range(
start_minute=self.schedule.at[session_label, 'market_open'],
end_minute=self.schedule.at[session_label, 'market_close'],
) | [
"def",
"minutes_for_session",
"(",
"self",
",",
"session_label",
")",
":",
"return",
"self",
".",
"minutes_in_range",
"(",
"start_minute",
"=",
"self",
".",
"schedule",
".",
"at",
"[",
"session_label",
",",
"'market_open'",
"]",
",",
"end_minute",
"=",
"self",
".",
"schedule",
".",
"at",
"[",
"session_label",
",",
"'market_close'",
"]",
",",
")"
] | Given a session label, return the minutes for that session.
Parameters
----------
session_label: pd.Timestamp (midnight UTC)
A session label whose session's minutes are desired.
Returns
-------
pd.DateTimeIndex
All the minutes for the given session. | [
"Given",
"a",
"session",
"label",
"return",
"the",
"minutes",
"for",
"that",
"session",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L520-L537 |
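A short sketch of `minutes_for_session` under the same assumptions:

```python
import pandas as pd
from trading_calendars import get_calendar

cal = get_calendar("XNYS")  # assumed exchange code
minutes = cal.minutes_for_session(pd.Timestamp("2019-01-07", tz="UTC"))

# A regular NYSE session spans 9:31-16:00 ET, i.e. typically 390 minutes.
print(len(minutes), minutes[0], minutes[-1])
```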
248,459 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.execution_minutes_for_session | def execution_minutes_for_session(self, session_label):
"""
Given a session label, return the execution minutes for that session.
Parameters
----------
session_label: pd.Timestamp (midnight UTC)
A session label whose session's minutes are desired.
Returns
-------
pd.DateTimeIndex
All the execution minutes for the given session.
"""
return self.minutes_in_range(
start_minute=self.execution_time_from_open(
self.schedule.at[session_label, 'market_open'],
),
end_minute=self.execution_time_from_close(
self.schedule.at[session_label, 'market_close'],
),
) | python | def execution_minutes_for_session(self, session_label):
return self.minutes_in_range(
start_minute=self.execution_time_from_open(
self.schedule.at[session_label, 'market_open'],
),
end_minute=self.execution_time_from_close(
self.schedule.at[session_label, 'market_close'],
),
) | [
"def",
"execution_minutes_for_session",
"(",
"self",
",",
"session_label",
")",
":",
"return",
"self",
".",
"minutes_in_range",
"(",
"start_minute",
"=",
"self",
".",
"execution_time_from_open",
"(",
"self",
".",
"schedule",
".",
"at",
"[",
"session_label",
",",
"'market_open'",
"]",
",",
")",
",",
"end_minute",
"=",
"self",
".",
"execution_time_from_close",
"(",
"self",
".",
"schedule",
".",
"at",
"[",
"session_label",
",",
"'market_close'",
"]",
",",
")",
",",
")"
] | Given a session label, return the execution minutes for that session.
Parameters
----------
session_label: pd.Timestamp (midnight UTC)
A session label whose session's minutes are desired.
Returns
-------
pd.DateTimeIndex
All the execution minutes for the given session. | [
"Given",
"a",
"session",
"label",
"return",
"the",
"execution",
"minutes",
"for",
"that",
"session",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L539-L560 |
248,460 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.sessions_in_range | def sessions_in_range(self, start_session_label, end_session_label):
"""
Given start and end session labels, return all the sessions in that
range, inclusive.
Parameters
----------
start_session_label: pd.Timestamp (midnight UTC)
The label representing the first session of the desired range.
end_session_label: pd.Timestamp (midnight UTC)
The label representing the last session of the desired range.
Returns
-------
pd.DatetimeIndex
The desired sessions.
"""
return self.all_sessions[
self.all_sessions.slice_indexer(
start_session_label,
end_session_label
)
] | python | def sessions_in_range(self, start_session_label, end_session_label):
return self.all_sessions[
self.all_sessions.slice_indexer(
start_session_label,
end_session_label
)
] | [
"def",
"sessions_in_range",
"(",
"self",
",",
"start_session_label",
",",
"end_session_label",
")",
":",
"return",
"self",
".",
"all_sessions",
"[",
"self",
".",
"all_sessions",
".",
"slice_indexer",
"(",
"start_session_label",
",",
"end_session_label",
")",
"]"
] | Given start and end session labels, return all the sessions in that
range, inclusive.
Parameters
----------
start_session_label: pd.Timestamp (midnight UTC)
The label representing the first session of the desired range.
end_session_label: pd.Timestamp (midnight UTC)
The label representing the last session of the desired range.
Returns
-------
pd.DatetimeIndex
The desired sessions. | [
"Given",
"start",
"and",
"end",
"session",
"labels",
"return",
"all",
"the",
"sessions",
"in",
"that",
"range",
"inclusive",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L592-L615 |
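`sessions_in_range` slices the precomputed session index, so both endpoints are inclusive when they are themselves sessions. A sketch, again assuming an `"XNYS"` calendar:

```python
import pandas as pd
from trading_calendars import get_calendar

cal = get_calendar("XNYS")  # assumed exchange code
sessions = cal.sessions_in_range(
    pd.Timestamp("2019-01-01", tz="UTC"),
    pd.Timestamp("2019-01-31", tz="UTC"),
)
# Midnight-UTC session labels; weekends and the Jan 1 holiday are excluded.
print(len(sessions), sessions[0], sessions[-1])
```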
248,461 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.minutes_in_range | def minutes_in_range(self, start_minute, end_minute):
"""
Given start and end minutes, return all the calendar minutes
in that range, inclusive.
        The given minutes need not be calendar minutes.
Parameters
----------
start_minute: pd.Timestamp
The minute representing the start of the desired range.
end_minute: pd.Timestamp
The minute representing the end of the desired range.
Returns
-------
pd.DatetimeIndex
The minutes in the desired range.
"""
start_idx = searchsorted(self._trading_minutes_nanos,
start_minute.value)
end_idx = searchsorted(self._trading_minutes_nanos,
end_minute.value)
if end_minute.value == self._trading_minutes_nanos[end_idx]:
# if the end minute is a market minute, increase by 1
end_idx += 1
return self.all_minutes[start_idx:end_idx] | python | def minutes_in_range(self, start_minute, end_minute):
start_idx = searchsorted(self._trading_minutes_nanos,
start_minute.value)
end_idx = searchsorted(self._trading_minutes_nanos,
end_minute.value)
if end_minute.value == self._trading_minutes_nanos[end_idx]:
# if the end minute is a market minute, increase by 1
end_idx += 1
return self.all_minutes[start_idx:end_idx] | [
"def",
"minutes_in_range",
"(",
"self",
",",
"start_minute",
",",
"end_minute",
")",
":",
"start_idx",
"=",
"searchsorted",
"(",
"self",
".",
"_trading_minutes_nanos",
",",
"start_minute",
".",
"value",
")",
"end_idx",
"=",
"searchsorted",
"(",
"self",
".",
"_trading_minutes_nanos",
",",
"end_minute",
".",
"value",
")",
"if",
"end_minute",
".",
"value",
"==",
"self",
".",
"_trading_minutes_nanos",
"[",
"end_idx",
"]",
":",
"# if the end minute is a market minute, increase by 1",
"end_idx",
"+=",
"1",
"return",
"self",
".",
"all_minutes",
"[",
"start_idx",
":",
"end_idx",
"]"
] | Given start and end minutes, return all the calendar minutes
in that range, inclusive.
The given minutes need not be calendar minutes.
Parameters
----------
start_minute: pd.Timestamp
The minute representing the start of the desired range.
end_minute: pd.Timestamp
The minute representing the end of the desired range.
Returns
-------
pd.DatetimeIndex
The minutes in the desired range. | [
"Given",
"start",
"and",
"end",
"minutes",
"return",
"all",
"the",
"calendar",
"minutes",
"in",
"that",
"range",
"inclusive",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L681-L711 |
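The end-inclusive behavior of `minutes_in_range` (the `end_idx += 1` branch) is easy to miss; a sketch that exercises it:

```python
import pandas as pd
from trading_calendars import get_calendar

cal = get_calendar("XNYS")  # assumed exchange code
start = pd.Timestamp("2019-01-07 14:31", tz="UTC")  # the 9:31 ET open
end = pd.Timestamp("2019-01-07 14:40", tz="UTC")

window = cal.minutes_in_range(start, end)
# `end` is itself a market minute, so it is included: 10 minutes, not 9.
print(len(window))  # 10
```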
248,462 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.minutes_for_sessions_in_range | def minutes_for_sessions_in_range(self,
start_session_label,
end_session_label):
"""
Returns all the minutes for all the sessions from the given start
session label to the given end session label, inclusive.
Parameters
----------
start_session_label: pd.Timestamp
The label of the first session in the range.
end_session_label: pd.Timestamp
The label of the last session in the range.
Returns
-------
pd.DatetimeIndex
The minutes in the desired range.
"""
first_minute, _ = self.open_and_close_for_session(start_session_label)
_, last_minute = self.open_and_close_for_session(end_session_label)
return self.minutes_in_range(first_minute, last_minute) | python | def minutes_for_sessions_in_range(self,
start_session_label,
end_session_label):
first_minute, _ = self.open_and_close_for_session(start_session_label)
_, last_minute = self.open_and_close_for_session(end_session_label)
return self.minutes_in_range(first_minute, last_minute) | [
"def",
"minutes_for_sessions_in_range",
"(",
"self",
",",
"start_session_label",
",",
"end_session_label",
")",
":",
"first_minute",
",",
"_",
"=",
"self",
".",
"open_and_close_for_session",
"(",
"start_session_label",
")",
"_",
",",
"last_minute",
"=",
"self",
".",
"open_and_close_for_session",
"(",
"end_session_label",
")",
"return",
"self",
".",
"minutes_in_range",
"(",
"first_minute",
",",
"last_minute",
")"
] | Returns all the minutes for all the sessions from the given start
session label to the given end session label, inclusive.
Parameters
----------
start_session_label: pd.Timestamp
The label of the first session in the range.
end_session_label: pd.Timestamp
The label of the last session in the range.
Returns
-------
pd.DatetimeIndex
The minutes in the desired range. | [
"Returns",
"all",
"the",
"minutes",
"for",
"all",
"the",
"sessions",
"from",
"the",
"given",
"start",
"session",
"label",
"to",
"the",
"given",
"end",
"session",
"label",
"inclusive",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L713-L737 |
248,463 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.open_and_close_for_session | def open_and_close_for_session(self, session_label):
"""
Returns a tuple of timestamps of the open and close of the session
represented by the given label.
Parameters
----------
session_label: pd.Timestamp
The session whose open and close are desired.
Returns
-------
(Timestamp, Timestamp)
The open and close for the given session.
"""
sched = self.schedule
# `market_open` and `market_close` should be timezone aware, but pandas
# 0.16.1 does not appear to support this:
# http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#datetime-with-tz # noqa
return (
sched.at[session_label, 'market_open'].tz_localize(UTC),
sched.at[session_label, 'market_close'].tz_localize(UTC),
) | python | def open_and_close_for_session(self, session_label):
sched = self.schedule
# `market_open` and `market_close` should be timezone aware, but pandas
# 0.16.1 does not appear to support this:
# http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#datetime-with-tz # noqa
return (
sched.at[session_label, 'market_open'].tz_localize(UTC),
sched.at[session_label, 'market_close'].tz_localize(UTC),
) | [
"def",
"open_and_close_for_session",
"(",
"self",
",",
"session_label",
")",
":",
"sched",
"=",
"self",
".",
"schedule",
"# `market_open` and `market_close` should be timezone aware, but pandas",
"# 0.16.1 does not appear to support this:",
"# http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#datetime-with-tz # noqa",
"return",
"(",
"sched",
".",
"at",
"[",
"session_label",
",",
"'market_open'",
"]",
".",
"tz_localize",
"(",
"UTC",
")",
",",
"sched",
".",
"at",
"[",
"session_label",
",",
"'market_close'",
"]",
".",
"tz_localize",
"(",
"UTC",
")",
",",
")"
] | Returns a tuple of timestamps of the open and close of the session
represented by the given label.
Parameters
----------
session_label: pd.Timestamp
The session whose open and close are desired.
Returns
-------
(Timestamp, Timestamp)
The open and close for the given session. | [
"Returns",
"a",
"tuple",
"of",
"timestamps",
"of",
"the",
"open",
"and",
"close",
"of",
"the",
"session",
"represented",
"by",
"the",
"given",
"label",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L739-L762 |
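A sketch of `open_and_close_for_session`; note that the method localizes the naive schedule values to UTC before returning them:

```python
import pandas as pd
from trading_calendars import get_calendar

cal = get_calendar("XNYS")  # assumed exchange code
open_, close = cal.open_and_close_for_session(
    pd.Timestamp("2019-01-07", tz="UTC")
)
print(open_.tz)      # UTC, applied by tz_localize inside the method
print(open_, close)  # 14:31 and 21:00 UTC for a regular winter session
```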
248,464 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.all_minutes | def all_minutes(self):
"""
Returns a DatetimeIndex representing all the minutes in this calendar.
"""
opens_in_ns = self._opens.values.astype(
'datetime64[ns]',
).view('int64')
closes_in_ns = self._closes.values.astype(
'datetime64[ns]',
).view('int64')
return DatetimeIndex(
compute_all_minutes(opens_in_ns, closes_in_ns),
tz=UTC,
) | python | def all_minutes(self):
opens_in_ns = self._opens.values.astype(
'datetime64[ns]',
).view('int64')
closes_in_ns = self._closes.values.astype(
'datetime64[ns]',
).view('int64')
return DatetimeIndex(
compute_all_minutes(opens_in_ns, closes_in_ns),
tz=UTC,
) | [
"def",
"all_minutes",
"(",
"self",
")",
":",
"opens_in_ns",
"=",
"self",
".",
"_opens",
".",
"values",
".",
"astype",
"(",
"'datetime64[ns]'",
",",
")",
".",
"view",
"(",
"'int64'",
")",
"closes_in_ns",
"=",
"self",
".",
"_closes",
".",
"values",
".",
"astype",
"(",
"'datetime64[ns]'",
",",
")",
".",
"view",
"(",
"'int64'",
")",
"return",
"DatetimeIndex",
"(",
"compute_all_minutes",
"(",
"opens_in_ns",
",",
"closes_in_ns",
")",
",",
"tz",
"=",
"UTC",
",",
")"
] | Returns a DatetimeIndex representing all the minutes in this calendar. | [
"Returns",
"a",
"DatetimeIndex",
"representing",
"all",
"the",
"minutes",
"in",
"this",
"calendar",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L807-L822 |
248,465 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.minute_to_session_label | def minute_to_session_label(self, dt, direction="next"):
"""
Given a minute, get the label of its containing session.
Parameters
----------
dt : pd.Timestamp or nanosecond offset
The dt for which to get the containing session.
direction: str
"next" (default) means that if the given dt is not part of a
session, return the label of the next session.
"previous" means that if the given dt is not part of a session,
return the label of the previous session.
"none" means that a KeyError will be raised if the given
dt is not part of a session.
Returns
-------
pd.Timestamp (midnight UTC)
The label of the containing session.
"""
if direction == "next":
try:
return self._minute_to_session_label_cache[dt]
except KeyError:
pass
idx = searchsorted(self.market_closes_nanos, dt)
current_or_next_session = self.schedule.index[idx]
self._minute_to_session_label_cache[dt] = current_or_next_session
if direction == "next":
return current_or_next_session
elif direction == "previous":
if not is_open(self.market_opens_nanos, self.market_closes_nanos,
dt):
# if the exchange is closed, use the previous session
return self.schedule.index[idx - 1]
elif direction == "none":
if not is_open(self.market_opens_nanos, self.market_closes_nanos,
dt):
# if the exchange is closed, blow up
raise ValueError("The given dt is not an exchange minute!")
else:
# invalid direction
raise ValueError("Invalid direction parameter: "
"{0}".format(direction))
return current_or_next_session | python | def minute_to_session_label(self, dt, direction="next"):
if direction == "next":
try:
return self._minute_to_session_label_cache[dt]
except KeyError:
pass
idx = searchsorted(self.market_closes_nanos, dt)
current_or_next_session = self.schedule.index[idx]
self._minute_to_session_label_cache[dt] = current_or_next_session
if direction == "next":
return current_or_next_session
elif direction == "previous":
if not is_open(self.market_opens_nanos, self.market_closes_nanos,
dt):
# if the exchange is closed, use the previous session
return self.schedule.index[idx - 1]
elif direction == "none":
if not is_open(self.market_opens_nanos, self.market_closes_nanos,
dt):
# if the exchange is closed, blow up
raise ValueError("The given dt is not an exchange minute!")
else:
# invalid direction
raise ValueError("Invalid direction parameter: "
"{0}".format(direction))
return current_or_next_session | [
"def",
"minute_to_session_label",
"(",
"self",
",",
"dt",
",",
"direction",
"=",
"\"next\"",
")",
":",
"if",
"direction",
"==",
"\"next\"",
":",
"try",
":",
"return",
"self",
".",
"_minute_to_session_label_cache",
"[",
"dt",
"]",
"except",
"KeyError",
":",
"pass",
"idx",
"=",
"searchsorted",
"(",
"self",
".",
"market_closes_nanos",
",",
"dt",
")",
"current_or_next_session",
"=",
"self",
".",
"schedule",
".",
"index",
"[",
"idx",
"]",
"self",
".",
"_minute_to_session_label_cache",
"[",
"dt",
"]",
"=",
"current_or_next_session",
"if",
"direction",
"==",
"\"next\"",
":",
"return",
"current_or_next_session",
"elif",
"direction",
"==",
"\"previous\"",
":",
"if",
"not",
"is_open",
"(",
"self",
".",
"market_opens_nanos",
",",
"self",
".",
"market_closes_nanos",
",",
"dt",
")",
":",
"# if the exchange is closed, use the previous session",
"return",
"self",
".",
"schedule",
".",
"index",
"[",
"idx",
"-",
"1",
"]",
"elif",
"direction",
"==",
"\"none\"",
":",
"if",
"not",
"is_open",
"(",
"self",
".",
"market_opens_nanos",
",",
"self",
".",
"market_closes_nanos",
",",
"dt",
")",
":",
"# if the exchange is closed, blow up",
"raise",
"ValueError",
"(",
"\"The given dt is not an exchange minute!\"",
")",
"else",
":",
"# invalid direction",
"raise",
"ValueError",
"(",
"\"Invalid direction parameter: \"",
"\"{0}\"",
".",
"format",
"(",
"direction",
")",
")",
"return",
"current_or_next_session"
] | Given a minute, get the label of its containing session.
Parameters
----------
dt : pd.Timestamp or nanosecond offset
The dt for which to get the containing session.
direction: str
"next" (default) means that if the given dt is not part of a
session, return the label of the next session.
"previous" means that if the given dt is not part of a session,
return the label of the previous session.
"none" means that a KeyError will be raised if the given
dt is not part of a session.
Returns
-------
pd.Timestamp (midnight UTC)
The label of the containing session. | [
"Given",
"a",
"minute",
"get",
"the",
"label",
"of",
"its",
"containing",
"session",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L825-L876 |
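A sketch of the three `direction` modes. The docstring allows either a `pd.Timestamp` or a raw nanosecond offset; the nanosecond form is used here since the body compares `dt` directly against nanosecond arrays:

```python
import pandas as pd
from trading_calendars import get_calendar

cal = get_calendar("XNYS")  # assumed exchange code

# A Saturday minute, i.e. not part of any session, as a nanosecond offset.
sat = pd.Timestamp("2019-01-05 15:00", tz="UTC").value

print(cal.minute_to_session_label(sat))                        # Mon 2019-01-07
print(cal.minute_to_session_label(sat, direction="previous"))  # Fri 2019-01-04
try:
    cal.minute_to_session_label(sat, direction="none")
except ValueError as exc:
    print(exc)  # "The given dt is not an exchange minute!"
```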
248,466 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar.minute_index_to_session_labels | def minute_index_to_session_labels(self, index):
"""
Given a sorted DatetimeIndex of market minutes, return a
DatetimeIndex of the corresponding session labels.
Parameters
----------
index: pd.DatetimeIndex or pd.Series
The ordered list of market minutes we want session labels for.
Returns
-------
pd.DatetimeIndex (UTC)
The list of session labels corresponding to the given minutes.
"""
if not index.is_monotonic_increasing:
raise ValueError(
"Non-ordered index passed to minute_index_to_session_labels."
)
# Find the indices of the previous open and the next close for each
# minute.
prev_opens = (
self._opens.values.searchsorted(index.values, side='right') - 1
)
next_closes = (
self._closes.values.searchsorted(index.values, side='left')
)
# If they don't match, the minute is outside the trading day. Barf.
mismatches = (prev_opens != next_closes)
if mismatches.any():
# Show the first bad minute in the error message.
bad_ix = np.flatnonzero(mismatches)[0]
example = index[bad_ix]
prev_day = prev_opens[bad_ix]
prev_open, prev_close = self.schedule.iloc[prev_day]
next_open, next_close = self.schedule.iloc[prev_day + 1]
raise ValueError(
"{num} non-market minutes in minute_index_to_session_labels:\n"
"First Bad Minute: {first_bad}\n"
"Previous Session: {prev_open} -> {prev_close}\n"
"Next Session: {next_open} -> {next_close}"
.format(
num=mismatches.sum(),
first_bad=example,
prev_open=prev_open, prev_close=prev_close,
next_open=next_open, next_close=next_close)
)
return self.schedule.index[prev_opens] | python | def minute_index_to_session_labels(self, index):
if not index.is_monotonic_increasing:
raise ValueError(
"Non-ordered index passed to minute_index_to_session_labels."
)
# Find the indices of the previous open and the next close for each
# minute.
prev_opens = (
self._opens.values.searchsorted(index.values, side='right') - 1
)
next_closes = (
self._closes.values.searchsorted(index.values, side='left')
)
# If they don't match, the minute is outside the trading day. Barf.
mismatches = (prev_opens != next_closes)
if mismatches.any():
# Show the first bad minute in the error message.
bad_ix = np.flatnonzero(mismatches)[0]
example = index[bad_ix]
prev_day = prev_opens[bad_ix]
prev_open, prev_close = self.schedule.iloc[prev_day]
next_open, next_close = self.schedule.iloc[prev_day + 1]
raise ValueError(
"{num} non-market minutes in minute_index_to_session_labels:\n"
"First Bad Minute: {first_bad}\n"
"Previous Session: {prev_open} -> {prev_close}\n"
"Next Session: {next_open} -> {next_close}"
.format(
num=mismatches.sum(),
first_bad=example,
prev_open=prev_open, prev_close=prev_close,
next_open=next_open, next_close=next_close)
)
return self.schedule.index[prev_opens] | [
"def",
"minute_index_to_session_labels",
"(",
"self",
",",
"index",
")",
":",
"if",
"not",
"index",
".",
"is_monotonic_increasing",
":",
"raise",
"ValueError",
"(",
"\"Non-ordered index passed to minute_index_to_session_labels.\"",
")",
"# Find the indices of the previous open and the next close for each",
"# minute.",
"prev_opens",
"=",
"(",
"self",
".",
"_opens",
".",
"values",
".",
"searchsorted",
"(",
"index",
".",
"values",
",",
"side",
"=",
"'right'",
")",
"-",
"1",
")",
"next_closes",
"=",
"(",
"self",
".",
"_closes",
".",
"values",
".",
"searchsorted",
"(",
"index",
".",
"values",
",",
"side",
"=",
"'left'",
")",
")",
"# If they don't match, the minute is outside the trading day. Barf.",
"mismatches",
"=",
"(",
"prev_opens",
"!=",
"next_closes",
")",
"if",
"mismatches",
".",
"any",
"(",
")",
":",
"# Show the first bad minute in the error message.",
"bad_ix",
"=",
"np",
".",
"flatnonzero",
"(",
"mismatches",
")",
"[",
"0",
"]",
"example",
"=",
"index",
"[",
"bad_ix",
"]",
"prev_day",
"=",
"prev_opens",
"[",
"bad_ix",
"]",
"prev_open",
",",
"prev_close",
"=",
"self",
".",
"schedule",
".",
"iloc",
"[",
"prev_day",
"]",
"next_open",
",",
"next_close",
"=",
"self",
".",
"schedule",
".",
"iloc",
"[",
"prev_day",
"+",
"1",
"]",
"raise",
"ValueError",
"(",
"\"{num} non-market minutes in minute_index_to_session_labels:\\n\"",
"\"First Bad Minute: {first_bad}\\n\"",
"\"Previous Session: {prev_open} -> {prev_close}\\n\"",
"\"Next Session: {next_open} -> {next_close}\"",
".",
"format",
"(",
"num",
"=",
"mismatches",
".",
"sum",
"(",
")",
",",
"first_bad",
"=",
"example",
",",
"prev_open",
"=",
"prev_open",
",",
"prev_close",
"=",
"prev_close",
",",
"next_open",
"=",
"next_open",
",",
"next_close",
"=",
"next_close",
")",
")",
"return",
"self",
".",
"schedule",
".",
"index",
"[",
"prev_opens",
"]"
] | Given a sorted DatetimeIndex of market minutes, return a
DatetimeIndex of the corresponding session labels.
Parameters
----------
index: pd.DatetimeIndex or pd.Series
The ordered list of market minutes we want session labels for.
Returns
-------
pd.DatetimeIndex (UTC)
The list of session labels corresponding to the given minutes. | [
"Given",
"a",
"sorted",
"DatetimeIndex",
"of",
"market",
"minutes",
"return",
"a",
"DatetimeIndex",
"of",
"the",
"corresponding",
"session",
"labels",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L878-L930 |
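A sketch of the vectorized minute-to-session mapping, including the up-front monotonicity check:

```python
import pandas as pd
from trading_calendars import get_calendar

cal = get_calendar("XNYS")  # assumed exchange code

# Ten consecutive market minutes starting at the 2019-01-07 open (9:31 ET).
minutes = pd.date_range("2019-01-07 14:31", periods=10, freq="T", tz="UTC")
print(cal.minute_index_to_session_labels(minutes).unique())  # one session label

# Unsorted input is rejected before any mapping is attempted:
try:
    cal.minute_index_to_session_labels(minutes[::-1])
except ValueError as exc:
    print(exc)
```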
248,467 | quantopian/trading_calendars | trading_calendars/trading_calendar.py | TradingCalendar._special_dates | def _special_dates(self, calendars, ad_hoc_dates, start_date, end_date):
"""
Compute a Series of times associated with special dates.
Parameters
----------
        calendars : list[(datetime.time, HolidayCalendar)]
Pairs of time and calendar describing when that time occurs. These
are used to describe regularly-scheduled late opens or early
closes.
ad_hoc_dates : list[(datetime.time, list[pd.Timestamp])]
Pairs of time and list of dates associated with the given times.
These are used to describe late opens or early closes that occurred
for unscheduled or otherwise irregular reasons.
start_date : pd.Timestamp
Start of the range for which we should calculate special dates.
end_date : pd.Timestamp
End of the range for which we should calculate special dates.
Returns
-------
special_dates : pd.Series
Series mapping trading sessions with special opens/closes to the
special open/close for that session.
"""
# List of Series for regularly-scheduled times.
regular = [
scheduled_special_times(
calendar,
start_date,
end_date,
time_,
self.tz,
)
for time_, calendar in calendars
]
# List of Series for ad-hoc times.
ad_hoc = [
pd.Series(
index=pd.to_datetime(datetimes, utc=True),
data=days_at_time(datetimes, time_, self.tz),
)
for time_, datetimes in ad_hoc_dates
]
merged = regular + ad_hoc
if not merged:
# Concat barfs if the input has length 0.
return pd.Series([])
result = pd.concat(merged).sort_index()
return result.loc[(result >= start_date) & (result <= end_date)] | python | def _special_dates(self, calendars, ad_hoc_dates, start_date, end_date):
# List of Series for regularly-scheduled times.
regular = [
scheduled_special_times(
calendar,
start_date,
end_date,
time_,
self.tz,
)
for time_, calendar in calendars
]
# List of Series for ad-hoc times.
ad_hoc = [
pd.Series(
index=pd.to_datetime(datetimes, utc=True),
data=days_at_time(datetimes, time_, self.tz),
)
for time_, datetimes in ad_hoc_dates
]
merged = regular + ad_hoc
if not merged:
# Concat barfs if the input has length 0.
return pd.Series([])
result = pd.concat(merged).sort_index()
return result.loc[(result >= start_date) & (result <= end_date)] | [
"def",
"_special_dates",
"(",
"self",
",",
"calendars",
",",
"ad_hoc_dates",
",",
"start_date",
",",
"end_date",
")",
":",
"# List of Series for regularly-scheduled times.",
"regular",
"=",
"[",
"scheduled_special_times",
"(",
"calendar",
",",
"start_date",
",",
"end_date",
",",
"time_",
",",
"self",
".",
"tz",
",",
")",
"for",
"time_",
",",
"calendar",
"in",
"calendars",
"]",
"# List of Series for ad-hoc times.",
"ad_hoc",
"=",
"[",
"pd",
".",
"Series",
"(",
"index",
"=",
"pd",
".",
"to_datetime",
"(",
"datetimes",
",",
"utc",
"=",
"True",
")",
",",
"data",
"=",
"days_at_time",
"(",
"datetimes",
",",
"time_",
",",
"self",
".",
"tz",
")",
",",
")",
"for",
"time_",
",",
"datetimes",
"in",
"ad_hoc_dates",
"]",
"merged",
"=",
"regular",
"+",
"ad_hoc",
"if",
"not",
"merged",
":",
"# Concat barfs if the input has length 0.",
"return",
"pd",
".",
"Series",
"(",
"[",
"]",
")",
"result",
"=",
"pd",
".",
"concat",
"(",
"merged",
")",
".",
"sort_index",
"(",
")",
"return",
"result",
".",
"loc",
"[",
"(",
"result",
">=",
"start_date",
")",
"&",
"(",
"result",
"<=",
"end_date",
")",
"]"
] | Compute a Series of times associated with special dates.
Parameters
----------
calendars : list[(datetime.time, HolidayCalendar)]
Pairs of time and calendar describing when that time occurs. These
are used to describe regularly-scheduled late opens or early
closes.
ad_hoc_dates : list[(datetime.time, list[pd.Timestamp])]
Pairs of time and list of dates associated with the given times.
These are used to describe late opens or early closes that occurred
for unscheduled or otherwise irregular reasons.
start_date : pd.Timestamp
Start of the range for which we should calculate special dates.
end_date : pd.Timestamp
End of the range for which we should calculate special dates.
Returns
-------
special_dates : pd.Series
Series mapping trading sessions with special opens/closes to the
special open/close for that session. | [
"Compute",
"a",
"Series",
"of",
"times",
"associated",
"with",
"special",
"dates",
"."
] | 951711c82c8a2875c09e96e2979faaf8734fb4df | https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/trading_calendar.py#L932-L984 |
248,468 | datahq/dataflows | setup.py | read | def read(*paths):
"""Read a text file."""
basedir = os.path.dirname(__file__)
fullpath = os.path.join(basedir, *paths)
contents = io.open(fullpath, encoding='utf-8').read().strip()
return contents | python | def read(*paths):
basedir = os.path.dirname(__file__)
fullpath = os.path.join(basedir, *paths)
contents = io.open(fullpath, encoding='utf-8').read().strip()
return contents | [
"def",
"read",
"(",
"*",
"paths",
")",
":",
"basedir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
"fullpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"*",
"paths",
")",
"contents",
"=",
"io",
".",
"open",
"(",
"fullpath",
",",
"encoding",
"=",
"'utf-8'",
")",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"return",
"contents"
] | Read a text file. | [
"Read",
"a",
"text",
"file",
"."
] | 2c5e5e01e09c8b44e0ff36d85b3f2f4dcf4e8465 | https://github.com/datahq/dataflows/blob/2c5e5e01e09c8b44e0ff36d85b3f2f4dcf4e8465/setup.py#L12-L17 |
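The `read` helper above is the usual `setup.py` idiom for loading a long description. A sketch of how it is typically wired up; the metadata and file name are placeholders, not taken from the dataflows project:

```python
import io
import os
from setuptools import setup

def read(*paths):
    """Read a text file relative to this setup.py (as in the record above)."""
    basedir = os.path.dirname(__file__)
    fullpath = os.path.join(basedir, *paths)
    return io.open(fullpath, encoding='utf-8').read().strip()

setup(
    name='example-package',              # placeholder metadata
    version='0.0.1',
    long_description=read('README.md'),  # file name assumed for the sketch
)
```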
248,469 | ipfs/py-ipfs-api | ipfsapi/encoding.py | Encoding.parse | def parse(self, raw):
"""Returns a Python object decoded from the bytes of this encoding.
Raises
------
~ipfsapi.exceptions.DecodingError
Parameters
----------
raw : bytes
Data to be parsed
Returns
-------
object
"""
results = list(self.parse_partial(raw))
results.extend(self.parse_finalize())
return results[0] if len(results) == 1 else results | python | def parse(self, raw):
results = list(self.parse_partial(raw))
results.extend(self.parse_finalize())
return results[0] if len(results) == 1 else results | [
"def",
"parse",
"(",
"self",
",",
"raw",
")",
":",
"results",
"=",
"list",
"(",
"self",
".",
"parse_partial",
"(",
"raw",
")",
")",
"results",
".",
"extend",
"(",
"self",
".",
"parse_finalize",
"(",
")",
")",
"return",
"results",
"[",
"0",
"]",
"if",
"len",
"(",
"results",
")",
"==",
"1",
"else",
"results"
] | Returns a Python object decoded from the bytes of this encoding.
Raises
------
~ipfsapi.exceptions.DecodingError
Parameters
----------
raw : bytes
Data to be parsed
Returns
-------
object | [
"Returns",
"a",
"Python",
"object",
"decoded",
"from",
"the",
"bytes",
"of",
"this",
"encoding",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/encoding.py#L60-L78 |
248,470 | ipfs/py-ipfs-api | ipfsapi/encoding.py | Json.parse_partial | def parse_partial(self, data):
"""Incrementally decodes JSON data sets into Python objects.
Raises
------
~ipfsapi.exceptions.DecodingError
Returns
-------
generator
"""
try:
# Python 3 requires all JSON data to be a text string
lines = self._decoder1.decode(data, False).split("\n")
# Add first input line to last buffer line, if applicable, to
# handle cases where the JSON string has been chopped in half
# at the network level due to streaming
if len(self._buffer) > 0 and self._buffer[-1] is not None:
self._buffer[-1] += lines[0]
self._buffer.extend(lines[1:])
else:
self._buffer.extend(lines)
except UnicodeDecodeError as error:
raise exceptions.DecodingError('json', error)
# Process data buffer
index = 0
try:
# Process each line as separate buffer
#PERF: This way the `.lstrip()` call becomes almost always a NOP
# even if it does return a different string it will only
# have to allocate a new buffer for the currently processed
# line.
while index < len(self._buffer):
while self._buffer[index]:
# Make sure buffer does not start with whitespace
#PERF: `.lstrip()` does not reallocate if the string does
# not actually start with whitespace.
self._buffer[index] = self._buffer[index].lstrip()
# Handle case where the remainder of the line contained
# only whitespace
if not self._buffer[index]:
self._buffer[index] = None
continue
# Try decoding the partial data buffer and return results
# from this
data = self._buffer[index]
for index2 in range(index, len(self._buffer)):
# If decoding doesn't succeed with the currently
# selected buffer (very unlikely with our current
# class of input data) then retry with appending
# any other pending pieces of input data
# This will happen with JSON data that contains
# arbitrary new-lines: "{1:\n2,\n3:4}"
if index2 > index:
data += "\n" + self._buffer[index2]
try:
(obj, offset) = self._decoder2.raw_decode(data)
except ValueError:
# Treat error as fatal if we have already added
# the final buffer to the input
if (index2 + 1) == len(self._buffer):
raise
else:
index = index2
break
# Decoding succeeded – yield result and shorten buffer
yield obj
if offset < len(self._buffer[index]):
self._buffer[index] = self._buffer[index][offset:]
else:
self._buffer[index] = None
index += 1
except ValueError as error:
# It is unfortunately not possible to reliably detect whether
# parsing ended because of an error *within* the JSON string, or
# an unexpected *end* of the JSON string.
            # We therefore have to assume that any error that occurs here
            # *might* be related to the JSON parser hitting EOF and therefore
# have to postpone error reporting until `parse_finalize` is
# called.
self._lasterror = error
finally:
# Remove all processed buffers
del self._buffer[0:index] | python | def parse_partial(self, data):
try:
# Python 3 requires all JSON data to be a text string
lines = self._decoder1.decode(data, False).split("\n")
# Add first input line to last buffer line, if applicable, to
# handle cases where the JSON string has been chopped in half
# at the network level due to streaming
if len(self._buffer) > 0 and self._buffer[-1] is not None:
self._buffer[-1] += lines[0]
self._buffer.extend(lines[1:])
else:
self._buffer.extend(lines)
except UnicodeDecodeError as error:
raise exceptions.DecodingError('json', error)
# Process data buffer
index = 0
try:
# Process each line as separate buffer
#PERF: This way the `.lstrip()` call becomes almost always a NOP
# even if it does return a different string it will only
# have to allocate a new buffer for the currently processed
# line.
while index < len(self._buffer):
while self._buffer[index]:
# Make sure buffer does not start with whitespace
#PERF: `.lstrip()` does not reallocate if the string does
# not actually start with whitespace.
self._buffer[index] = self._buffer[index].lstrip()
# Handle case where the remainder of the line contained
# only whitespace
if not self._buffer[index]:
self._buffer[index] = None
continue
# Try decoding the partial data buffer and return results
# from this
data = self._buffer[index]
for index2 in range(index, len(self._buffer)):
# If decoding doesn't succeed with the currently
# selected buffer (very unlikely with our current
# class of input data) then retry with appending
# any other pending pieces of input data
# This will happen with JSON data that contains
# arbitrary new-lines: "{1:\n2,\n3:4}"
if index2 > index:
data += "\n" + self._buffer[index2]
try:
(obj, offset) = self._decoder2.raw_decode(data)
except ValueError:
# Treat error as fatal if we have already added
# the final buffer to the input
if (index2 + 1) == len(self._buffer):
raise
else:
index = index2
break
# Decoding succeeded – yield result and shorten buffer
yield obj
if offset < len(self._buffer[index]):
self._buffer[index] = self._buffer[index][offset:]
else:
self._buffer[index] = None
index += 1
except ValueError as error:
# It is unfortunately not possible to reliably detect whether
# parsing ended because of an error *within* the JSON string, or
# an unexpected *end* of the JSON string.
            # We therefore have to assume that any error that occurs here
            # *might* be related to the JSON parser hitting EOF and therefore
# have to postpone error reporting until `parse_finalize` is
# called.
self._lasterror = error
finally:
# Remove all processed buffers
del self._buffer[0:index] | [
"def",
"parse_partial",
"(",
"self",
",",
"data",
")",
":",
"try",
":",
"# Python 3 requires all JSON data to be a text string",
"lines",
"=",
"self",
".",
"_decoder1",
".",
"decode",
"(",
"data",
",",
"False",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"# Add first input line to last buffer line, if applicable, to",
"# handle cases where the JSON string has been chopped in half",
"# at the network level due to streaming",
"if",
"len",
"(",
"self",
".",
"_buffer",
")",
">",
"0",
"and",
"self",
".",
"_buffer",
"[",
"-",
"1",
"]",
"is",
"not",
"None",
":",
"self",
".",
"_buffer",
"[",
"-",
"1",
"]",
"+=",
"lines",
"[",
"0",
"]",
"self",
".",
"_buffer",
".",
"extend",
"(",
"lines",
"[",
"1",
":",
"]",
")",
"else",
":",
"self",
".",
"_buffer",
".",
"extend",
"(",
"lines",
")",
"except",
"UnicodeDecodeError",
"as",
"error",
":",
"raise",
"exceptions",
".",
"DecodingError",
"(",
"'json'",
",",
"error",
")",
"# Process data buffer",
"index",
"=",
"0",
"try",
":",
"# Process each line as separate buffer",
"#PERF: This way the `.lstrip()` call becomes almost always a NOP",
"# even if it does return a different string it will only",
"# have to allocate a new buffer for the currently processed",
"# line.",
"while",
"index",
"<",
"len",
"(",
"self",
".",
"_buffer",
")",
":",
"while",
"self",
".",
"_buffer",
"[",
"index",
"]",
":",
"# Make sure buffer does not start with whitespace",
"#PERF: `.lstrip()` does not reallocate if the string does",
"# not actually start with whitespace.",
"self",
".",
"_buffer",
"[",
"index",
"]",
"=",
"self",
".",
"_buffer",
"[",
"index",
"]",
".",
"lstrip",
"(",
")",
"# Handle case where the remainder of the line contained",
"# only whitespace",
"if",
"not",
"self",
".",
"_buffer",
"[",
"index",
"]",
":",
"self",
".",
"_buffer",
"[",
"index",
"]",
"=",
"None",
"continue",
"# Try decoding the partial data buffer and return results",
"# from this",
"data",
"=",
"self",
".",
"_buffer",
"[",
"index",
"]",
"for",
"index2",
"in",
"range",
"(",
"index",
",",
"len",
"(",
"self",
".",
"_buffer",
")",
")",
":",
"# If decoding doesn't succeed with the currently",
"# selected buffer (very unlikely with our current",
"# class of input data) then retry with appending",
"# any other pending pieces of input data",
"# This will happen with JSON data that contains",
"# arbitrary new-lines: \"{1:\\n2,\\n3:4}\"",
"if",
"index2",
">",
"index",
":",
"data",
"+=",
"\"\\n\"",
"+",
"self",
".",
"_buffer",
"[",
"index2",
"]",
"try",
":",
"(",
"obj",
",",
"offset",
")",
"=",
"self",
".",
"_decoder2",
".",
"raw_decode",
"(",
"data",
")",
"except",
"ValueError",
":",
"# Treat error as fatal if we have already added",
"# the final buffer to the input",
"if",
"(",
"index2",
"+",
"1",
")",
"==",
"len",
"(",
"self",
".",
"_buffer",
")",
":",
"raise",
"else",
":",
"index",
"=",
"index2",
"break",
"# Decoding succeeded – yield result and shorten buffer",
"yield",
"obj",
"if",
"offset",
"<",
"len",
"(",
"self",
".",
"_buffer",
"[",
"index",
"]",
")",
":",
"self",
".",
"_buffer",
"[",
"index",
"]",
"=",
"self",
".",
"_buffer",
"[",
"index",
"]",
"[",
"offset",
":",
"]",
"else",
":",
"self",
".",
"_buffer",
"[",
"index",
"]",
"=",
"None",
"index",
"+=",
"1",
"except",
"ValueError",
"as",
"error",
":",
"# It is unfortunately not possible to reliably detect whether",
"# parsing ended because of an error *within* the JSON string, or",
"# an unexpected *end* of the JSON string.",
"# We therefor have to assume that any error that occurs here",
"# *might* be related to the JSON parser hitting EOF and therefor",
"# have to postpone error reporting until `parse_finalize` is",
"# called.",
"self",
".",
"_lasterror",
"=",
"error",
"finally",
":",
"# Remove all processed buffers",
"del",
"self",
".",
"_buffer",
"[",
"0",
":",
"index",
"]"
] | Incrementally decodes JSON data sets into Python objects.
Raises
------
~ipfsapi.exceptions.DecodingError
Returns
-------
generator | [
"Incrementally",
"decodes",
"JSON",
"data",
"sets",
"into",
"Python",
"objects",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/encoding.py#L141-L230 |
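The incremental decoder above exists because HTTP responses can split a JSON document anywhere, even inside a key. A sketch feeding it chopped chunks; constructing `Json()` directly with no arguments is an assumption based on the state its methods use:

```python
from ipfsapi.encoding import Json

decoder = Json()
chunks = [b'{"Name": "a.txt", "Ha', b'sh": "Qm..."}\n{"Name":', b' "b.txt"}\n']

objects = []
for chunk in chunks:
    # parse_partial yields every object completed by this chunk and
    # buffers whatever remains incomplete.
    objects.extend(decoder.parse_partial(chunk))
objects.extend(decoder.parse_finalize())  # raises DecodingError on leftovers

print(objects)  # [{'Name': 'a.txt', 'Hash': 'Qm...'}, {'Name': 'b.txt'}]
```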
248,471 | ipfs/py-ipfs-api | ipfsapi/encoding.py | Json.parse_finalize | def parse_finalize(self):
"""Raises errors for incomplete buffered data that could not be parsed
because the end of the input data has been reached.
Raises
------
~ipfsapi.exceptions.DecodingError
Returns
-------
tuple : Always empty
"""
try:
try:
# Raise exception for remaining bytes in bytes decoder
self._decoder1.decode(b'', True)
except UnicodeDecodeError as error:
raise exceptions.DecodingError('json', error)
# Late raise errors that looked like they could have been fixed if
# the caller had provided more data
if self._buffer:
raise exceptions.DecodingError('json', self._lasterror)
finally:
# Reset state
self._buffer = []
self._lasterror = None
self._decoder1.reset()
return () | python | def parse_finalize(self):
try:
try:
# Raise exception for remaining bytes in bytes decoder
self._decoder1.decode(b'', True)
except UnicodeDecodeError as error:
raise exceptions.DecodingError('json', error)
# Late raise errors that looked like they could have been fixed if
# the caller had provided more data
if self._buffer:
raise exceptions.DecodingError('json', self._lasterror)
finally:
# Reset state
self._buffer = []
self._lasterror = None
self._decoder1.reset()
return () | [
"def",
"parse_finalize",
"(",
"self",
")",
":",
"try",
":",
"try",
":",
"# Raise exception for remaining bytes in bytes decoder",
"self",
".",
"_decoder1",
".",
"decode",
"(",
"b''",
",",
"True",
")",
"except",
"UnicodeDecodeError",
"as",
"error",
":",
"raise",
"exceptions",
".",
"DecodingError",
"(",
"'json'",
",",
"error",
")",
"# Late raise errors that looked like they could have been fixed if",
"# the caller had provided more data",
"if",
"self",
".",
"_buffer",
":",
"raise",
"exceptions",
".",
"DecodingError",
"(",
"'json'",
",",
"self",
".",
"_lasterror",
")",
"finally",
":",
"# Reset state",
"self",
".",
"_buffer",
"=",
"[",
"]",
"self",
".",
"_lasterror",
"=",
"None",
"self",
".",
"_decoder1",
".",
"reset",
"(",
")",
"return",
"(",
")"
] | Raises errors for incomplete buffered data that could not be parsed
because the end of the input data has been reached.
Raises
------
~ipfsapi.exceptions.DecodingError
Returns
-------
tuple : Always empty | [
"Raises",
"errors",
"for",
"incomplete",
"buffered",
"data",
"that",
"could",
"not",
"be",
"parsed",
"because",
"the",
"end",
"of",
"the",
"input",
"data",
"has",
"been",
"reached",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/encoding.py#L232-L261 |
248,472 | ipfs/py-ipfs-api | ipfsapi/encoding.py | Json.encode | def encode(self, obj):
"""Returns ``obj`` serialized as JSON formatted bytes.
Raises
------
~ipfsapi.exceptions.EncodingError
Parameters
----------
obj : str | list | dict | int
JSON serializable Python object
Returns
-------
bytes
"""
try:
result = json.dumps(obj, sort_keys=True, indent=None,
separators=(',', ':'), ensure_ascii=False)
if isinstance(result, six.text_type):
return result.encode("utf-8")
else:
return result
except (UnicodeEncodeError, TypeError) as error:
raise exceptions.EncodingError('json', error) | python | def encode(self, obj):
try:
result = json.dumps(obj, sort_keys=True, indent=None,
separators=(',', ':'), ensure_ascii=False)
if isinstance(result, six.text_type):
return result.encode("utf-8")
else:
return result
except (UnicodeEncodeError, TypeError) as error:
raise exceptions.EncodingError('json', error) | [
"def",
"encode",
"(",
"self",
",",
"obj",
")",
":",
"try",
":",
"result",
"=",
"json",
".",
"dumps",
"(",
"obj",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"None",
",",
"separators",
"=",
"(",
"','",
",",
"':'",
")",
",",
"ensure_ascii",
"=",
"False",
")",
"if",
"isinstance",
"(",
"result",
",",
"six",
".",
"text_type",
")",
":",
"return",
"result",
".",
"encode",
"(",
"\"utf-8\"",
")",
"else",
":",
"return",
"result",
"except",
"(",
"UnicodeEncodeError",
",",
"TypeError",
")",
"as",
"error",
":",
"raise",
"exceptions",
".",
"EncodingError",
"(",
"'json'",
",",
"error",
")"
] | Returns ``obj`` serialized as JSON formatted bytes.
Raises
------
~ipfsapi.exceptions.EncodingError
Parameters
----------
obj : str | list | dict | int
JSON serializable Python object
Returns
-------
bytes | [
"Returns",
"obj",
"serialized",
"as",
"JSON",
"formatted",
"bytes",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/encoding.py#L263-L287 |
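A round-trip sketch combining `Json.encode` with the inherited `Encoding.parse` from the earlier record:

```python
from ipfsapi.encoding import Json

codec = Json()
raw = codec.encode({"b": 2, "a": 1})
print(raw)               # b'{"a":1,"b":2}' - compact separators, sorted keys
print(codec.parse(raw))  # {'a': 1, 'b': 2}
```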
248,473 | ipfs/py-ipfs-api | ipfsapi/encoding.py | Pickle.parse_finalize | def parse_finalize(self):
"""Parses the buffered data and yields the result.
Raises
------
~ipfsapi.exceptions.DecodingError
Returns
-------
generator
"""
try:
self._buffer.seek(0, 0)
yield pickle.load(self._buffer)
except pickle.UnpicklingError as error:
raise exceptions.DecodingError('pickle', error) | python | def parse_finalize(self):
try:
self._buffer.seek(0, 0)
yield pickle.load(self._buffer)
except pickle.UnpicklingError as error:
raise exceptions.DecodingError('pickle', error) | [
"def",
"parse_finalize",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_buffer",
".",
"seek",
"(",
"0",
",",
"0",
")",
"yield",
"pickle",
".",
"load",
"(",
"self",
".",
"_buffer",
")",
"except",
"pickle",
".",
"UnpicklingError",
"as",
"error",
":",
"raise",
"exceptions",
".",
"DecodingError",
"(",
"'pickle'",
",",
"error",
")"
] | Parses the buffered data and yields the result.
Raises
------
~ipfsapi.exceptions.DecodingError
Returns
-------
generator | [
"Parses",
"the",
"buffered",
"data",
"and",
"yields",
"the",
"result",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/encoding.py#L319-L334 |
248,474 | ipfs/py-ipfs-api | ipfsapi/encoding.py | Pickle.encode | def encode(self, obj):
"""Returns ``obj`` serialized as a pickle binary string.
Raises
------
~ipfsapi.exceptions.EncodingError
Parameters
----------
obj : object
Serializable Python object
Returns
-------
bytes
"""
try:
return pickle.dumps(obj)
except pickle.PicklingError as error:
raise exceptions.EncodingError('pickle', error) | python | def encode(self, obj):
try:
return pickle.dumps(obj)
except pickle.PicklingError as error:
raise exceptions.EncodingError('pickle', error) | [
"def",
"encode",
"(",
"self",
",",
"obj",
")",
":",
"try",
":",
"return",
"pickle",
".",
"dumps",
"(",
"obj",
")",
"except",
"pickle",
".",
"PicklingError",
"as",
"error",
":",
"raise",
"exceptions",
".",
"EncodingError",
"(",
"'pickle'",
",",
"error",
")"
] | Returns ``obj`` serialized as a pickle binary string.
Raises
------
~ipfsapi.exceptions.EncodingError
Parameters
----------
obj : object
Serializable Python object
Returns
-------
bytes | [
"Returns",
"obj",
"serialized",
"as",
"a",
"pickle",
"binary",
"string",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/encoding.py#L360-L379 |
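The two `Pickle` records above decode only at finalize time. A round-trip sketch, assuming `parse_partial` (not shown in these records) writes incoming bytes into the `BytesIO` buffer that `parse_finalize` reads:

```python
from ipfsapi.encoding import Pickle

codec = Pickle()
raw = codec.encode({"answer": 42})  # pickle.dumps under the hood

# `parse` = buffer everything via parse_partial, then unpickle on finalize.
print(codec.parse(raw))  # {'answer': 42}
```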
248,475 | ipfs/py-ipfs-api | ipfsapi/multipart.py | glob_compile | def glob_compile(pat):
"""Translate a shell glob PATTERN to a regular expression.
This is almost entirely based on `fnmatch.translate` source-code from the
python 3.5 standard-library.
"""
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i + 1
if c == '/' and len(pat) > (i + 2) and pat[i:(i + 3)] == '**/':
# Special-case for "any number of sub-directories" operator since
# may also expand to no entries:
# Otherwise `a/**/b` would expand to `a[/].*[/]b` which wouldn't
# match the immediate sub-directories of `a`, like `a/b`.
i = i + 3
res = res + '[/]([^/]*[/])*'
elif c == '*':
if len(pat) > i and pat[i] == '*':
i = i + 1
res = res + '.*'
else:
res = res + '[^/]*'
elif c == '?':
res = res + '[^/]'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j + 1
if j < n and pat[j] == ']':
j = j + 1
while j < n and pat[j] != ']':
j = j + 1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\', '\\\\')
i = j + 1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return re.compile('^' + res + '\Z(?ms)' + '$') | python | def glob_compile(pat):
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i + 1
if c == '/' and len(pat) > (i + 2) and pat[i:(i + 3)] == '**/':
# Special-case for "any number of sub-directories" operator since
# may also expand to no entries:
# Otherwise `a/**/b` would expand to `a[/].*[/]b` which wouldn't
# match the immediate sub-directories of `a`, like `a/b`.
i = i + 3
res = res + '[/]([^/]*[/])*'
elif c == '*':
if len(pat) > i and pat[i] == '*':
i = i + 1
res = res + '.*'
else:
res = res + '[^/]*'
elif c == '?':
res = res + '[^/]'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j + 1
if j < n and pat[j] == ']':
j = j + 1
while j < n and pat[j] != ']':
j = j + 1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\', '\\\\')
i = j + 1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return re.compile('^' + res + '\Z(?ms)' + '$') | [
"def",
"glob_compile",
"(",
"pat",
")",
":",
"i",
",",
"n",
"=",
"0",
",",
"len",
"(",
"pat",
")",
"res",
"=",
"''",
"while",
"i",
"<",
"n",
":",
"c",
"=",
"pat",
"[",
"i",
"]",
"i",
"=",
"i",
"+",
"1",
"if",
"c",
"==",
"'/'",
"and",
"len",
"(",
"pat",
")",
">",
"(",
"i",
"+",
"2",
")",
"and",
"pat",
"[",
"i",
":",
"(",
"i",
"+",
"3",
")",
"]",
"==",
"'**/'",
":",
"# Special-case for \"any number of sub-directories\" operator since",
"# may also expand to no entries:",
"# Otherwise `a/**/b` would expand to `a[/].*[/]b` which wouldn't",
"# match the immediate sub-directories of `a`, like `a/b`.",
"i",
"=",
"i",
"+",
"3",
"res",
"=",
"res",
"+",
"'[/]([^/]*[/])*'",
"elif",
"c",
"==",
"'*'",
":",
"if",
"len",
"(",
"pat",
")",
">",
"i",
"and",
"pat",
"[",
"i",
"]",
"==",
"'*'",
":",
"i",
"=",
"i",
"+",
"1",
"res",
"=",
"res",
"+",
"'.*'",
"else",
":",
"res",
"=",
"res",
"+",
"'[^/]*'",
"elif",
"c",
"==",
"'?'",
":",
"res",
"=",
"res",
"+",
"'[^/]'",
"elif",
"c",
"==",
"'['",
":",
"j",
"=",
"i",
"if",
"j",
"<",
"n",
"and",
"pat",
"[",
"j",
"]",
"==",
"'!'",
":",
"j",
"=",
"j",
"+",
"1",
"if",
"j",
"<",
"n",
"and",
"pat",
"[",
"j",
"]",
"==",
"']'",
":",
"j",
"=",
"j",
"+",
"1",
"while",
"j",
"<",
"n",
"and",
"pat",
"[",
"j",
"]",
"!=",
"']'",
":",
"j",
"=",
"j",
"+",
"1",
"if",
"j",
">=",
"n",
":",
"res",
"=",
"res",
"+",
"'\\\\['",
"else",
":",
"stuff",
"=",
"pat",
"[",
"i",
":",
"j",
"]",
".",
"replace",
"(",
"'\\\\'",
",",
"'\\\\\\\\'",
")",
"i",
"=",
"j",
"+",
"1",
"if",
"stuff",
"[",
"0",
"]",
"==",
"'!'",
":",
"stuff",
"=",
"'^'",
"+",
"stuff",
"[",
"1",
":",
"]",
"elif",
"stuff",
"[",
"0",
"]",
"==",
"'^'",
":",
"stuff",
"=",
"'\\\\'",
"+",
"stuff",
"res",
"=",
"'%s[%s]'",
"%",
"(",
"res",
",",
"stuff",
")",
"else",
":",
"res",
"=",
"res",
"+",
"re",
".",
"escape",
"(",
"c",
")",
"return",
"re",
".",
"compile",
"(",
"'^'",
"+",
"res",
"+",
"'\\Z(?ms)'",
"+",
"'$'",
")"
] | Translate a shell glob PATTERN to a regular expression.
This is almost entirely based on `fnmatch.translate` source-code from the
python 3.5 standard-library. | [
"Translate",
"a",
"shell",
"glob",
"PATTERN",
"to",
"a",
"regular",
"expression",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/multipart.py#L319-L366 |
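A sketch of the translation in action. Note that the compiled pattern ends in `\Z(?ms)`: inline flags in mid-pattern are an error on Python 3.11+, so this runs only on the interpreter versions the library targeted:

```python
from ipfsapi.multipart import glob_compile

pattern = glob_compile('a/**/*.txt')

# The special-cased '/**/' also matches zero intermediate directories,
# so immediate children of 'a' match as well as deeper ones.
print(bool(pattern.match('a/b.txt')))      # True
print(bool(pattern.match('a/x/y/z.txt')))  # True
print(bool(pattern.match('b/a/b.txt')))    # False
```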
248,476 | ipfs/py-ipfs-api | ipfsapi/multipart.py | stream_files | def stream_files(files, chunk_size=default_chunk_size):
"""Gets a buffered generator for streaming files.
Returns a buffered generator which encodes a file or list of files as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
files : str
The file(s) to stream
chunk_size : int
Maximum size of each stream chunk
"""
stream = FileStream(files, chunk_size=chunk_size)
return stream.body(), stream.headers | python | def stream_files(files, chunk_size=default_chunk_size):
stream = FileStream(files, chunk_size=chunk_size)
return stream.body(), stream.headers | [
"def",
"stream_files",
"(",
"files",
",",
"chunk_size",
"=",
"default_chunk_size",
")",
":",
"stream",
"=",
"FileStream",
"(",
"files",
",",
"chunk_size",
"=",
"chunk_size",
")",
"return",
"stream",
".",
"body",
"(",
")",
",",
"stream",
".",
"headers"
] | Gets a buffered generator for streaming files.
Returns a buffered generator which encodes a file or list of files as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
files : str
The file(s) to stream
chunk_size : int
Maximum size of each stream chunk | [
"Gets",
"a",
"buffered",
"generator",
"for",
"streaming",
"files",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/multipart.py#L560-L575 |
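A hedged usage sketch, feeding the generator straight into an HTTP POST (the daemon URL and file name are illustrative, not part of this API):
.. code-block:: python

    import requests
    from ipfsapi.multipart import stream_files

    body, headers = stream_files('example.txt', chunk_size=4096)
    # `requests` accepts a generator as the body and sends it chunked.
    resp = requests.post('http://127.0.0.1:5001/api/v0/add',
                         data=body, headers=headers)
    print(resp.text)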
248,477 | ipfs/py-ipfs-api | ipfsapi/multipart.py | stream_directory | def stream_directory(directory,
recursive=False,
patterns='**',
chunk_size=default_chunk_size):
"""Gets a buffered generator for streaming directories.
Returns a buffered generator which encodes a directory as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
directory : str
The filepath of the directory to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk
"""
stream = DirectoryStream(directory,
recursive=recursive,
patterns=patterns,
chunk_size=chunk_size)
return stream.body(), stream.headers | python | def stream_directory(directory,
recursive=False,
patterns='**',
chunk_size=default_chunk_size):
stream = DirectoryStream(directory,
recursive=recursive,
patterns=patterns,
chunk_size=chunk_size)
return stream.body(), stream.headers | [
"def",
"stream_directory",
"(",
"directory",
",",
"recursive",
"=",
"False",
",",
"patterns",
"=",
"'**'",
",",
"chunk_size",
"=",
"default_chunk_size",
")",
":",
"stream",
"=",
"DirectoryStream",
"(",
"directory",
",",
"recursive",
"=",
"recursive",
",",
"patterns",
"=",
"patterns",
",",
"chunk_size",
"=",
"chunk_size",
")",
"return",
"stream",
".",
"body",
"(",
")",
",",
"stream",
".",
"headers"
] | Gets a buffered generator for streaming directories.
Returns a buffered generator which encodes a directory as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
directory : str
The filepath of the directory to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk | [
"Gets",
"a",
"buffered",
"generator",
"for",
"streaming",
"directories",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/multipart.py#L578-L604 |
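For example, to stream only the Markdown files below a directory (a sketch; 'docs' is an illustrative path):
.. code-block:: python

    from ipfsapi.multipart import stream_directory

    body, headers = stream_directory('docs', recursive=True,
                                     patterns='**/*.md')
    total = sum(len(chunk) for chunk in body)  # drain the generator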
248,478 | ipfs/py-ipfs-api | ipfsapi/multipart.py | stream_filesystem_node | def stream_filesystem_node(path,
recursive=False,
patterns='**',
chunk_size=default_chunk_size):
"""Gets a buffered generator for streaming either files or directories.
Returns a buffered generator which encodes the file or directory at the
given path as :mimetype:`multipart/form-data` with the corresponding
headers.
Parameters
----------
path : str
The filepath of the directory or file to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk
"""
is_dir = isinstance(path, six.string_types) and os.path.isdir(path)
if recursive or is_dir:
return stream_directory(path, recursive, patterns, chunk_size)
else:
return stream_files(path, chunk_size) | python | def stream_filesystem_node(path,
recursive=False,
patterns='**',
chunk_size=default_chunk_size):
is_dir = isinstance(path, six.string_types) and os.path.isdir(path)
if recursive or is_dir:
return stream_directory(path, recursive, patterns, chunk_size)
else:
return stream_files(path, chunk_size) | [
"def",
"stream_filesystem_node",
"(",
"path",
",",
"recursive",
"=",
"False",
",",
"patterns",
"=",
"'**'",
",",
"chunk_size",
"=",
"default_chunk_size",
")",
":",
"is_dir",
"=",
"isinstance",
"(",
"path",
",",
"six",
".",
"string_types",
")",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
"if",
"recursive",
"or",
"is_dir",
":",
"return",
"stream_directory",
"(",
"path",
",",
"recursive",
",",
"patterns",
",",
"chunk_size",
")",
"else",
":",
"return",
"stream_files",
"(",
"path",
",",
"chunk_size",
")"
] | Gets a buffered generator for streaming either files or directories.
Returns a buffered generator which encodes the file or directory at the
given path as :mimetype:`multipart/form-data` with the corresponding
headers.
Parameters
----------
path : str
The filepath of the directory or file to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk | [
"Gets",
"a",
"buffered",
"generator",
"for",
"streaming",
"either",
"files",
"or",
"directories",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/multipart.py#L607-L633 |
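Note the dispatch rule in the body: ``recursive=True`` selects the directory code path even when ``path`` names a plain file. A minimal sketch:
.. code-block:: python

    from ipfsapi.multipart import stream_filesystem_node

    # A directory path (or recursive=True) yields a DirectoryStream;
    # otherwise the argument is treated as file(s).
    body, headers = stream_filesystem_node('project', recursive=True)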
248,479 | ipfs/py-ipfs-api | ipfsapi/multipart.py | stream_bytes | def stream_bytes(data, chunk_size=default_chunk_size):
"""Gets a buffered generator for streaming binary data.
Returns a buffered generator which encodes binary data as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
data : bytes
The data bytes to stream
chunk_size : int
The maximum size of each stream chunk
Returns
-------
(generator, dict)
"""
stream = BytesStream(data, chunk_size=chunk_size)
return stream.body(), stream.headers | python | def stream_bytes(data, chunk_size=default_chunk_size):
stream = BytesStream(data, chunk_size=chunk_size)
return stream.body(), stream.headers | [
"def",
"stream_bytes",
"(",
"data",
",",
"chunk_size",
"=",
"default_chunk_size",
")",
":",
"stream",
"=",
"BytesStream",
"(",
"data",
",",
"chunk_size",
"=",
"chunk_size",
")",
"return",
"stream",
".",
"body",
"(",
")",
",",
"stream",
".",
"headers"
] | Gets a buffered generator for streaming binary data.
Returns a buffered generator which encodes binary data as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
data : bytes
The data bytes to stream
chunk_size : int
The maximum size of each stream chunk
Returns
-------
(generator, dict) | [
"Gets",
"a",
"buffered",
"generator",
"for",
"streaming",
"binary",
"data",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/multipart.py#L636-L655 |
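A minimal sketch of materialising the encoded body (assuming the returned headers dict carries the usual multipart Content-Type):
.. code-block:: python

    from ipfsapi.multipart import stream_bytes

    body, headers = stream_bytes(b'Mary had a little lamb')
    payload = b''.join(body)  # the complete multipart/form-data body
    assert headers['Content-Type'].startswith('multipart/form-data')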
248,480 | ipfs/py-ipfs-api | ipfsapi/multipart.py | stream_text | def stream_text(text, chunk_size=default_chunk_size):
"""Gets a buffered generator for streaming text.
Returns a buffered generator which encodes a string as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
text : str
The text to stream
chunk_size : int
The maximum size of each stream chunk
Returns
-------
(generator, dict)
"""
if isgenerator(text):
def binary_stream():
for item in text:
if six.PY2 and isinstance(item, six.binary_type):
#PY2: Allow binary strings under Python 2 since
# Python 2 code is not expected to always get the
# distinction between text and binary strings right.
yield item
else:
yield item.encode("utf-8")
data = binary_stream()
elif six.PY2 and isinstance(text, six.binary_type):
#PY2: See above.
data = text
else:
data = text.encode("utf-8")
return stream_bytes(data, chunk_size) | python | def stream_text(text, chunk_size=default_chunk_size):
if isgenerator(text):
def binary_stream():
for item in text:
if six.PY2 and isinstance(item, six.binary_type):
#PY2: Allow binary strings under Python 2 since
# Python 2 code is not expected to always get the
# distinction between text and binary strings right.
yield item
else:
yield item.encode("utf-8")
data = binary_stream()
elif six.PY2 and isinstance(text, six.binary_type):
#PY2: See above.
data = text
else:
data = text.encode("utf-8")
return stream_bytes(data, chunk_size) | [
"def",
"stream_text",
"(",
"text",
",",
"chunk_size",
"=",
"default_chunk_size",
")",
":",
"if",
"isgenerator",
"(",
"text",
")",
":",
"def",
"binary_stream",
"(",
")",
":",
"for",
"item",
"in",
"text",
":",
"if",
"six",
".",
"PY2",
"and",
"isinstance",
"(",
"text",
",",
"six",
".",
"binary_type",
")",
":",
"#PY2: Allow binary strings under Python 2 since",
"# Python 2 code is not expected to always get the",
"# distinction between text and binary strings right.",
"yield",
"text",
"else",
":",
"yield",
"text",
".",
"encode",
"(",
"\"utf-8\"",
")",
"data",
"=",
"binary_stream",
"(",
")",
"elif",
"six",
".",
"PY2",
"and",
"isinstance",
"(",
"text",
",",
"six",
".",
"binary_type",
")",
":",
"#PY2: See above.",
"data",
"=",
"text",
"else",
":",
"data",
"=",
"text",
".",
"encode",
"(",
"\"utf-8\"",
")",
"return",
"stream_bytes",
"(",
"data",
",",
"chunk_size",
")"
] | Gets a buffered generator for streaming text.
Returns a buffered generator which encodes a string as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
text : str
The text to stream
chunk_size : int
The maximum size of each stream chunk
Returns
-------
(generator, dict) | [
"Gets",
"a",
"buffered",
"generator",
"for",
"streaming",
"text",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/multipart.py#L658-L692 |
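Plain strings are UTF-8 encoded up front, while generators are encoded item by item; a sketch of both call shapes (note that ``isgenerator`` only recognises true generator objects, not arbitrary iterables):
.. code-block:: python

    from ipfsapi.multipart import stream_text

    body, headers = stream_text(u'héllo wörld')           # one string
    body, headers = stream_text(s for s in [u'a', u'b'])  # a generator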
248,481 | ipfs/py-ipfs-api | ipfsapi/multipart.py | BodyGenerator._write_headers | def _write_headers(self, headers):
"""Yields the HTTP header text for some content.
Parameters
----------
headers : dict
The headers to yield
"""
if headers:
for name in sorted(headers.keys()):
yield name.encode("ascii")
yield b': '
yield headers[name].encode("ascii")
yield CRLF
yield CRLF | python | def _write_headers(self, headers):
if headers:
for name in sorted(headers.keys()):
yield name.encode("ascii")
yield b': '
yield headers[name].encode("ascii")
yield CRLF
yield CRLF | [
"def",
"_write_headers",
"(",
"self",
",",
"headers",
")",
":",
"if",
"headers",
":",
"for",
"name",
"in",
"sorted",
"(",
"headers",
".",
"keys",
"(",
")",
")",
":",
"yield",
"name",
".",
"encode",
"(",
"\"ascii\"",
")",
"yield",
"b': '",
"yield",
"headers",
"[",
"name",
"]",
".",
"encode",
"(",
"\"ascii\"",
")",
"yield",
"CRLF",
"yield",
"CRLF"
] | Yields the HTTP header text for some content.
Parameters
----------
headers : dict
The headers to yield | [
"Yields",
"the",
"HTTP",
"header",
"text",
"for",
"some",
"content",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/multipart.py#L141-L155 |
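The wire format this produces is the standard header block of a multipart section; a standalone rendition of the same loop (a sketch, not the class method itself):
.. code-block:: python

    CRLF = b'\r\n'

    def render_headers(headers):
        out = b''
        for name in sorted(headers):
            out += name.encode('ascii') + b': ' + \
                   headers[name].encode('ascii') + CRLF
        return out + CRLF  # a blank line terminates the header block

    render_headers({'Content-Type': 'text/plain'})
    # b'Content-Type: text/plain\r\n\r\n'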
248,482 | ipfs/py-ipfs-api | ipfsapi/multipart.py | BodyGenerator.file_open | def file_open(self, fn):
"""Yields the opening text of a file section in multipart HTTP.
Parameters
----------
fn : str
Filename for the file being opened and added to the HTTP body
"""
yield b'--'
yield self.boundary.encode()
yield CRLF
headers = content_disposition(fn)
headers.update(content_type(fn))
for c in self._write_headers(headers):
yield c | python | def file_open(self, fn):
yield b'--'
yield self.boundary.encode()
yield CRLF
headers = content_disposition(fn)
headers.update(content_type(fn))
for c in self._write_headers(headers):
yield c | [
"def",
"file_open",
"(",
"self",
",",
"fn",
")",
":",
"yield",
"b'--'",
"yield",
"self",
".",
"boundary",
".",
"encode",
"(",
")",
"yield",
"CRLF",
"headers",
"=",
"content_disposition",
"(",
"fn",
")",
"headers",
".",
"update",
"(",
"content_type",
"(",
"fn",
")",
")",
"for",
"c",
"in",
"self",
".",
"_write_headers",
"(",
"headers",
")",
":",
"yield",
"c"
] | Yields the opening text of a file section in multipart HTTP.
Parameters
----------
fn : str
Filename for the file being opened and added to the HTTP body | [
"Yields",
"the",
"opening",
"text",
"of",
"a",
"file",
"section",
"in",
"multipart",
"HTTP",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/multipart.py#L169-L183 |
248,483 | ipfs/py-ipfs-api | ipfsapi/multipart.py | BufferedGenerator.file_chunks | def file_chunks(self, fp):
"""Yields chunks of a file.
Parameters
----------
fp : io.RawIOBase
The file to break into chunks
(must be an open file or have the ``readinto`` method)
"""
fsize = utils.file_size(fp)
offset = 0
if hasattr(fp, 'readinto'):
while offset < fsize:
nb = fp.readinto(self._internal)
yield self.buf[:nb]
offset += nb
else:
while offset < fsize:
nb = min(self.chunk_size, fsize - offset)
yield fp.read(nb)
offset += nb | python | def file_chunks(self, fp):
fsize = utils.file_size(fp)
offset = 0
if hasattr(fp, 'readinto'):
while offset < fsize:
nb = fp.readinto(self._internal)
yield self.buf[:nb]
offset += nb
else:
while offset < fsize:
nb = min(self.chunk_size, fsize - offset)
yield fp.read(nb)
offset += nb | [
"def",
"file_chunks",
"(",
"self",
",",
"fp",
")",
":",
"fsize",
"=",
"utils",
".",
"file_size",
"(",
"fp",
")",
"offset",
"=",
"0",
"if",
"hasattr",
"(",
"fp",
",",
"'readinto'",
")",
":",
"while",
"offset",
"<",
"fsize",
":",
"nb",
"=",
"fp",
".",
"readinto",
"(",
"self",
".",
"_internal",
")",
"yield",
"self",
".",
"buf",
"[",
":",
"nb",
"]",
"offset",
"+=",
"nb",
"else",
":",
"while",
"offset",
"<",
"fsize",
":",
"nb",
"=",
"min",
"(",
"self",
".",
"chunk_size",
",",
"fsize",
"-",
"offset",
")",
"yield",
"fp",
".",
"read",
"(",
"nb",
")",
"offset",
"+=",
"nb"
] | Yields chunks of a file.
Parameters
----------
fp : io.RawIOBase
The file to break into chunks
(must be an open file or have the ``readinto`` method) | [
"Yields",
"chunks",
"of",
"a",
"file",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/multipart.py#L222-L242 |
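The ``readinto`` branch reuses one pre-allocated buffer (``self._internal``/``self.buf`` in the class); the same pattern in isolation:
.. code-block:: python

    import io

    def chunks(fp, chunk_size=8192):
        buf = bytearray(chunk_size)  # reused for every read
        while True:
            n = fp.readinto(buf)
            if not n:
                break
            yield bytes(buf[:n])

    sizes = [len(c) for c in chunks(io.BytesIO(b'x' * 20000))]
    # [8192, 8192, 3616]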
248,484 | ipfs/py-ipfs-api | ipfsapi/multipart.py | BufferedGenerator.gen_chunks | def gen_chunks(self, gen):
"""Generates byte chunks of a given size.
Takes a bytes generator and yields chunks of a maximum of
``chunk_size`` bytes.
Parameters
----------
gen : generator
The bytes generator that produces the bytes
"""
for data in gen:
size = len(data)
if size < self.chunk_size:
yield data
else:
mv = buffer(data)
offset = 0
while offset < size:
nb = min(self.chunk_size, size - offset)
yield mv[offset:offset + nb]
offset += nb | python | def gen_chunks(self, gen):
for data in gen:
size = len(data)
if size < self.chunk_size:
yield data
else:
mv = buffer(data)
offset = 0
while offset < size:
nb = min(self.chunk_size, size - offset)
yield mv[offset:offset + nb]
offset += nb | [
"def",
"gen_chunks",
"(",
"self",
",",
"gen",
")",
":",
"for",
"data",
"in",
"gen",
":",
"size",
"=",
"len",
"(",
"data",
")",
"if",
"size",
"<",
"self",
".",
"chunk_size",
":",
"yield",
"data",
"else",
":",
"mv",
"=",
"buffer",
"(",
"data",
")",
"offset",
"=",
"0",
"while",
"offset",
"<",
"size",
":",
"nb",
"=",
"min",
"(",
"self",
".",
"chunk_size",
",",
"size",
"-",
"offset",
")",
"yield",
"mv",
"[",
"offset",
":",
"offset",
"+",
"nb",
"]",
"offset",
"+=",
"nb"
] | Generates byte chunks of a given size.
Takes a bytes generator and yields chunks of a maximum of
``chunk_size`` bytes.
Parameters
----------
gen : generator
The bytes generator that produces the bytes | [
"Generates",
"byte",
"chunks",
"of",
"a",
"given",
"size",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/multipart.py#L244-L265 |
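``buffer`` here is the Python 2 builtin for zero-copy slicing; under Python 3 the same re-chunking can be written with ``memoryview``. A sketch, not the class itself:
.. code-block:: python

    def rechunk(gen, chunk_size):
        for data in gen:
            if len(data) < chunk_size:
                yield data
            else:
                mv = memoryview(data)  # avoids copying `data`
                for offset in range(0, len(data), chunk_size):
                    yield bytes(mv[offset:offset + chunk_size])

    list(rechunk([b'abcdefghij'], 4))  # [b'abcd', b'efgh', b'ij']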
248,485 | ipfs/py-ipfs-api | ipfsapi/multipart.py | FileStream.body | def body(self):
"""Yields the body of the buffered file."""
for fp, need_close in self.files:
try:
name = os.path.basename(fp.name)
except AttributeError:
name = ''
for chunk in self.gen_chunks(self.envelope.file_open(name)):
yield chunk
for chunk in self.file_chunks(fp):
yield chunk
for chunk in self.gen_chunks(self.envelope.file_close()):
yield chunk
if need_close:
fp.close()
for chunk in self.close():
yield chunk | python | def body(self):
for fp, need_close in self.files:
try:
name = os.path.basename(fp.name)
except AttributeError:
name = ''
for chunk in self.gen_chunks(self.envelope.file_open(name)):
yield chunk
for chunk in self.file_chunks(fp):
yield chunk
for chunk in self.gen_chunks(self.envelope.file_close()):
yield chunk
if need_close:
fp.close()
for chunk in self.close():
yield chunk | [
"def",
"body",
"(",
"self",
")",
":",
"for",
"fp",
",",
"need_close",
"in",
"self",
".",
"files",
":",
"try",
":",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fp",
".",
"name",
")",
"except",
"AttributeError",
":",
"name",
"=",
"''",
"for",
"chunk",
"in",
"self",
".",
"gen_chunks",
"(",
"self",
".",
"envelope",
".",
"file_open",
"(",
"name",
")",
")",
":",
"yield",
"chunk",
"for",
"chunk",
"in",
"self",
".",
"file_chunks",
"(",
"fp",
")",
":",
"yield",
"chunk",
"for",
"chunk",
"in",
"self",
".",
"gen_chunks",
"(",
"self",
".",
"envelope",
".",
"file_close",
"(",
")",
")",
":",
"yield",
"chunk",
"if",
"need_close",
":",
"fp",
".",
"close",
"(",
")",
"for",
"chunk",
"in",
"self",
".",
"close",
"(",
")",
":",
"yield",
"chunk"
] | Yields the body of the buffered file. | [
"Yields",
"the",
"body",
"of",
"the",
"buffered",
"file",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/multipart.py#L300-L316 |
248,486 | ipfs/py-ipfs-api | ipfsapi/multipart.py | DirectoryStream._prepare | def _prepare(self):
"""Pre-formats the multipart HTTP request to transmit the directory."""
names = []
added_directories = set()
def add_directory(short_path):
# Do not continue if this directory has already been added
if short_path in added_directories:
return
# Scan for first super-directory that has already been added
dir_base = short_path
dir_parts = []
while dir_base:
dir_base, dir_name = os.path.split(dir_base)
dir_parts.append(dir_name)
if dir_base in added_directories:
break
# Add missing intermediate directory nodes in the right order
while dir_parts:
dir_base = os.path.join(dir_base, dir_parts.pop())
# Create an empty, fake file to represent the directory
mock_file = io.StringIO()
mock_file.write(u'')
# Add this directory to those that will be sent
names.append(('files',
(dir_base.replace(os.sep, '/'), mock_file, 'application/x-directory')))
# Remember that this directory has already been sent
added_directories.add(dir_base)
def add_file(short_path, full_path):
try:
# Always add files in wildcard directories
names.append(('files', (short_path.replace(os.sep, '/'),
open(full_path, 'rb'),
'application/octet-stream')))
except OSError:
# File might have disappeared between `os.walk()` and `open()`
pass
def match_short_path(short_path):
# Remove initial path component so that all files are based in
# the target directory itself (not one level above)
if os.sep in short_path:
path = short_path.split(os.sep, 1)[1]
else:
return False
# Convert all path separators to POSIX style
path = path.replace(os.sep, '/')
# Do the matching on the simplified path
for pattern in self.patterns:
if pattern.match(path):
return True
return False
# Identify the unnecessary portion of the relative path
truncate = os.path.dirname(self.directory)
# Traverse the filesystem downward from the target directory's path
# Errors: `os.walk()` will simply return an empty generator if the
# target directory does not exist.
wildcard_directories = set()
for curr_dir, _, files in os.walk(self.directory):
# find the path relative to the directory being added
if len(truncate) > 0:
_, _, short_path = curr_dir.partition(truncate)
else:
short_path = curr_dir
# remove leading / or \ if it is present
if short_path.startswith(os.sep):
short_path = short_path[1:]
wildcard_directory = False
if os.path.split(short_path)[0] in wildcard_directories:
# Parent directory has matched a pattern, all sub-nodes should
# be added too
wildcard_directories.add(short_path)
wildcard_directory = True
else:
# Check if directory path matches one of the patterns
if match_short_path(short_path):
# Directory matched pattern and it should therefore
# be added along with all of its contents
wildcard_directories.add(short_path)
wildcard_directory = True
# Always add directories within wildcard directories - even if they
# are empty
if wildcard_directory:
add_directory(short_path)
# Iterate across the files in the current directory
for filename in files:
# Find the filename relative to the directory being added
short_name = os.path.join(short_path, filename)
filepath = os.path.join(curr_dir, filename)
if wildcard_directory:
# Always add files in wildcard directories
add_file(short_name, filepath)
else:
# Add file (and all missing intermediary directories)
# if it matches one of the patterns
if match_short_path(short_name):
add_directory(short_path)
add_file(short_name, filepath)
# Prepare, but do not send, the request so requests builds the multipart body
req = requests.Request("POST", 'http://localhost', files=names)
prep = req.prepare()
return prep | python | def _prepare(self):
names = []
added_directories = set()
def add_directory(short_path):
# Do not continue if this directory has already been added
if short_path in added_directories:
return
# Scan for first super-directory that has already been added
dir_base = short_path
dir_parts = []
while dir_base:
dir_base, dir_name = os.path.split(dir_base)
dir_parts.append(dir_name)
if dir_base in added_directories:
break
# Add missing intermediate directory nodes in the right order
while dir_parts:
dir_base = os.path.join(dir_base, dir_parts.pop())
# Create an empty, fake file to represent the directory
mock_file = io.StringIO()
mock_file.write(u'')
# Add this directory to those that will be sent
names.append(('files',
(dir_base.replace(os.sep, '/'), mock_file, 'application/x-directory')))
# Remember that this directory has already been sent
added_directories.add(dir_base)
def add_file(short_path, full_path):
try:
# Always add files in wildcard directories
names.append(('files', (short_path.replace(os.sep, '/'),
open(full_path, 'rb'),
'application/octet-stream')))
except OSError:
# File might have disappeared between `os.walk()` and `open()`
pass
def match_short_path(short_path):
# Remove initial path component so that all files are based in
# the target directory itself (not one level above)
if os.sep in short_path:
path = short_path.split(os.sep, 1)[1]
else:
return False
# Convert all path separators to POSIX style
path = path.replace(os.sep, '/')
# Do the matching on the simplified path
for pattern in self.patterns:
if pattern.match(path):
return True
return False
# Identify the unnecessary portion of the relative path
truncate = os.path.dirname(self.directory)
# Traverse the filesystem downward from the target directory's path
# Errors: `os.walk()` will simply return an empty generator if the
# target directory does not exist.
wildcard_directories = set()
for curr_dir, _, files in os.walk(self.directory):
# find the path relative to the directory being added
if len(truncate) > 0:
_, _, short_path = curr_dir.partition(truncate)
else:
short_path = curr_dir
# remove leading / or \ if it is present
if short_path.startswith(os.sep):
short_path = short_path[1:]
wildcard_directory = False
if os.path.split(short_path)[0] in wildcard_directories:
# Parent directory has matched a pattern, all sub-nodes should
# be added too
wildcard_directories.add(short_path)
wildcard_directory = True
else:
# Check if directory path matches one of the patterns
if match_short_path(short_path):
# Directory matched pattern and it should therefore
# be added along with all of its contents
wildcard_directories.add(short_path)
wildcard_directory = True
# Always add directories within wildcard directories - even if they
# are empty
if wildcard_directory:
add_directory(short_path)
# Iterate across the files in the current directory
for filename in files:
# Find the filename relative to the directory being added
short_name = os.path.join(short_path, filename)
filepath = os.path.join(curr_dir, filename)
if wildcard_directory:
# Always add files in wildcard directories
add_file(short_name, filepath)
else:
# Add file (and all missing intermediary directories)
# if it matches one of the patterns
if match_short_path(short_name):
add_directory(short_path)
add_file(short_name, filepath)
# Prepare, but do not send, the request so requests builds the multipart body
req = requests.Request("POST", 'http://localhost', files=names)
prep = req.prepare()
return prep | [
"def",
"_prepare",
"(",
"self",
")",
":",
"names",
"=",
"[",
"]",
"added_directories",
"=",
"set",
"(",
")",
"def",
"add_directory",
"(",
"short_path",
")",
":",
"# Do not continue if this directory has already been added",
"if",
"short_path",
"in",
"added_directories",
":",
"return",
"# Scan for first super-directory that has already been added",
"dir_base",
"=",
"short_path",
"dir_parts",
"=",
"[",
"]",
"while",
"dir_base",
":",
"dir_base",
",",
"dir_name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"dir_base",
")",
"dir_parts",
".",
"append",
"(",
"dir_name",
")",
"if",
"dir_base",
"in",
"added_directories",
":",
"break",
"# Add missing intermediate directory nodes in the right order",
"while",
"dir_parts",
":",
"dir_base",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_base",
",",
"dir_parts",
".",
"pop",
"(",
")",
")",
"# Create an empty, fake file to represent the directory",
"mock_file",
"=",
"io",
".",
"StringIO",
"(",
")",
"mock_file",
".",
"write",
"(",
"u''",
")",
"# Add this directory to those that will be sent",
"names",
".",
"append",
"(",
"(",
"'files'",
",",
"(",
"dir_base",
".",
"replace",
"(",
"os",
".",
"sep",
",",
"'/'",
")",
",",
"mock_file",
",",
"'application/x-directory'",
")",
")",
")",
"# Remember that this directory has already been sent",
"added_directories",
".",
"add",
"(",
"dir_base",
")",
"def",
"add_file",
"(",
"short_path",
",",
"full_path",
")",
":",
"try",
":",
"# Always add files in wildcard directories",
"names",
".",
"append",
"(",
"(",
"'files'",
",",
"(",
"short_name",
".",
"replace",
"(",
"os",
".",
"sep",
",",
"'/'",
")",
",",
"open",
"(",
"full_path",
",",
"'rb'",
")",
",",
"'application/octet-stream'",
")",
")",
")",
"except",
"OSError",
":",
"# File might have disappeared between `os.walk()` and `open()`",
"pass",
"def",
"match_short_path",
"(",
"short_path",
")",
":",
"# Remove initial path component so that all files are based in",
"# the target directory itself (not one level above)",
"if",
"os",
".",
"sep",
"in",
"short_path",
":",
"path",
"=",
"short_path",
".",
"split",
"(",
"os",
".",
"sep",
",",
"1",
")",
"[",
"1",
"]",
"else",
":",
"return",
"False",
"# Convert all path seperators to POSIX style",
"path",
"=",
"path",
".",
"replace",
"(",
"os",
".",
"sep",
",",
"'/'",
")",
"# Do the matching and the simplified path",
"for",
"pattern",
"in",
"self",
".",
"patterns",
":",
"if",
"pattern",
".",
"match",
"(",
"path",
")",
":",
"return",
"True",
"return",
"False",
"# Identify the unecessary portion of the relative path",
"truncate",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"directory",
")",
"# Traverse the filesystem downward from the target directory's uri",
"# Errors: `os.walk()` will simply return an empty generator if the",
"# target directory does not exist.",
"wildcard_directories",
"=",
"set",
"(",
")",
"for",
"curr_dir",
",",
"_",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"self",
".",
"directory",
")",
":",
"# find the path relative to the directory being added",
"if",
"len",
"(",
"truncate",
")",
">",
"0",
":",
"_",
",",
"_",
",",
"short_path",
"=",
"curr_dir",
".",
"partition",
"(",
"truncate",
")",
"else",
":",
"short_path",
"=",
"curr_dir",
"# remove leading / or \\ if it is present",
"if",
"short_path",
".",
"startswith",
"(",
"os",
".",
"sep",
")",
":",
"short_path",
"=",
"short_path",
"[",
"1",
":",
"]",
"wildcard_directory",
"=",
"False",
"if",
"os",
".",
"path",
".",
"split",
"(",
"short_path",
")",
"[",
"0",
"]",
"in",
"wildcard_directories",
":",
"# Parent directory has matched a pattern, all sub-nodes should",
"# be added too",
"wildcard_directories",
".",
"add",
"(",
"short_path",
")",
"wildcard_directory",
"=",
"True",
"else",
":",
"# Check if directory path matches one of the patterns",
"if",
"match_short_path",
"(",
"short_path",
")",
":",
"# Directory matched pattern and it should therefor",
"# be added along with all of its contents",
"wildcard_directories",
".",
"add",
"(",
"short_path",
")",
"wildcard_directory",
"=",
"True",
"# Always add directories within wildcard directories - even if they",
"# are empty",
"if",
"wildcard_directory",
":",
"add_directory",
"(",
"short_path",
")",
"# Iterate across the files in the current directory",
"for",
"filename",
"in",
"files",
":",
"# Find the filename relative to the directory being added",
"short_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"short_path",
",",
"filename",
")",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"curr_dir",
",",
"filename",
")",
"if",
"wildcard_directory",
":",
"# Always add files in wildcard directories",
"add_file",
"(",
"short_name",
",",
"filepath",
")",
"else",
":",
"# Add file (and all missing intermediary directories)",
"# if it matches one of the patterns",
"if",
"match_short_path",
"(",
"short_name",
")",
":",
"add_directory",
"(",
"short_path",
")",
"add_file",
"(",
"short_name",
",",
"filepath",
")",
"# Send the request and present the response body to the user",
"req",
"=",
"requests",
".",
"Request",
"(",
"\"POST\"",
",",
"'http://localhost'",
",",
"files",
"=",
"names",
")",
"prep",
"=",
"req",
".",
"prepare",
"(",
")",
"return",
"prep"
] | Pre-formats the multipart HTTP request to transmit the directory. | [
"Pre",
"-",
"formats",
"the",
"multipart",
"HTTP",
"request",
"to",
"transmit",
"the",
"directory",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/multipart.py#L415-L528 |
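The closing trick, preparing rather than sending a ``requests.Request``, is what turns the collected ``names`` list into a ready-made multipart body and headers; the same idea in isolation (file names and contents are illustrative):
.. code-block:: python

    import io
    import requests

    names = [
        ('files', ('proj', io.StringIO(u''), 'application/x-directory')),
        ('files', ('proj/a.txt', io.BytesIO(b'hi'),
                   'application/octet-stream')),
    ]
    prep = requests.Request('POST', 'http://localhost',
                            files=names).prepare()
    prep.headers['Content-Type']  # 'multipart/form-data; boundary=...'
    prep.body                     # the fully encoded payload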
248,487 | ipfs/py-ipfs-api | ipfsapi/multipart.py | BytesStream.body | def body(self):
"""Yields the encoded body."""
for chunk in self.gen_chunks(self.envelope.file_open(self.name)):
yield chunk
for chunk in self.gen_chunks(self.data):
yield chunk
for chunk in self.gen_chunks(self.envelope.file_close()):
yield chunk
for chunk in self.close():
yield chunk | python | def body(self):
for chunk in self.gen_chunks(self.envelope.file_open(self.name)):
yield chunk
for chunk in self.gen_chunks(self.data):
yield chunk
for chunk in self.gen_chunks(self.envelope.file_close()):
yield chunk
for chunk in self.close():
yield chunk | [
"def",
"body",
"(",
"self",
")",
":",
"for",
"chunk",
"in",
"self",
".",
"gen_chunks",
"(",
"self",
".",
"envelope",
".",
"file_open",
"(",
"self",
".",
"name",
")",
")",
":",
"yield",
"chunk",
"for",
"chunk",
"in",
"self",
".",
"gen_chunks",
"(",
"self",
".",
"data",
")",
":",
"yield",
"chunk",
"for",
"chunk",
"in",
"self",
".",
"gen_chunks",
"(",
"self",
".",
"envelope",
".",
"file_close",
"(",
")",
")",
":",
"yield",
"chunk",
"for",
"chunk",
"in",
"self",
".",
"close",
"(",
")",
":",
"yield",
"chunk"
] | Yields the encoded body. | [
"Yields",
"the",
"encoded",
"body",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/multipart.py#L548-L557 |
248,488 | ipfs/py-ipfs-api | ipfsapi/http.py | pass_defaults | def pass_defaults(func):
"""Decorator that returns a function named wrapper.
When invoked, wrapper invokes func with default kwargs appended.
Parameters
----------
func : callable
The function to append the default kwargs to
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
merged = {}
merged.update(self.defaults)
merged.update(kwargs)
return func(self, *args, **merged)
return wrapper | python | def pass_defaults(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
merged = {}
merged.update(self.defaults)
merged.update(kwargs)
return func(self, *args, **merged)
return wrapper | [
"def",
"pass_defaults",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"merged",
"=",
"{",
"}",
"merged",
".",
"update",
"(",
"self",
".",
"defaults",
")",
"merged",
".",
"update",
"(",
"kwargs",
")",
"return",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"merged",
")",
"return",
"wrapper"
] | Decorator that returns a function named wrapper.
When invoked, wrapper invokes func with default kwargs appended.
Parameters
----------
func : callable
The function to append the default kwargs to | [
"Decorator",
"that",
"returns",
"a",
"function",
"named",
"wrapper",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/http.py#L23-L39 |
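Usage sketch: the instance's ``defaults`` dict is merged in first, so explicit keyword arguments at the call site win (the class and values are illustrative):
.. code-block:: python

    from ipfsapi.http import pass_defaults

    class Example(object):
        def __init__(self):
            self.defaults = {'timeout': 30}

        @pass_defaults
        def fetch(self, path, **kwargs):
            return kwargs

    Example().fetch('/version')             # {'timeout': 30}
    Example().fetch('/version', timeout=5)  # {'timeout': 5}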
248,489 | ipfs/py-ipfs-api | ipfsapi/http.py | HTTPClient.request | def request(self, path,
args=[], files=[], opts={}, stream=False,
decoder=None, headers={}, data=None):
"""Makes an HTTP request to the IPFS daemon.
This function returns the contents of the HTTP response from the IPFS
daemon.
Raises
------
~ipfsapi.exceptions.ErrorResponse
~ipfsapi.exceptions.ConnectionError
~ipfsapi.exceptions.ProtocolError
~ipfsapi.exceptions.StatusError
~ipfsapi.exceptions.TimeoutError
Parameters
----------
path : str
The REST command path to send
args : list
Positional parameters to be sent along with the HTTP request
files : :class:`io.RawIOBase` | :obj:`str` | :obj:`list`
The file object(s) or path(s) to stream to the daemon
opts : dict
Query string parameters to be sent along with the HTTP request
decoder : str
The decoder to use to parse the HTTP response
kwargs : dict
Additional arguments to pass to :mod:`requests`
"""
url = self.base + path
params = []
params.append(('stream-channels', 'true'))
for opt in opts.items():
params.append(opt)
for arg in args:
params.append(('arg', arg))
method = 'post' if (files or data) else 'get'
parser = encoding.get_encoding(decoder if decoder else "none")
return self._request(method, url, params, parser, stream,
files, headers, data) | python | def request(self, path,
args=[], files=[], opts={}, stream=False,
decoder=None, headers={}, data=None):
url = self.base + path
params = []
params.append(('stream-channels', 'true'))
for opt in opts.items():
params.append(opt)
for arg in args:
params.append(('arg', arg))
method = 'post' if (files or data) else 'get'
parser = encoding.get_encoding(decoder if decoder else "none")
return self._request(method, url, params, parser, stream,
files, headers, data) | [
"def",
"request",
"(",
"self",
",",
"path",
",",
"args",
"=",
"[",
"]",
",",
"files",
"=",
"[",
"]",
",",
"opts",
"=",
"{",
"}",
",",
"stream",
"=",
"False",
",",
"decoder",
"=",
"None",
",",
"headers",
"=",
"{",
"}",
",",
"data",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"base",
"+",
"path",
"params",
"=",
"[",
"]",
"params",
".",
"append",
"(",
"(",
"'stream-channels'",
",",
"'true'",
")",
")",
"for",
"opt",
"in",
"opts",
".",
"items",
"(",
")",
":",
"params",
".",
"append",
"(",
"opt",
")",
"for",
"arg",
"in",
"args",
":",
"params",
".",
"append",
"(",
"(",
"'arg'",
",",
"arg",
")",
")",
"method",
"=",
"'post'",
"if",
"(",
"files",
"or",
"data",
")",
"else",
"'get'",
"parser",
"=",
"encoding",
".",
"get_encoding",
"(",
"decoder",
"if",
"decoder",
"else",
"\"none\"",
")",
"return",
"self",
".",
"_request",
"(",
"method",
",",
"url",
",",
"params",
",",
"parser",
",",
"stream",
",",
"files",
",",
"headers",
",",
"data",
")"
] | Makes an HTTP request to the IPFS daemon.
This function returns the contents of the HTTP response from the IPFS
daemon.
Raises
------
~ipfsapi.exceptions.ErrorResponse
~ipfsapi.exceptions.ConnectionError
~ipfsapi.exceptions.ProtocolError
~ipfsapi.exceptions.StatusError
~ipfsapi.exceptions.TimeoutError
Parameters
----------
path : str
The REST command path to send
args : list
Positional parameters to be sent along with the HTTP request
files : :class:`io.RawIOBase` | :obj:`str` | :obj:`list`
The file object(s) or path(s) to stream to the daemon
opts : dict
Query string parameters to be sent along with the HTTP request
decoder : str
The decoder to use to parse the HTTP response
kwargs : dict
Additional arguments to pass to :mod:`requests` | [
"Makes",
"an",
"HTTP",
"request",
"to",
"the",
"IPFS",
"daemon",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/http.py#L202-L247 |
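The query string it assembles pairs every positional argument with the key ``arg``; a sketch of the parameter list that results (the hash is an illustrative placeholder):
.. code-block:: python

    opts = {'only-hash': 'true'}
    args = ('QmExampleHash',)
    params = [('stream-channels', 'true')]
    params.extend(opts.items())
    params.extend(('arg', a) for a in args)
    # [('stream-channels', 'true'), ('only-hash', 'true'),
    #  ('arg', 'QmExampleHash')]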
248,490 | ipfs/py-ipfs-api | ipfsapi/http.py | HTTPClient.download | def download(self, path, args=[], filepath=None, opts={},
compress=True, **kwargs):
"""Makes a request to the IPFS daemon to download a file.
Downloads a file or files from IPFS into the current working
directory, or the directory given by ``filepath``.
Raises
------
~ipfsapi.exceptions.ErrorResponse
~ipfsapi.exceptions.ConnectionError
~ipfsapi.exceptions.ProtocolError
~ipfsapi.exceptions.StatusError
~ipfsapi.exceptions.TimeoutError
Parameters
----------
path : str
The REST command path to send
filepath : str
The local path where IPFS will store downloaded files
Defaults to the current working directory.
args : list
Positional parameters to be sent along with the HTTP request
opts : dict
Query string parameters to be sent along with the HTTP request
compress : bool
Whether the downloaded file should be GZip compressed by the
daemon before being sent to the client
kwargs : dict
Additional arguments to pass to :mod:`requests`
"""
url = self.base + path
wd = filepath or '.'
params = []
params.append(('stream-channels', 'true'))
params.append(('archive', 'true'))
if compress:
params.append(('compress', 'true'))
for opt in opts.items():
params.append(opt)
for arg in args:
params.append(('arg', arg))
method = 'get'
res = self._do_request(method, url, params=params, stream=True,
**kwargs)
self._do_raise_for_status(res)
# try to stream download as a tar file stream
mode = 'r|gz' if compress else 'r|'
with tarfile.open(fileobj=res.raw, mode=mode) as tf:
tf.extractall(path=wd) | python | def download(self, path, args=[], filepath=None, opts={},
compress=True, **kwargs):
url = self.base + path
wd = filepath or '.'
params = []
params.append(('stream-channels', 'true'))
params.append(('archive', 'true'))
if compress:
params.append(('compress', 'true'))
for opt in opts.items():
params.append(opt)
for arg in args:
params.append(('arg', arg))
method = 'get'
res = self._do_request(method, url, params=params, stream=True,
**kwargs)
self._do_raise_for_status(res)
# try to stream download as a tar file stream
mode = 'r|gz' if compress else 'r|'
with tarfile.open(fileobj=res.raw, mode=mode) as tf:
tf.extractall(path=wd) | [
"def",
"download",
"(",
"self",
",",
"path",
",",
"args",
"=",
"[",
"]",
",",
"filepath",
"=",
"None",
",",
"opts",
"=",
"{",
"}",
",",
"compress",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"self",
".",
"base",
"+",
"path",
"wd",
"=",
"filepath",
"or",
"'.'",
"params",
"=",
"[",
"]",
"params",
".",
"append",
"(",
"(",
"'stream-channels'",
",",
"'true'",
")",
")",
"params",
".",
"append",
"(",
"(",
"'archive'",
",",
"'true'",
")",
")",
"if",
"compress",
":",
"params",
".",
"append",
"(",
"(",
"'compress'",
",",
"'true'",
")",
")",
"for",
"opt",
"in",
"opts",
".",
"items",
"(",
")",
":",
"params",
".",
"append",
"(",
"opt",
")",
"for",
"arg",
"in",
"args",
":",
"params",
".",
"append",
"(",
"(",
"'arg'",
",",
"arg",
")",
")",
"method",
"=",
"'get'",
"res",
"=",
"self",
".",
"_do_request",
"(",
"method",
",",
"url",
",",
"params",
"=",
"params",
",",
"stream",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_do_raise_for_status",
"(",
"res",
")",
"# try to stream download as a tar file stream",
"mode",
"=",
"'r|gz'",
"if",
"compress",
"else",
"'r|'",
"with",
"tarfile",
".",
"open",
"(",
"fileobj",
"=",
"res",
".",
"raw",
",",
"mode",
"=",
"mode",
")",
"as",
"tf",
":",
"tf",
".",
"extractall",
"(",
"path",
"=",
"wd",
")"
] | Makes a request to the IPFS daemon to download a file.
Downloads a file or files from IPFS into the current working
directory, or the directory given by ``filepath``.
Raises
------
~ipfsapi.exceptions.ErrorResponse
~ipfsapi.exceptions.ConnectionError
~ipfsapi.exceptions.ProtocolError
~ipfsapi.exceptions.StatusError
~ipfsapi.exceptions.TimeoutError
Parameters
----------
path : str
The REST command path to send
filepath : str
The local path where IPFS will store downloaded files
Defaults to the current working directory.
args : list
Positional parameters to be sent along with the HTTP request
opts : dict
Query string parameters to be sent along with the HTTP request
compress : bool
Whether the downloaded file should be GZip compressed by the
daemon before being sent to the client
kwargs : dict
Additional arguments to pass to :mod:`requests` | [
"Makes",
"a",
"request",
"to",
"the",
"IPFS",
"daemon",
"to",
"download",
"a",
"file",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/http.py#L250-L308 |
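The response arrives as a tar stream (gzip-compressed unless ``compress`` is false) and is unpacked on the fly; the core of that, in isolation:
.. code-block:: python

    import tarfile

    def extract_stream(fileobj, compress=True, dest='.'):
        # 'r|gz' / 'r|' are streaming modes, so a non-seekable raw
        # HTTP response body is acceptable as `fileobj`.
        mode = 'r|gz' if compress else 'r|'
        with tarfile.open(fileobj=fileobj, mode=mode) as tf:
            tf.extractall(path=dest)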
248,491 | ipfs/py-ipfs-api | ipfsapi/http.py | HTTPClient.session | def session(self):
"""A context manager for this client's session.
This function closes the current session when this client goes out of
scope.
"""
self._session = requests.session()
yield
self._session.close()
self._session = None | python | def session(self):
self._session = requests.session()
yield
self._session.close()
self._session = None | [
"def",
"session",
"(",
"self",
")",
":",
"self",
".",
"_session",
"=",
"requests",
".",
"session",
"(",
")",
"yield",
"self",
".",
"_session",
".",
"close",
"(",
")",
"self",
".",
"_session",
"=",
"None"
] | A context manager for this client's session.
This function closes the current session when this client goes out of
scope. | [
"A",
"context",
"manager",
"for",
"this",
"client",
"s",
"session",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/http.py#L311-L320 |
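The yield-based body suggests this method is wrapped as a context manager; assuming so, and given an already-constructed ``client`` instance, several calls can share one TCP session:
.. code-block:: python

    with client.session():
        client.request('/version', decoder='json')
        client.request('/id', decoder='json')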
248,492 | ipfs/py-ipfs-api | ipfsapi/client.py | assert_version | def assert_version(version, minimum=VERSION_MINIMUM, maximum=VERSION_MAXIMUM):
"""Make sure that the given daemon version is supported by this client
version.
Raises
------
~ipfsapi.exceptions.VersionMismatch
Parameters
----------
version : str
The version of an IPFS daemon.
minimum : str
The minimal IPFS version to allow.
maximum : str
The maximum IPFS version to allow.
"""
# Convert version strings to integer tuples
version = list(map(int, version.split('-', 1)[0].split('.')))
minimum = list(map(int, minimum.split('-', 1)[0].split('.')))
maximum = list(map(int, maximum.split('-', 1)[0].split('.')))
if minimum > version or version >= maximum:
raise exceptions.VersionMismatch(version, minimum, maximum) | python | def assert_version(version, minimum=VERSION_MINIMUM, maximum=VERSION_MAXIMUM):
# Convert version strings to integer tuples
version = list(map(int, version.split('-', 1)[0].split('.')))
minimum = list(map(int, minimum.split('-', 1)[0].split('.')))
maximum = list(map(int, maximum.split('-', 1)[0].split('.')))
if minimum > version or version >= maximum:
raise exceptions.VersionMismatch(version, minimum, maximum) | [
"def",
"assert_version",
"(",
"version",
",",
"minimum",
"=",
"VERSION_MINIMUM",
",",
"maximum",
"=",
"VERSION_MAXIMUM",
")",
":",
"# Convert version strings to integer tuples",
"version",
"=",
"list",
"(",
"map",
"(",
"int",
",",
"version",
".",
"split",
"(",
"'-'",
",",
"1",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
")",
")",
"minimum",
"=",
"list",
"(",
"map",
"(",
"int",
",",
"minimum",
".",
"split",
"(",
"'-'",
",",
"1",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
")",
")",
"maximum",
"=",
"list",
"(",
"map",
"(",
"int",
",",
"maximum",
".",
"split",
"(",
"'-'",
",",
"1",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
")",
")",
"if",
"minimum",
">",
"version",
"or",
"version",
">=",
"maximum",
":",
"raise",
"exceptions",
".",
"VersionMismatch",
"(",
"version",
",",
"minimum",
",",
"maximum",
")"
] | Make sure that the given daemon version is supported by this client
version.
Raises
------
~ipfsapi.exceptions.VersionMismatch
Parameters
----------
version : str
The version of an IPFS daemon.
minimum : str
The minimal IPFS version to allow.
maximum : str
The maximum IPFS version to allow. | [
"Make",
"sure",
"that",
"the",
"given",
"daemon",
"version",
"is",
"supported",
"by",
"this",
"client",
"version",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L23-L46 |
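Because the comparison is plain element-wise list ordering after any ``-suffix`` is stripped, the bounds form a half-open interval: minimum inclusive, maximum exclusive. A sketch:
.. code-block:: python

    from ipfsapi.client import assert_version

    assert_version('0.4.10-dev', minimum='0.4.3', maximum='0.5.0')  # ok
    assert_version('0.5.0', minimum='0.4.3', maximum='0.5.0')
    # raises VersionMismatch, since the maximum bound is exclusive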
248,493 | ipfs/py-ipfs-api | ipfsapi/client.py | Client.add | def add(self, files, recursive=False, pattern='**', *args, **kwargs):
"""Add a file, or directory of files to IPFS.
.. code-block:: python
>>> with io.open('nurseryrhyme.txt', 'w', encoding='utf-8') as f:
... numbytes = f.write('Mary had a little lamb')
>>> c.add('nurseryrhyme.txt')
{'Hash': 'QmZfF6C9j4VtoCsTp4KSrhYH47QMd3DNXVZBKaxJdhaPab',
'Name': 'nurseryrhyme.txt'}
Parameters
----------
files : str
A filepath to either a file or directory
recursive : bool
Controls if files in subdirectories are added or not
pattern : str | list
Single `*glob* <https://docs.python.org/3/library/glob.html>`_
pattern or list of *glob* patterns and compiled regular expressions
to match the names of the filepaths to keep
trickle : bool
Use trickle-dag format (optimized for streaming) when generating
the dag; see `the FAQ <https://github.com/ipfs/faq/issues/218>` for
more information (Default: ``False``)
only_hash : bool
Only chunk and hash, but do not write to disk (Default: ``False``)
wrap_with_directory : bool
Wrap files with a directory object to preserve their filename
(Default: ``False``)
chunker : str
The chunking algorithm to use
pin : bool
Pin this object when adding (Default: ``True``)
Returns
-------
dict: File name and hash of the added file node
"""
#PY2: No support for kw-only parameters after glob parameters
opts = {
"trickle": kwargs.pop("trickle", False),
"only-hash": kwargs.pop("only_hash", False),
"wrap-with-directory": kwargs.pop("wrap_with_directory", False),
"pin": kwargs.pop("pin", True)
}
if "chunker" in kwargs:
opts["chunker"] = kwargs.pop("chunker")
kwargs.setdefault("opts", opts)
body, headers = multipart.stream_filesystem_node(
files, recursive, pattern, self.chunk_size
)
return self._client.request('/add', decoder='json',
data=body, headers=headers, **kwargs) | python | def add(self, files, recursive=False, pattern='**', *args, **kwargs):
#PY2: No support for kw-only parameters after glob parameters
opts = {
"trickle": kwargs.pop("trickle", False),
"only-hash": kwargs.pop("only_hash", False),
"wrap-with-directory": kwargs.pop("wrap_with_directory", False),
"pin": kwargs.pop("pin", True)
}
if "chunker" in kwargs:
opts["chunker"] = kwargs.pop("chunker")
kwargs.setdefault("opts", opts)
body, headers = multipart.stream_filesystem_node(
files, recursive, pattern, self.chunk_size
)
return self._client.request('/add', decoder='json',
data=body, headers=headers, **kwargs) | [
"def",
"add",
"(",
"self",
",",
"files",
",",
"recursive",
"=",
"False",
",",
"pattern",
"=",
"'**'",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"#PY2: No support for kw-only parameters after glob parameters",
"opts",
"=",
"{",
"\"trickle\"",
":",
"kwargs",
".",
"pop",
"(",
"\"trickle\"",
",",
"False",
")",
",",
"\"only-hash\"",
":",
"kwargs",
".",
"pop",
"(",
"\"only_hash\"",
",",
"False",
")",
",",
"\"wrap-with-directory\"",
":",
"kwargs",
".",
"pop",
"(",
"\"wrap_with_directory\"",
",",
"False",
")",
",",
"\"pin\"",
":",
"kwargs",
".",
"pop",
"(",
"\"pin\"",
",",
"True",
")",
"}",
"if",
"\"chunker\"",
"in",
"kwargs",
":",
"opts",
"[",
"\"chunker\"",
"]",
"=",
"kwargs",
".",
"pop",
"(",
"\"chunker\"",
")",
"kwargs",
".",
"setdefault",
"(",
"\"opts\"",
",",
"opts",
")",
"body",
",",
"headers",
"=",
"multipart",
".",
"stream_filesystem_node",
"(",
"files",
",",
"recursive",
",",
"pattern",
",",
"self",
".",
"chunk_size",
")",
"return",
"self",
".",
"_client",
".",
"request",
"(",
"'/add'",
",",
"decoder",
"=",
"'json'",
",",
"data",
"=",
"body",
",",
"headers",
"=",
"headers",
",",
"*",
"*",
"kwargs",
")"
] | Add a file, or directory of files to IPFS.
.. code-block:: python
>>> with io.open('nurseryrhyme.txt', 'w', encoding='utf-8') as f:
... numbytes = f.write('Mary had a little lamb')
>>> c.add('nurseryrhyme.txt')
{'Hash': 'QmZfF6C9j4VtoCsTp4KSrhYH47QMd3DNXVZBKaxJdhaPab',
'Name': 'nurseryrhyme.txt'}
Parameters
----------
files : str
A filepath to either a file or directory
recursive : bool
Controls if files in subdirectories are added or not
pattern : str | list
Single `*glob* <https://docs.python.org/3/library/glob.html>`_
pattern or list of *glob* patterns and compiled regular expressions
to match the names of the filepaths to keep
trickle : bool
Use trickle-dag format (optimized for streaming) when generating
the dag; see `the FAQ <https://github.com/ipfs/faq/issues/218>` for
more information (Default: ``False``)
only_hash : bool
Only chunk and hash, but do not write to disk (Default: ``False``)
wrap_with_directory : bool
Wrap files with a directory object to preserve their filename
(Default: ``False``)
chunker : str
The chunking algorithm to use
pin : bool
Pin this object when adding (Default: ``True``)
Returns
-------
dict: File name and hash of the added file node | [
"Add",
"a",
"file",
"or",
"directory",
"of",
"files",
"to",
"IPFS",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L135-L189 |
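A hedged end-to-end sketch (the daemon address and paths are illustrative):
.. code-block:: python

    import ipfsapi

    c = ipfsapi.connect('127.0.0.1', 5001)
    res = c.add('project', recursive=True, pattern='**/*.py',
                only_hash=True)  # chunk and hash, but write nothing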
248,494 | ipfs/py-ipfs-api | ipfsapi/client.py | Client.get | def get(self, multihash, **kwargs):
"""Downloads a file, or directory of files from IPFS.
Files are placed in the current working directory.
Parameters
----------
multihash : str
The path to the IPFS object(s) to be outputted
"""
args = (multihash,)
return self._client.download('/get', args, **kwargs) | python | def get(self, multihash, **kwargs):
args = (multihash,)
return self._client.download('/get', args, **kwargs) | [
"def",
"get",
"(",
"self",
",",
"multihash",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"(",
"multihash",
",",
")",
"return",
"self",
".",
"_client",
".",
"download",
"(",
"'/get'",
",",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Downloads a file, or directory of files from IPFS.
Files are placed in the current working directory.
Parameters
----------
multihash : str
The path to the IPFS object(s) to be outputted | [
"Downloads",
"a",
"file",
"or",
"directory",
"of",
"files",
"from",
"IPFS",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L191-L202 |
248,495 | ipfs/py-ipfs-api | ipfsapi/client.py | Client.cat | def cat(self, multihash, offset=0, length=-1, **kwargs):
r"""Retrieves the contents of a file identified by hash.
.. code-block:: python
>>> c.cat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
Traceback (most recent call last):
...
ipfsapi.exceptions.Error: this dag node is a directory
>>> c.cat('QmeKozNssnkJ4NcyRidYgDY2jfRZqVEoRGfipkgath71bX')
b'<!DOCTYPE html>\n<html>\n\n<head>\n<title>ipfs example viewer</…'
Parameters
----------
multihash : str
The path to the IPFS object(s) to be retrieved
offset : int
Byte offset to begin reading from
length : int
Maximum number of bytes to read(-1 for all)
Returns
-------
str : File contents
"""
opts = {}
if offset != 0:
opts['offset'] = offset
if length != -1:
opts['length'] = length
args = (multihash,)
return self._client.request('/cat', args, opts=opts, **kwargs) | python | def cat(self, multihash, offset=0, length=-1, **kwargs):
r"""Retrieves the contents of a file identified by hash.
.. code-block:: python
>>> c.cat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
Traceback (most recent call last):
...
ipfsapi.exceptions.Error: this dag node is a directory
>>> c.cat('QmeKozNssnkJ4NcyRidYgDY2jfRZqVEoRGfipkgath71bX')
b'<!DOCTYPE html>\n<html>\n\n<head>\n<title>ipfs example viewer</…'
Parameters
----------
multihash : str
The path to the IPFS object(s) to be retrieved
offset : int
Byte offset to begin reading from
length : int
Maximum number of bytes to read(-1 for all)
Returns
-------
str : File contents
"""
opts = {}
if offset != 0:
opts['offset'] = offset
if length != -1:
opts['length'] = length
args = (multihash,)
return self._client.request('/cat', args, opts=opts, **kwargs) | [
"def",
"cat",
"(",
"self",
",",
"multihash",
",",
"offset",
"=",
"0",
",",
"length",
"=",
"-",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"opts",
"=",
"{",
"}",
"if",
"offset",
"!=",
"0",
":",
"opts",
"[",
"'offset'",
"]",
"=",
"offset",
"if",
"length",
"!=",
"-",
"1",
":",
"opts",
"[",
"'length'",
"]",
"=",
"length",
"args",
"=",
"(",
"multihash",
",",
")",
"return",
"self",
".",
"_client",
".",
"request",
"(",
"'/cat'",
",",
"args",
",",
"opts",
"=",
"opts",
",",
"*",
"*",
"kwargs",
")"
] | r"""Retrieves the contents of a file identified by hash.
.. code-block:: python
>>> c.cat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
Traceback (most recent call last):
...
ipfsapi.exceptions.Error: this dag node is a directory
>>> c.cat('QmeKozNssnkJ4NcyRidYgDY2jfRZqVEoRGfipkgath71bX')
b'<!DOCTYPE html>\n<html>\n\n<head>\n<title>ipfs example viewer</…'
Parameters
----------
multihash : str
The path to the IPFS object(s) to be retrieved
offset : int
Byte offset to begin reading from
length : int
Maximum number of bytes to read(-1 for all)
Returns
-------
str : File contents | [
"r",
"Retrieves",
"the",
"contents",
"of",
"a",
"file",
"identified",
"by",
"hash",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L204-L235 |
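Reading a byte window from the middle of an object (offsets are illustrative; the method only sends ``offset``/``length`` when they differ from the defaults):
.. code-block:: python

    chunk = c.cat('QmeKozNssnkJ4NcyRidYgDY2jfRZqVEoRGfipkgath71bX',
                  offset=128, length=64)  # 64 bytes starting at byte 128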
248,496 | ipfs/py-ipfs-api | ipfsapi/client.py | Client.ls | def ls(self, multihash, **kwargs):
"""Returns a list of objects linked to by the given hash.
.. code-block:: python
>>> c.ls('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
{'Objects': [
{'Hash': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D',
'Links': [
{'Hash': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV',
'Name': 'Makefile', 'Size': 174, 'Type': 2},
…
{'Hash': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTiYwKir8eXJY',
'Name': 'published-version', 'Size': 55, 'Type': 2}
]}
]}
Parameters
----------
multihash : str
The path to the IPFS object(s) to list links from
Returns
-------
dict : Directory information and contents
"""
args = (multihash,)
return self._client.request('/ls', args, decoder='json', **kwargs) | python | def ls(self, multihash, **kwargs):
args = (multihash,)
return self._client.request('/ls', args, decoder='json', **kwargs) | [
"def",
"ls",
"(",
"self",
",",
"multihash",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"(",
"multihash",
",",
")",
"return",
"self",
".",
"_client",
".",
"request",
"(",
"'/ls'",
",",
"args",
",",
"decoder",
"=",
"'json'",
",",
"*",
"*",
"kwargs",
")"
] | Returns a list of objects linked to by the given hash.
.. code-block:: python
>>> c.ls('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
{'Objects': [
{'Hash': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D',
'Links': [
{'Hash': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV',
'Name': 'Makefile', 'Size': 174, 'Type': 2},
…
{'Hash': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTiYwKir8eXJY',
'Name': 'published-version', 'Size': 55, 'Type': 2}
]}
]}
Parameters
----------
multihash : str
The path to the IPFS object(s) to list links from
Returns
-------
dict : Directory information and contents | [
"Returns",
"a",
"list",
"of",
"objects",
"linked",
"to",
"by",
"the",
"given",
"hash",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L237-L264 |
248,497 | ipfs/py-ipfs-api | ipfsapi/client.py | Client.refs | def refs(self, multihash, **kwargs):
"""Returns a list of hashes of objects referenced by the given hash.
.. code-block:: python
>>> c.refs('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
[{'Ref': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7 … cNMV', 'Err': ''},
…
{'Ref': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTi … eXJY', 'Err': ''}]
Parameters
----------
multihash : str
Path to the object(s) to list refs from
Returns
-------
list
"""
args = (multihash,)
return self._client.request('/refs', args, decoder='json', **kwargs) | python | def refs(self, multihash, **kwargs):
args = (multihash,)
return self._client.request('/refs', args, decoder='json', **kwargs) | [
"def",
"refs",
"(",
"self",
",",
"multihash",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"(",
"multihash",
",",
")",
"return",
"self",
".",
"_client",
".",
"request",
"(",
"'/refs'",
",",
"args",
",",
"decoder",
"=",
"'json'",
",",
"*",
"*",
"kwargs",
")"
] | Returns a list of hashes of objects referenced by the given hash.
.. code-block:: python
>>> c.refs('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
[{'Ref': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7 … cNMV', 'Err': ''},
…
{'Ref': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTi … eXJY', 'Err': ''}]
Parameters
----------
multihash : str
Path to the object(s) to list refs from
Returns
-------
list | [
"Returns",
"a",
"list",
"of",
"hashes",
"of",
"objects",
"referenced",
"by",
"the",
"given",
"hash",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L266-L286 |
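Where ls reports named directory entries, refs reports only the hashes an object points to, each paired with an 'Err' string that is empty on success. A sketch that collects the cleanly resolved references, under the same local-daemon assumption:

.. code-block:: python

    import ipfsapi

    c = ipfsapi.connect('127.0.0.1', 5001)  # assumes a local daemon

    root = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D'

    # Keep only references whose 'Err' field is empty.
    clean = [entry['Ref'] for entry in c.refs(root) if not entry['Err']]
    print(len(clean), 'objects referenced by', root)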
248,498 | ipfs/py-ipfs-api | ipfsapi/client.py | Client.block_stat | def block_stat(self, multihash, **kwargs):
"""Returns a dict with the size of the block with the given hash.
.. code-block:: python
>>> c.block_stat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
{'Key': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D',
'Size': 258}
Parameters
----------
multihash : str
The base58 multihash of an existing block to stat
Returns
-------
dict : Information about the requested block
"""
args = (multihash,)
return self._client.request('/block/stat', args,
decoder='json', **kwargs) | python | def block_stat(self, multihash, **kwargs):
args = (multihash,)
return self._client.request('/block/stat', args,
decoder='json', **kwargs) | [
"def",
"block_stat",
"(",
"self",
",",
"multihash",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"(",
"multihash",
",",
")",
"return",
"self",
".",
"_client",
".",
"request",
"(",
"'/block/stat'",
",",
"args",
",",
"decoder",
"=",
"'json'",
",",
"*",
"*",
"kwargs",
")"
] | Returns a dict with the size of the block with the given hash.
.. code-block:: python
>>> c.block_stat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
{'Key': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D',
'Size': 258}
Parameters
----------
multihash : str
The base58 multihash of an existing block to stat
Returns
-------
dict : Information about the requested block | [
"Returns",
"a",
"dict",
"with",
"the",
"size",
"of",
"the",
"block",
"with",
"the",
"given",
"hash",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L304-L324 |
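block_stat operates on raw blocks rather than on the object layer, so it reports a single byte size under 'Size'. A sketch that uses it as a lightweight size probe, again assuming a local daemon:

.. code-block:: python

    import ipfsapi

    c = ipfsapi.connect('127.0.0.1', 5001)  # assumes a local daemon

    block = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D'

    stat = c.block_stat(block)
    # 'Key' echoes the multihash; 'Size' is the raw block size in bytes.
    print('block {Key} is {Size} bytes'.format(**stat))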
248,499 | ipfs/py-ipfs-api | ipfsapi/client.py | Client.block_get | def block_get(self, multihash, **kwargs):
r"""Returns the raw contents of a block.
.. code-block:: python
>>> c.block_get('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
b'\x121\n"\x12 \xdaW>\x14\xe5\xc1\xf6\xe4\x92\xd1 … \n\x02\x08\x01'
Parameters
----------
multihash : str
The base58 multihash of an existing block to get
Returns
-------
str : Value of the requested block
"""
args = (multihash,)
return self._client.request('/block/get', args, **kwargs) | python | def block_get(self, multihash, **kwargs):
r"""Returns the raw contents of a block.
.. code-block:: python
>>> c.block_get('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
b'\x121\n"\x12 \xdaW>\x14\xe5\xc1\xf6\xe4\x92\xd1 … \n\x02\x08\x01'
Parameters
----------
multihash : str
The base58 multihash of an existing block to get
Returns
-------
str : Value of the requested block
"""
args = (multihash,)
return self._client.request('/block/get', args, **kwargs) | [
"def",
"block_get",
"(",
"self",
",",
"multihash",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"(",
"multihash",
",",
")",
"return",
"self",
".",
"_client",
".",
"request",
"(",
"'/block/get'",
",",
"args",
",",
"*",
"*",
"kwargs",
")"
] | r"""Returns the raw contents of a block.
.. code-block:: python
>>> c.block_get('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
b'\x121\n"\x12 \xdaW>\x14\xe5\xc1\xf6\xe4\x92\xd1 … \n\x02\x08\x01'
Parameters
----------
multihash : str
The base58 multihash of an existing block to get
Returns
-------
str : Value of the requested block | [
"r",
"Returns",
"the",
"raw",
"contents",
"of",
"a",
"block",
"."
] | 7574dad04877b45dbe4ad321dcfa9e880eb2d90c | https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L326-L344 |
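block_get returns the block's raw bytes (for this hash, a protobuf-encoded dag node), so its length should agree with the 'Size' that block_stat reports for the same multihash. A sketch pairing the two calls, under the same local-daemon assumption:

.. code-block:: python

    import ipfsapi

    c = ipfsapi.connect('127.0.0.1', 5001)  # assumes a local daemon

    block = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D'

    raw = c.block_get(block)             # raw block contents
    size = c.block_stat(block)['Size']   # size reported by the daemon
    assert len(raw) == size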