INSTRUCTION | RESPONSE
---|---|
Ping {url} until it returns a results payload, timing out after
{pings} pings and waiting {sleep} seconds between pings.
|
def await_results(url, pings=45, sleep=2):
"""
Ping {url} until it returns a results payload, timing out after
{pings} pings and waiting {sleep} seconds between pings.
"""
print("Checking...", end="", flush=True)
for _ in range(pings):
# Query for check results.
res = requests.post(url)
if res.status_code != 200:
continue
payload = res.json()
if payload["complete"]:
break
print(".", end="", flush=True)
time.sleep(sleep)
else:
# Terminate if no response
print()
raise Error(
_("check50 is taking longer than normal!\nSee https://cs50.me/checks/{} for more detail.").format(commit_hash))
print()
# TODO: Should probably check payload["checks"]["version"] here to make sure major version is same as __version__
# (otherwise we may not be able to parse results)
return (CheckResult(**result) for result in payload["checks"]["results"])
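A hypothetical usage sketch; the endpoint URL shape is an assumption, and ``commit_hash`` stands in for the submission's commit hash referenced above::
    results = await_results("https://example.org/check50/" + commit_hash)
    for check_result in results:
        print(check_result.name, check_result.passed)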
|
Copy files/directories from the check directory (:data:`check50.internal.check_dir`),
to the current directory
:params paths: files/directories to be copied
Example usage::
check50.include("foo.txt", "bar.txt")
assert os.path.exists("foo.txt") and os.path.exists("bar.txt")
|
def include(*paths):
"""
Copy files/directories from the check directory (:data:`check50.internal.check_dir`),
to the current directory
:params paths: files/directories to be copied
Example usage::
check50.include("foo.txt", "bar.txt")
assert os.path.exists("foo.txt") and os.path.exists("bar.txt")
"""
cwd = os.getcwd()
for path in paths:
_copy((internal.check_dir / path).resolve(), cwd)
|
Hashes file using SHA-256.
:param file: name of file to be hashed
:type file: str
:rtype: str
:raises check50.Failure: if ``file`` does not exist
|
def hash(file):
"""
Hashes file using SHA-256.
:param file: name of file to be hashed
:type file: str
:rtype: str
:raises check50.Failure: if ``file`` does not exist
"""
exists(file)
log(_("hashing {}...").format(file))
# https://stackoverflow.com/a/22058673
with open(file, "rb") as f:
sha256 = hashlib.sha256()
for block in iter(lambda: f.read(65536), b""):
sha256.update(block)
return sha256.hexdigest()
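A usage sketch; the filename and digest below are placeholders, not values from the source::
    expected = "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"  # placeholder digest
    if check50.hash("answer.txt") != expected:
        raise check50.Failure("answer.txt does not have the expected contents")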
|
Assert that all given paths exist.
:params paths: files/directories to be checked for existence
:raises check50.Failure: if any ``path in paths`` does not exist
Example usage::
check50.exists("foo.c", "foo.h")
|
def exists(*paths):
"""
Assert that all given paths exist.
:params paths: files/directories to be checked for existence
:raises check50.Failure: if any ``path in paths`` does not exist
Example usage::
check50.exists("foo.c", "foo.h")
"""
for path in paths:
log(_("checking that {} exists...").format(path))
if not os.path.exists(path):
raise Failure(_("{} not found").format(path))
|
Import checks module given relative path.
:param path: relative path from which to import checks module
:type path: str
:returns: the imported module
:raises FileNotFoundError: if ``path / .check50.yaml`` does not exist
:raises yaml.YAMLError: if ``path / .check50.yaml`` is not a valid YAML file
This function is particularly useful when a set of checks logically extends
another, as is often the case in CS50's own problems that have a "less comfy"
and "more comfy" version. The "more comfy" version can include all of the
"less comfy" checks like so::
less = check50.import_checks("../less")
from less import *
.. note::
the ``__name__`` of the imported module is given by the basename
of the specified path (``less`` in the above example).
|
def import_checks(path):
"""
Import checks module given relative path.
:param path: relative path from which to import checks module
:type path: str
:returns: the imported module
:raises FileNotFoundError: if ``path / .check50.yaml`` does not exist
:raises yaml.YAMLError: if ``path / .check50.yaml`` is not a valid YAML file
This function is particularly useful when a set of checks logically extends
another, as is often the case in CS50's own problems that have a "less comfy"
and "more comfy" version. The "more comfy" version can include all of the
"less comfy" checks like so::
less = check50.import_checks("../less")
from less import *
.. note::
the ``__name__`` of the imported module is given by the basename
of the specified path (``less`` in the above example).
"""
dir = internal.check_dir / path
file = internal.load_config(dir)["checks"]
mod = internal.import_file(dir.name, (dir / file).resolve())
sys.modules[dir.name] = mod
return mod
|
Get raw representation of s, truncating if too long.
|
def _raw(s):
"""Get raw representation of s, truncating if too long."""
if isinstance(s, list):
s = "\n".join(_raw(item) for item in s)
if s == EOF:
return "EOF"
s = repr(s) # Get raw representation of string
s = s[1:-1] # Strip away quotation marks
if len(s) > 15:
s = s[:15] + "..." # Truncate if too long
return s
|
Copy src to dst, copying recursively if src is a directory.
|
def _copy(src, dst):
"""Copy src to dst, copying recursively if src is a directory."""
try:
shutil.copy(src, dst)
except IsADirectoryError:
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
shutil.copytree(src, dst)
|
Send line to stdin, optionally expect a prompt.
:param line: line to be sent to stdin
:type line: str
:param prompt: boolean indicating whether a prompt is expected, if True absorbs \
all of stdout before inserting line into stdin and raises \
:class:`check50.Failure` if stdout is empty
:type prompt: bool
:param timeout: maximum number of seconds to wait for prompt
:type timeout: int / float
:raises check50.Failure: if ``prompt`` is set to True and no prompt is given
|
def stdin(self, line, prompt=True, timeout=3):
"""
Send line to stdin, optionally expect a prompt.
:param line: line to be sent to stdin
:type line: str
:param prompt: boolean indicating whether a prompt is expected, if True absorbs \
all of stdout before inserting line into stdin and raises \
:class:`check50.Failure` if stdout is empty
:type prompt: bool
:param timeout: maximum number of seconds to wait for prompt
:type timeout: int / float
:raises check50.Failure: if ``prompt`` is set to True and no prompt is given
"""
if line == EOF:
log("sending EOF...")
else:
log(_("sending input {}...").format(line))
if prompt:
try:
self.process.expect(".+", timeout=timeout)
except (TIMEOUT, EOF):
raise Failure(_("expected prompt for input, found none"))
except UnicodeDecodeError:
raise Failure(_("output not valid ASCII text"))
try:
if line == EOF:
self.process.sendeof()
else:
self.process.sendline(line)
except OSError:
pass
return self
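A usage sketch, chaining ``stdin`` off ``check50.run`` (the program name and values are placeholders)::
    check50.run("./adder").stdin("2", prompt=True).stdin("3").stdout("5").exit(0)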
|
Retrieve all output from stdout until timeout (3 sec by default). If ``output``
is None, ``stdout`` returns all of the stdout outputted by the process, else
it returns ``self``.
:param output: optional output to be expected from stdout, raises \
:class:`check50.Failure` if no match
:type output: str
:param str_output: what will be displayed as expected output, a human \
readable form of ``output``
:type str_output: str
:param regex: flag indicating whether ``output`` should be treated as a regex
:type regex: bool
:param timeout: maximum number of seconds to wait for ``output``
:type timeout: int / float
:raises check50.Mismatch: if ``output`` is specified and nothing that the \
process outputs matches it
:raises check50.Failure: if process times out or if it outputs invalid UTF-8 text.
Example usage::
check50.run("./hello").stdout("[Hh]ello, world!?", "hello, world").exit()
output = check50.run("./hello").stdout()
if not re.match("[Hh]ello, world!?", output):
raise check50.Mismatch("hello, world", output)
|
def stdout(self, output=None, str_output=None, regex=True, timeout=3):
"""
Retrieve all output from stdout until timeout (3 sec by default). If ``output``
is None, ``stdout`` returns all of the stdout outputted by the process, else
it returns ``self``.
:param output: optional output to be expected from stdout, raises \
:class:`check50.Failure` if no match
:type output: str
:param str_output: what will be displayed as expected output, a human \
readable form of ``output``
:type str_output: str
:param regex: flag indicating whether ``output`` should be treated as a regex
:type regex: bool
:param timeout: maximum number of seconds to wait for ``output``
:type timeout: int / float
:raises check50.Mismatch: if ``output`` is specified and nothing that the \
process outputs matches it
:raises check50.Failure: if process times out or if it outputs invalid UTF-8 text.
Example usage::
check50.run("./hello").stdout("[Hh]ello, world!?", "hello, world").exit()
output = check50.run("./hello").stdout()
if not re.match("[Hh]ello, world!?", output):
raise check50.Mismatch("hello, world", output)
"""
if output is None:
self._wait(timeout)
return self.process.before.replace("\r\n", "\n").lstrip("\n")
try:
output = output.read()
except AttributeError:
pass
expect = self.process.expect if regex else self.process.expect_exact
if str_output is None:
str_output = output
if output == EOF:
log(_("checking for EOF..."))
else:
output = output.replace("\n", "\r\n")
log(_("checking for output \"{}\"...").format(str_output))
try:
expect(output, timeout=timeout)
except EOF:
result = self.process.before + self.process.buffer
if self.process.after != EOF:
result += self.process.after
raise Mismatch(str_output, result.replace("\r\n", "\n"))
except TIMEOUT:
raise Failure(_("did not find \"{}\"").format(_raw(str_output)))
except UnicodeDecodeError:
raise Failure(_("output not valid ASCII text"))
except Exception:
raise Failure(_("check50 could not verify output"))
# If we expected EOF and we still got output, report an error.
if output == EOF and self.process.before:
raise Mismatch(EOF, self.process.before.replace("\r\n", "\n"))
return self
|
Check that the process survives for timeout. Useful for checking whether program is waiting on input.
:param timeout: number of seconds to wait
:type timeout: int / float
:raises check50.Failure: if process ends before ``timeout``
|
def reject(self, timeout=1):
"""
Check that the process survives for timeout. Useful for checking whether program is waiting on input.
:param timeout: number of seconds to wait
:type timeout: int / float
:raises check50.Failure: if process ends before ``timeout``
"""
log(_("checking that input was rejected..."))
try:
self._wait(timeout)
except Failure as e:
if not isinstance(e.__cause__, TIMEOUT):
raise
else:
raise Failure(_("expected program to reject input, but it did not"))
return self
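A usage sketch: assert that a (hypothetical) program re-prompts instead of exiting when given invalid input::
    check50.run("./positive").stdin("-1", prompt=True).reject()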
|
Wait for process to exit or until timeout (5 sec by default) and asserts
that process exits with ``code``. If ``code`` is ``None``, returns the code
the process exited with.
.. note:: In order to ensure that spawned child processes do not outlive the check that spawned them, it is good practice to call either this method (with no arguments if the exit code doesn't matter) or ``.kill()`` on every spawned process.
:param code: code to assert process exits with
:type code: int
:param timeout: maximum number of seconds to wait for the program to end
:type timeout: int / float
:raises check50.Failure: if ``code`` is given and does not match the actual exitcode within ``timeout``
Example usage::
check50.run("./hello").exit(0)
code = check50.run("./hello").exit()
if code != 0:
raise check50.Failure(f"expected exit code 0, not {code}")
|
def exit(self, code=None, timeout=5):
"""
Wait for process to exit or until timeout (5 sec by default) and asserts
that process exits with ``code``. If ``code`` is ``None``, returns the code
the process exited with.
.. note:: In order to ensure that spawned child processes do not outlive the check that spawned them, it is good practice to call either this method (with no arguments if the exit code doesn't matter) or ``.kill()`` on every spawned process.
:param code: code to assert process exits with
:type code: int
:param timeout: maximum number of seconds to wait for the program to end
:type timeout: int / float
:raises check50.Failure: if ``code`` is given and does not match the actual exitcode within ``timeout``
Example usage::
check50.run("./hello").exit(0)
code = check50.run("./hello").exit()
if code != 0:
raise check50.Failure(f"expected exit code 0, not {code}")
"""
self._wait(timeout)
if code is None:
return self.exitcode
log(_("checking that program exited with status {}...").format(code))
if self.exitcode != code:
raise Failure(_("expected exit code {}, not {}").format(code, self.exitcode))
return self
|
Load configuration file from ``check_dir / ".cs50.yaml"``, applying
defaults to unspecified values.
:param check_dir: directory from which to load config file
:type check_dir: str / Path
:rtype: dict
|
def load_config(check_dir):
"""
Load configuration file from ``check_dir / ".cs50.yaml"``, applying
defaults to unspecified values.
:param check_dir: directory from which to load config file
:type check_dir: str / Path
:rtype: dict
"""
# Defaults for top-level keys
options = {
"checks": "__init__.py",
"dependencies": None,
"translations": None
}
# Defaults for translation keys
translation_options = {
"localedir": "locale",
"domain": "messages",
}
config_file = Path(check_dir) / ".cs50.yaml"
with open(config_file) as f:
config = lib50.config.load(f.read(), "check50")
if isinstance(config, dict):
options.update(config)
if options["translations"]:
if isinstance(options["translations"], dict):
translation_options.update(options["translations"])
options["translations"] = translation_options
if isinstance(options["checks"], dict):
# Compile simple checks
with open(check_dir / "__init__.py", "w") as f:
f.write(simple.compile(options["checks"]))
options["checks"] = "__init__.py"
return options
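As an illustration of the defaulting behaviour (the directory name and YAML contents are hypothetical, and the exact YAML layout is an assumption about lib50's config format)::
    # .cs50.yaml (hypothetical) whose check50 section sets only translations.localedir
    options = load_config("problem_dir")
    # {"checks": "__init__.py",
    #  "dependencies": None,
    #  "translations": {"localedir": "locale/fr", "domain": "messages"}}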
|
Import a file given a raw file path.
:param name: Name of module to be imported
:type name: str
:param path: Path to Python file
:type path: str / Path
|
def import_file(name, path):
"""
Import a file given a raw file path.
:param name: Name of module to be imported
:type name: str
:param path: Path to Python file
:type path: str / Path
"""
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
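A usage sketch (the module name and path are placeholders)::
    helpers = import_file("helpers", "checks/helpers.py")
    helpers.do_something()  # hypothetical attribute of the imported module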
|
Compile C source files.
:param files: filenames to be compiled
:param exe_name: name of resulting executable
:param cc: compiler to use (:data:`check50.c.CC` by default)
:param cflags: additional flags to pass to the compiler
:raises check50.Failure: if compilation failed (i.e., if the compiler returns a non-zero exit status).
:raises RuntimeError: if no filenames are specified
If ``exe_name`` is None, :func:`check50.c.compile` will default to the first
file specified sans the ``.c`` extension::
check50.c.compile("foo.c", "bar.c") # clang foo.c bar.c -o foo -std=c11 -ggdb -lm
Additional CFLAGS may be passed as keyword arguments like so::
check50.c.compile("foo.c", "bar.c", lcs50=True) # clang foo.c bar.c -o foo -std=c11 -ggdb -lm -lcs50
In the same vein, the default CFLAGS may be overridden via keyword arguments::
check50.c.compile("foo.c", "bar.c", std="c99", lm=False) # clang foo.c bar.c -o foo -std=c99 -ggdb
|
def compile(*files, exe_name=None, cc=CC, **cflags):
"""
Compile C source files.
:param files: filenames to be compiled
:param exe_name: name of resulting executable
:param cc: compiler to use (:data:`check50.c.CC` by default)
:param cflags: additional flags to pass to the compiler
:raises check50.Failure: if compilation failed (i.e., if the compiler returns a non-zero exit status).
:raises RuntimeError: if no filenames are specified
If ``exe_name`` is None, :func:`check50.c.compile` will default to the first
file specified sans the ``.c`` extension::
check50.c.compile("foo.c", "bar.c") # clang foo.c bar.c -o foo -std=c11 -ggdb -lm
Additional CFLAGS may be passed as keyword arguments like so::
check50.c.compile("foo.c", "bar.c", lcs50=True) # clang foo.c bar.c -o foo -std=c11 -ggdb -lm -lcs50
In the same vein, the default CFLAGS may be overridden via keyword arguments::
check50.c.compile("foo.c", "bar.c", std="c99", lm=False) # clang foo.c bar.c -o foo -std=c99 -ggdb
"""
if not files:
raise RuntimeError(_("compile requires at least one file"))
if exe_name is None and files[0].endswith(".c"):
exe_name = Path(files[0]).stem
files = " ".join(files)
flags = CFLAGS.copy()
flags.update(cflags)
flags = " ".join((f"-{flag}" + (f"={value}" if value is not True else "")).replace("_", "-")
for flag, value in flags.items() if value)
out_flag = f" -o {exe_name} " if exe_name is not None else " "
run(f"{cc} {files}{out_flag}{flags}").exit(0)
|
Run a command with valgrind.
:param command: command to be run
:type command: str
:param env: environment in which to run command
:type env: str
:raises check50.Failure: if, at the end of the check, valgrind reports any errors
This function works exactly like :func:`check50.run`, with the additional effect that ``command`` is run through
``valgrind`` and ``valgrind``'s output is automatically reviewed at the end of the check for memory leaks and other
bugs. If ``valgrind`` reports any issues, the check is failed and student-friendly messages are printed to the log.
Example usage::
check50.c.valgrind("./leaky").stdin("foo").stdout("bar").exit(0)
.. note::
It is recommended that the student's code be compiled with the `-ggdb`
flag so that additional information, such as the file and line number at which
the issue was detected, can be included in the log as well.
|
def valgrind(command, env={}):
"""Run a command with valgrind.
:param command: command to be run
:type command: str
:param env: environment in which to run command
:type env: str
:raises check50.Failure: if, at the end of the check, valgrind reports any errors
This function works exactly like :func:`check50.run`, with the additional effect that ``command`` is run through
``valgrind`` and ``valgrind``'s output is automatically reviewed at the end of the check for memory leaks and other
bugs. If ``valgrind`` reports any issues, the check is failed and student-friendly messages are printed to the log.
Example usage::
check50.c.valgrind("./leaky").stdin("foo").stdout("bar").exit(0)
.. note::
It is recommended that the student's code be compiled with the `-ggdb`
flag so that additional information, such as the file and line number at which
the issue was detected, can be included in the log as well.
"""
xml_file = tempfile.NamedTemporaryFile()
internal.register.after_check(lambda: _check_valgrind(xml_file))
# Ideally we'd like for this whole command not to be logged.
return run(f"valgrind --show-leak-kinds=all --xml=yes --xml-file={xml_file.name} -- {command}", env=env)
|
Log and report any errors encountered by valgrind.
|
def _check_valgrind(xml_file):
"""Log and report any errors encountered by valgrind."""
log(_("checking for valgrind errors..."))
# Load XML file created by valgrind
xml = ET.ElementTree(file=xml_file)
# Ensure that we don't get duplicate error messages.
reported = set()
for error in xml.iterfind("error"):
# Type of error valgrind encountered
kind = error.find("kind").text
# Valgrind's error message
what = error.find("xwhat/text" if kind.startswith("Leak_") else "what").text
# Error message that we will report
msg = ["\t", what]
# Find first stack frame within student's code.
for frame in error.iterfind("stack/frame"):
obj = frame.find("obj")
if obj is not None and internal.run_dir in Path(obj.text).parents:
file, line = frame.find("file"), frame.find("line")
if file is not None and line is not None:
msg.append(f": ({_('file')}: {file.text}, {_('line')}: {line.text})")
break
msg = "".join(msg)
if msg not in reported:
log(msg)
reported.add(msg)
# Only raise exception if we encountered errors.
if reported:
raise Failure(_("valgrind tests failed; rerun with --log for more information."))
|
Context manager that runs code block until timeout is reached.
Example usage::
try:
with _timeout(10):
do_stuff()
except Timeout:
print("do_stuff timed out")
|
@contextmanager  # from contextlib; needed so the generator below can be used with "with"
def _timeout(seconds):
"""Context manager that runs code block until timeout is reached.
Example usage::
try:
with _timeout(10):
do_stuff()
except Timeout:
print("do_stuff timed out")
"""
def _handle_timeout(*args):
raise Timeout(seconds)
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
yield
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
|
Mark function as a check.
:param dependency: the check that this check depends on
:type dependency: function
:param timeout: maximum number of seconds the check can run
:type timeout: int
When a check depends on another, the former will only run if the latter passes.
Additionally, the dependent check will inherit the filesystem of its dependency.
This is particularly useful when writing e.g., a ``compiles`` check that compiles a
student's program (and checks that it compiled successfully). Any checks that run the
student's program will logically depend on this check, and since they inherit the
resulting filesystem of the check, they will immediately have access to the compiled
program without needing to recompile.
Example usage::
@check50.check() # Mark 'exists' as a check
def exists():
\"""hello.c exists\"""
check50.exists("hello.c")
@check50.check(exists) # Mark 'compiles' as a check that depends on 'exists'
def compiles():
\"""hello.c compiles\"""
check50.c.compile("hello.c")
@check50.check(compiles)
def prints_hello():
\"""prints "Hello, world!\\\\n\"""
# Since 'prints_hello', depends on 'compiles' it inherits the compiled binary
check50.run("./hello").stdout("[Hh]ello, world!?\\n", "hello, world\\n").exit()
|
def check(dependency=None, timeout=60):
"""Mark function as a check.
:param dependency: the check that this check depends on
:type dependency: function
:param timeout: maximum number of seconds the check can run
:type timeout: int
When a check depends on another, the former will only run if the latter passes.
Additionally, the dependent check will inherit the filesystem of its dependency.
This is particularly useful when writing e.g., a ``compiles`` check that compiles a
student's program (and checks that it compiled successfully). Any checks that run the
student's program will logically depend on this check, and since they inherit the
resulting filesystem of the check, they will immediately have access to the compiled
program without needing to recompile.
Example usage::
@check50.check() # Mark 'exists' as a check
def exists():
\"""hello.c exists\"""
check50.exists("hello.c")
@check50.check(exists) # Mark 'compiles' as a check that depends on 'exists'
def compiles():
\"""hello.c compiles\"""
check50.c.compile("hello.c")
@check50.check(compiles)
def prints_hello():
\"""prints "Hello, world!\\\\n\"""
# Since 'prints_hello', depends on 'compiles' it inherits the compiled binary
check50.run("./hello").stdout("[Hh]ello, world!?\\n", "hello, world\\n").exit()
"""
def decorator(check):
# Modules are evaluated from the top of the file down, so _check_names will
# contain the names of the checks in the order in which they are declared
_check_names.append(check.__name__)
check._check_dependency = dependency
@functools.wraps(check)
def wrapper(checks_root, dependency_state):
# Result template
result = CheckResult.from_check(check)
# Any shared (returned) state
state = None
try:
# Setup check environment, copying disk state from dependency
internal.run_dir = checks_root / check.__name__
src_dir = checks_root / (dependency.__name__ if dependency else "-")
shutil.copytree(src_dir, internal.run_dir)
os.chdir(internal.run_dir)
# Run registered functions before/after running check and set timeout
with internal.register, _timeout(seconds=timeout):
args = (dependency_state,) if inspect.getfullargspec(check).args else ()
state = check(*args)
except Failure as e:
result.passed = False
result.cause = e.payload
except BaseException as e:
result.passed = None
result.cause = {"rationale": _("check50 ran into an error while running checks!")}
log(repr(e))
for line in traceback.format_tb(e.__traceback__):
log(line.rstrip())
log(_("Contact [email protected] with the URL of this check!"))
else:
result.passed = True
finally:
result.log = _log
result.data = _data
return result, state
return wrapper
return decorator
|
Run checks concurrently.
Returns a list of CheckResults ordered by declaration order of the checks in the imported module
|
def run(self, files, working_area):
"""
Run checks concurrently.
Returns a list of CheckResults ordered by declaration order of the checks in the imported module
"""
# Ensure that dictionary is ordered by check declaration order (via self.check_names)
# NOTE: Requires CPython 3.6. If we need to support older versions of Python, replace with OrderedDict.
results = {name: None for name in self.check_names}
checks_root = working_area.parent
with futures.ProcessPoolExecutor() as executor:
# Start all checks that have no dependencies
not_done = set(executor.submit(run_check(name, self.checks_spec, checks_root))
for name, _ in self.child_map[None])
not_passed = []
while not_done:
done, not_done = futures.wait(not_done, return_when=futures.FIRST_COMPLETED)
for future in done:
# Get result from completed check
result, state = future.result()
results[result.name] = result
if result.passed:
# Dispatch dependent checks
for child_name, _ in self.child_map[result.name]:
not_done.add(executor.submit(
run_check(child_name, self.checks_spec, checks_root, state)))
else:
not_passed.append(result.name)
for name in not_passed:
self._skip_children(name, results)
return results.values()
|
Recursively skip the children of check_name (presumably because check_name
did not pass).
|
def _skip_children(self, check_name, results):
"""
Recursively skip the children of check_name (presumably because check_name
did not pass).
"""
for name, description in self.child_map[check_name]:
if results[name] is None:
results[name] = CheckResult(name=name, description=_(description),
passed=None,
dependency=check_name,
cause={"rationale": _("can't check until a frown turns upside down")})
self._skip_children(name, results)
|
Append the contents of one file to another.
:param original: name of file that will be appended to
:type original: str
:param codefile: name of file whose contents will be appended
:type codefile: str
This function is particularly useful when one wants to replace a function
in student code with their own implementation of one. If two functions are
defined with the same name in Python, the latter definition is taken so overwriting
a function is as simple as writing it to a file and then appending it to the
student's code.
Example usage::
# Include a file containing our own implementation of a lookup function.
check50.include("lookup.py")
# Overwrite the lookup function in helpers.py with our own implementation.
check50.py.append_code("helpers.py", "lookup.py")
|
def append_code(original, codefile):
"""Append the contents of one file to another.
:param original: name of file that will be appended to
:type original: str
:param codefile: name of file whose contents will be appended
:type codefile: str
This function is particularly useful when one wants to replace a function
in student code with their own implementation of one. If two functions are
defined with the same name in Python, the latter definition is taken so overwriting
a function is as simple as writing it to a file and then appending it to the
student's code.
Example usage::
# Include a file containing our own implementation of a lookup function.
check50.include("lookup.py")
# Overwrite the lookup function in helpers.py with our own implementation.
check50.py.append_code("helpers.py", "lookup.py")
"""
with open(codefile) as code, open(original, "a") as o:
o.write("\n")
o.writelines(code)
|
Import a Python program given a raw file path
:param path: path to python file to be imported
:type path: str
:raises check50.Failure: if ``path`` doesn't exist, or if the Python file at ``path`` throws an exception when imported.
|
def import_(path):
"""Import a Python program given a raw file path
:param path: path to python file to be imported
:type path: str
:raises check50.Failure: if ``path`` doesn't exist, or if the Python file at ``path`` throws an exception when imported.
"""
exists(path)
log(_("importing {}...").format(path))
name = Path(path).stem
try:
return internal.import_file(name, path)
except Exception as e:
raise Failure(str(e))
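A usage sketch, assuming the same ``check50.py`` namespace used by ``append_code`` above (filename and attribute are placeholders)::
    module = check50.py.import_("hello.py")
    if not hasattr(module, "main"):
        raise check50.Failure("expected hello.py to define main()")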
|
Compile a Python program into byte code
:param file: file to be compiled
:raises check50.Failure: if compilation fails e.g. if there is a SyntaxError
|
def compile(file):
"""
Compile a Python program into byte code
:param file: file to be compiled
:raises check50.Failure: if compilation fails e.g. if there is a SyntaxError
"""
log(_("compiling {} into byte code...").format(file))
try:
py_compile.compile(file, doraise=True)
except py_compile.PyCompileError as e:
log(_("Exception raised: "))
for line in e.msg.splitlines():
log(line)
raise Failure(_("{} raised while compiling {} (rerun with --log for more details)").format(e.exc_type_name, file))
|
Set check50 __version__
|
def _set_version():
"""Set check50 __version__"""
global __version__
from pkg_resources import get_distribution, DistributionNotFound
import os
# https://stackoverflow.com/questions/17583443/what-is-the-correct-way-to-share-package-version-with-setup-py-and-the-package
try:
dist = get_distribution("check50")
# Normalize path for cross-OS compatibility.
dist_loc = os.path.normcase(dist.location)
here = os.path.normcase(__file__)
if not here.startswith(os.path.join(dist_loc, "check50")):
# This version is not installed, but another version is.
raise DistributionNotFound
except DistributionNotFound:
__version__ = "locally installed, no version information available"
else:
__version__ = dist.version
|
Send GET request to app.
:param route: route to send request to
:type route: str
:param data: form data to include in request
:type data: dict
:param params: URL parameters to include in request
:param follow_redirects: enable redirection (defaults to ``True``)
:type follow_redirects: bool
:returns: ``self``
:raises check50.Failure: if Flask application throws an uncaught exception
Example usage::
check50.flask.app("application.py").get("/buy", params={"q": "02138"}).content()
|
def get(self, route, data=None, params=None, follow_redirects=True):
"""Send GET request to app.
:param route: route to send request to
:type route: str
:param data: form data to include in request
:type data: dict
:param params: URL parameters to include in request
:param follow_redirects: enable redirection (defaults to ``True``)
:type follow_redirects: bool
:returns: ``self``
:raises check50.Failure: if Flask application throws an uncaught exception
Example usage::
check50.flask.app("application.py").get("/buy", params={"q": "02138"}).content()
"""
return self._send("GET", route, data, params, follow_redirects=follow_redirects)
|
Send POST request to app.
:param route: route to send request to
:type route: str
:param data: form data to include in request
:type data: dict
:param params: URL parameters to include in request
:param follow_redirects: enable redirection (defaults to ``True``)
:type follow_redirects: bool
:raises check50.Failure: if Flask application throws an uncaught exception
Example usage::
check50.flask.app("application.py").post("/buy", data={"symbol": "GOOG", "shares": 10}).status(200)
|
def post(self, route, data=None, params=None, follow_redirects=True):
"""Send POST request to app.
:param route: route to send request to
:type route: str
:param data: form data to include in request
:type data: dict
:param params: URL parameters to include in request
:param follow_redirects: enable redirection (defaults to ``True``)
:type follow_redirects: bool
:raises check50.Failure: if Flask application throws an uncaught exception
Example usage::
check50.flask.app("application.py").post("/buy", data={"symbol": "GOOG", "shares": 10}).status(200)
"""
return self._send("POST", route, data, params, follow_redirects=follow_redirects)
|
Check status code in response returned by application.
If ``code`` is not None, assert that ``code`` is returned by application,
else simply return the status code.
:param code: ``code`` to assert that application returns
:type code: int
Example usage::
check50.flask.app("application.py").status(200)
status = check50.flask.app("application.py").get("/").status()
if status != 200:
raise check50.Failure(f"expected status code 200, but got {status}")
|
def status(self, code=None):
"""Check status code in response returned by application.
If ``code`` is not None, assert that ``code`` is returned by application,
else simply return the status code.
:param code: ``code`` to assert that application returns
:type code: int
Example usage::
check50.flask.app("application.py").status(200)
status = check50.flask.app("application.py").get("/").status()
if status != 200:
raise check50.Failure(f"expected status code 200, but got {status}")
"""
if code is None:
return self.response.status_code
log(_("checking that status code {} is returned...").format(code))
if code != self.response.status_code:
raise Failure(_("expected status code {}, but got {}").format(
code, self.response.status_code))
return self
|
Searches for `output` regex match within content of page, regardless of mimetype.
|
def raw_content(self, output=None, str_output=None):
"""Searches for `output` regex match within content of page, regardless of mimetype."""
return self._search_page(output, str_output, self.response.data, lambda regex, content: regex.search(content.decode()))
|
Searches for `output` regex within HTML page. kwargs are passed to BeautifulSoup's find function to filter for tags.
|
def content(self, output=None, str_output=None, **kwargs):
"""Searches for `output` regex within HTML page. kwargs are passed to BeautifulSoup's find function to filter for tags."""
if self.response.mimetype != "text/html":
raise Failure(_("expected request to return HTML, but it returned {}").format(
self.response.mimetype))
# TODO: Remove once Beautiful Soup updates to accommodate Python 3.7
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
content = BeautifulSoup(self.response.data, "html.parser")
return self._search_page(
output,
str_output,
content,
lambda regex, content: any(regex.search(str(tag)) for tag in content.find_all(**kwargs)))
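A usage sketch (route, regex, and tag filter are placeholders); the keyword arguments are forwarded to BeautifulSoup to restrict which tags are searched::
    check50.flask.app("application.py").get("/").content("[Hh]ello", name="h1")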
|
Send request of type `method` to `route`.
|
def _send(self, method, route, data, params, **kwargs):
"""Send request of type `method` to `route`."""
route = self._fmt_route(route, params)
log(_("sending {} request to {}").format(method.upper(), route))
try:
self.response = getattr(self._client, method.lower())(route, data=data, **kwargs)
except BaseException as e: # Catch all exceptions thrown by app
log(_("exception raised in application: {}: {}").format(type(e).__name__, e))
raise Failure(_("application raised an exception (rerun with --log for more details)"))
return self
|
Returns compiled check50 checks from simple YAML checks in path.
|
def compile(checks):
"""Returns compiled check50 checks from simple YAML checks in path."""
out = ["import check50"]
for name, check in checks.items():
out.append(_compile_check(name, check))
return "\n\n".join(out)
|
Create an index of days at time ``t``, interpreted in timezone ``tz``.
The returned index is localized to UTC.
Parameters
----------
days : DatetimeIndex
An index of dates (represented as midnight).
t : datetime.time
The time to apply as an offset to each day in ``days``.
tz : pytz.timezone
The timezone to use to interpret ``t``.
day_offset : int
The number of days we want to offset @days by
Examples
--------
In the example below, the times switch from 13:45 to 12:45 UTC because
March 13th is the daylight savings transition for US/Eastern. All the
times are still 8:45 when interpreted in US/Eastern.
>>> import pandas as pd; import datetime; import pprint
>>> dts = pd.date_range('2016-03-12', '2016-03-14')
>>> dts_at_845 = days_at_time(dts, datetime.time(8, 45), 'US/Eastern')
>>> pprint.pprint([str(dt) for dt in dts_at_845])
['2016-03-12 13:45:00+00:00',
'2016-03-13 12:45:00+00:00',
'2016-03-14 12:45:00+00:00']
|
def days_at_time(days, t, tz, day_offset=0):
"""
Create an index of days at time ``t``, interpreted in timezone ``tz``.
The returned index is localized to UTC.
Parameters
----------
days : DatetimeIndex
An index of dates (represented as midnight).
t : datetime.time
The time to apply as an offset to each day in ``days``.
tz : pytz.timezone
The timezone to use to interpret ``t``.
day_offset : int
The number of days we want to offset @days by
Examples
--------
In the example below, the times switch from 13:45 to 12:45 UTC because
March 13th is the daylight savings transition for US/Eastern. All the
times are still 8:45 when interpreted in US/Eastern.
>>> import pandas as pd; import datetime; import pprint
>>> dts = pd.date_range('2016-03-12', '2016-03-14')
>>> dts_at_845 = days_at_time(dts, datetime.time(8, 45), 'US/Eastern')
>>> pprint.pprint([str(dt) for dt in dts_at_845])
['2016-03-12 13:45:00+00:00',
'2016-03-13 12:45:00+00:00',
'2016-03-14 12:45:00+00:00']
"""
days = pd.DatetimeIndex(days).tz_localize(None)
if len(days) == 0:
return days.tz_localize(UTC)
# Offset days without tz to avoid timezone issues.
delta = pd.Timedelta(
days=day_offset,
hours=t.hour,
minutes=t.minute,
seconds=t.second,
)
return (days + delta).tz_localize(tz).tz_convert(UTC)
|
A vectorized implementation of
:func:`pandas.tseries.holiday.sunday_to_monday`.
Parameters
----------
dtix : pd.DatetimeIndex
The index to shift sundays to mondays.
Returns
-------
sundays_as_mondays : pd.DatetimeIndex
``dtix`` with all sundays moved to the next monday.
|
def vectorized_sunday_to_monday(dtix):
"""A vectorized implementation of
:func:`pandas.tseries.holiday.sunday_to_monday`.
Parameters
----------
dtix : pd.DatetimeIndex
The index to shift sundays to mondays.
Returns
-------
sundays_as_mondays : pd.DatetimeIndex
``dtix`` with all sundays moved to the next monday.
"""
values = dtix.values.copy()
values[dtix.weekday == 6] += np.timedelta64(1, 'D')
return pd.DatetimeIndex(values)
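A small worked example (2021-07-04 is a Sunday and is shifted to Monday 2021-07-05; the neighbouring dates are unchanged)::
    import pandas as pd
    dtix = pd.DatetimeIndex(["2021-07-03", "2021-07-04", "2021-07-05"])
    vectorized_sunday_to_monday(dtix)
    # DatetimeIndex(['2021-07-03', '2021-07-05', '2021-07-05'], dtype='datetime64[ns]', freq=None)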
|
If Boxing Day (December 26) falls on a Saturday, then Monday the 28th is a holiday.
If Boxing Day falls on a Sunday, then Tuesday the 28th is a holiday.
|
def weekend_boxing_day(start_date=None, end_date=None, observance=None):
"""
If Boxing Day (December 26) falls on a Saturday, then Monday the 28th is a holiday.
If Boxing Day falls on a Sunday, then Tuesday the 28th is a holiday.
"""
return Holiday(
"Weekend Boxing Day",
month=12,
day=28,
days_of_week=(MONDAY, TUESDAY),
start_date=start_date,
end_date=end_date,
observance=observance,
)
|
Given a list of holidays, return whether dt is a holiday or falls
on a weekend.
|
def is_holiday_or_weekend(holidays, dt):
"""
Given a list of holidays, return whether dt is a holiday or falls
on a weekend.
"""
one_day = timedelta(days=1)
for h in holidays:
if dt in h.dates(dt - one_day, dt + one_day) or \
dt.weekday() in WEEKENDS:
return True
return False
|
If a holiday falls on a Sunday, observe it on the next non-holiday weekday.
Parameters
----------
holidays : list[pd.tseries.holiday.Holiday]
list of holidays
dt : pd.Timestamp
date of holiday.
|
def next_non_holiday_weekday(holidays, dt):
"""
If a holiday falls on a Sunday, observe it on the next non-holiday weekday.
Parameters
----------
holidays : list[pd.tseries.holiday.Holiday]
list of holidays
dt : pd.Timestamp
date of holiday.
"""
day_of_week = dt.weekday()
if day_of_week == SUNDAY:
while is_holiday_or_weekend(holidays, dt):
dt += timedelta(1)
return dt
|
Given arrays of opens and closes, both in nanoseconds,
return an array of each minute between the opens and closes.
|
def compute_all_minutes(opens_in_ns, closes_in_ns):
"""
Given arrays of opens and closes, both in nanoseconds,
return an array of each minute between the opens and closes.
"""
deltas = closes_in_ns - opens_in_ns
# + 1 because we want 390 mins per standard day, not 389
daily_sizes = (deltas // NANOSECONDS_PER_MINUTE) + 1
num_minutes = daily_sizes.sum()
# One allocation for the entire thing. This assumes that each day
# represents a contiguous block of minutes.
pieces = []
for open_, size in zip(opens_in_ns, daily_sizes):
pieces.append(
np.arange(open_,
open_ + size * NANOSECONDS_PER_MINUTE,
NANOSECONDS_PER_MINUTE)
)
out = np.concatenate(pieces).view('datetime64[ns]')
assert len(out) == num_minutes
return out
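A worked example for a single standard NYSE-style session (13:31–20:00 UTC, i.e. 9:31–16:00 US/Eastern), assuming ``NANOSECONDS_PER_MINUTE`` is 60 * 10**9 as implied by the comment above::
    import numpy as np
    import pandas as pd
    opens = np.array([pd.Timestamp("2019-07-01 13:31", tz="UTC").value])
    closes = np.array([pd.Timestamp("2019-07-01 20:00", tz="UTC").value])
    minutes = compute_all_minutes(opens, closes)
    len(minutes)             # 390
    minutes[0], minutes[-1]  # 2019-07-01T13:31 and 2019-07-01T20:00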
|
Retrieves an instance of a TradingCalendar whose name is given.
Parameters
----------
name : str
The name of the TradingCalendar to be retrieved.
Returns
-------
calendar : calendars.TradingCalendar
The desired calendar.
|
def get_calendar(self, name):
"""
Retrieves an instance of a TradingCalendar whose name is given.
Parameters
----------
name : str
The name of the TradingCalendar to be retrieved.
Returns
-------
calendar : calendars.TradingCalendar
The desired calendar.
"""
canonical_name = self.resolve_alias(name)
try:
return self._calendars[canonical_name]
except KeyError:
# We haven't loaded this calendar yet, so make a new one.
pass
try:
factory = self._calendar_factories[canonical_name]
except KeyError:
# We don't have a factory registered for this name. Barf.
raise InvalidCalendarName(calendar_name=name)
# Cache the calendar for future use.
calendar = self._calendars[canonical_name] = factory()
return calendar
|
Do we have (or have the ability to make) a calendar with ``name``?
|
def has_calendar(self, name):
"""
Do we have (or have the ability to make) a calendar with ``name``?
"""
return (
name in self._calendars
or name in self._calendar_factories
or name in self._aliases
)
|
Registers a calendar for retrieval by the get_calendar method.
Parameters
----------
name: str
The key with which to register this calendar.
calendar: TradingCalendar
The calendar to be registered for retrieval.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
Raises
------
CalendarNameCollision
If a calendar is already registered with the given calendar's name.
|
def register_calendar(self, name, calendar, force=False):
"""
Registers a calendar for retrieval by the get_calendar method.
Parameters
----------
name: str
The key with which to register this calendar.
calendar: TradingCalendar
The calendar to be registered for retrieval.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
Raises
------
CalendarNameCollision
If a calendar is already registered with the given calendar's name.
"""
if force:
self.deregister_calendar(name)
if self.has_calendar(name):
raise CalendarNameCollision(calendar_name=name)
self._calendars[name] = calendar
|
Registers a calendar by type.
This is useful for registering a new calendar to be lazily instantiated
at some future point in time.
Parameters
----------
name: str
The key with which to register this calendar.
calendar_type: type
The type of the calendar to register.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
Raises
------
CalendarNameCollision
If a calendar is already registered with the given calendar's name.
|
def register_calendar_type(self, name, calendar_type, force=False):
"""
Registers a calendar by type.
This is useful for registering a new calendar to be lazily instantiated
at some future point in time.
Parameters
----------
name: str
The key with which to register this calendar.
calendar_type: type
The type of the calendar to register.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
Raises
------
CalendarNameCollision
If a calendar is already registered with the given calendar's name.
"""
if force:
self.deregister_calendar(name)
if self.has_calendar(name):
raise CalendarNameCollision(calendar_name=name)
self._calendar_factories[name] = calendar_type
|
Register an alias for a calendar.
This is useful when multiple exchanges should share a calendar, or when
there are multiple ways to refer to the same exchange.
After calling ``register_alias('alias', 'real_name')``, subsequent
calls to ``get_calendar('alias')`` will return the same result as
``get_calendar('real_name')``.
Parameters
----------
alias : str
The name to be used to refer to a calendar.
real_name : str
The canonical name of the registered calendar.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
|
def register_calendar_alias(self, alias, real_name, force=False):
"""
Register an alias for a calendar.
This is useful when multiple exchanges should share a calendar, or when
there are multiple ways to refer to the same exchange.
After calling ``register_alias('alias', 'real_name')``, subsequent
calls to ``get_calendar('alias')`` will return the same result as
``get_calendar('real_name')``.
Parameters
----------
alias : str
The name to be used to refer to a calendar.
real_name : str
The canonical name of the registered calendar.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
"""
if force:
self.deregister_calendar(alias)
if self.has_calendar(alias):
raise CalendarNameCollision(calendar_name=alias)
self._aliases[alias] = real_name
# Ensure that the new alias doesn't create a cycle, and back it out if
# we did.
try:
self.resolve_alias(alias)
except CyclicCalendarAlias:
del self._aliases[alias]
raise
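A usage sketch (``dispatcher`` stands in for the registry instance; the alias name is illustrative)::
    dispatcher.register_calendar_alias("US_EQUITIES", "XNYS")
    dispatcher.get_calendar("US_EQUITIES") is dispatcher.get_calendar("XNYS")  # True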
|
Resolve a calendar alias for retrieval.
Parameters
----------
name : str
The name of the requested calendar.
Returns
-------
canonical_name : str
The real name of the calendar to create/return.
|
def resolve_alias(self, name):
"""
Resolve a calendar alias for retrieval.
Parameters
----------
name : str
The name of the requested calendar.
Returns
-------
canonical_name : str
The real name of the calendar to create/return.
"""
seen = []
while name in self._aliases:
seen.append(name)
name = self._aliases[name]
# This is O(N ** 2), but if there's an alias chain longer than 2,
# something strange has happened.
if name in seen:
seen.append(name)
raise CyclicCalendarAlias(
cycle=" -> ".join(repr(k) for k in seen)
)
return name
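For illustration (``dispatcher`` and all names are made up), a two-hop alias chain resolves to the canonical name, while a mapping that loops back on itself raises ``CyclicCalendarAlias``::
    dispatcher.register_calendar_alias("US_EQUITIES", "XNYS")
    dispatcher.register_calendar_alias("STOCKS", "US_EQUITIES")
    dispatcher.resolve_alias("STOCKS")  # "XNYS"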
|
If a calendar is registered with the given name, it is de-registered.
Parameters
----------
cal_name : str
The name of the calendar to be deregistered.
|
def deregister_calendar(self, name):
"""
If a calendar is registered with the given name, it is de-registered.
Parameters
----------
cal_name : str
The name of the calendar to be deregistered.
"""
self._calendars.pop(name, None)
self._calendar_factories.pop(name, None)
self._aliases.pop(name, None)
|
Deregisters all current registered calendars
|
def clear_calendars(self):
"""
Deregisters all current registered calendars
"""
self._calendars.clear()
self._calendar_factories.clear()
self._aliases.clear()
|
Returns a Series mapping each holiday (as a UTC midnight Timestamp)
in ``calendar`` between ``start`` and ``end`` to that session at
``time`` (as a UTC Timestamp).
|
def scheduled_special_times(calendar, start, end, time, tz):
"""
Returns a Series mapping each holiday (as a UTC midnight Timestamp)
in ``calendar`` between ``start`` and ``end`` to that session at
``time`` (as a UTC Timestamp).
"""
days = calendar.holidays(start, end)
return pd.Series(
index=pd.DatetimeIndex(days, tz=UTC),
data=days_at_time(days, time, tz=tz),
)
|
Overwrite dates in open_or_closes with corresponding dates in
special_opens_or_closes, using midnight_utcs for alignment.
|
def _overwrite_special_dates(midnight_utcs,
opens_or_closes,
special_opens_or_closes):
"""
Overwrite dates in open_or_closes with corresponding dates in
special_opens_or_closes, using midnight_utcs for alignment.
"""
# Short circuit when nothing to apply.
if not len(special_opens_or_closes):
return
len_m, len_oc = len(midnight_utcs), len(opens_or_closes)
if len_m != len_oc:
raise ValueError(
"Found misaligned dates while building calendar.\n"
"Expected midnight_utcs to be the same length as open_or_closes,\n"
"but len(midnight_utcs)=%d, len(open_or_closes)=%d" % len_m, len_oc
)
# Find the array indices corresponding to each special date.
indexer = midnight_utcs.get_indexer(special_opens_or_closes.index)
# -1 indicates that no corresponding entry was found. If any -1s are
# present, then we have special dates that don't correspond to any
# trading day.
if -1 in indexer:
bad_dates = list(special_opens_or_closes[indexer == -1])
raise ValueError("Special dates %s are not trading days." % bad_dates)
# NOTE: This is a slightly dirty hack. We're in-place overwriting the
# internal data of an Index, which is conceptually immutable. Since we're
# maintaining sorting, this should be ok, but this is a good place to
# sanity check if things start going haywire with calendar computations.
opens_or_closes.values[indexer] = special_opens_or_closes.values
|
Parameters
----------
start_session: pd.Timestamp
The first session.
end_session: pd.Timestamp
The last session.
Returns
-------
int: The total number of minutes for the contiguous chunk of sessions
between start_session and end_session, inclusive.
|
def minutes_count_for_sessions_in_range(self, start_session, end_session):
"""
Parameters
----------
start_session: pd.Timestamp
The first session.
end_session: pd.Timestamp
The last session.
Returns
-------
int: The total number of minutes for the contiguous chunk of sessions
between start_session and end_session, inclusive.
"""
return int(self._minutes_per_session[start_session:end_session].sum())
|
Given a dt, return whether this exchange is open at the given dt.
Parameters
----------
dt: pd.Timestamp
The dt for which to check if this exchange is open.
Returns
-------
bool
Whether the exchange is open on this dt.
|
def is_open_on_minute(self, dt):
"""
Given a dt, return whether this exchange is open at the given dt.
Parameters
----------
dt: pd.Timestamp
The dt for which to check if this exchange is open.
Returns
-------
bool
Whether the exchange is open on this dt.
"""
return is_open(self.market_opens_nanos, self.market_closes_nanos,
dt.value)
|
Given a dt, returns the next open.
If the given dt happens to be a session open, the next session's open
will be returned.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next open.
Returns
-------
pd.Timestamp
The UTC timestamp of the next open.
|
def next_open(self, dt):
"""
Given a dt, returns the next open.
If the given dt happens to be a session open, the next session's open
will be returned.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next open.
Returns
-------
pd.Timestamp
The UTC timestamp of the next open.
"""
idx = next_divider_idx(self.market_opens_nanos, dt.value)
return pd.Timestamp(self.market_opens_nanos[idx], tz=UTC)
|
Given a dt, returns the next close.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next close.
Returns
-------
pd.Timestamp
The UTC timestamp of the next close.
|
def next_close(self, dt):
"""
Given a dt, returns the next close.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next close.
Returns
-------
pd.Timestamp
The UTC timestamp of the next close.
"""
idx = next_divider_idx(self.market_closes_nanos, dt.value)
return pd.Timestamp(self.market_closes_nanos[idx], tz=UTC)
|
Given a dt, returns the previous open.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous open.
Returns
-------
pd.Timestamp
The UTC timestamp of the previous open.
|
def previous_open(self, dt):
"""
Given a dt, returns the previous open.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous open.
Returns
-------
pd.Timestamp
The UTC timestamp of the previous open.
"""
idx = previous_divider_idx(self.market_opens_nanos, dt.value)
return pd.Timestamp(self.market_opens_nanos[idx], tz=UTC)
|
Given a dt, returns the previous close.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous close.
Returns
-------
pd.Timestamp
The UTC timestamp of the previous close.
|
def previous_close(self, dt):
"""
Given a dt, returns the previous close.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous close.
Returns
-------
pd.Timestamp
The UTC timestamp of the previous close.
"""
idx = previous_divider_idx(self.market_closes_nanos, dt.value)
return pd.Timestamp(self.market_closes_nanos[idx], tz=UTC)
|
Given a dt, return the next exchange minute. If the given dt is not
an exchange minute, returns the next exchange open.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next exchange minute.
Returns
-------
pd.Timestamp
The next exchange minute.
|
def next_minute(self, dt):
"""
Given a dt, return the next exchange minute. If the given dt is not
an exchange minute, returns the next exchange open.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next exchange minute.
Returns
-------
pd.Timestamp
The next exchange minute.
"""
idx = next_divider_idx(self._trading_minutes_nanos, dt.value)
return self.all_minutes[idx]
|
Given a dt, return the previous exchange minute.
Raises KeyError if the given timestamp is not an exchange minute.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous exchange minute.
Returns
-------
pd.Timestamp
The previous exchange minute.
|
def previous_minute(self, dt):
"""
Given a dt, return the previous exchange minute.
Raises KeyError if the given timestamp is not an exchange minute.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous exchange minute.
Returns
-------
pd.Timestamp
The previous exchange minute.
"""
idx = previous_divider_idx(self._trading_minutes_nanos, dt.value)
return self.all_minutes[idx]
|
Given a session label, returns the label of the next session.
Parameters
----------
session_label: pd.Timestamp
A session whose next session is desired.
Returns
-------
pd.Timestamp
The next session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the last session in this
calendar.
|
def next_session_label(self, session_label):
"""
Given a session label, returns the label of the next session.
Parameters
----------
session_label: pd.Timestamp
A session whose next session is desired.
Returns
-------
pd.Timestamp
The next session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the last session in this
calendar.
"""
idx = self.schedule.index.get_loc(session_label)
try:
return self.schedule.index[idx + 1]
except IndexError:
if idx == len(self.schedule.index) - 1:
raise ValueError("There is no next session as this is the end"
" of the exchange calendar.")
else:
raise
|
Given a session label, returns the label of the previous session.
Parameters
----------
session_label: pd.Timestamp
A session whose previous session is desired.
Returns
-------
pd.Timestamp
The previous session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the first session in this
calendar.
|
def previous_session_label(self, session_label):
"""
Given a session label, returns the label of the previous session.
Parameters
----------
session_label: pd.Timestamp
A session whose previous session is desired.
Returns
-------
pd.Timestamp
The previous session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the first session in this
calendar.
"""
idx = self.schedule.index.get_loc(session_label)
if idx == 0:
raise ValueError("There is no previous session as this is the"
" beginning of the exchange calendar.")
return self.schedule.index[idx - 1]
|
Given a session label, return the minutes for that session.
Parameters
----------
session_label: pd.Timestamp (midnight UTC)
A session label whose session's minutes are desired.
Returns
-------
pd.DatetimeIndex
All the minutes for the given session.
|
def minutes_for_session(self, session_label):
"""
Given a session label, return the minutes for that session.
Parameters
----------
session_label: pd.Timestamp (midnight UTC)
A session label whose session's minutes are desired.
Returns
-------
pd.DatetimeIndex
All the minutes for the given session.
"""
return self.minutes_in_range(
start_minute=self.schedule.at[session_label, 'market_open'],
end_minute=self.schedule.at[session_label, 'market_close'],
)
|
Given a session label, return the execution minutes for that session.
Parameters
----------
session_label: pd.Timestamp (midnight UTC)
A session label whose session's minutes are desired.
Returns
-------
pd.DatetimeIndex
All the execution minutes for the given session.
|
def execution_minutes_for_session(self, session_label):
"""
Given a session label, return the execution minutes for that session.
Parameters
----------
session_label: pd.Timestamp (midnight UTC)
A session label whose session's minutes are desired.
Returns
-------
pd.DatetimeIndex
All the execution minutes for the given session.
"""
return self.minutes_in_range(
start_minute=self.execution_time_from_open(
self.schedule.at[session_label, 'market_open'],
),
end_minute=self.execution_time_from_close(
self.schedule.at[session_label, 'market_close'],
),
)
|
Given start and end session labels, return all the sessions in that
range, inclusive.
Parameters
----------
start_session_label: pd.Timestamp (midnight UTC)
The label representing the first session of the desired range.
end_session_label: pd.Timestamp (midnight UTC)
The label representing the last session of the desired range.
Returns
-------
pd.DatetimeIndex
The desired sessions.
|
def sessions_in_range(self, start_session_label, end_session_label):
"""
Given start and end session labels, return all the sessions in that
range, inclusive.
Parameters
----------
start_session_label: pd.Timestamp (midnight UTC)
The label representing the first session of the desired range.
end_session_label: pd.Timestamp (midnight UTC)
The label representing the last session of the desired range.
Returns
-------
pd.DatetimeIndex
The desired sessions.
"""
return self.all_sessions[
self.all_sessions.slice_indexer(
start_session_label,
end_session_label
)
]
|
Given a session label and a window size, returns a list of sessions
of size `count` + 1, that either starts with the given session
(if `count` is positive) or ends with the given session (if `count` is
negative).
Parameters
----------
session_label: pd.Timestamp
The label of the initial session.
count: int
Defines the length and the direction of the window.
Returns
-------
pd.DatetimeIndex
The desired sessions.
|
def sessions_window(self, session_label, count):
"""
Given a session label and a window size, returns a list of sessions
of size `count` + 1, that either starts with the given session
(if `count` is positive) or ends with the given session (if `count` is
negative).
Parameters
----------
session_label: pd.Timestamp
The label of the initial session.
count: int
Defines the length and the direction of the window.
Returns
-------
pd.DatetimeIndex
The desired sessions.
"""
start_idx = self.schedule.index.get_loc(session_label)
end_idx = start_idx + count
return self.all_sessions[
min(start_idx, end_idx):max(start_idx, end_idx) + 1
]
|
Given a start and end session label, returns the distance between them.
For example, for three consecutive sessions Mon., Tues., and Wed,
``session_distance(Mon, Wed)`` returns 3. If ``start_session`` is after
``end_session``, the value will be negated.
Parameters
----------
start_session_label: pd.Timestamp
The label of the start session.
end_session_label: pd.Timestamp
The label of the ending session inclusive.
Returns
-------
int
The distance between the two sessions.
|
def session_distance(self, start_session_label, end_session_label):
"""
Given a start and end session label, returns the distance between them.
For example, for three consecutive sessions Mon., Tues., and Wed,
``session_distance(Mon, Wed)`` returns 3. If ``start_session`` is after
``end_session``, the value will be negated.
Parameters
----------
start_session_label: pd.Timestamp
The label of the start session.
end_session_label: pd.Timestamp
The label of the ending session inclusive.
Returns
-------
int
The distance between the two sessions.
"""
negate = end_session_label < start_session_label
if negate:
start_session_label, end_session_label = (
end_session_label,
start_session_label,
)
start_idx = self.all_sessions.searchsorted(start_session_label)
end_idx = self.all_sessions.searchsorted(
end_session_label,
side='right',
)
out = end_idx - start_idx
if negate:
out = -out
return out
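A minimal, runnable sketch of the searchsorted arithmetic used above, on a toy session index rather than a real calendar object (the dates are illustrative only):
import pandas as pd
# Three consecutive sessions: Mon, Tue, Wed
sessions = pd.DatetimeIndex(['2019-01-07', '2019-01-08', '2019-01-09'], tz='UTC')
# side='left' for the start and side='right' for the end make the count inclusive
start_idx = sessions.searchsorted(sessions[0])
end_idx = sessions.searchsorted(sessions[2], side='right')
print(end_idx - start_idx)  # 3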
|
Given start and end minutes, return all the calendar minutes
in that range, inclusive.
Given minutes don't need to be calendar minutes.
Parameters
----------
start_minute: pd.Timestamp
The minute representing the start of the desired range.
end_minute: pd.Timestamp
The minute representing the end of the desired range.
Returns
-------
pd.DatetimeIndex
The minutes in the desired range.
|
def minutes_in_range(self, start_minute, end_minute):
"""
Given start and end minutes, return all the calendar minutes
in that range, inclusive.
Given minutes don't need to be calendar minutes.
Parameters
----------
start_minute: pd.Timestamp
The minute representing the start of the desired range.
end_minute: pd.Timestamp
The minute representing the end of the desired range.
Returns
-------
pd.DatetimeIndex
The minutes in the desired range.
"""
start_idx = searchsorted(self._trading_minutes_nanos,
start_minute.value)
end_idx = searchsorted(self._trading_minutes_nanos,
end_minute.value)
if end_minute.value == self._trading_minutes_nanos[end_idx]:
# if the end minute is a market minute, increase by 1
end_idx += 1
return self.all_minutes[start_idx:end_idx]
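A small self-contained sketch of the end-index adjustment above, using a toy minute index and its nanosecond values (the names here are illustrative, not the calendar's internals):
import numpy as np
import pandas as pd
minutes = pd.date_range('2019-01-07 14:31', periods=5, freq='min', tz='UTC')
nanos = minutes.asi8  # int64 nanosecond view, analogous to _trading_minutes_nanos
end_minute = minutes[2]
end_idx = np.searchsorted(nanos, end_minute.value)
if end_minute.value == nanos[end_idx]:
    end_idx += 1  # include end_minute itself when it is a market minute
print(minutes[0:end_idx])  # the first three minutes, inclusive of end_minute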
|
Returns all the minutes for all the sessions from the given start
session label to the given end session label, inclusive.
Parameters
----------
start_session_label: pd.Timestamp
The label of the first session in the range.
end_session_label: pd.Timestamp
The label of the last session in the range.
Returns
-------
pd.DatetimeIndex
The minutes in the desired range.
|
def minutes_for_sessions_in_range(self,
start_session_label,
end_session_label):
"""
Returns all the minutes for all the sessions from the given start
session label to the given end session label, inclusive.
Parameters
----------
start_session_label: pd.Timestamp
The label of the first session in the range.
end_session_label: pd.Timestamp
The label of the last session in the range.
Returns
-------
pd.DatetimeIndex
The minutes in the desired range.
"""
first_minute, _ = self.open_and_close_for_session(start_session_label)
_, last_minute = self.open_and_close_for_session(end_session_label)
return self.minutes_in_range(first_minute, last_minute)
|
Returns a tuple of timestamps of the open and close of the session
represented by the given label.
Parameters
----------
session_label: pd.Timestamp
The session whose open and close are desired.
Returns
-------
(Timestamp, Timestamp)
The open and close for the given session.
|
def open_and_close_for_session(self, session_label):
"""
Returns a tuple of timestamps of the open and close of the session
represented by the given label.
Parameters
----------
session_label: pd.Timestamp
The session whose open and close are desired.
Returns
-------
(Timestamp, Timestamp)
The open and close for the given session.
"""
sched = self.schedule
# `market_open` and `market_close` should be timezone aware, but pandas
# 0.16.1 does not appear to support this:
# http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#datetime-with-tz # noqa
return (
sched.at[session_label, 'market_open'].tz_localize(UTC),
sched.at[session_label, 'market_close'].tz_localize(UTC),
)
|
Returns a DatetimeIndex representing all the minutes in this calendar.
|
def all_minutes(self):
"""
Returns a DatetimeIndex representing all the minutes in this calendar.
"""
opens_in_ns = self._opens.values.astype(
'datetime64[ns]',
).view('int64')
closes_in_ns = self._closes.values.astype(
'datetime64[ns]',
).view('int64')
return DatetimeIndex(
compute_all_minutes(opens_in_ns, closes_in_ns),
tz=UTC,
)
|
Given a minute, get the label of its containing session.
Parameters
----------
dt : pd.Timestamp or nanosecond offset
The dt for which to get the containing session.
direction: str
"next" (default) means that if the given dt is not part of a
session, return the label of the next session.
"previous" means that if the given dt is not part of a session,
return the label of the previous session.
"none" means that a KeyError will be raised if the given
dt is not part of a session.
Returns
-------
pd.Timestamp (midnight UTC)
The label of the containing session.
|
def minute_to_session_label(self, dt, direction="next"):
"""
Given a minute, get the label of its containing session.
Parameters
----------
dt : pd.Timestamp or nanosecond offset
The dt for which to get the containing session.
direction: str
"next" (default) means that if the given dt is not part of a
session, return the label of the next session.
"previous" means that if the given dt is not part of a session,
return the label of the previous session.
"none" means that a KeyError will be raised if the given
dt is not part of a session.
Returns
-------
pd.Timestamp (midnight UTC)
The label of the containing session.
"""
if direction == "next":
try:
return self._minute_to_session_label_cache[dt]
except KeyError:
pass
idx = searchsorted(self.market_closes_nanos, dt)
current_or_next_session = self.schedule.index[idx]
self._minute_to_session_label_cache[dt] = current_or_next_session
if direction == "next":
return current_or_next_session
elif direction == "previous":
if not is_open(self.market_opens_nanos, self.market_closes_nanos,
dt):
# if the exchange is closed, use the previous session
return self.schedule.index[idx - 1]
elif direction == "none":
if not is_open(self.market_opens_nanos, self.market_closes_nanos,
dt):
# if the exchange is closed, blow up
raise ValueError("The given dt is not an exchange minute!")
else:
# invalid direction
raise ValueError("Invalid direction parameter: "
"{0}".format(direction))
return current_or_next_session
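The "current or next session" lookup above reduces to a searchsorted over session close times; a toy sketch with made-up nanosecond values:
import numpy as np
market_closes_nanos = np.array([100, 200, 300])  # toy close times, one per session
dt = 150  # a minute after the first session's close
idx = np.searchsorted(market_closes_nanos, dt)
print(idx)  # 1 -> the session containing dt, or the next one if the market is closed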
|
Given a sorted DatetimeIndex of market minutes, return a
DatetimeIndex of the corresponding session labels.
Parameters
----------
index: pd.DatetimeIndex or pd.Series
The ordered list of market minutes we want session labels for.
Returns
-------
pd.DatetimeIndex (UTC)
The list of session labels corresponding to the given minutes.
|
def minute_index_to_session_labels(self, index):
"""
Given a sorted DatetimeIndex of market minutes, return a
DatetimeIndex of the corresponding session labels.
Parameters
----------
index: pd.DatetimeIndex or pd.Series
The ordered list of market minutes we want session labels for.
Returns
-------
pd.DatetimeIndex (UTC)
The list of session labels corresponding to the given minutes.
"""
if not index.is_monotonic_increasing:
raise ValueError(
"Non-ordered index passed to minute_index_to_session_labels."
)
# Find the indices of the previous open and the next close for each
# minute.
prev_opens = (
self._opens.values.searchsorted(index.values, side='right') - 1
)
next_closes = (
self._closes.values.searchsorted(index.values, side='left')
)
# If they don't match, the minute is outside the trading day. Barf.
mismatches = (prev_opens != next_closes)
if mismatches.any():
# Show the first bad minute in the error message.
bad_ix = np.flatnonzero(mismatches)[0]
example = index[bad_ix]
prev_day = prev_opens[bad_ix]
prev_open, prev_close = self.schedule.iloc[prev_day]
next_open, next_close = self.schedule.iloc[prev_day + 1]
raise ValueError(
"{num} non-market minutes in minute_index_to_session_labels:\n"
"First Bad Minute: {first_bad}\n"
"Previous Session: {prev_open} -> {prev_close}\n"
"Next Session: {next_open} -> {next_close}"
.format(
num=mismatches.sum(),
first_bad=example,
prev_open=prev_open, prev_close=prev_close,
next_open=next_open, next_close=next_close)
)
return self.schedule.index[prev_opens]
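The mismatch test above is easiest to see on toy arrays: a minute is a market minute exactly when its previous open and next close belong to the same session (values below are illustrative):
import numpy as np
opens = np.array([10, 30])    # session opens
closes = np.array([20, 40])   # session closes
minutes = np.array([15, 25, 35])
prev_opens = np.searchsorted(opens, minutes, side='right') - 1
next_closes = np.searchsorted(closes, minutes, side='left')
print(prev_opens != next_closes)  # [False  True False]: 25 falls between sessions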
|
Compute a Series of times associated with special dates.
Parameters
----------
calendars : list[(datetime.time, HolidayCalendar)]
Pairs of time and calendar describing when that time occurs. These
are used to describe regularly-scheduled late opens or early
closes.
ad_hoc_dates : list[(datetime.time, list[pd.Timestamp])]
Pairs of time and list of dates associated with the given times.
These are used to describe late opens or early closes that occurred
for unscheduled or otherwise irregular reasons.
start_date : pd.Timestamp
Start of the range for which we should calculate special dates.
end_date : pd.Timestamp
End of the range for which we should calculate special dates.
Returns
-------
special_dates : pd.Series
Series mapping trading sessions with special opens/closes to the
special open/close for that session.
|
def _special_dates(self, calendars, ad_hoc_dates, start_date, end_date):
"""
Compute a Series of times associated with special dates.
Parameters
----------
calendars : list[(datetime.time, HolidayCalendar)]
Pairs of time and calendar describing when that time occurs. These
are used to describe regularly-scheduled late opens or early
closes.
ad_hoc_dates : list[(datetime.time, list[pd.Timestamp])]
Pairs of time and list of dates associated with the given times.
These are used to describe late opens or early closes that occurred
for unscheduled or otherwise irregular reasons.
start_date : pd.Timestamp
Start of the range for which we should calculate special dates.
end_date : pd.Timestamp
End of the range for which we should calculate special dates.
Returns
-------
special_dates : pd.Series
Series mapping trading sessions with special opens/closes to the
special open/close for that session.
"""
# List of Series for regularly-scheduled times.
regular = [
scheduled_special_times(
calendar,
start_date,
end_date,
time_,
self.tz,
)
for time_, calendar in calendars
]
# List of Series for ad-hoc times.
ad_hoc = [
pd.Series(
index=pd.to_datetime(datetimes, utc=True),
data=days_at_time(datetimes, time_, self.tz),
)
for time_, datetimes in ad_hoc_dates
]
merged = regular + ad_hoc
if not merged:
# Concat barfs if the input has length 0.
return pd.Series([])
result = pd.concat(merged).sort_index()
return result.loc[(result >= start_date) & (result <= end_date)]
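A hedged sketch of the concat-and-filter step at the end of the method above, with two hand-made Series standing in for the scheduled and ad-hoc inputs:
import pandas as pd
scheduled = pd.Series(
    data=pd.to_datetime(['2019-07-03 17:00', '2019-11-29 18:00'], utc=True),
    index=pd.to_datetime(['2019-07-03', '2019-11-29'], utc=True),
)
ad_hoc = pd.Series(
    data=pd.to_datetime(['2018-12-24 18:00'], utc=True),
    index=pd.to_datetime(['2018-12-24'], utc=True),
)
start, end = pd.Timestamp('2019-01-01', tz='UTC'), pd.Timestamp('2019-12-31', tz='UTC')
result = pd.concat([scheduled, ad_hoc]).sort_index()
print(result.loc[(result >= start) & (result <= end)])  # only the 2019 early closes remain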
|
Read a text file.
|
def read(*paths):
"""Read a text file."""
basedir = os.path.dirname(__file__)
fullpath = os.path.join(basedir, *paths)
contents = io.open(fullpath, encoding='utf-8').read().strip()
return contents
|
Bootstrap a processing pipeline script.
ARG is either a path or a URL for some data to read from, 'hello-world' for a full working code example,
or leave empty for an interactive walkthrough.
|
def init(arg):
"""Bootstrap a processing pipeline script.
ARG is either a path or a URL for some data to read from, 'hello-world' for a full working code example,
or leave empty for an interactive walkthrough.
"""
answers = {'a': 1}
if arg == 'interactive':
input("""Hi There!
DataFlows will now bootstrap a data processing flow based on your needs.
Press any key to start...
""")
elif arg == 'hello-world':
raise NotImplementedError()
else:
url = arg
answers = dict(
input='remote',
title=os.path.basename(url),
input_url=url,
processing=[],
output='print_n_pkg'
)
extract_format(answers, url)
questions = [
# Input
inquirer.List('input_str',
message='What is the source of your data?',
choices=INPUTS.keys(),
ignore=lambda ctx: ctx.get('input') is not None,
validate=convert_input),
# Input Parameters
inquirer.Text('input_url',
message="What is the path of that file",
ignore=fany(lambda ctx: ctx.get('input') != 'file',
lambda ctx: ctx.get('input_url') is not None),
validate=fall(not_empty, extract_format)),
inquirer.List('format',
message="We couldn't detect the file format - which is it?",
choices=FORMATS[:-1],
ignore=fany(lambda ctx: ctx.get('input') != 'file',
lambda ctx: ctx.get('format') in FORMATS)),
inquirer.Text('input_url',
message="Where is that file located (URL)",
ignore=fany(lambda ctx: ctx.get('input') != 'remote',
lambda ctx: ctx.get('input_url') is not None),
validate=fall(extract_format, not_empty, valid_url)),
inquirer.List('format',
message="We couldn't detect the source format - which is it",
choices=FORMATS,
ignore=fany(lambda ctx: ctx['input'] != 'remote',
lambda ctx: ctx.get('format') in FORMATS)
),
inquirer.Text('sheet',
message="Which sheet in the spreadsheet should be processed (name or index)",
validate=not_empty,
ignore=lambda ctx: ctx.get('format') not in ('xls', 'xlsx', 'ods'),
),
inquirer.Text('input_url',
message="What is the connection string to the database",
validate=not_empty,
ignore=fany(lambda ctx: ctx['input'] != 'sql',
lambda ctx: ctx.get('input_url') is not None),
),
inquirer.Text('input_db_table',
message="...and the name of the database table to extract",
validate=not_empty,
ignore=fany(lambda ctx: ctx['input'] != 'sql',
lambda ctx: ctx.get('input_db_table') is not None),
),
inquirer.Text('input_url',
message="Describe that other source (shortly)",
ignore=fany(lambda ctx: ctx['input'] != 'other',
lambda ctx: ctx.get('input_url') is not None),
),
# Processing
inquirer.Checkbox('processing_str',
message="What kind of processing would you like to run on the data",
choices=PROCESSING.keys(),
ignore=lambda ctx: ctx.get('processing') is not None,
validate=convert_processing),
# Output
inquirer.List('output_str',
message="Finally, where would you like the output data",
choices=OUTPUTS.keys(),
ignore=lambda ctx: ctx.get('output') is not None,
validate=convert_output),
inquirer.Text('output_url',
message="What is the connection string to the database",
validate=not_empty,
ignore=fany(lambda ctx: ctx['output'] != 'sql',
lambda ctx: ctx.get('output_url') is not None),
),
inquirer.Text('output_db_table',
message="...and the name of the database table to write to",
validate=not_empty,
ignore=fany(lambda ctx: ctx['output'] != 'sql',
lambda ctx: ctx.get('output_db_table') is not None),
),
# # Finalize
inquirer.Text('title',
message="That's it! Now, just provide a title for your processing flow",
ignore=lambda ctx: ctx.get('title') is not None,
validate=not_empty),
]
answers = inquirer.prompt(questions, answers=answers, theme=themes.GreenPassion())
if answers is None:
return
answers['slug'] = slugify.slugify(answers['title'], separator='_')
filename = '{slug}.py'.format(**answers)
with open(filename, 'w') as out:
print('Writing processing code into {}'.format(filename))
out.write(render(answers))
try:
print('Running {}'.format(filename))
ret = subprocess.check_output('python '+filename,
stderr=subprocess.PIPE, universal_newlines=True, shell=True)
print(ret)
print('Done!')
except subprocess.CalledProcessError as e:
print("Processing failed, here's the error:")
print(e.stderr)
answers = inquirer.prompt([
inquirer.Confirm('edit',
message='Would you like to open {} in the default editor?'.format(filename),
default=False)
])
if answers['edit']:
click.edit(filename=filename)
|
Returns a Python object decoded from the bytes of this encoding.
Raises
------
~ipfsapi.exceptions.DecodingError
Parameters
----------
raw : bytes
Data to be parsed
Returns
-------
object
|
def parse(self, raw):
"""Returns a Python object decoded from the bytes of this encoding.
Raises
------
~ipfsapi.exceptions.DecodingError
Parameters
----------
raw : bytes
Data to be parsed
Returns
-------
object
"""
results = list(self.parse_partial(raw))
results.extend(self.parse_finalize())
return results[0] if len(results) == 1 else results
|
Incrementally decodes JSON data sets into Python objects.
Raises
------
~ipfsapi.exceptions.DecodingError
Returns
-------
generator
|
def parse_partial(self, data):
"""Incrementally decodes JSON data sets into Python objects.
Raises
------
~ipfsapi.exceptions.DecodingError
Returns
-------
generator
"""
try:
# Python 3 requires all JSON data to be a text string
lines = self._decoder1.decode(data, False).split("\n")
# Add first input line to last buffer line, if applicable, to
# handle cases where the JSON string has been chopped in half
# at the network level due to streaming
if len(self._buffer) > 0 and self._buffer[-1] is not None:
self._buffer[-1] += lines[0]
self._buffer.extend(lines[1:])
else:
self._buffer.extend(lines)
except UnicodeDecodeError as error:
raise exceptions.DecodingError('json', error)
# Process data buffer
index = 0
try:
# Process each line as separate buffer
#PERF: This way the `.lstrip()` call becomes almost always a NOP
# even if it does return a different string it will only
# have to allocate a new buffer for the currently processed
# line.
while index < len(self._buffer):
while self._buffer[index]:
# Make sure buffer does not start with whitespace
#PERF: `.lstrip()` does not reallocate if the string does
# not actually start with whitespace.
self._buffer[index] = self._buffer[index].lstrip()
# Handle case where the remainder of the line contained
# only whitespace
if not self._buffer[index]:
self._buffer[index] = None
continue
# Try decoding the partial data buffer and return results
# from this
data = self._buffer[index]
for index2 in range(index, len(self._buffer)):
# If decoding doesn't succeed with the currently
# selected buffer (very unlikely with our current
# class of input data) then retry with appending
# any other pending pieces of input data
# This will happen with JSON data that contains
# arbitrary new-lines: "{1:\n2,\n3:4}"
if index2 > index:
data += "\n" + self._buffer[index2]
try:
(obj, offset) = self._decoder2.raw_decode(data)
except ValueError:
# Treat error as fatal if we have already added
# the final buffer to the input
if (index2 + 1) == len(self._buffer):
raise
else:
index = index2
break
# Decoding succeeded – yield result and shorten buffer
yield obj
if offset < len(self._buffer[index]):
self._buffer[index] = self._buffer[index][offset:]
else:
self._buffer[index] = None
index += 1
except ValueError as error:
# It is unfortunately not possible to reliably detect whether
# parsing ended because of an error *within* the JSON string, or
# an unexpected *end* of the JSON string.
# We therefore have to assume that any error that occurs here
# *might* be related to the JSON parser hitting EOF and therefore
# have to postpone error reporting until `parse_finalize` is
# called.
self._lasterror = error
finally:
# Remove all processed buffers
del self._buffer[0:index]
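The buffering strategy above boils down to `json.JSONDecoder.raw_decode` applied to a growing text buffer; a minimal standalone sketch of that idea (not the class's actual state handling):
import json
decoder = json.JSONDecoder()
buffer = ""
for chunk in ['{"a": 1}\n{"b"', ': 2}\n']:  # a JSON stream split mid-object
    buffer += chunk
    while buffer.strip():
        try:
            obj, offset = decoder.raw_decode(buffer.lstrip())
        except ValueError:
            break  # incomplete JSON: wait for the next chunk
        print(obj)
        buffer = buffer.lstrip()[offset:]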
|
Raises errors for incomplete buffered data that could not be parsed
because the end of the input data has been reached.
Raises
------
~ipfsapi.exceptions.DecodingError
Returns
-------
tuple : Always empty
|
def parse_finalize(self):
"""Raises errors for incomplete buffered data that could not be parsed
because the end of the input data has been reached.
Raises
------
~ipfsapi.exceptions.DecodingError
Returns
-------
tuple : Always empty
"""
try:
try:
# Raise exception for remaining bytes in bytes decoder
self._decoder1.decode(b'', True)
except UnicodeDecodeError as error:
raise exceptions.DecodingError('json', error)
# Late raise errors that looked like they could have been fixed if
# the caller had provided more data
if self._buffer:
raise exceptions.DecodingError('json', self._lasterror)
finally:
# Reset state
self._buffer = []
self._lasterror = None
self._decoder1.reset()
return ()
|
Returns ``obj`` serialized as JSON formatted bytes.
Raises
------
~ipfsapi.exceptions.EncodingError
Parameters
----------
obj : str | list | dict | int
JSON serializable Python object
Returns
-------
bytes
|
def encode(self, obj):
"""Returns ``obj`` serialized as JSON formatted bytes.
Raises
------
~ipfsapi.exceptions.EncodingError
Parameters
----------
obj : str | list | dict | int
JSON serializable Python object
Returns
-------
bytes
"""
try:
result = json.dumps(obj, sort_keys=True, indent=None,
separators=(',', ':'), ensure_ascii=False)
if isinstance(result, six.text_type):
return result.encode("utf-8")
else:
return result
except (UnicodeEncodeError, TypeError) as error:
raise exceptions.EncodingError('json', error)
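For reference, the compact separators and key sorting used above produce output like this (a plain standard-library call, nothing library-specific):
import json
payload = {"b": 1, "a": ["x", "ü"]}
text = json.dumps(payload, sort_keys=True, indent=None,
                  separators=(',', ':'), ensure_ascii=False)
print(text)                  # {"a":["x","ü"],"b":1}
print(text.encode("utf-8"))  # the bytes actually returned by encode()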
|
Parses the buffered data and yields the result.
Raises
------
~ipfsapi.exceptions.DecodingError
Returns
-------
generator
|
def parse_finalize(self):
"""Parses the buffered data and yields the result.
Raises
------
~ipfsapi.exceptions.DecodingError
Returns
-------
generator
"""
try:
self._buffer.seek(0, 0)
yield pickle.load(self._buffer)
except pickle.UnpicklingError as error:
raise exceptions.DecodingError('pickle', error)
|
Returns ``obj`` serialized as a pickle binary string.
Raises
------
~ipfsapi.exceptions.EncodingError
Parameters
----------
obj : object
Serializable Python object
Returns
-------
bytes
|
def encode(self, obj):
"""Returns ``obj`` serialized as a pickle binary string.
Raises
------
~ipfsapi.exceptions.EncodingError
Parameters
----------
obj : object
Serializable Python object
Returns
-------
bytes
"""
try:
return pickle.dumps(obj)
except pickle.PicklingError as error:
raise exceptions.EncodingError('pickle', error)
|
Translate a shell glob PATTERN to a regular expression.
This is almost entirely based on `fnmatch.translate` source-code from the
python 3.5 standard-library.
|
def glob_compile(pat):
"""Translate a shell glob PATTERN to a regular expression.
This is almost entirely based on `fnmatch.translate` source-code from the
python 3.5 standard-library.
"""
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i + 1
if c == '/' and len(pat) > (i + 2) and pat[i:(i + 3)] == '**/':
# Special-case for "any number of sub-directories" operator since
# may also expand to no entries:
# Otherwise `a/**/b` would expand to `a[/].*[/]b` which wouldn't
# match the immediate sub-directories of `a`, like `a/b`.
i = i + 3
res = res + '[/]([^/]*[/])*'
elif c == '*':
if len(pat) > i and pat[i] == '*':
i = i + 1
res = res + '.*'
else:
res = res + '[^/]*'
elif c == '?':
res = res + '[^/]'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j + 1
if j < n and pat[j] == ']':
j = j + 1
while j < n and pat[j] != ']':
j = j + 1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\', '\\\\')
i = j + 1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return re.compile('(?ms)^' + res + r'\Z')
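A quick sanity check of the `**` handling, assuming `glob_compile` above is in scope:
rx = glob_compile('a/**/b')
print(bool(rx.match('a/b')))      # True: '**/' may also expand to nothing
print(bool(rx.match('a/x/y/b')))  # True: any number of sub-directories
print(bool(rx.match('ab')))       # False: the separator is still required
print(bool(glob_compile('*.txt').match('notes.txt')))      # True
print(bool(glob_compile('*.txt').match('dir/notes.txt')))  # False: '*' stops at '/'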
|
Gets a buffered generator for streaming files.
Returns a buffered generator which encodes a file or list of files as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
files : str
The file(s) to stream
chunk_size : int
Maximum size of each stream chunk
|
def stream_files(files, chunk_size=default_chunk_size):
"""Gets a buffered generator for streaming files.
Returns a buffered generator which encodes a file or list of files as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
files : str
The file(s) to stream
chunk_size : int
Maximum size of each stream chunk
"""
stream = FileStream(files, chunk_size=chunk_size)
return stream.body(), stream.headers
|
Gets a buffered generator for streaming directories.
Returns a buffered generator which encodes a directory as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
directory : str
The filepath of the directory to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk
|
def stream_directory(directory,
recursive=False,
patterns='**',
chunk_size=default_chunk_size):
"""Gets a buffered generator for streaming directories.
Returns a buffered generator which encodes a directory as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
directory : str
The filepath of the directory to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk
"""
stream = DirectoryStream(directory,
recursive=recursive,
patterns=patterns,
chunk_size=chunk_size)
return stream.body(), stream.headers
|
Gets a buffered generator for streaming either files or directories.
Returns a buffered generator which encodes the file or directory at the
given path as :mimetype:`multipart/form-data` with the corresponding
headers.
Parameters
----------
path : str
The filepath of the directory or file to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk
|
def stream_filesystem_node(path,
recursive=False,
patterns='**',
chunk_size=default_chunk_size):
"""Gets a buffered generator for streaming either files or directories.
Returns a buffered generator which encodes the file or directory at the
given path as :mimetype:`multipart/form-data` with the corresponding
headers.
Parameters
----------
path : str
The filepath of the directory or file to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk
"""
is_dir = isinstance(path, six.string_types) and os.path.isdir(path)
if recursive or is_dir:
return stream_directory(path, recursive, patterns, chunk_size)
else:
return stream_files(path, chunk_size)
|
Gets a buffered generator for streaming binary data.
Returns a buffered generator which encodes binary data as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
data : bytes
The data bytes to stream
chunk_size : int
The maximum size of each stream chunk
Returns
-------
(generator, dict)
|
def stream_bytes(data, chunk_size=default_chunk_size):
"""Gets a buffered generator for streaming binary data.
Returns a buffered generator which encodes binary data as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
data : bytes
The data bytes to stream
chunk_size : int
The maximum size of each stream chunk
Returns
-------
(generator, dict)
"""
stream = BytesStream(data, chunk_size=chunk_size)
return stream.body(), stream.headers
|
Gets a buffered generator for streaming text.
Returns a buffered generator which encodes a string as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
text : str
The text to stream
chunk_size : int
The maximum size of each stream chunk
Returns
-------
(generator, dict)
|
def stream_text(text, chunk_size=default_chunk_size):
"""Gets a buffered generator for streaming text.
Returns a buffered generator which encodes a string as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
text : str
The text to stream
chunk_size : int
The maximum size of each stream chunk
Returns
-------
(generator, dict)
"""
if isgenerator(text):
def binary_stream():
for item in text:
if six.PY2 and isinstance(item, six.binary_type):
#PY2: Allow binary strings under Python 2 since
# Python 2 code is not expected to always get the
# distinction between text and binary strings right.
yield item
else:
yield item.encode("utf-8")
data = binary_stream()
elif six.PY2 and isinstance(text, six.binary_type):
#PY2: See above.
data = text
else:
data = text.encode("utf-8")
return stream_bytes(data, chunk_size)
|
Yields the HTTP header text for some content.
Parameters
----------
headers : dict
The headers to yield
|
def _write_headers(self, headers):
"""Yields the HTTP header text for some content.
Parameters
----------
headers : dict
The headers to yield
"""
if headers:
for name in sorted(headers.keys()):
yield name.encode("ascii")
yield b': '
yield headers[name].encode("ascii")
yield CRLF
yield CRLF
|
Yields the opening text of a file section in multipart HTTP.
Parameters
----------
fn : str
Filename for the file being opened and added to the HTTP body
|
def file_open(self, fn):
"""Yields the opening text of a file section in multipart HTTP.
Parameters
----------
fn : str
Filename for the file being opened and added to the HTTP body
"""
yield b'--'
yield self.boundary.encode()
yield CRLF
headers = content_disposition(fn)
headers.update(content_type(fn))
for c in self._write_headers(headers):
yield c
|
Yields chunks of a file.
Parameters
----------
fp : io.RawIOBase
The file to break into chunks
(must be an open file or have the ``readinto`` method)
|
def file_chunks(self, fp):
"""Yields chunks of a file.
Parameters
----------
fp : io.RawIOBase
The file to break into chunks
(must be an open file or have the ``readinto`` method)
"""
fsize = utils.file_size(fp)
offset = 0
if hasattr(fp, 'readinto'):
while offset < fsize:
nb = fp.readinto(self._internal)
yield self.buf[:nb]
offset += nb
else:
while offset < fsize:
nb = min(self.chunk_size, fsize - offset)
yield fp.read(nb)
offset += nb
|
Generates byte chunks of a given size.
Takes a bytes generator and yields chunks of a maximum of
``chunk_size`` bytes.
Parameters
----------
gen : generator
The bytes generator that produces the bytes
|
def gen_chunks(self, gen):
"""Generates byte chunks of a given size.
Takes a bytes generator and yields chunks of a maximum of
``chunk_size`` bytes.
Parameters
----------
gen : generator
The bytes generator that produces the bytes
"""
for data in gen:
size = len(data)
if size < self.chunk_size:
yield data
else:
mv = buffer(data)
offset = 0
while offset < size:
nb = min(self.chunk_size, size - offset)
yield mv[offset:offset + nb]
offset += nb
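The slicing loop above is easier to see on plain bytes; `buffer()` is the Python 2 name for what `memoryview` provides on Python 3, so a modern sketch of the same chunking looks like this:
data = b'abcdefghij'
chunk_size = 4
mv = memoryview(data)
chunks = []
offset = 0
while offset < len(data):
    nb = min(chunk_size, len(data) - offset)
    chunks.append(bytes(mv[offset:offset + nb]))
    offset += nb
print(chunks)  # [b'abcd', b'efgh', b'ij']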
|
Yields the body of the buffered file.
|
def body(self):
"""Yields the body of the buffered file."""
for fp, need_close in self.files:
try:
name = os.path.basename(fp.name)
except AttributeError:
name = ''
for chunk in self.gen_chunks(self.envelope.file_open(name)):
yield chunk
for chunk in self.file_chunks(fp):
yield chunk
for chunk in self.gen_chunks(self.envelope.file_close()):
yield chunk
if need_close:
fp.close()
for chunk in self.close():
yield chunk
|
Pre-formats the multipart HTTP request to transmit the directory.
|
def _prepare(self):
"""Pre-formats the multipart HTTP request to transmit the directory."""
names = []
added_directories = set()
def add_directory(short_path):
# Do not continue if this directory has already been added
if short_path in added_directories:
return
# Scan for first super-directory that has already been added
dir_base = short_path
dir_parts = []
while dir_base:
dir_base, dir_name = os.path.split(dir_base)
dir_parts.append(dir_name)
if dir_base in added_directories:
break
# Add missing intermediate directory nodes in the right order
while dir_parts:
dir_base = os.path.join(dir_base, dir_parts.pop())
# Create an empty, fake file to represent the directory
mock_file = io.StringIO()
mock_file.write(u'')
# Add this directory to those that will be sent
names.append(('files',
(dir_base.replace(os.sep, '/'), mock_file, 'application/x-directory')))
# Remember that this directory has already been sent
added_directories.add(dir_base)
def add_file(short_path, full_path):
try:
# Add the file, converting path separators to POSIX style
names.append(('files', (short_path.replace(os.sep, '/'),
open(full_path, 'rb'),
'application/octet-stream')))
except OSError:
# File might have disappeared between `os.walk()` and `open()`
pass
def match_short_path(short_path):
# Remove initial path component so that all files are based in
# the target directory itself (not one level above)
if os.sep in short_path:
path = short_path.split(os.sep, 1)[1]
else:
return False
# Convert all path separators to POSIX style
path = path.replace(os.sep, '/')
# Do the matching on the simplified path
for pattern in self.patterns:
if pattern.match(path):
return True
return False
# Identify the unnecessary portion of the relative path
truncate = os.path.dirname(self.directory)
# Traverse the filesystem downward from the target directory's uri
# Errors: `os.walk()` will simply return an empty generator if the
# target directory does not exist.
wildcard_directories = set()
for curr_dir, _, files in os.walk(self.directory):
# find the path relative to the directory being added
if len(truncate) > 0:
_, _, short_path = curr_dir.partition(truncate)
else:
short_path = curr_dir
# remove leading / or \ if it is present
if short_path.startswith(os.sep):
short_path = short_path[1:]
wildcard_directory = False
if os.path.split(short_path)[0] in wildcard_directories:
# Parent directory has matched a pattern, all sub-nodes should
# be added too
wildcard_directories.add(short_path)
wildcard_directory = True
else:
# Check if directory path matches one of the patterns
if match_short_path(short_path):
# Directory matched pattern and it should therefore
# be added along with all of its contents
wildcard_directories.add(short_path)
wildcard_directory = True
# Always add directories within wildcard directories - even if they
# are empty
if wildcard_directory:
add_directory(short_path)
# Iterate across the files in the current directory
for filename in files:
# Find the filename relative to the directory being added
short_name = os.path.join(short_path, filename)
filepath = os.path.join(curr_dir, filename)
if wildcard_directory:
# Always add files in wildcard directories
add_file(short_name, filepath)
else:
# Add file (and all missing intermediary directories)
# if it matches one of the patterns
if match_short_path(short_name):
add_directory(short_path)
add_file(short_name, filepath)
# Send the request and present the response body to the user
req = requests.Request("POST", 'http://localhost', files=names)
prep = req.prepare()
return prep
|
Yields the encoded body.
|
def body(self):
"""Yields the encoded body."""
for chunk in self.gen_chunks(self.envelope.file_open(self.name)):
yield chunk
for chunk in self.gen_chunks(self.data):
yield chunk
for chunk in self.gen_chunks(self.envelope.file_close()):
yield chunk
for chunk in self.close():
yield chunk
|
Decorator that returns a function named wrapper.
When invoked, wrapper invokes func with default kwargs appended.
Parameters
----------
func : callable
The function to append the default kwargs to
|
def pass_defaults(func):
"""Decorator that returns a function named wrapper.
When invoked, wrapper invokes func with default kwargs appended.
Parameters
----------
func : callable
The function to append the default kwargs to
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
merged = {}
merged.update(self.defaults)
merged.update(kwargs)
return func(self, *args, **merged)
return wrapper
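A hedged usage sketch of the decorator above (the `Example` class and its `defaults` dict are made up for illustration; `pass_defaults` is assumed to be in scope):
class Example:
    defaults = {'decoder': 'json'}  # hypothetical per-instance defaults

    @pass_defaults
    def request(self, path, **kwargs):
        return path, kwargs

ex = Example()
print(ex.request('/id'))                  # ('/id', {'decoder': 'json'})
print(ex.request('/id', decoder='none'))  # explicit kwargs override: ('/id', {'decoder': 'none'})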
|
Makes an HTTP request to the IPFS daemon.
This function returns the contents of the HTTP response from the IPFS
daemon.
Raises
------
~ipfsapi.exceptions.ErrorResponse
~ipfsapi.exceptions.ConnectionError
~ipfsapi.exceptions.ProtocolError
~ipfsapi.exceptions.StatusError
~ipfsapi.exceptions.TimeoutError
Parameters
----------
path : str
The REST command path to send
args : list
Positional parameters to be sent along with the HTTP request
files : :class:`io.RawIOBase` | :obj:`str` | :obj:`list`
The file object(s) or path(s) to stream to the daemon
opts : dict
Query string parameters to be sent along with the HTTP request
decoder : str
The encoder to use to parse the HTTP response
kwargs : dict
Additional arguments to pass to :mod:`requests`
|
def request(self, path,
args=[], files=[], opts={}, stream=False,
decoder=None, headers={}, data=None):
"""Makes an HTTP request to the IPFS daemon.
This function returns the contents of the HTTP response from the IPFS
daemon.
Raises
------
~ipfsapi.exceptions.ErrorResponse
~ipfsapi.exceptions.ConnectionError
~ipfsapi.exceptions.ProtocolError
~ipfsapi.exceptions.StatusError
~ipfsapi.exceptions.TimeoutError
Parameters
----------
path : str
The REST command path to send
args : list
Positional parameters to be sent along with the HTTP request
files : :class:`io.RawIOBase` | :obj:`str` | :obj:`list`
The file object(s) or path(s) to stream to the daemon
opts : dict
Query string parameters to be sent along with the HTTP request
decoder : str
The encoder to use to parse the HTTP response
kwargs : dict
Additional arguments to pass to :mod:`requests`
"""
url = self.base + path
params = []
params.append(('stream-channels', 'true'))
for opt in opts.items():
params.append(opt)
for arg in args:
params.append(('arg', arg))
method = 'post' if (files or data) else 'get'
parser = encoding.get_encoding(decoder if decoder else "none")
return self._request(method, url, params, parser, stream,
files, headers, data)
|
Makes a request to the IPFS daemon to download a file.
Downloads a file or files from IPFS into the current working
directory, or the directory given by ``filepath``.
Raises
------
~ipfsapi.exceptions.ErrorResponse
~ipfsapi.exceptions.ConnectionError
~ipfsapi.exceptions.ProtocolError
~ipfsapi.exceptions.StatusError
~ipfsapi.exceptions.TimeoutError
Parameters
----------
path : str
The REST command path to send
filepath : str
The local path where IPFS will store downloaded files.
Defaults to the current working directory.
args : list
Positional parameters to be sent along with the HTTP request
opts : dict
Query string parameters to be sent along with the HTTP request
compress : bool
Whether the downloaded file should be GZip compressed by the
daemon before being sent to the client
kwargs : dict
Additional arguments to pass to :mod:`requests`
|
def download(self, path, args=[], filepath=None, opts={},
compress=True, **kwargs):
"""Makes a request to the IPFS daemon to download a file.
Downloads a file or files from IPFS into the current working
directory, or the directory given by ``filepath``.
Raises
------
~ipfsapi.exceptions.ErrorResponse
~ipfsapi.exceptions.ConnectionError
~ipfsapi.exceptions.ProtocolError
~ipfsapi.exceptions.StatusError
~ipfsapi.exceptions.TimeoutError
Parameters
----------
path : str
The REST command path to send
filepath : str
The local path where IPFS will store downloaded files.
Defaults to the current working directory.
args : list
Positional parameters to be sent along with the HTTP request
opts : dict
Query string parameters to be sent along with the HTTP request
compress : bool
Whether the downloaded file should be GZip compressed by the
daemon before being sent to the client
kwargs : dict
Additional arguments to pass to :mod:`requests`
"""
url = self.base + path
wd = filepath or '.'
params = []
params.append(('stream-channels', 'true'))
params.append(('archive', 'true'))
if compress:
params.append(('compress', 'true'))
for opt in opts.items():
params.append(opt)
for arg in args:
params.append(('arg', arg))
method = 'get'
res = self._do_request(method, url, params=params, stream=True,
**kwargs)
self._do_raise_for_status(res)
# try to stream download as a tar file stream
mode = 'r|gz' if compress else 'r|'
with tarfile.open(fileobj=res.raw, mode=mode) as tf:
tf.extractall(path=wd)
|
A context manager for this client's session.
This function closes the current session when this client goes out of
scope.
|
def session(self):
"""A context manager for this client's session.
This function closes the current session when this client goes out of
scope.
"""
self._session = requests.session()
yield
self._session.close()
self._session = None
|
Make sure that the given daemon version is supported by this client
version.
Raises
------
~ipfsapi.exceptions.VersionMismatch
Parameters
----------
version : str
The version of an IPFS daemon.
minimum : str
The minimal IPFS version to allow.
maximum : str
The maximum IPFS version to allow.
|
def assert_version(version, minimum=VERSION_MINIMUM, maximum=VERSION_MAXIMUM):
"""Make sure that the given daemon version is supported by this client
version.
Raises
------
~ipfsapi.exceptions.VersionMismatch
Parameters
----------
version : str
The version of an IPFS daemon.
minimum : str
The minimal IPFS version to allow.
maximum : str
The maximum IPFS version to allow.
"""
# Convert version strings to integer tuples
version = list(map(int, version.split('-', 1)[0].split('.')))
minimum = list(map(int, minimum.split('-', 1)[0].split('.')))
maximum = list(map(int, maximum.split('-', 1)[0].split('.')))
if minimum > version or version >= maximum:
raise exceptions.VersionMismatch(version, minimum, maximum)
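The comparison relies on Python's lexicographic ordering of integer lists; a standalone sketch with made-up version strings:
def to_tuple(v):
    # Drop any '-suffix' and compare the dotted parts as integers
    return list(map(int, v.split('-', 1)[0].split('.')))

version, minimum, maximum = to_tuple('0.4.22-dev'), to_tuple('0.4.3'), to_tuple('0.5.0')
print(minimum > version or version >= maximum)  # False -> this daemon version is accepted
print(to_tuple('0.5.0') >= maximum)             # True  -> would raise VersionMismatch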
|
Create a new :class:`~ipfsapi.Client` instance and connect to the
daemon to validate that its version is supported.
Raises
------
~ipfsapi.exceptions.VersionMismatch
~ipfsapi.exceptions.ErrorResponse
~ipfsapi.exceptions.ConnectionError
~ipfsapi.exceptions.ProtocolError
~ipfsapi.exceptions.StatusError
~ipfsapi.exceptions.TimeoutError
All parameters are identical to those passed to the constructor of the
:class:`~ipfsapi.Client` class.
Returns
-------
~ipfsapi.Client
|
def connect(host=DEFAULT_HOST, port=DEFAULT_PORT, base=DEFAULT_BASE,
chunk_size=multipart.default_chunk_size, **defaults):
"""Create a new :class:`~ipfsapi.Client` instance and connect to the
daemon to validate that its version is supported.
Raises
------
~ipfsapi.exceptions.VersionMismatch
~ipfsapi.exceptions.ErrorResponse
~ipfsapi.exceptions.ConnectionError
~ipfsapi.exceptions.ProtocolError
~ipfsapi.exceptions.StatusError
~ipfsapi.exceptions.TimeoutError
All parameters are identical to those passed to the constructor of the
:class:`~ipfsapi.Client` class.
Returns
-------
~ipfsapi.Client
"""
# Create client instance
client = Client(host, port, base, chunk_size, **defaults)
# Query version number from daemon and validate it
assert_version(client.version()['Version'])
return client
|
Add a file, or directory of files to IPFS.
.. code-block:: python
>>> with io.open('nurseryrhyme.txt', 'w', encoding='utf-8') as f:
... numbytes = f.write('Mary had a little lamb')
>>> c.add('nurseryrhyme.txt')
{'Hash': 'QmZfF6C9j4VtoCsTp4KSrhYH47QMd3DNXVZBKaxJdhaPab',
'Name': 'nurseryrhyme.txt'}
Parameters
----------
files : str
A filepath to either a file or directory
recursive : bool
Controls if files in subdirectories are added or not
pattern : str | list
Single `*glob* <https://docs.python.org/3/library/glob.html>`_
pattern or list of *glob* patterns and compiled regular expressions
to match the names of the filepaths to keep
trickle : bool
Use trickle-dag format (optimized for streaming) when generating
the dag; see `the FAQ <https://github.com/ipfs/faq/issues/218>`_ for
more information (Default: ``False``)
only_hash : bool
Only chunk and hash, but do not write to disk (Default: ``False``)
wrap_with_directory : bool
Wrap files with a directory object to preserve their filename
(Default: ``False``)
chunker : str
The chunking algorithm to use
pin : bool
Pin this object when adding (Default: ``True``)
Returns
-------
dict: File name and hash of the added file node
|
def add(self, files, recursive=False, pattern='**', *args, **kwargs):
"""Add a file, or directory of files to IPFS.
.. code-block:: python
>>> with io.open('nurseryrhyme.txt', 'w', encoding='utf-8') as f:
... numbytes = f.write('Mary had a little lamb')
>>> c.add('nurseryrhyme.txt')
{'Hash': 'QmZfF6C9j4VtoCsTp4KSrhYH47QMd3DNXVZBKaxJdhaPab',
'Name': 'nurseryrhyme.txt'}
Parameters
----------
files : str
A filepath to either a file or directory
recursive : bool
Controls if files in subdirectories are added or not
pattern : str | list
Single `*glob* <https://docs.python.org/3/library/glob.html>`_
pattern or list of *glob* patterns and compiled regular expressions
to match the names of the filepaths to keep
trickle : bool
Use trickle-dag format (optimized for streaming) when generating
the dag; see `the FAQ <https://github.com/ipfs/faq/issues/218>`_ for
more information (Default: ``False``)
only_hash : bool
Only chunk and hash, but do not write to disk (Default: ``False``)
wrap_with_directory : bool
Wrap files with a directory object to preserve their filename
(Default: ``False``)
chunker : str
The chunking algorithm to use
pin : bool
Pin this object when adding (Default: ``True``)
Returns
-------
dict: File name and hash of the added file node
"""
#PY2: No support for kw-only parameters after glob parameters
opts = {
"trickle": kwargs.pop("trickle", False),
"only-hash": kwargs.pop("only_hash", False),
"wrap-with-directory": kwargs.pop("wrap_with_directory", False),
"pin": kwargs.pop("pin", True)
}
if "chunker" in kwargs:
opts["chunker"] = kwargs.pop("chunker")
kwargs.setdefault("opts", opts)
body, headers = multipart.stream_filesystem_node(
files, recursive, pattern, self.chunk_size
)
return self._client.request('/add', decoder='json',
data=body, headers=headers, **kwargs)
|
Downloads a file, or directory of files from IPFS.
Files are placed in the current working directory.
Parameters
----------
multihash : str
The path to the IPFS object(s) to be outputted
|
def get(self, multihash, **kwargs):
"""Downloads a file, or directory of files from IPFS.
Files are placed in the current working directory.
Parameters
----------
multihash : str
The path to the IPFS object(s) to be outputted
"""
args = (multihash,)
return self._client.download('/get', args, **kwargs)
|
r"""Retrieves the contents of a file identified by hash.
.. code-block:: python
>>> c.cat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
Traceback (most recent call last):
...
ipfsapi.exceptions.Error: this dag node is a directory
>>> c.cat('QmeKozNssnkJ4NcyRidYgDY2jfRZqVEoRGfipkgath71bX')
b'<!DOCTYPE html>\n<html>\n\n<head>\n<title>ipfs example viewer</…'
Parameters
----------
multihash : str
The path to the IPFS object(s) to be retrieved
offset : int
Byte offset to begin reading from
length : int
Maximum number of bytes to read (-1 for all)
Returns
-------
str : File contents
|
def cat(self, multihash, offset=0, length=-1, **kwargs):
r"""Retrieves the contents of a file identified by hash.
.. code-block:: python
>>> c.cat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
Traceback (most recent call last):
...
ipfsapi.exceptions.Error: this dag node is a directory
>>> c.cat('QmeKozNssnkJ4NcyRidYgDY2jfRZqVEoRGfipkgath71bX')
b'<!DOCTYPE html>\n<html>\n\n<head>\n<title>ipfs example viewer</…'
Parameters
----------
multihash : str
The path to the IPFS object(s) to be retrieved
offset : int
Byte offset to begin reading from
length : int
Maximum number of bytes to read (-1 for all)
Returns
-------
str : File contents
"""
opts = {}
if offset != 0:
opts['offset'] = offset
if length != -1:
opts['length'] = length
args = (multihash,)
return self._client.request('/cat', args, opts=opts, **kwargs)
|
Returns a list of objects linked to by the given hash.
.. code-block:: python
>>> c.ls('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
{'Objects': [
{'Hash': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D',
'Links': [
{'Hash': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV',
'Name': 'Makefile', 'Size': 174, 'Type': 2},
…
{'Hash': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTiYwKir8eXJY',
'Name': 'published-version', 'Size': 55, 'Type': 2}
]}
]}
Parameters
----------
multihash : str
The path to the IPFS object(s) to list links from
Returns
-------
dict : Directory information and contents
|
def ls(self, multihash, **kwargs):
"""Returns a list of objects linked to by the given hash.
.. code-block:: python
>>> c.ls('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
{'Objects': [
{'Hash': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D',
'Links': [
{'Hash': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV',
'Name': 'Makefile', 'Size': 174, 'Type': 2},
…
{'Hash': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTiYwKir8eXJY',
'Name': 'published-version', 'Size': 55, 'Type': 2}
]}
]}
Parameters
----------
multihash : str
The path to the IPFS object(s) to list links from
Returns
-------
dict : Directory information and contents
"""
args = (multihash,)
return self._client.request('/ls', args, decoder='json', **kwargs)
|