code | docs
---|---
def generate_null_timeseries(self, ts, mu, sigma):
n = len(ts)
return np.random.normal(mu, sigma, n) | Generate a time series with a given mu and sigma. This serves as the
NULL distribution. |
def compute_balance_mean(self, ts, t):
""" For changed words we expect an increase in the mean, and so only 1 """
return np.mean(ts[t + 1:]) - np.mean(ts[:t + 1]) | Compute the balance. The right end - the left end. |
def compute_balance_mean_ts(self, ts):
balance = [self.compute_balance_mean(ts, t) for t in np.arange(0, len(ts) - 1)]
return balance | Compute the balance at each time 't' of the time series. |
def compute_balance_median(self, ts, t):
return np.median(ts[t + 1:]) - np.median(ts[:t + 1]) | Compute the balance at either end. |
def compute_balance_median_ts(self, ts):
balance = [self.compute_balance_median(ts, t) for t in np.arange(0, len(ts) - 1)]
return balance | Compute the balance at each time 't' of the time series. |
def compute_cusum_ts(self, ts):
mean = np.mean(ts)
cusums = np.zeros(len(ts))
cusums[0] = (ts[0] - mean)
for i in np.arange(1, len(ts)):
cusums[i] = cusums[i - 1] + (ts[i] - mean)
assert np.isclose(cusums[-1], 0.0)
return cusums | Compute the Cumulative Sum at each point 't' of the time series. |
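A minimal sketch of how the CUSUM behaves around a change point, using a standalone copy of the method above; the synthetic series and its shift at index 50 are made up for illustration:

```python
import numpy as np

def cusum(ts):
    # Standalone copy of compute_cusum_ts above: running sum of deviations
    # from the global mean; by construction it returns to ~0 at the end.
    mean = np.mean(ts)
    cusums = np.zeros(len(ts))
    cusums[0] = ts[0] - mean
    for i in np.arange(1, len(ts)):
        cusums[i] = cusums[i - 1] + (ts[i] - mean)
    assert np.isclose(cusums[-1], 0.0)
    return cusums

np.random.seed(0)
ts = np.concatenate([np.random.normal(0, 1, 50), np.random.normal(2, 1, 50)])
print(np.argmax(np.abs(cusum(ts))))  # peaks near the change point, ~50
```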
def detect_mean_shift(self, ts, B=1000):
x = np.arange(0, len(ts))
stat_ts_func = self.compute_balance_mean_ts
null_ts_func = self.shuffle_timeseries
stats_ts, pvals, nums = self.get_ts_stats_significance(x, ts, stat_ts_func, null_ts_func, B=B, permute_fast=True)
return stats_ts, pvals, nums | Detect mean shift in a time series. B is number of bootstrapped
samples to draw. |
def parallelize_func(iterable, func, chunksz=1, n_jobs=16, *args, **kwargs):
chunker = func
chunks = more_itertools.chunked(iterable, chunksz)
chunks_results = Parallel(n_jobs=n_jobs, verbose=50)(
delayed(chunker)(chunk, *args, **kwargs) for chunk in chunks)
results = more_itertools.flatten(chunks_results)
return list(results) | Parallelize a function over each element of an iterable. |
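A usage sketch, assuming joblib and more_itertools are installed and `parallelize_func` above is in scope; `square_chunk` is a hypothetical worker that processes one chunk at a time:

```python
def square_chunk(chunk):
    # The chunker receives a whole chunk (a list) per call.
    return [x * x for x in chunk]

results = parallelize_func(range(1000), square_chunk, chunksz=100, n_jobs=4)
assert results == [x * x for x in range(1000)]
```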
def ts_stats_significance(ts, ts_stat_func, null_ts_func, B=1000, permute_fast=False):
stats_ts = ts_stat_func(ts)
if permute_fast:
# Permute it in 1 shot
null_ts = map(np.random.permutation, np.array([ts, ] * B))
else:
null_ts = np.vstack([null_ts_func(ts) for i in np.arange(0, B)])
stats_null_ts = np.vstack([ts_stat_func(nts) for nts in null_ts])
pvals = []
nums = []
for i in np.arange(0, len(stats_ts)):
num_samples = np.sum((stats_null_ts[:, i] >= stats_ts[i]))
nums.append(num_samples)
pval = num_samples / float(B)
pvals.append(pval)
return stats_ts, pvals, nums | Compute the statistical significance of a test statistic at each point
of the time series. |
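For example, a sketch assuming `ts_stats_significance` above is in scope; the statistic is a module-level version of `compute_balance_mean_ts`, and the change point at index 40 is synthetic:

```python
import numpy as np

def balance_mean_ts(ts):
    # Mean of the right segment minus mean of the left segment, per split point.
    return [np.mean(ts[t + 1:]) - np.mean(ts[:t + 1]) for t in range(len(ts) - 1)]

np.random.seed(0)
ts = np.concatenate([np.random.normal(0, 1, 40), np.random.normal(1.5, 1, 40)])
stats_ts, pvals, nums = ts_stats_significance(
    ts, balance_mean_ts, null_ts_func=None, B=500, permute_fast=True)
print(np.argmin(pvals))  # the smallest p-value should land near index 40
```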
def bootstrap_ts(y, func, B=1000, b=3):
beta_star = np.empty(B)
z = y
z_star = np.empty(len(z))
for boot_i in range(B):
for block_i, start in enumerate(np.random.randint(len(z) - b + 1, size=len(z) // b)):
z_star[block_i * b:(block_i + 1) * b] = z[start:start + b]
beta_star[boot_i] = func(z_star)
return beta_star | Bootstrap a timeseries using a window size:b. |
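A small usage sketch of the moving-block bootstrap above (assuming the integer-division fix; the series length 99 is chosen to be divisible by b=3):

```python
import numpy as np

np.random.seed(1)
y = np.random.normal(0, 1, 99)
theta_star = bootstrap_ts(y, np.mean, B=1000, b=3)
# The bootstrap distribution of the mean centres near the sample mean.
print(round(np.mean(theta_star), 2), round(np.mean(y), 2))
```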
def get_ci(theta_star, blockratio=1.0):
# get rid of nans while we sort
b_star = np.sort(theta_star[~np.isnan(theta_star)])
se = np.std(b_star) * np.sqrt(blockratio)
# bootstrap 95% CI based on empirical percentiles
ci = [b_star[int(len(b_star) * .025)], b_star[int(len(b_star) * .975)]]
return ci | Get the confidence interval. |
def get_pvalue(value, ci):
from scipy.stats import norm
se = (ci[1] - ci[0]) / (2.0 * 1.96)
z = value / se
pvalue = 2 * norm.cdf(-np.abs(z))
return pvalue | Get the p-value from the confidence interval. |
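The arithmetic, with made-up numbers: a 95% CI of width w implies se ≈ w / (2 · 1.96), and the two-sided p-value follows from the normal tail:

```python
from scipy.stats import norm

ci = [-0.5, 2.5]                      # hypothetical bootstrap 95% CI
se = (ci[1] - ci[0]) / (2.0 * 1.96)   # width -> standard error, ~0.765
z = 2.0 / se                          # observed statistic 2.0 -> z ~2.61
print(2 * norm.cdf(-abs(z)))          # two-sided p-value, ~0.009
```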
def ts_stats_significance_bootstrap(ts, stats_ts, stats_func, B=1000, b=3):
pvals = []
for tp in np.arange(0, len(stats_ts)):
pf = partial(stats_func, t=tp)
bs = bootstrap_ts(ts, pf, B=B, b=b)
ci = get_ci(bs, blockratio=float(b) / len(stats_ts))
pval = abs(get_pvalue(stats_ts[tp], ci))
pvals.append(pval)
return pvals | Compute the statistical significance of a test statistic at each point
of the time series by using timeseries bootstrap. |
def extract_relevant_tb(tb, exctype, is_test_failure):
# Skip test runner traceback levels:
while tb and _is_unittest_frame(tb):
tb = tb.tb_next
if is_test_failure:
# Skip assert*() traceback levels:
length = _count_relevant_tb_levels(tb)
return extract_tb(tb, length)
return extract_tb(tb) | Return extracted traceback frame 4-tuples that aren't unittest ones.
This used to be _exc_info_to_string(). |
def _unicode_decode_extracted_tb(extracted_tb):
return [(_decode(file), line_number, _decode(function), _decode(text))
for file, line_number, function, text in extracted_tb] | Return a traceback with the string elements translated into Unicode. |
def _count_relevant_tb_levels(tb):
length = contiguous_unittest_frames = 0
while tb:
length += 1
if _is_unittest_frame(tb):
contiguous_unittest_frames += 1
else:
contiguous_unittest_frames = 0
tb = tb.tb_next
return length - contiguous_unittest_frames | Return the number of frames in ``tb`` before all that's left is unittest frames.
Unlike its namesake in unittest, this doesn't bail out as soon as it hits a
unittest frame, which means we don't bail out as soon as somebody uses the
mock library, which defines ``__unittest``. |
def cmdloop(self, *args, **kwargs):
def unwrapping_raw_input(*args, **kwargs):
"""Call raw_input(), making sure it finds an unwrapped stdout."""
wrapped_stdout = sys.stdout
sys.stdout = wrapped_stdout.stream
ret = orig_raw_input(*args, **kwargs)
sys.stdout = wrapped_stdout
return ret
try:
orig_raw_input = raw_input
except NameError:
orig_raw_input = input
if hasattr(sys.stdout, 'stream'):
__builtin__.raw_input = unwrapping_raw_input
# else if capture plugin has replaced it with a StringIO, don't bother.
try:
# Interesting things happen when you try to not reference the
# superclass explicitly.
ret = cmd.Cmd.cmdloop(self, *args, **kwargs)
finally:
__builtin__.raw_input = orig_raw_input
return ret | Call pdb's cmdloop, making readline work.
Patch raw_input so it sees the original stdin and stdout, lest
readline refuse to work.
The C implementation of raw_input uses readline functionality only if
both stdin and stdout are from a terminal AND are FILE*s (not
PyObject*s): http://bugs.python.org/issue5727 and
https://bugzilla.redhat.com/show_bug.cgi?id=448864 |
def set_trace(*args, **kwargs):
# There's no stream attr if capture plugin is enabled:
out = sys.stdout.stream if hasattr(sys.stdout, 'stream') else None
# Python 2.5 can't put an explicit kwarg and **kwargs in the same function
# call.
kwargs['stdout'] = out
debugger = pdb.Pdb(*args, **kwargs)
# Ordinarily (and in a silly fashion), pdb refuses to use raw_input() if
# you pass it a stream on instantiation. Fix that:
debugger.use_rawinput = True
debugger.set_trace(sys._getframe().f_back) | Call pdb.set_trace, making sure it receives the unwrapped stdout.
This is so we don't keep drawing progress bars over debugger output. |
def begin(self):
# The calls to begin/finalize end up like this: a call to begin() on
# instance A of the plugin, then a paired begin/finalize for each test
# on instance B, then a final call to finalize() on instance A.
# TODO: Do only if isatty.
self._stderr.append(sys.stderr)
sys.stderr = StreamWrapper(sys.stderr, self) # TODO: Any point?
self._stdout.append(sys.stdout)
sys.stdout = StreamWrapper(sys.stdout, self)
self._set_trace.append(pdb.set_trace)
pdb.set_trace = set_trace
self._cmdloop.append(pdb.Pdb.cmdloop)
pdb.Pdb.cmdloop = cmdloop
# nosetests changes directories to the tests dir when run from a
# distribution dir, so save the original cwd for relativizing paths.
self._cwd = '' if self.conf.options.absolute_paths else getcwd() | Make some monkeypatches to dodge progress bar.
Wrap stderr and stdout to keep other users of them from smearing the
progress bar. Wrap some pdb routines to stop showing the bar while in
the debugger. |
def finalize(self, result):
sys.stderr = self._stderr.pop()
sys.stdout = self._stdout.pop()
pdb.set_trace = self._set_trace.pop()
pdb.Pdb.cmdloop = self._cmdloop.pop() | Put monkeypatches back as we found them. |
def configure(self, options, conf):
super(ProgressivePlugin, self).configure(options, conf)
if (getattr(options, 'verbosity', 0) > 1 and
getattr(options, 'enable_plugin_id', False)):
# TODO: Can we forcibly disable the ID plugin?
print ('Using --with-id and --verbosity=2 or higher with '
'nose-progressive causes visualization errors. Remove one '
'or the other to avoid a mess.')
if options.with_bar:
options.with_styling = True | Turn style-forcing on if bar-forcing is on.
It'd be messy to position the bar but still have the rest of the
terminal capabilities emit ''. |
def update(self, test_path, number):
# TODO: Play nicely with absurdly narrow terminals. (OS X's won't even
# go small enough to hurt us.)
# Figure out graph:
GRAPH_WIDTH = 14
# min() is in case we somehow get the total test count wrong. It's tricky.
num_filled = int(round(min(1.0, float(number) / self.max) * GRAPH_WIDTH))
graph = ''.join([self._fill_cap(' ' * num_filled),
self._empty_cap(self._empty_char * (GRAPH_WIDTH - num_filled))])
# Figure out the test identifier portion:
cols_for_path = self.cols - GRAPH_WIDTH - 2 # 2 spaces between path & graph
if len(test_path) > cols_for_path:
test_path = test_path[len(test_path) - cols_for_path:]
else:
test_path += ' ' * (cols_for_path - len(test_path))
# Put them together, and let simmer:
self.last = self._term.bold(test_path) + ' ' + graph
with self._at_last_line():
self.stream.write(self.last)
self.stream.flush() | Draw an updated progress bar.
At the moment, the graph takes a fixed width, and the test identifier
takes the rest of the row, truncated from the left to fit.
test_path -- the selector of the test being run
number -- how many tests have been run so far, including this one |
def erase(self):
with self._at_last_line():
self.stream.write(self._term.clear_eol)
self.stream.flush() | White out the progress bar. |
def dodging(bar):
class ShyProgressBar(object):
"""Context manager that implements a progress bar that gets out of the way"""
def __enter__(self):
"""Erase the progress bar so bits of disembodied progress bar don't get scrolled up the terminal."""
# My terminal has no status line, so we make one manually.
bar._is_dodging += 1 # Increment before calling erase(), which
# calls dodging() again.
if bar._is_dodging <= 1: # It *was* 0.
bar.erase()
def __exit__(self, type, value, tb):
"""Redraw the last saved state of the progress bar."""
if bar._is_dodging == 1: # Can't decrement yet; write() could
# read it.
# This is really necessary only because we monkeypatch
# stderr; the next test is about to start and will redraw
# the bar.
with bar._at_last_line():
bar.stream.write(bar.last)
bar.stream.flush()
bar._is_dodging -= 1
return ShyProgressBar() | Return a context manager which erases the bar, lets you output things, and then redraws the bar.
It's reentrant. |
def _makeResult(self):
return ProgressiveResult(self._cwd,
self._totalTests,
self.stream,
config=self.config) | Return a Result that doesn't print dots.
Nose's ResultProxy will wrap it, and other plugins can still print
stuff---but without smashing into our progress bar, care of
ProgressivePlugin's stderr/out wrapping. |
def _printTraceback(self, test, err):
# Don't bind third item to a local var; that can create
# circular refs which are expensive to collect. See the
# sys.exc_info() docs.
exception_type, exception_value = err[:2]
# TODO: In Python 3, the traceback is attached to the exception
# instance through the __traceback__ attribute. If the instance
# is saved in a local variable that persists outside the except
# block, the traceback will create a reference cycle with the
# current frame and its dictionary of local variables. This will
# delay reclaiming dead resources until the next cyclic garbage
# collection pass.
extracted_tb = extract_relevant_tb(
err[2],
exception_type,
exception_type is test.failureException)
test_frame_index = index_of_test_frame(
extracted_tb,
exception_type,
exception_value,
test)
if test_frame_index:
# We have a good guess at which frame is the test, so
# trim everything until that. We don't care to see test
# framework frames.
extracted_tb = extracted_tb[test_frame_index:]
with self.bar.dodging():
self.stream.write(''.join(
format_traceback(
extracted_tb,
exception_type,
exception_value,
self._cwd,
self._term,
self._options.function_color,
self._options.dim_color,
self._options.editor,
self._options.editor_shortcut_template))) | Print a nicely formatted traceback.
:arg err: exc_info()-style traceback triple
:arg test: the test that precipitated this call |
def _printHeadline(self, kind, test, is_failure=True):
if is_failure or self._options.show_advisories:
with self.bar.dodging():
self.stream.writeln(
'\n' +
(self._term.bold if is_failure else '') +
'%s: %s' % (kind, nose_selector(test)) +
(self._term.normal if is_failure else '')) | Output a 1-line error summary to the stream if appropriate.
The line contains the kind of error and the pathname of the test.
:arg kind: The (string) type of incident that precipitated this call
:arg test: The test that precipitated this call |
def _recordAndPrintHeadline(self, test, error_class, artifact):
# We duplicate the errorclass handling from super rather than calling
# it and monkeying around with showAll flags to keep it from printing
# anything.
is_error_class = False
for cls, (storage, label, is_failure) in self.errorClasses.items():
if isclass(error_class) and issubclass(error_class, cls):
if is_failure:
test.passed = False
storage.append((test, artifact))
is_error_class = True
if not is_error_class:
self.errors.append((test, artifact))
test.passed = False
is_any_failure = not is_error_class or is_failure
self._printHeadline(label if is_error_class else 'ERROR',
test,
is_failure=is_any_failure)
return is_any_failure | Record that an error-like thing occurred, and print a summary.
Store ``artifact`` with the record.
Return whether the test result is any sort of failure. |
def addSkip(self, test, reason):
self._recordAndPrintHeadline(test, SkipTest, reason)
# Python 2.7 users get a little bonus: the reason the test was skipped.
if isinstance(reason, Exception):
reason = getattr(reason, 'message', None) or getattr(
reason, 'args')[0]
if reason and self._options.show_advisories:
with self.bar.dodging():
self.stream.writeln(reason) | Catch skipped tests in Python 2.7 and above.
Though ``addSkip()`` is deprecated in the nose plugin API, it is very
much not deprecated as a Python 2.7 ``TestResult`` method. In Python
2.7, this will get called instead of ``addError()`` for skips.
:arg reason: Text describing why the test was skipped |
def printSummary(self, start, stop):
def renderResultType(type, number, is_failure):
"""Return a rendering like '2 failures'.
:arg type: A singular label, like "failure"
:arg number: The number of tests with a result of that type
:arg is_failure: Whether that type counts as a failure
"""
# I'd rather hope for the best with plurals than totally punt on
# being Englishlike:
ret = '%s %s%s' % (number, type, 's' if number != 1 else '')
if is_failure and number:
ret = self._term.bold(ret)
return ret
# Summarize the special cases:
counts = [('test', self.testsRun, False),
('failure', len(self.failures), True),
('error', len(self.errors), True)]
# Support custom errorclasses as well as normal failures and errors.
# Lowercase any all-caps labels, but leave the rest alone in case there
# are hard-to-read camelCaseWordBreaks.
counts.extend([(label.lower() if label.isupper() else label,
len(storage),
is_failure)
for (storage, label, is_failure) in
self.errorClasses.values() if len(storage)])
summary = (', '.join(renderResultType(*a) for a in counts) +
' in %.1fs' % (stop - start))
# Erase progress bar. Bash doesn't clear the whole line when printing
# the prompt, leaving a piece of the bar. Also, the prompt may not be
# at the bottom of the terminal.
self.bar.erase()
self.stream.writeln()
if self.wasSuccessful():
self.stream.write(self._term.bold_green('OK! '))
self.stream.writeln(summary) | As a final summary, print number of tests, broken down by result. |
def nose_selector(test):
address = test_address(test)
if address:
file, module, rest = address
if module:
if rest:
try:
return '%s:%s%s' % (module, rest, test.test.arg or '')
except AttributeError:
return '%s:%s' % (module, rest)
else:
return module
return 'Unknown test' | Return the string you can pass to nose to run `test`, including argument
values if the test was made by a test generator.
Return "Unknown test" if it can't construct a decent path. |
def human_path(path, cwd):
# TODO: Canonicalize the path to remove /kitsune/../kitsune nonsense.
path = abspath(path)
if cwd and path.startswith(cwd):
path = path[len(cwd) + 1:] # Make path relative. Remove leading slash.
return path | Return the most human-readable representation of the given path.
If an absolute path is given that's within the current directory, convert
it to a relative path to shorten it. Otherwise, return the absolute path. |
def know(self, what, confidence):
if confidence > self.confidence:
self.best = what
self.confidence = confidence
return self | Know something with the given confidence, and return self for chaining.
If confidence is higher than that of what we already know, replace
what we already know with what you're telling us. |
def _generate_arg_types(coordlist_length, shape_name):
from .ds9_region_parser import ds9_shape_defs
from .ds9_attr_parser import ds9_shape_in_comment_defs
if shape_name in ds9_shape_defs:
shape_def = ds9_shape_defs[shape_name]
else:
shape_def = ds9_shape_in_comment_defs[shape_name]
initial_arg_types = shape_def.args_list
arg_repeats = shape_def.args_repeat
if arg_repeats is None:
return initial_arg_types
# repeat args between n1 and n2
n1, n2 = arg_repeats
arg_types = list(initial_arg_types[:n1])
num_of_repeats = coordlist_length - (len(initial_arg_types) - n2)
arg_types.extend((num_of_repeats - n1) //
(n2 - n1) * initial_arg_types[n1:n2])
arg_types.extend(initial_arg_types[n2:])
return arg_types | Find coordinate types based on shape name and coordlist length
This function returns a list of coordinate types based on which
coordinates can be repeated for a given type of shape.
Parameters
----------
coordlist_length : int
The number of coordinates or arguments used to define the shape.
shape_name : str
One of the names in `pyregion.ds9_shape_defs`.
Returns
-------
arg_types : list
A list of objects from `pyregion.region_numbers` with a length equal to
coordlist_length. |
def convert_to_imagecoord(shape, header):
arg_types = _generate_arg_types(len(shape.coord_list), shape.name)
new_coordlist = []
is_even_distance = True
coord_list_iter = iter(zip(shape.coord_list, arg_types))
new_wcs = WCS(header)
pixel_scales = proj_plane_pixel_scales(new_wcs)
for coordinate, coordinate_type in coord_list_iter:
if coordinate_type == CoordOdd:
even_coordinate = next(coord_list_iter)[0]
old_coordinate = SkyCoord(coordinate, even_coordinate,
frame=shape.coord_format, unit='degree',
obstime='J2000')
new_coordlist.extend(
float(x)
for x in old_coordinate.to_pixel(new_wcs, origin=1)
)
elif coordinate_type == Distance:
if arg_types[-1] == Angle:
degree_per_pixel = pixel_scales[0 if is_even_distance else 1]
is_even_distance = not is_even_distance
else:
degree_per_pixel = np.sqrt(proj_plane_pixel_area(new_wcs))
new_coordlist.append(coordinate / degree_per_pixel)
elif coordinate_type == Angle:
new_angle = _estimate_angle(coordinate,
shape.coord_format,
header)
new_coordlist.append(new_angle)
else:
new_coordlist.append(coordinate)
return new_coordlist | Convert the coordlist of `shape` to image coordinates
Parameters
----------
shape : `pyregion.parser_helper.Shape`
The `Shape` to convert coordinates
header : `~astropy.io.fits.Header`
Specifies what WCS transformations to use.
Returns
-------
new_coordlist : list
A list of image coordinates defining the shape. |
def get_auth_info():
env_username = os.environ.get('JOTTACLOUD_USERNAME')
env_password = os.environ.get('JOTTACLOUD_PASSWORD')
netrc_auth = None
try:
netrc_file = netrc.netrc()
netrc_auth = netrc_file.authenticators('jottacloud.com')
except IOError:
# .netrc file doesn't exist
pass
netrc_username = None
netrc_password = None
if netrc_auth:
netrc_username, _, netrc_password = netrc_auth
username = env_username or netrc_username
password = env_password or netrc_password
if not (username and password):
raise JFSError('Could not find username and password in either env or ~/.netrc, '
'you need to add one of these to use these tools')
return (username, password) | Get authentication details for Jottacloud.
Will first check environment variables, then the .netrc file. |
def calculate_md5(fileobject, size=2**16):
fileobject.seek(0)
md5 = hashlib.md5()
for data in iter(lambda: fileobject.read(size), b''):
if not data: break
if isinstance(data, six.text_type):
data = data.encode('utf-8') # md5 needs a byte string
md5.update(data)
fileobject.seek(0) # rewind read head
return md5.hexdigest() | Utility function to calculate md5 hashes while being light on memory usage.
By reading the fileobject piece by piece, we are able to process content that
is larger than available memory |
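A quick self-check sketch against hashlib over the same bytes, with io.BytesIO standing in for a real file object:

```python
import hashlib
import io

payload = b'hello world' * 10000
fobj = io.BytesIO(payload)
assert calculate_md5(fobj) == hashlib.md5(payload).hexdigest()
assert fobj.tell() == 0  # the read head is rewound for the caller
```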
def deleted(self):
"Return datetime.datetime or None if the file isn't deleted"
_d = self.folder.attrib.get('deleted', None)
if _d is None: return None
return dateutil.parser.parse(str(_d)) | Return datetime.datetime or None if the file isn't deleted |
def sync(self):
'Update state of folder from Jottacloud server'
log.info("syncing %r" % self.path)
self.folder = self.jfs.get(self.path)
self.synced = True | Update state of folder from Jottacloud server |
def mkdir(self, foldername):
'Create a new subfolder and return the new JFSFolder'
#url = '%s?mkDir=true' % posixpath.join(self.path, foldername)
url = posixpath.join(self.path, foldername)
params = {'mkDir':'true'}
r = self.jfs.post(url, params)
self.sync()
return r | Create a new subfolder and return the new JFSFolder |
def delete(self):
'Delete this folder and return a deleted JFSFolder'
#url = '%s?dlDir=true' % self.path
params = {'dlDir':'true'}
r = self.jfs.post(self.path, params)
self.sync()
return r | Delete this folder and return a deleted JFSFolder |
def hard_delete(self):
'Deletes without possibility to restore'
url = 'https://www.jottacloud.com/rest/webrest/%s/action/delete' % self.jfs.username
data = {'paths[]': self.path.replace(JFS_ROOT, ''),
'web': 'true',
'ts': int(time.time()),
'authToken': 0}
r = self.jfs.post(url, content=data)
return r | Deletes without possibility to restore |
def rename(self, newpath):
"Move folder to a new name, possibly a whole new path"
# POST https://www.jottacloud.com/jfs/**USERNAME**/Jotta/Sync/Ny%20mappe?mvDir=/**USERNAME**/Jotta/Sync/testFolder
#url = '%s?mvDir=/%s%s' % (self.path, self.jfs.username, newpath)
params = {'mvDir':'/%s%s' % (self.jfs.username, newpath)}
r = self.jfs.post(self.path,
extra_headers={'Content-Type':'application/octet-stream'},
params=params)
return r | Move folder to a new name, possibly a whole new path |
def factory(fileobject, jfs, parentpath): # fileobject from lxml.objectify
'Class method to get the correct file class instantiated'
if hasattr(fileobject, 'currentRevision'): # a normal file
return JFSFile(fileobject, jfs, parentpath)
elif str(fileobject.latestRevision.state) == ProtoFile.STATE_INCOMPLETE:
return JFSIncompleteFile(fileobject, jfs, parentpath)
elif str(fileobject.latestRevision.state) == ProtoFile.STATE_CORRUPT:
return JFSCorruptFile(fileobject, jfs, parentpath)
else:
raise NotImplementedError('No JFS*File support for state %r. Please file a bug!' % fileobject.latestRevision.state) | Class method to get the correct file class instantiated |
def created(self):
'return datetime.datetime'
return dateutil.parser.parse(str(self.f.latestRevision.created)) | return datetime.datetime |
def modified(self):
'return datetime.datetime'
return dateutil.parser.parse(str(self.f.latestRevision.modified)) | return datetime.datetime |
def updated(self):
'return datetime.datetime'
return dateutil.parser.parse(str(self.f.latestRevision.updated)) | return datetime.datetime |
def size(self):
if hasattr(self.f.latestRevision, 'size'):
return int(self.f.latestRevision.size)
return None | Bytes uploaded of the file so far.
Note that we only have the file size if the file was requested directly,
not if it's part of a folder listing. |
def stream(self, chunk_size=64*1024):
'Returns a generator to iterate over the file contents'
#return self.jfs.stream(url='%s?mode=bin' % self.path, chunk_size=chunk_size)
return self.jfs.stream(url=self.path, params={'mode':'bin'}, chunk_size=chunk_size) | Returns a generator to iterate over the file contents |
def readpartial(self, start, end):
'Get a part of the file, from start byte to end byte (integers)'
#return self.jfs.raw('%s?mode=bin' % self.path,
return self.jfs.raw(url=self.path, params={'mode':'bin'},
# note that we deduct 1 from end because
# in http Range requests, the end value is included in the slice,
# whereas in python, it is not
extra_headers={'Range':'bytes=%s-%s' % (start, end-1)}) | Get a part of the file, from start byte to end byte (integers) |
def write(self, data):
'Put, possibly replace, file contents with (new) data'
if not hasattr(data, 'read'):
data = six.BytesIO(data) #StringIO(data)
self.jfs.up(self.path, data) | Put, possibly replace, file contents with (new) data |
def delete(self):
'Delete this file and return the new, deleted JFSFile'
#url = '%s?dl=true' % self.path
r = self.jfs.post(url=self.path, params={'dl':'true'})
return r | Delete this file and return the new, deleted JFSFile |
def thumb(self, size=BIGTHUMB):
'''Get a thumbnail as string or None if the file isn't an image
size would be one of JFSFile.BIGTHUMB, .MEDIUMTHUMB, .SMALLTHUMB or .XLTHUMB'''
if not self.is_image():
return None
if size not in (self.BIGTHUMB, self.MEDIUMTHUMB, self.SMALLTHUMB, self.XLTHUMB):
raise JFSError('Invalid thumbnail size: %s for image %s' % (size, self.path))
#return self.jfs.raw('%s?mode=thumb&ts=%s' % (self.path, size))
return self.jfs.raw(url=self.path,
params={'mode':'thumb', 'ts':size}) | Get a thumbnail as string or None if the file isn't an image
size would be one of JFSFile.BIGTHUMB, .MEDIUMTHUMB, .SMALLTHUMB or .XLTHUMB |
def created(self):
'return datetime.datetime'
return dateutil.parser.parse(str(self.f.currentRevision.created)) | return datetime.datetime |
def modified(self):
'return datetime.datetime'
return dateutil.parser.parse(str(self.f.currentRevision.modified)) | return datetime.datetime |
def updated(self):
'return datetime.datetime'
return dateutil.parser.parse(str(self.f.currentRevision.updated)) | return datetime.datetime |
def contents(self, path=None):
if hasattr(path, 'name'):
log.debug("passed an object, use .'name' as path value")
# passed an object, use .'name' as path value
path = '/%s' % path.name
c = self._jfs.get('%s%s' % (self.path, path or '/'))
return c | Get _all_ metadata for this device.
Call this method if you have the lite/abbreviated device info from e.g. <user/>. |
def files(self, mountPoint):
if isinstance(mountPoint, six.string_types):
# shortcut: pass a mountpoint name
mountPoint = self.mountPoints[mountPoint]
try:
return [JFSFile(f, self, parentpath='%s/%s' % (self.path, mountPoint.name)) for f in self.contents(mountPoint).files.iterchildren()]
except AttributeError as err:
# no files at all
return [] | Get an iterator of JFSFile() from the given mountPoint.
"mountPoint" may be either an actual mountPoint element from JFSDevice.mountPoints{} or its .name. |
def folders(self, mountPoint):
if isinstance(mountPoint, six.string_types):
# shortcut: pass a mountpoint name
mountPoint = self.mountPoints[mountPoint]
try:
return [JFSFolder(f, self, parentpath='%s/%s' % (self.path, mountPoint.name)) for f in self.contents(mountPoint).folders.iterchildren()]
except AttributeError as err:
# no files at all
return [] | Get an iterator of JFSFolder() from the given mountPoint.
"mountPoint" may be either an actual mountPoint element from JFSDevice.mountPoints{} or its .name. |
def new_mountpoint(self, name):
url = posixpath.join(self.path, name)
r = self._jfs.post(url, extra_headers={'content-type': 'application/x-www-form-urlencoded'})
return r | Create a new mountpoint |
def sharedFiles(self):
'iterate over shared files and get their public URI'
for f in self.sharing.files.iterchildren():
yield (f.attrib['name'], f.attrib['uuid'],
'https://www.jottacloud.com/p/%s/%s' % (self.jfs.username, f.publicURI.text)) | iterate over shared files and get their public URI |
def files(self):
'iterate over found files'
for _f in self.searchresult.files.iterchildren():
yield ProtoFile.factory(_f, jfs=self.jfs, parentpath=unicode(_f.abspath)) | iterate over found files |
def request(self, url, extra_headers=None, params=None):
'Make a GET request for url, with or without caching'
if not url.startswith('http'):
# relative url
url = self.rootpath + url
log.debug("getting url: %r, extra_headers=%r, params=%r", url, extra_headers, params)
if extra_headers is None: extra_headers={}
r = self.session.get(url, headers=extra_headers, params=params, timeout=1800) #max retries is set in __init__
if r.status_code in ( 500, ):
raise JFSError(r.reason)
return r | Make a GET request for url, with or without caching |
def raw(self, url, extra_headers=None, params=None):
'Make a GET request for url and return whatever content we get'
r = self.request(url, extra_headers=extra_headers, params=params)
# uncomment to dump raw xml
# with open('/tmp/%s.xml' % time.time(), 'wb') as f:
#     f.write(r.content)
if not r.ok:
o = lxml.objectify.fromstring(r.content)
JFSError.raiseError(o, url)
return r.content | Make a GET request for url and return whatever content we get |
def stream(self, url, params=None, chunk_size=64*1024):
'Iterator to get remote content by chunk_size (bytes)'
r = self.request(url, params=params)
for chunk in r.iter_content(chunk_size):
yield chunk | Iterator to get remote content by chunk_size (bytes) |
def new_device(self, name, type):
# at least the android client also includes a "cid" which is derived from the unique device id
# and encrypted with a public key in the apk. The field appears to be optional
url = posixpath.join(self.rootpath, name)
r = self.post(url, {'type': type})
return r | Create a new (backup) device on jottacloud. Types can be one of
['workstation', 'imac', 'laptop', 'macbook', 'ipad', 'android', 'iphone', 'windows_phone'] |
def devices(self):
'return generator of configured devices'
return self.fs is not None and [JFSDevice(d, self, parentpath=self.rootpath) for d in self.fs.devices.iterchildren()] or [] | return generator of configured devices |
def parse(region_string):
rp = RegionParser()
ss = rp.parse(region_string)
sss1 = rp.convert_attr(ss)
sss2 = _check_wcs(sss1)
shape_list, comment_list = rp.filter_shape2(sss2)
return ShapeList(shape_list, comment_list=comment_list) | Parse DS9 region string into a ShapeList.
Parameters
----------
region_string : str
Region string
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape` |
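For example, parsing a one-shape DS9 string (assuming pyregion is installed; the coordinates are made up):

```python
import pyregion

region_string = 'fk5;circle(10.5,41.2,0.05) # color=red'
shapes = pyregion.parse(region_string)
print(shapes[0].name, shapes[0].coord_list)  # -> circle [10.5, 41.2, 0.05]
```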
def open(fname):
with _builtin_open(fname) as fh:
region_string = fh.read()
return parse(region_string) | Open, read and parse DS9 region file.
Parameters
----------
fname : str
Filename
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape` |
def read_region(s):
rp = RegionParser()
ss = rp.parse(s)
sss1 = rp.convert_attr(ss)
sss2 = _check_wcs(sss1)
shape_list = rp.filter_shape(sss2)
return ShapeList(shape_list) | Read region.
Parameters
----------
s : str
Region string
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape` |
def read_region_as_imagecoord(s, header):
rp = RegionParser()
ss = rp.parse(s)
sss1 = rp.convert_attr(ss)
sss2 = _check_wcs(sss1)
sss3 = rp.sky_to_image(sss2, header)
shape_list = rp.filter_shape(sss3)
return ShapeList(shape_list) | Read region as image coordinates.
Parameters
----------
s : str
Region string
header : `~astropy.io.fits.Header`
FITS header
Returns
-------
shapes : `~pyregion.ShapeList`
List of `~pyregion.Shape` |
def get_mask(region, hdu, origin=1):
from pyregion.region_to_filter import as_region_filter
data = hdu.data
region_filter = as_region_filter(region, origin=origin)
mask = region_filter.mask(data)
return mask | Get mask.
Parameters
----------
region : `~pyregion.ShapeList`
List of `~pyregion.Shape`
hdu : `~astropy.io.fits.ImageHDU`
FITS image HDU
origin : float
TODO: document me
Returns
-------
mask : `~numpy.array`
Boolean mask
Examples
--------
>>> from astropy.io import fits
>>> from pyregion import read_region_as_imagecoord, get_mask
>>> hdu = fits.open("test.fits")[0]
>>> region = "test01.reg"
>>> reg = read_region_as_imagecoord(open(region).read(), hdu.header)
>>> mask = get_mask(reg, hdu) |
def as_imagecoord(self, header):
comment_list = self._comment_list
if comment_list is None:
comment_list = cycle([None])
r = RegionParser.sky_to_image(zip(self, comment_list),
header)
shape_list, comment_list = zip(*list(r))
return ShapeList(shape_list, comment_list=comment_list) | New shape list in image coordinates.
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header
Returns
-------
shape_list : `ShapeList`
New shape list, with coordinates of the each shape
converted to the image coordinate using the given header
information. |
def get_mpl_patches_texts(self, properties_func=None,
text_offset=5.0,
origin=1):
from .mpl_helper import as_mpl_artists
patches, txts = as_mpl_artists(self, properties_func,
text_offset,
origin=origin)
return patches, txts | Often, the regions files implicitly assume the lower-left
corner of the image as a coordinate (1,1). However, the python
convention is that the array index starts from 0. By default
(``origin=1``), coordinates of the returned mpl artists have
coordinate shifted by (1, 1). If you do not want this shift,
use ``origin=0``. |
def get_filter(self, header=None, origin=1):
from .region_to_filter import as_region_filter
if header is None:
if not self.check_imagecoord():
raise RuntimeError("the region has non-image coordinate. header is required.")
reg_in_imagecoord = self
else:
reg_in_imagecoord = self.as_imagecoord(header)
region_filter = as_region_filter(reg_in_imagecoord, origin=origin)
return region_filter | Get filter.
Often, the regions files implicitly assume the lower-left
corner of the image as a coordinate (1,1). However, the python
convention is that the array index starts from 0. By default
(``origin=1``), coordinates of the returned mpl artists have
coordinate shifted by (1, 1). If you do not want this shift,
use ``origin=0``.
Parameters
----------
header : `astropy.io.fits.Header`
FITS header
origin : {0, 1}
Pixel coordinate origin
Returns
-------
filter : TODO
Filter object |
def get_mask(self, hdu=None, header=None, shape=None):
if hdu and header is None:
header = hdu.header
if hdu and shape is None:
shape = hdu.data.shape
region_filter = self.get_filter(header=header)
mask = region_filter.mask(shape)
return mask | Create a 2-d mask.
Parameters
----------
hdu : `astropy.io.fits.ImageHDU`
FITS image HDU
header : `~astropy.io.fits.Header`
FITS header
shape : tuple
Image shape
Returns
-------
mask : `numpy.array`
Boolean mask
Examples
--------
get_mask(hdu=f[0])
get_mask(shape=(10,10))
get_mask(header=f[0].header, shape=(10,10)) |
def AppConfigFlagHandler(feature=None):
if not current_app:
log.warn(u"Got a request to check for {feature} but we're outside the request context. Returning False".format(feature=feature))
return False
try:
return current_app.config[FEATURE_FLAGS_CONFIG][feature]
except (AttributeError, KeyError):
raise NoFeatureFlagFound() | This is the default handler. It checks for feature flags in the current app's configuration.
For example, to have 'unfinished_feature' hidden in production but active in development:
config.py
class ProductionConfig(Config):
FEATURE_FLAGS = {
'unfinished_feature' : False,
}
class DevelopmentConfig(Config):
FEATURE_FLAGS = {
'unfinished_feature' : True,
} |
def is_active(feature):
if current_app:
feature_flagger = current_app.extensions.get(EXTENSION_NAME)
if feature_flagger:
return feature_flagger.check(feature)
else:
raise AssertionError("Oops. This application doesn't have the Flask-FeatureFlag extension installed.")
else:
log.warn(u"Got a request to check for {feature} but we're running outside the request context. Check your setup. Returning False".format(feature=feature))
return False | Check if a feature is active |
def is_active_feature(feature, redirect_to=None, redirect=None):
def _is_active_feature(func):
@wraps(func)
def wrapped(*args, **kwargs):
if not is_active(feature):
url = redirect_to
if redirect:
url = url_for(redirect)
if url:
log.debug(u'Feature {feature} is off, redirecting to {url}'.format(feature=feature, url=url))
return _redirect(url, code=302)
else:
log.debug(u'Feature {feature} is off, aborting request'.format(feature=feature))
abort(404)
return func(*args, **kwargs)
return wrapped
return _is_active_feature | Decorator for Flask views. If a feature is off, it can either return a 404 or redirect to a URL if you'd rather. |
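A usage sketch on a Flask view, assuming the extension is packaged as flask_featureflags (the route and flag name are made up):

```python
from flask import Flask
import flask_featureflags as feature

app = Flask(__name__)
app.config['FEATURE_FLAGS'] = {'unfinished_feature': False}
feature.FeatureFlag(app)

@app.route('/new')
@feature.is_active_feature('unfinished_feature', redirect_to='/')
def new_page():
    # With the flag off, requests are redirected to '/' instead.
    return 'Only visible when unfinished_feature is on.'
```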
def init_app(self, app):
app.config.setdefault(FEATURE_FLAGS_CONFIG, {})
app.config.setdefault(RAISE_ERROR_ON_MISSING_FEATURES, False)
if hasattr(app, "add_template_test"):
# flask 0.10 and higher has a proper hook
app.add_template_test(self.check, name=self.JINJA_TEST_NAME)
else:
app.jinja_env.tests[self.JINJA_TEST_NAME] = self.check
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions[EXTENSION_NAME] = self | Add ourselves into the app config and setup, and add a jinja function test |
def check(self, feature):
found = False
for handler in self.handlers:
try:
if handler(feature):
return True
except StopCheckingFeatureFlags:
return False
except NoFeatureFlagFound:
pass
else:
found = True
if not found:
message = u"No feature flag defined for {feature}".format(feature=feature)
if current_app.debug and current_app.config.get(RAISE_ERROR_ON_MISSING_FEATURES, False):
raise KeyError(message)
else:
log.info(message)
missing_feature.send(self, feature=feature)
return False | Loop through all our feature flag checkers and return true if any of them are true.
The order of handlers matters - we will immediately return True if any handler returns true.
If you want a handler to return False and stop the chain, raise the StopCheckingFeatureFlags exception. |
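A sketch of a custom handler exploiting those chain semantics; the banned-user policy is hypothetical, and the exception names are assumed importable from flask_featureflags:

```python
from flask import g
from flask_featureflags import NoFeatureFlagFound, StopCheckingFeatureFlags

def banned_user_handler(feature=None):
    # Hypothetical policy: every flag reads as off for banned users.
    if getattr(g, 'user_is_banned', False):
        raise StopCheckingFeatureFlags()  # short-circuit the chain to False
    raise NoFeatureFlagFound()            # otherwise defer to the next handler
```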
def get_attr(attr_list, global_attrs):
local_attr = [], {}
for kv in attr_list:
keyword = kv[0]
if len(kv) == 1:
local_attr[0].append(keyword)
continue
elif len(kv) == 2:
value = kv[1]
elif len(kv) > 2:
value = kv[1:]
if keyword == 'tag':
local_attr[1].setdefault(keyword, set()).add(value)
else:
local_attr[1][keyword] = value
attr0 = copy.copy(global_attrs[0])
attr1 = copy.copy(global_attrs[1])
if local_attr[0]:
attr0.extend(local_attr[0])
if local_attr[1]:
attr1.update(local_attr[1])
return attr0, attr1 | Parameters
----------
attr_list : list
A list of (keyword, value) tuple pairs
global_attrs : tuple(list, dict)
Global attributes which update the local attributes |
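A worked example with made-up DS9 attributes, assuming `get_attr` above is in scope: local values override globals, while 'tag' accumulates into a set:

```python
global_attrs = (['dash'], {'color': 'green'})
attr_list = [('color', 'red'), ('tag', 'group1')]
attr0, attr1 = get_attr(attr_list, global_attrs)
print(attr0)  # ['dash']
print(attr1)  # {'color': 'red', 'tag': {'group1'}}
```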
def yank_path(self, path):
for func in self._caches:
for key in list(self._caches[func].keys()):
log.debug("cache key %s for func %s", key, func)
if path in key[0]:
log.debug("del cache key %s", key)
del self._caches[func][key] | Clear cache of results from a specific path |
def _getpath(self, path):
"A wrapper of JFS.getObject(), with some tweaks that make sense in a file system."
if is_blacklisted(path):
raise JottaFuseError('Blacklisted file, refusing to retrieve it')
return self.client.getObject(path) | A wrapper of JFS.getObject(), with some tweaks that make sense in a file system. |
def release(self, path, fh):
"Run after a read or write operation has finished. This is where we upload on writes"
#print "release! inpath:", path in self.__newfiles.keys()
# if the path exists in self.__newfiles.keys(), we have a new version to upload
try:
f = self.__newfiles[path] # make a local shortcut to StringIO object
f.seek(0, os.SEEK_END)
if f.tell() > 0: # file has length
self.client.up(path, f) # upload to jottacloud
del self.__newfiles[path]
del f
self._dirty(path)
except KeyError:
pass
return ESUCCESS | Run after a read or write operation has finished. This is where we upload on writes |
def symlink(self, linkname, existing_file):
log.info("***SYMLINK* %s (link) -> %s (existing)", linkname, existing_file)
sourcepath = os.path.abspath(existing_file)
if not os.path.exists(sourcepath): # broken symlink
raise OSError(errno.ENOENT, '')
try:
with open(sourcepath) as sourcefile:
self.client.up(linkname, sourcefile)
return ESUCCESS
except Exception as e:
log.exception(e)
raise OSError(errno.ENOENT, '') | Called to create a symlink `target -> source` (e.g. ln -s existing_file linkname). In jottafuse, we upload the _contents_ of source.
This is a handy shortcut for streaming uploads directly from disk, without reading the file
into memory first |
def truncate(self, path, length, fh=None):
"Download existing path, truncate and reupload"
try:
f = self._getpath(path)
except JFS.JFSError:
raise OSError(errno.ENOENT, '')
if isinstance(f, (JFS.JFSFile, JFS.JFSFolder)) and f.is_deleted():
raise OSError(errno.ENOENT)
data = StringIO(f.read())
data.truncate(length)
try:
self.client.up(path, data) # replace file contents
self._dirty(path)
return ESUCCESS
except:
raise OSError(errno.ENOENT, '') | Download existing path, truncate and reupload |
def commandline_text(bytestring):
'Convert bytestring from command line to unicode, using default file system encoding'
if six.PY3:
return bytestring
unicode_string = bytestring.decode(sys.getfilesystemencoding())
return unicode_string | Convert bytestring from command line to unicode, using default file system encoding |
def sky_to_image(shape_list, header):
for shape, comment in shape_list:
if isinstance(shape, Shape) and \
(shape.coord_format not in image_like_coordformats):
new_coords = convert_to_imagecoord(shape, header)
l1n = copy.copy(shape)
l1n.coord_list = new_coords
l1n.coord_format = "image"
yield l1n, comment
elif isinstance(shape, Shape) and shape.coord_format == "physical":
if header is None:
raise RuntimeError("Physical coordinate is not known.")
new_coordlist = convert_physical_to_imagecoord(shape, header)
l1n = copy.copy(shape)
l1n.coord_list = new_coordlist
l1n.coord_format = "image"
yield l1n, comment
else:
yield shape, comment | Converts a `ShapeList` into shapes with coordinates in image coordinates
Parameters
----------
shape_list : `pyregion.ShapeList`
The ShapeList to convert
header : `~astropy.io.fits.Header`
Specifies what WCS transformations to use.
Yields
-------
shape, comment : Shape, str
Shape with image coordinates and the associated comment
Note
----
The comments in the original `ShapeList` are unaltered |
def on_created(self, event, dry_run=False, remove_uploaded=True):
'Called when a file (or directory) is created.'
super(ArchiveEventHandler, self).on_created(event)
log.info("created: %s", event) | Called when a file (or directory) is created. |
def _estimate_angle(angle, reg_coordinate_frame, header):
y_axis_rot = _calculate_rotation_angle(reg_coordinate_frame, header)
return angle - y_axis_rot | Transform an angle into a different frame
Parameters
----------
angle : float, int
The number of degrees, measured from the Y axis in origin's frame
reg_coordinate_frame : str
Coordinate frame in which ``angle`` is defined
header : `~astropy.io.fits.Header` instance
Header describing the image
Returns
-------
angle : float
The angle, measured from the Y axis in the WCS defined by ``header`` |
def sf(f, dirpath, jottapath):
log.debug('Create SyncFile from %s', repr(f))
log.debug('Got encoded filename %r, joining with dirpath %r', _encode_filename_to_filesystem(f), dirpath)
return SyncFile(localpath=os.path.join(dirpath, _encode_filename_to_filesystem(f)),
jottapath=posixpath.join(_decode_filename_to_unicode(jottapath), _decode_filename_to_unicode(f))) | Create and return a SyncFile tuple from filename.
localpath will be a byte string with utf8 code points
jottapath will be a unicode string |
def get_jottapath(localtopdir, dirpath, jottamountpoint):
log.debug("get_jottapath %r %r %r", localtopdir, dirpath, jottamountpoint)
normpath = posixpath.normpath(posixpath.join(jottamountpoint, posixpath.basename(localtopdir),
posixpath.relpath(dirpath, localtopdir)))
return _decode_filename_to_unicode(normpath) | Translate localtopdir to jottapath. Returns unicode string |
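For instance, with hypothetical paths (assuming `get_jottapath` and its helpers above are in scope):

```python
print(get_jottapath('/home/alice/Pictures',
                    '/home/alice/Pictures/2015/summer',
                    'Archive'))
# -> Archive/Pictures/2015/summer
```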
def is_file(jottapath, JFS):
log.debug("is_file %r", jottapath)
try:
jf = JFS.getObject(jottapath)
except JFSNotFoundError:
return False
return isinstance(jf, JFSFile) | Check if a file exists on jottacloud |
def filelist(jottapath, JFS):
log.debug("filelist %r", jottapath)
try:
jf = JFS.getObject(jottapath)
except JFSNotFoundError:
return set() # folder does not exist, so pretend it is an empty folder
if not isinstance(jf, JFSFolder):
return False
return set([f.name for f in jf.files() if not f.is_deleted()]) | Get a set() of files from a jottapath (a folder) |
def folderlist(jottapath, JFS):
logging.debug("folderlist %r", jottapath)
try:
jf = JFS.getObject(jottapath)
except JFSNotFoundError:
return set() # folder does not exist, so pretend it is an empty folder
if not isinstance(jf, JFSFolder):
return False
return set([f.name for f in jf.folders() if not f.is_deleted()]) | Get a set() of folders from a jottapath (a folder) |
def _encode_filename_to_filesystem(f):
'''Get a unicode filename and return bytestring, encoded to file system default.
If the argument already is a bytestring, return as is'''
log.debug('_encode_filename_to_filesystem(%s)', repr(f))
if isinstance(f, str):
return f
try:
return f.encode(sys.getfilesystemencoding())
except UnicodeEncodeError:
raise | Get a unicode filename and return bytestring, encoded to file system default.
If the argument already is a bytestring, return as is |
def new(localfile, jottapath, JFS):
with open(localfile) as lf:
_new = JFS.up(jottapath, lf)
return _new | Upload a new file from local disk (doesn't exist on JottaCloud).
Returns JottaFile object |
def resume(localfile, jottafile, JFS):
with open(localfile) as lf:
_complete = jottafile.resume(lf)
return _complete | Continue uploading a new file from local file (already exists on JottaCloud) |
def replace_if_changed(localfile, jottapath, JFS):
jf = JFS.getObject(jottapath)
lf_hash = getxattrhash(localfile) # try to read previous hash, stored in xattr
if lf_hash is None: # no valid hash found in xattr,
with open(localfile) as lf:
lf_hash = calculate_md5(lf) # (re)calculate it
if type(jf) == JFSIncompleteFile:
log.debug("Local file %s is incompletely uploaded, continue", localfile)
return resume(localfile, jf, JFS)
elif jf.md5 == lf_hash: # hashes are the same
log.debug("hash match (%s), file contents haven't changed", lf_hash)
setxattrhash(localfile, lf_hash)
return jf # return the version from jottacloud
else:
setxattrhash(localfile, lf_hash)
return new(localfile, jottapath, JFS) | Compare md5 hash to determine if contents have changed.
Upload a file from local disk and replace file on JottaCloud if the md5s differ,
or continue uploading if the file is incompletely uploaded.
Returns the JottaFile object |