text_prompt stringlengths 100-17.7k ⌀ | code_prompt stringlengths 7-9.86k ⌀ |
---|---|
<SYSTEM_TASK:>
Normalize each column vector in mat to length
<END_TASK>
<USER_TASK:>
Description:
def vector_normalize(mat, max_vec_norm=1.):
""" Normalize each column vector in mat to length
max_vec_norm if it is longer than max_vec_norm
""" |
assert mat.flags.c_contiguous
n, m = mat.shape
vector_normalize_kernel.prepared_call(
(m, 1, 1), (32, 1, 1),
mat.gpudata,
np.float32(max_vec_norm),
np.int32(m),
np.int32(n)) |
<SYSTEM_TASK:>
splits a string into a list of tokens
<END_TASK>
<USER_TASK:>
Description:
def tokenize_by_number(s):
""" splits a string into a list of tokens
each is either a string containing no numbers
or a float """ |
r = find_number(s)
if r is None:
return [ s ]
else:
tokens = []
if r[0] > 0:
tokens.append(s[0:r[0]])
tokens.append( float(s[r[0]:r[1]]) )
if r[1] < len(s):
tokens.extend(tokenize_by_number(s[r[1]:]))
return tokens
assert False |
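For illustration, a minimal usage sketch (assuming find_number returns the (start, end) indices of the first digit run in the string, as the implementation implies; the inputs are made up):
tokens = tokenize_by_number("foo12bar")
assert tokens == ['foo', 12.0, 'bar']
assert tokenize_by_number("plain") == ['plain']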
<SYSTEM_TASK:>
cmp function for sorting a list of strings by alphabetical order, but with
<END_TASK>
<USER_TASK:>
Description:
def number_aware_alphabetical_cmp(str1, str2):
""" cmp function for sorting a list of strings by alphabetical order, but with
numbers sorted numerically.
i.e., foo1, foo2, foo10, foo11
instead of foo1, foo10, foo11, foo2
""" |
def flatten_tokens(tokens):
l = []
for token in tokens:
if isinstance(token, str):
for char in token:
l.append(char)
else:
assert isinstance(token, float)
l.append(token)
return l
seq1 = flatten_tokens(tokenize_by_number(str1))
seq2 = flatten_tokens(tokenize_by_number(str2))
l = min(len(seq1),len(seq2))
i = 0
while i < l:
if seq1[i] < seq2[i]:
return -1
elif seq1[i] > seq2[i]:
return 1
i += 1
if len(seq1) < len(seq2):
return -1
elif len(seq1) > len(seq2):
return 1
return 0 |
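A hedged usage sketch: since this is a cmp-style comparator, it can be handed to sorted() through functools.cmp_to_key (the names below are made up):
import functools
names = ['foo10', 'foo2', 'foo1', 'foo11']
ordered = sorted(names, key=functools.cmp_to_key(number_aware_alphabetical_cmp))
# ordered == ['foo1', 'foo2', 'foo10', 'foo11']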
<SYSTEM_TASK:>
Check if a given period is possibly an alias.
<END_TASK>
<USER_TASK:>
Description:
def is_period_alias(period):
"""
Check if a given period is possibly an alias.
Parameters
----------
period : float
A period to test if it is a possible alias or not.
Returns
-------
is_alias : boolean
True if the given period is in a range of period alias.
""" |
# Based on the period vs periodSN plot of EROS-2 dataset (Kim+ 2014).
# Period alias occurs mostly at ~1 and ~30.
# Check each 1, 2, 3, 4, 5 factors.
for i in range(1, 6):
# One-day and one-month alias
if (.99 / float(i)) < period < (1.004 / float(i)):
return True
if (1.03 / float(i)) < period < (1.04 / float(i)):
return True
if (29.2 / float(i)) < period < (29.9 / float(i)):
return True
# From candidates from the two fields 01, 08.
# All of them are close to one day (or sidereal) alias.
if (0.96465 / float(i)) < period < (0.96485 / float(i)):
return True
if (0.96725 / float(i)) < period < (0.96745 / float(i)):
return True
if (0.98190 / float(i)) < period < (0.98230 / float(i)):
return True
if (1.01034 / float(i)) < period < (1.01076 / float(i)):
return True
if (1.01568 / float(i)) < period < (1.01604 / float(i)):
return True
if (1.01718 / float(i)) < period < (1.01742 / float(i)):
return True
# From the all candidates from the entire LMC fields.
# Some of these could be overlapped with the above cuts.
if (0.50776 / float(i)) < period < (0.50861 / float(i)):
return True
if (0.96434 / float(i)) < period < (0.9652 / float(i)):
return True
if (0.96688 / float(i)) < period < (0.96731 / float(i)):
return True
if (1.0722 / float(i)) < period < (1.0729 / float(i)):
return True
if (27.1 / float(i)) < period < (27.5 / float(i)):
return True
# Not in the range of any alias.
return False |
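A quick sanity-check sketch of how these cuts behave (periods chosen by hand against the windows above):
assert is_period_alias(1.0) is True    # inside the one-day window 0.99 - 1.004
assert is_period_alias(29.5) is True   # inside the one-month window 29.2 - 29.9
assert is_period_alias(2.5) is False   # not near any listed alias window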
<SYSTEM_TASK:>
Serialize `object` to a file denoted by `filepath`.
<END_TASK>
<USER_TASK:>
Description:
def save(filepath, obj, on_overwrite = 'ignore'):
"""
Serialize `object` to a file denoted by `filepath`.
Parameters
----------
filepath : str
A filename. If the suffix is `.joblib` and joblib can be
imported, `joblib.dump` is used in place of the regular
pickling mechanisms; this results in much faster saves by
saving arrays as separate .npy files on disk. If the file
suffix is `.npy` then `numpy.save` is attempted on `obj`.
Otherwise, (c)pickle is used.
obj : object
A Python object to be serialized.
on_overwrite: A string specifying what to do if the file already
exists.
ignore: just overwrite it
backup: make a copy of the file (<filepath>.bak) and
delete it when done saving the new copy.
this allows recovery of the old version of
the file if saving the new one fails
""" |
filepath = preprocess(filepath)
if os.path.exists(filepath):
if on_overwrite == 'backup':
backup = filepath + '.bak'
shutil.move(filepath, backup)
save(filepath, obj)
try:
os.remove(backup)
except Exception, e:
warnings.warn("Got an error while traing to remove "+backup+":"+str(e))
return
else:
assert on_overwrite == 'ignore'
try:
_save(filepath, obj)
except RuntimeError, e:
""" Sometimes for large theano graphs, pickle/cPickle exceed the
maximum recursion depth. This seems to me like a fundamental
design flaw in pickle/cPickle. The workaround I employ here
is the one recommended to someone who had a similar problem
on stackexchange:
http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle
Obviously this does not scale and could cause a crash
but I don't see another solution short of writing our
own implementation of pickle.
"""
if str(e).find('recursion') != -1:
warnings.warn('pylearn2.utils.save encountered the following '
'error: ' + str(e) +
'\nAttempting to resolve this error by calling ' +
'sys.setrecursionlimit and retrying')
old_limit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(50000)
_save(filepath, obj)
finally:
sys.setrecursionlimit(old_limit) |
<SYSTEM_TASK:>
Allow configuration of the pickle protocol on a per-machine basis.
<END_TASK>
<USER_TASK:>
Description:
def get_pickle_protocol():
"""
Allow configuration of the pickle protocol on a per-machine basis.
This way, if you use multiple platforms with different versions of
pickle, you can configure each of them to use the highest protocol
supported by all of the machines that you want to be able to
communicate.
""" |
try:
protocol_str = os.environ['PYLEARN2_PICKLE_PROTOCOL']
except KeyError:
# If not defined, we default to 0 because this is the default
# protocol used by cPickle.dump (and because it results in
# maximum portability)
protocol_str = '0'
if protocol_str == 'pickle.HIGHEST_PROTOCOL':
return pickle.HIGHEST_PROTOCOL
return int(protocol_str) |
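A sketch of how the environment variable is meant to be used (variable name taken from the code above; the value shown is illustrative):
import os
os.environ['PYLEARN2_PICKLE_PROTOCOL'] = 'pickle.HIGHEST_PROTOCOL'
protocol = get_pickle_protocol()  # resolves to pickle.HIGHEST_PROTOCOL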
<SYSTEM_TASK:>
Loads and parses a yaml file for a Train object.
<END_TASK>
<USER_TASK:>
Description:
def load_train_file(config_file_path):
"""Loads and parses a yaml file for a Train object.
Publishes the relevant training environment variables""" |
from pylearn2.config import yaml_parse
suffix_to_strip = '.yaml'
# publish environment variables related to file name
if config_file_path.endswith(suffix_to_strip):
config_file_full_stem = config_file_path[0:-len(suffix_to_strip)]
else:
config_file_full_stem = config_file_path
for varname in ["PYLEARN2_TRAIN_FILE_NAME", #this one is deprecated
"PYLEARN2_TRAIN_FILE_FULL_STEM"]: #this is the new, accepted name
environ.putenv(varname, config_file_full_stem)
directory = config_file_path.split('/')[:-1]
directory = '/'.join(directory)
if directory != '':
directory += '/'
environ.putenv("PYLEARN2_TRAIN_DIR", directory)
environ.putenv("PYLEARN2_TRAIN_BASE_NAME", config_file_path.split('/')[-1] )
environ.putenv("PYLEARN2_TRAIN_FILE_STEM", config_file_full_stem.split('/')[-1] )
return yaml_parse.load_path(config_file_path) |
<SYSTEM_TASK:>
Create ctypes pointer to object.
<END_TASK>
<USER_TASK:>
Description:
def POINTER(obj):
"""
Create ctypes pointer to object.
Notes
-----
This function converts None to a real NULL pointer because of a bug
in how ctypes handles None on 64-bit platforms.
""" |
p = ctypes.POINTER(obj)
if not isinstance(p.from_param, classmethod):
def from_param(cls, x):
if x is None:
return cls()
else:
return x
p.from_param = classmethod(from_param)
return p |
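A minimal sketch of the None-to-NULL behaviour this wrapper adds (purely illustrative):
import ctypes
IntPtr = POINTER(ctypes.c_int)
null_ptr = IntPtr.from_param(None)  # yields an empty (NULL) pointer instead of raising
real_ptr = IntPtr.from_param(ctypes.pointer(ctypes.c_int(3)))  # passed through unchanged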
<SYSTEM_TASK:>
Allocate device memory.
<END_TASK>
<USER_TASK:>
Description:
def cudaMalloc(count, ctype=None):
"""
Allocate device memory.
Allocate memory on the device associated with the current active
context.
Parameters
----------
count : int
Number of bytes of memory to allocate
ctype : _ctypes.SimpleType, optional
ctypes type to cast returned pointer.
Returns
-------
ptr : ctypes pointer
Pointer to allocated device memory.
""" |
ptr = ctypes.c_void_p()
status = _libcudart.cudaMalloc(ctypes.byref(ptr), count)
cudaCheckStatus(status)
if ctype is not None:
ptr = ctypes.cast(ptr, ctypes.POINTER(ctype))
return ptr |
<SYSTEM_TASK:>
Allocate pitched device memory.
<END_TASK>
<USER_TASK:>
Description:
def cudaMallocPitch(pitch, rows, cols, elesize):
"""
Allocate pitched device memory.
Allocate pitched memory on the device associated with the current active
context.
Parameters
----------
pitch : int
Pitch for allocation.
rows : int
Requested pitched allocation height.
cols : int
Requested pitched allocation width.
elesize : int
Size of memory element.
Returns
-------
ptr : ctypes pointer
Pointer to allocated device memory.
""" |
ptr = ctypes.c_void_p()
status = _libcudart.cudaMallocPitch(ctypes.byref(ptr),
ctypes.c_size_t(pitch), cols*elesize,
rows)
cudaCheckStatus(status)
return ptr, pitch |
<SYSTEM_TASK:>
Copy memory from host to device.
<END_TASK>
<USER_TASK:>
Description:
def cudaMemcpy_htod(dst, src, count):
"""
Copy memory from host to device.
Copy data from host memory to device memory.
Parameters
----------
dst : ctypes pointer
Device memory pointer.
src : ctypes pointer
Host memory pointer.
count : int
Number of bytes to copy.
""" |
status = _libcudart.cudaMemcpy(dst, src,
ctypes.c_size_t(count),
cudaMemcpyHostToDevice)
cudaCheckStatus(status) |
<SYSTEM_TASK:>
Copy memory from device to host.
<END_TASK>
<USER_TASK:>
Description:
def cudaMemcpy_dtoh(dst, src, count):
"""
Copy memory from device to host.
Copy data from device memory to host memory.
Parameters
----------
dst : ctypes pointer
Host memory pointer.
src : ctypes pointer
Device memory pointer.
count : int
Number of bytes to copy.
""" |
status = _libcudart.cudaMemcpy(dst, src,
ctypes.c_size_t(count),
cudaMemcpyDeviceToHost)
cudaCheckStatus(status) |
<SYSTEM_TASK:>
Return the amount of free and total device memory.
<END_TASK>
<USER_TASK:>
Description:
def cudaMemGetInfo():
"""
Return the amount of free and total device memory.
Returns
-------
free : long
Free memory in bytes.
total : long
Total memory in bytes.
""" |
free = ctypes.c_size_t()
total = ctypes.c_size_t()
status = _libcudart.cudaMemGetInfo(ctypes.byref(free),
ctypes.byref(total))
cudaCheckStatus(status)
return free.value, total.value |
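Usage sketch (requires a loaded CUDA runtime; the arithmetic only converts bytes to megabytes):
free, total = cudaMemGetInfo()
free_mb, total_mb = free // 2 ** 20, total // 2 ** 20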
<SYSTEM_TASK:>
Get current CUDA device.
<END_TASK>
<USER_TASK:>
Description:
def cudaGetDevice():
"""
Get current CUDA device.
Return the identifying number of the device currently used to
process CUDA operations.
Returns
-------
dev : int
Device number.
""" |
dev = ctypes.c_int()
status = _libcudart.cudaGetDevice(ctypes.byref(dev))
cudaCheckStatus(status)
return dev.value |
<SYSTEM_TASK:>
Get installed CUDA driver version.
<END_TASK>
<USER_TASK:>
Description:
def cudaDriverGetVersion():
"""
Get installed CUDA driver version.
Return the version of the installed CUDA driver as an integer. If
no driver is detected, 0 is returned.
Returns
-------
version : int
Driver version.
""" |
version = ctypes.c_int()
status = _libcudart.cudaDriverGetVersion(ctypes.byref(version))
cudaCheckStatus(status)
return version.value |
<SYSTEM_TASK:>
Get memory pointer attributes.
<END_TASK>
<USER_TASK:>
Description:
def cudaPointerGetAttributes(ptr):
"""
Get memory pointer attributes.
Returns attributes of the specified pointer.
Parameters
----------
ptr : ctypes pointer
Memory pointer to examine.
Returns
-------
memory_type : int
Memory type; 1 indicates host memory, 2 indicates device
memory.
device : int
Number of device associated with pointer.
Notes
-----
This function only works with CUDA 4.0 and later.
""" |
attributes = cudaPointerAttributes()
status = \
_libcudart.cudaPointerGetAttributes(ctypes.byref(attributes), ptr)
cudaCheckStatus(status)
return attributes.memoryType, attributes.device |
<SYSTEM_TASK:>
Evaluate a thunk in an environment.
<END_TASK>
<USER_TASK:>
Description:
def eval(thunk, env):
"""Evaluate a thunk in an environment.
Will defer the actual evaluation to the thunk itself, but adds two things:
caching and recursion detection.
Since we have to use a global evaluation stack (because there is a variety of functions that may
be invoked, not just eval() but also __getitem__, and not all of them can pass along a context
object), GCL evaluation is not thread safe.
With regard to schemas:
- A schema can be passed in from outside. The returned object will be validated to see that it
conforms to the schema. The schema will be attached to the value if possible.
- Some objects may contain their own schema, such as tuples. This would be out of scope of the
eval() function, were it not for:
- Schema validation can be disabled in an evaluation call stack. This is useful if we're
evaluating a tuple only for its schema information. At that point, we're not interested if the
object is value-complete.
""" |
key = Activation.key(thunk, env)
if Activation.activated(key):
raise exceptions.RecursionError('Reference cycle')
with Activation(key):
return eval_cache.get(key, thunk.eval, env) |
<SYSTEM_TASK:>
Delegate to our current "value provider" for the node belonging to this key.
<END_TASK>
<USER_TASK:>
Description:
def get_node(self, key):
"""Delegate to our current "value provider" for the node belonging to this key.""" |
if key in self.names:
return self.values.get_member_node(key) if hasattr(self.values, 'get_member_node') else None
return self.parent.get_node(key) |
<SYSTEM_TASK:>
Reify values to their Python equivalents.
<END_TASK>
<USER_TASK:>
Description:
def to_python(value, seen=None):
"""Reify values to their Python equivalents.
Does recursion detection, failing when that happens.
""" |
seen = seen or set()
if isinstance(value, framework.TupleLike):
if value.ident in seen:
raise RecursionException('to_python: infinite recursion while evaluating %r' % value)
new_seen = seen.union([value.ident])
return {k: to_python(value[k], seen=new_seen) for k in value.exportable_keys()}
if isinstance(value, dict):
return {k: to_python(value[k], seen=seen) for k in value.keys()}
if isinstance(value, list):
return [to_python(x, seen=seen) for x in value]
return value |
<SYSTEM_TASK:>
Walks the _evaluated_ tree of the given GCL tuple.
<END_TASK>
<USER_TASK:>
Description:
def walk(value, walker, path=None, seen=None):
"""Walks the _evaluated_ tree of the given GCL tuple.
The appropriate methods of walker will be invoked for every element in the
tree.
""" |
seen = seen or set()
path = path or []
# Recursion
if id(value) in seen:
walker.visitRecursion(path)
return
# Error
if isinstance(value, Exception):
walker.visitError(path, value)
return
# List
if isinstance(value, list):
# Not actually a tuple, but okay
recurse = walker.enterList(value, path)
if not recurse: return
next_walker = walker if recurse is True else recurse
with TempSetAdd(seen, id(value)):
for i, x in enumerate(value):
walk(x, next_walker, path=path + ['[%d]' % i], seen=seen)
walker.leaveList(value, path)
return
# Scalar
if not isinstance(value, framework.TupleLike):
walker.visitScalar(path, value)
return
# Tuple
recurse = walker.enterTuple(value, path)
if not recurse: return
next_walker = walker if recurse is True else recurse
with TempSetAdd(seen, id(value)):
keys = sorted(value.keys())
for key in keys:
key_path = path + [key]
elm = get_or_error(value, key)
walk(elm, next_walker, path=key_path, seen=seen)
walker.leaveTuple(value, path) |
<SYSTEM_TASK:>
Return a hash value that uniquely identifies the GCL value.
<END_TASK>
<USER_TASK:>
Description:
def fingerprint(value):
"""Return a hash value that uniquely identifies the GCL value.""" |
h = hashlib.sha256()
_digest(value, h)
return h.digest().encode('hex') |
<SYSTEM_TASK:>
Return the last 2 error messages from an error stack.
<END_TASK>
<USER_TASK:>
Description:
def compact_error(err):
"""Return the the last 2 error messages from an error stack.
These error messages turns out to be the most descriptive.
""" |
def err2(e):
if isinstance(e, exceptions.EvaluationError) and e.inner:
message, i = err2(e.inner)
if i == 1:
return ', '.join([e.args[0], str(e.inner)]), i + 1
else:
return message, i + 1
else:
return str(e), 1
return err2(err)[0] |
<SYSTEM_TASK:>
Backpropagate through the logistic layer.
<END_TASK>
<USER_TASK:>
Description:
def backprop(self, input_data, targets,
cache=None):
""" Backpropagate through the logistic layer.
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
targets : ``GPUArray``
The target values of the units.
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : tuple of ``GPUArray``
Gradients with respect to the weights and biases in the
form ``(df_weights, df_biases)``.
df_input : ``GPUArray``
Gradients with respect to the input.
""" |
if cache is not None:
activations = cache
else:
activations = self.feed_forward(input_data, prediction=False)
if activations.shape != targets.shape:
raise ValueError('Activations (shape = %s) and targets (shape = %s) are different sizes' %
(activations.shape, targets.shape))
delta = substract_matrix(activations, targets)
nan_to_zeros(delta, delta)
# Gradient wrt weights
df_W = linalg.dot(input_data, delta, transa='T')
# Gradient wrt bias
df_b = matrix_sum_out_axis(delta, 0)
# Gradient wrt input
df_input = linalg.dot(delta, self.W, transb='T')
# L1 penalty
if self.l1_penalty_weight:
df_W += self.l1_penalty_weight * sign(self.W)
# L2 penalty
if self.l2_penalty_weight:
df_W += self.l2_penalty_weight * self.W
return (df_W, df_b), df_input |
<SYSTEM_TASK:>
Parse comment lines and make subsequent indented lines into a code block
<END_TASK>
<USER_TASK:>
Description:
def stylize_comment_block(lines):
"""Parse comment lines and make subsequent indented lines into a code block
block.
""" |
normal, sep, in_code = range(3)
state = normal
for line in lines:
indented = line.startswith(' ')
empty_line = line.strip() == ''
if state == normal and empty_line:
state = sep
elif state in [sep, normal] and indented:
yield ''
if indented:
yield '.. code-block:: javascript'
yield ''
yield line
state = in_code
else:
state = normal
elif state == sep and not empty_line:
yield ''
yield line
state = normal
else:
yield line
if state == in_code and not (indented or empty_line):
state = normal |
<SYSTEM_TASK:>
Return two pairs of members, scalar and tuple members.
<END_TASK>
<USER_TASK:>
Description:
def sort_members(tup, names):
"""Return two pairs of members, scalar and tuple members.
The scalars will be sorted s.t. the unbound members are at the top.
""" |
scalars, tuples = partition(lambda x: not is_tuple_node(tup.member[x].value), names)
unbound, bound = partition(lambda x: tup.member[x].value.is_unbound(), scalars)
return usorted(unbound) + usorted(bound), usorted(tuples) |
<SYSTEM_TASK:>
Resolve filename relatively against one of the given paths, if possible.
<END_TASK>
<USER_TASK:>
Description:
def resolve_file(fname, paths):
"""Resolve filename relatively against one of the given paths, if possible.""" |
fpath = path.abspath(fname)
for p in paths:
spath = path.abspath(p)
if fpath.startswith(spath):
return fpath[len(spath) + 1:]
return fname |
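Illustrative behaviour (the paths below are made up):
# If fname lives under one of the search paths, the matching prefix is stripped:
#   resolve_file('/home/user/project/lib/util.gcl', ['/home/user/project'])  ->  'lib/util.gcl'
# Otherwise the name is returned unchanged:
#   resolve_file('/tmp/other.gcl', ['/home/user/project'])  ->  '/tmp/other.gcl'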
<SYSTEM_TASK:>
Generate a list of strings representing the table in RST format.
<END_TASK>
<USER_TASK:>
Description:
def generate(self):
"""Generate a list of strings representing the table in RST format.""" |
header = ' '.join('=' * self.width[i] for i in range(self.w))
lines = [
' '.join(row[i].ljust(self.width[i]) for i in range(self.w))
for row in self.rows]
return [header] + lines + [header] |
<SYSTEM_TASK:>
Select nodes according to the input selector.
<END_TASK>
<USER_TASK:>
Description:
def select(self, model):
"""Select nodes according to the input selector.
This can ALWAYS return multiple root elements.
""" |
res = []
def doSelect(value, pre, remaining):
if not remaining:
res.append((pre, value))
else:
# For the other selectors to work, value must be a Tuple or a list at this point.
if not is_tuple(value) and not isinstance(value, list):
return
qhead, qtail = remaining[0], remaining[1:]
if isinstance(qhead, tuple) and is_tuple(value):
for alt in qhead:
if alt in value:
doSelect(value[alt], pre + [alt], qtail)
elif qhead == '*':
if isinstance(value, list):
indices = range(len(value))
reprs = [listKey(i) for i in indices]
else:
indices = value.keys()
reprs = indices
for key, rep in zip(indices, reprs):
doSelect(value[key], pre + [rep], qtail)
elif isinstance(qhead, int) and isinstance(value, list):
doSelect(value[qhead], pre + [listKey(qhead)], qtail)
elif is_tuple(value):
if qhead in value:
doSelect(value[qhead], pre + [qhead], qtail)
for selector in self.selectors:
doSelect(model, [], selector)
return QueryResult(res) |
<SYSTEM_TASK:>
Return a deep dict of the values selected.
<END_TASK>
<USER_TASK:>
Description:
def deep(self):
"""Return a deep dict of the values selected.
The leaf values may still be gcl Tuples. Use util.to_python() if you want
to reify everything to real Python values.
""" |
self.lists = {}
ret = {}
for path, value in self.paths_values():
self.recursiveSet(ret, path, value)
self.removeMissingValuesFromLists()
return ret |
<SYSTEM_TASK:>
Return a list of nodes that have a recursive dependency.
<END_TASK>
<USER_TASK:>
Description:
def find_recursive_dependency(self):
"""Return a list of nodes that have a recursive dependency.""" |
nodes_on_path = []
def helper(nodes):
for node in nodes:
cycle = node in nodes_on_path
nodes_on_path.append(node)
if cycle or helper(self.deps.get(node, [])):
return True
nodes_on_path.pop()
return False
helper(self.unordered)
return nodes_on_path |
<SYSTEM_TASK:>
Called for every tuple.
<END_TASK>
<USER_TASK:>
Description:
def enterTuple(self, tuple, path):
"""Called for every tuple.
If this returns False, the elements of the tuple will not be recursed over
and leaveTuple() will not be called.
""" |
if skip_name(path):
return False
node = Node(path, tuple)
if self.condition.matches(node):
self.unordered.append(node)
return False
return True |
<SYSTEM_TASK:>
Make a sequence of applications from a list of tokens.
<END_TASK>
<USER_TASK:>
Description:
def mkApplications(location, *atoms):
"""Make a sequence of applications from a list of tokens.
atoms is a list of atoms, which will be handled left-associatively. E.g:
['foo', [], []] == foo()() ==> Application(Application('foo', []), [])
""" |
atoms = list(atoms)
while len(atoms) > 1:
atoms[0:2] = [Application(location, atoms[0], atoms[1])]
# Nothing left to apply
return atoms[0] |
<SYSTEM_TASK:>
Call a function, respecting all the various types of functions that exist.
<END_TASK>
<USER_TASK:>
Description:
def call_fn(fn, arglist, env):
"""Call a function, respecting all the various types of functions that exist.""" |
if isinstance(fn, framework.LazyFunction):
# The following looks complicated, but this is necessary because you can't
# construct closures over the loop variable directly.
thunks = [(lambda thunk: lambda: framework.eval(thunk, env))(th) for th in arglist.values]
return fn(*thunks)
evaled_args = framework.eval(arglist, env)
if isinstance(fn, framework.EnvironmentFunction):
return fn(*evaled_args, env=env)
return fn(*evaled_args) |
<SYSTEM_TASK:>
Make a Schema object from the given spec.
<END_TASK>
<USER_TASK:>
Description:
def make_schema_from(value, env):
"""Make a Schema object from the given spec.
The input and output types of this function are super unclear, and are held together by ponies,
wishes, duct tape, and a load of tests. See the comments for horrific entertainment.
""" |
# So this thing may not need to evaluate anything[0]
if isinstance(value, framework.Thunk):
value = framework.eval(value, env)
# We're a bit messy. In general, this has evaluated to a Schema object, but not necessarily:
# for tuples and lists, we still need to treat the objects as specs.
if isinstance(value, schema.Schema):
return value
if framework.is_tuple(value):
# If it so happens that the thing is a tuple, we need to pass in the data in a bit of a
# different way into the schema factory (in a dictionary with {fields, required} keys).
return schema_spec_from_tuple(value)
if framework.is_list(value):
# [0] This list may contain tuples, which oughta be treated as specs, or already-resolved schema
# objects (as returned by 'int' and 'string' literals). make_schema_from
# deals with both.
return schema.from_spec([make_schema_from(x, env) for x in value])
raise exceptions.EvaluationError('Can\'t make a schema from %r' % value) |
<SYSTEM_TASK:>
Parse bracketed list.
<END_TASK>
<USER_TASK:>
Description:
def bracketedList(l, r, sep, expr, allow_missing_close=False):
"""Parse bracketed list.
Empty list is possible, as is a trailing separator.
""" |
# We may need to backtrack for lists, because of list comprehension, but not for
# any of the other lists
strict = l != '['
closer = sym(r) if not allow_missing_close else p.Optional(sym(r))
if strict:
return sym(l) - listMembers(sep, expr) - closer
else:
return sym(l) + listMembers(sep, expr) + closer |
<SYSTEM_TASK:>
Unquote the indicated string.
<END_TASK>
<USER_TASK:>
Description:
def unquote(s):
"""Unquote the indicated string.""" |
# Ignore the left- and rightmost chars (which should be quotes).
# Use the Python engine to decode the escape sequence
i, N = 1, len(s) - 1
ret = []
while i < N:
if s[i] == '\\' and i < N - 1:
ret.append(UNQUOTE_MAP.get(s[i+1], s[i+1]))
i += 2
else:
ret.append(s[i])
i += 1
return ''.join(ret) |
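A usage sketch; the exact escape handling depends on UNQUOTE_MAP, which is assumed here to map 'n' to a newline:
assert unquote('"hello"') == 'hello'
assert unquote(r'"a\nb"') == 'a\nb'  # assuming UNQUOTE_MAP['n'] == '\n'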
<SYSTEM_TASK:>
Function to put a name on a pyparsing pattern.
<END_TASK>
<USER_TASK:>
Description:
def pattern(name, pattern):
"""Function to put a name on a pyparsing pattern.
Just for ease of debugging/tracing parse errors.
""" |
pattern.setName(name)
astracing.maybe_trace(pattern)
return pattern |
<SYSTEM_TASK:>
Find all AST nodes at the given filename, line and column.
<END_TASK>
<USER_TASK:>
Description:
def find_tokens(self, q):
"""Find all AST nodes at the given filename, line and column.""" |
found_me = []
if hasattr(self, 'location'):
if self.location.contains(q):
found_me = [self]
elif self._found_by(q):
found_me = [self]
cs = [n.find_tokens(q) for n in self._children()]
return found_me + list(itertools.chain(*cs)) |
<SYSTEM_TASK:>
Instantiate the Tuple based on this TupleNode.
<END_TASK>
<USER_TASK:>
Description:
def _make_tuple(self, env):
"""Instantiate the Tuple based on this TupleNode.""" |
t = runtime.Tuple(self, env, dict2tuple)
# A tuple also provides its own schema spec
schema = schema_spec_from_tuple(t)
t.attach_schema(schema)
return t |
<SYSTEM_TASK:>
Apply a tuple to something else.
<END_TASK>
<USER_TASK:>
Description:
def applyTuple(self, tuple, right, env):
"""Apply a tuple to something else.""" |
if len(right) != 1:
raise exceptions.EvaluationError('Tuple (%r) can only be applied to one argument, got %r' % (self.left, self.right))
right = right[0]
return tuple(right) |
<SYSTEM_TASK:>
Apply a list to something else.
<END_TASK>
<USER_TASK:>
Description:
def applyIndex(self, lst, right):
"""Apply a list to something else.""" |
if len(right) != 1:
raise exceptions.EvaluationError('%r can only be applied to one argument, got %r' % (self.left, self.right))
right = right[0]
if isinstance(right, int):
return lst[right]
raise exceptions.EvaluationError("Can't apply %r to argument (%r): integer expected, got %r" % (self.left, self.right, right)) |
<SYSTEM_TASK:>
Return the classification error rate
<END_TASK>
<USER_TASK:>
Description:
def class_error(self, input_data, targets, average=True,
cache=None, prediction=False):
""" Return the classification error rate
""" |
if cache is not None:
activations = cache
else:
activations = \
self.feed_forward(input_data, prediction=prediction)
targets = targets.get().argmax(1)
class_error = np.sum(activations.get().argmax(1) != targets)
if average: class_error = float(class_error) / targets.shape[0]
return class_error |
<SYSTEM_TASK:>
Return a list where each element contains the parameters for a task.
<END_TASK>
<USER_TASK:>
Description:
def parameters(self):
"""Return a list where each element contains the parameters for a task.
""" |
parameters = []
for task in self.tasks:
parameters.extend(task.parameters)
return parameters |
<SYSTEM_TASK:>
Update the parameters.
<END_TASK>
<USER_TASK:>
Description:
def parameters(self, value):
"""Update the parameters.
``value`` must be a list/tuple of length
``MultitaskTopLayer.n_tasks``, each element of which must have
the correct number of parameters for the task.
""" |
assert len(value) == self.n_parameters
i = 0
for task in self.tasks:
task.parameters = value[i:i + task.n_parameters]
i += task.n_parameters |
<SYSTEM_TASK:>
Call ``feed_forward`` for each task and combine the activations.
<END_TASK>
<USER_TASK:>
Description:
def feed_forward(self, input_data, prediction=False):
"""Call ``feed_forward`` for each task and combine the activations.
Passes ``input_data`` to all tasks and returns the activations
as a list.
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
prediction : bool, optional
Whether to use prediction model. Only relevant when using
dropout. If true, then weights are multiplied by
1 - dropout if the layer uses dropout.
**Returns:**
activations : list of ``GPUArray``
The activations of the output units, one element for each task.
""" |
activations = []
for task in self.tasks:
activations_task = task.feed_forward(input_data, prediction)
activations.append(activations_task)
return activations |
<SYSTEM_TASK:>
Compute gradients for each task and combine the results.
<END_TASK>
<USER_TASK:>
Description:
def backprop(self, input_data, targets, cache=None):
"""Compute gradients for each task and combine the results.
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
targets : ``GPUArray``
The target values of the units.
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : list
Gradients with respect to the weights and biases for each task
df_input : ``GPUArray``
Gradients with respect to the input, obtained by adding
the gradients with respect to the inputs from each task,
weighted by ``MultitaskTopLayer.task_weights``.
""" |
df_input = gpuarray.zeros_like(input_data)
if cache is None: cache = self.n_tasks * [None]
gradients = []
for targets_task, cache_task, task, task_weight in \
izip(targets, cache, self.tasks, self.task_weights):
gradients_task, df_input_task = \
task.backprop(input_data, targets_task,
cache_task)
df_input = df_input.mul_add(1., df_input_task, task_weight)
gradients.extend(gradients_task)
return gradients, df_input |
<SYSTEM_TASK:>
Computes the cross-entropy error for all tasks.
<END_TASK>
<USER_TASK:>
Description:
def cross_entropy_error(self, input_data, targets, average=True,
cache=None, prediction=False,
sum_errors=True):
""" Computes the cross-entropy error for all tasks.
""" |
loss = []
if cache is None:
cache = self.n_tasks * [None]
for targets_task, cache_task, task in \
izip(targets, cache, self.tasks):
loss.append(task.cross_entropy_error(
input_data, targets_task, average=average,
cache=cache_task,
prediction=prediction))
if sum_errors:
return sum(loss)
else:
return loss |
<SYSTEM_TASK:>
Returns a dictionary describing the architecture of the layer.
<END_TASK>
<USER_TASK:>
Description:
def architecture(self):
"""Returns a dictionary describing the architecture of the layer.""" |
arch = {'class': self.__class__,
'n_in': self.n_in,
'n_units': self.n_units,
'activation_function': self.activation_function
if hasattr(self, 'activation_function') else None}
return arch |
<SYSTEM_TASK:>
Matrix-vector product for real general banded matrix.
<END_TASK>
<USER_TASK:>
Description:
def cublasSgbmv(handle, trans, m, n, kl, ku, alpha, A, lda,
x, incx, beta, y, incy):
"""
Matrix-vector product for real general banded matrix.
""" |
status = _libcublas.cublasSgbmv_v2(handle,
trans, m, n, kl, ku,
ctypes.byref(ctypes.c_float(alpha)),
int(A), lda,
int(x), incx,
ctypes.byref(ctypes.c_float(beta)),
int(y), incy)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Matrix-vector product for real triangular-banded matrix.
<END_TASK>
<USER_TASK:>
Description:
def cublasStbmv(handle, uplo, trans, diag, n, k, A, lda, x, incx):
"""
Matrix-vector product for real triangular-banded matrix.
""" |
status = _libcublas.cublasStbmv_v2(handle,
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
_CUBLAS_DIAG[diag],
n, k, int(A), lda, int(x), incx)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Solve complex triangular-packed system with one right-hand side.
<END_TASK>
<USER_TASK:>
Description:
def cublasCtpsv(handle, uplo, trans, diag, n, AP, x, incx):
"""
Solve complex triangular-packed system with one right-hand side.
""" |
status = _libcublas.cublasCtpsv_v2(handle,
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
_CUBLAS_DIAG[diag],
n, int(AP), int(x), incx)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Solve complex triangular-packed system with one right-hand side.
<END_TASK>
<USER_TASK:>
Description:
def cublasZtpsv(handle, uplo, trans, diag, n, AP, x, incx):
"""
Solve complex triangular-packed system with one right-hand side.
""" |
status = _libcublas.cublasZtpsv_v2(handle,
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
_CUBLAS_DIAG[diag],
n, int(AP), int(x), incx)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Matrix-vector product for complex triangular matrix.
<END_TASK>
<USER_TASK:>
Description:
def cublasCtrmv(handle, uplo, trans, diag, n, A, lda, x, incx):
"""
Matrix-vector product for complex triangular matrix.
""" |
status = _libcublas.cublasCtrmv_v2(handle,
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
_CUBLAS_DIAG[diag],
n, int(A), lda, int(x), incx)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Matrix-vector product for real triangular matrix.
<END_TASK>
<USER_TASK:>
Description:
def cublasDtrmv(handle, uplo, trans, diag, n, A, lda, x, inx):
"""
Matrix-vector product for real triangular matrix.
""" |
status = _libcublas.cublasDtrmv_v2(handle,
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
_CUBLAS_DIAG[diag],
n, int(A), lda, int(x), inx)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Matrix-matrix product for complex general matrix.
<END_TASK>
<USER_TASK:>
Description:
def cublasZgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc):
"""
Matrix-matrix product for complex general matrix.
""" |
status = _libcublas.cublasZgemm_v2(handle,
_CUBLAS_OP[transa],
_CUBLAS_OP[transb], m, n, k,
ctypes.byref(cuda.cuDoubleComplex(alpha.real,
alpha.imag)),
int(A), lda, int(B), ldb,
ctypes.byref(cuda.cuDoubleComplex(beta.real,
beta.imag)),
int(C), ldc)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Matrix-matrix product for symmetric matrix.
<END_TASK>
<USER_TASK:>
Description:
def cublasSsymm(handle, side, uplo, m, n, alpha, A, lda, B, ldb, beta, C, ldc):
"""
Matrix-matrix product for symmetric matrix.
""" |
status = _libcublas.cublasSsymm_v2(handle,
_CUBLAS_SIDE_MODE[side],
_CUBLAS_FILL_MODE[uplo],
m, n, ctypes.byref(ctypes.c_float(alpha)),
int(A), lda, int(B), ldb,
ctypes.byref(ctypes.c_float(beta)),
int(C), ldc)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Matrix-matrix product for real symmetric matrix.
<END_TASK>
<USER_TASK:>
Description:
def cublasDsymm(handle, side, uplo, m, n, alpha, A, lda, B, ldb, beta, C, ldc):
"""
Matrix-matrix product for real symmetric matrix.
""" |
status = _libcublas.cublasDsymm_v2(handle,
_CUBLAS_SIDE_MODE[side],
_CUBLAS_FILL_MODE[uplo],
m, n, ctypes.byref(ctypes.c_double(alpha)),
int(A), lda, int(B), ldb,
ctypes.byref(ctypes.c_double(beta)),
int(C), ldc)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Matrix-matrix product for complex symmetric matrix.
<END_TASK>
<USER_TASK:>
Description:
def cublasCsymm(handle, side, uplo, m, n, alpha, A, lda, B, ldb, beta, C, ldc):
"""
Matrix-matrix product for complex symmetric matrix.
""" |
status = _libcublas.cublasCsymm_v2(handle,
_CUBLAS_SIDE_MODE[side],
_CUBLAS_FILL_MODE[uplo],
m, n, ctypes.byref(cuda.cuFloatComplex(alpha.real,
alpha.imag)),
int(A), lda, int(B), ldb,
ctypes.byref(cuda.cuFloatComplex(beta.real,
beta.imag)),
int(C), ldc)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Rank-k operation on complex symmetric matrix.
<END_TASK>
<USER_TASK:>
Description:
def cublasZsyrk(handle, uplo, trans, n, k, alpha, A, lda, beta, C, ldc):
"""
Rank-k operation on complex symmetric matrix.
""" |
status = _libcublas.cublasZsyrk_v2(handle,
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
n, k, ctypes.byref(cuda.cuDoubleComplex(alpha.real,
alpha.imag)),
int(A), lda,
ctypes.byref(cuda.cuDoubleComplex(beta.real,
beta.imag)),
int(C), ldc)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Matrix-matrix product for real triangular matrix.
<END_TASK>
<USER_TASK:>
Description:
def cublasStrmm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb, C, ldc):
"""
Matrix-matrix product for real triangular matrix.
""" |
status = _libcublas.cublasStrmm_v2(handle,
_CUBLAS_SIDE_MODE[side],
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
_CUBLAS_DIAG[diag],
m, n, ctypes.byref(ctypes.c_float(alpha)),
int(A), lda, int(B), ldb, int(C), ldc)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Matrix-matrix product for complex triangular matrix.
<END_TASK>
<USER_TASK:>
Description:
def cublasZtrmm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B, ldb, C, ldc):
"""
Matrix-matrix product for complex triangular matrix.
""" |
status = _libcublas.cublasZtrmm_v2(handle,
_CUBLAS_SIDE_MODE[side],
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
_CUBLAS_DIAG[diag],
m, n, ctypes.byref(cuda.cuDoubleComplex(alpha.real,
alpha.imag)),
int(A), lda, int(B), ldb, int(C), ldc)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Solve complex triangular system with multiple right-hand sides.
<END_TASK>
<USER_TASK:>
Description:
def cublasZtrsm(handle, side, uplo, transa, diag, m, n, alpha, A, lda, B, ldb):
"""
Solve complex triangular system with multiple right-hand sides.
""" |
status = _libcublas.cublasZtrsm_v2(handle,
_CUBLAS_SIDE_MODE[side],
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[transa],
_CUBLAS_DIAG[diag],
m, n, ctypes.byref(cuda.cuDoubleComplex(alpha.real,
alpha.imag)),
int(A), lda, int(B), ldb)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Rank-k operation on Hermitian matrix.
<END_TASK>
<USER_TASK:>
Description:
def cublasZherk(handle, uplo, trans, n, k, alpha, A, lda, beta, C, ldc):
"""
Rank-k operation on Hermitian matrix.
""" |
status = _libcublas.cublasZherk_v2(handle,
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
n, k, ctypes.byref(ctypes.c_double(alpha)),
int(A), lda,
ctypes.byref(ctypes.c_double(beta)),
int(C), ldc)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Rank-2k operation on Hermitian matrix.
<END_TASK>
<USER_TASK:>
Description:
def cublasCher2k(handle, uplo, trans, n, k, alpha, A, lda, B, ldb, beta, C, ldc):
"""
Rank-2k operation on Hermitian matrix.
""" |
status = _libcublas.cublasCher2k_v2(handle,
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
n, k, ctypes.byref(cuda.cuFloatComplex(alpha.real,
alpha.imag)),
int(A), lda, int(B), ldb,
ctypes.byref(cuda.cuFloatComplex(beta.real,
beta.imag)),
int(C), ldc)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Matrix-diagonal matrix product for real general matrix.
<END_TASK>
<USER_TASK:>
Description:
def cublasSdgmm(handle, mode, m, n, A, lda, x, incx, C, ldc):
"""
Matrix-diagonal matrix product for real general matrix.
""" |
status = _libcublas.cublasSdgmm(handle,
_CUBLAS_SIDE_MODE[mode],
m, n,
int(A), lda,
int(x), incx,
int(C), ldc)
cublasCheckStatus(status) |
<SYSTEM_TASK:>
Read an EROS light curve and return its data.
<END_TASK>
<USER_TASK:>
Description:
def load_EROS_lc(filename='lm0010n22323.time'):
"""
Read an EROS light curve and return its data.
Parameters
----------
filename : str, optional
A light-curve filename.
Returns
-------
dates : numpy.ndarray
An array of dates.
magnitudes : numpy.ndarray
An array of magnitudes.
errors : numpy.ndarray
An array of magnitudes errors.
""" |
module_path = dirname(__file__)
file_path = join(module_path, 'lightcurves', filename)
data = np.loadtxt(file_path)
date = data[:, 0]
mag = data[:, 1]
err = data[:, 2]
return date, mag, err |
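Usage sketch (the bundled light curve 'lm0010n22323.time' is the default argument in the signature above):
dates, magnitudes, errors = load_EROS_lc()
# The three arrays have one entry per observation epoch.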
<SYSTEM_TASK:>
Samples a dropout mask and applies it in place
<END_TASK>
<USER_TASK:>
Description:
def sample_dropout_mask(x, dropout_probability=.5, columns=None, stream=None, target=None,
dropout_mask=None, dropout_prob_array=None):
""" Samples a dropout mask and applies it in place""" |
assert x.flags.c_contiguous
if columns is not None:
assert len(columns) == 2
x_tmp = x
x = extract_columns(x, columns[0], columns[1])
shape = x.shape
if dropout_prob_array is None:
dropout_prob_array = gpuarray.empty(shape, x.dtype, allocator=memory_pool.allocate)
sampler.fill_uniform(dropout_prob_array, stream)
if dropout_mask is None:
dropout_mask = gpuarray.empty(shape, np.int8, allocator=memory_pool.allocate)
if target is None: target = x
all_kernels['sample_dropout_mask'](
x, target, dropout_mask, dropout_prob_array,
np.float32(dropout_probability))
if columns is not None:
insert_columns(x, x_tmp, columns[0])
return dropout_mask |
<SYSTEM_TASK:>
Load but don't evaluate a GCL expression from a file.
<END_TASK>
<USER_TASK:>
Description:
def read(filename, loader=None, implicit_tuple=True, allow_errors=False):
"""Load but don't evaluate a GCL expression from a file.""" |
with open(filename, 'r') as f:
return reads(f.read(),
filename=filename,
loader=loader,
implicit_tuple=implicit_tuple,
allow_errors=allow_errors) |
<SYSTEM_TASK:>
Load and evaluate a GCL expression from a string.
<END_TASK>
<USER_TASK:>
Description:
def loads(s, filename=None, loader=None, implicit_tuple=True, env={}, schema=None):
"""Load and evaluate a GCL expression from a string.""" |
ast = reads(s, filename=filename, loader=loader, implicit_tuple=implicit_tuple)
if not isinstance(env, framework.Environment):
# For backwards compatibility we accept an Environment object. Otherwise assume it's a dict
# whose bindings will add/overwrite the default bindings.
env = framework.Environment(dict(_default_bindings, **env))
obj = framework.eval(ast, env)
return mod_schema.validate(obj, schema) |
<SYSTEM_TASK:>
Load and evaluate a GCL expression from a file.
<END_TASK>
<USER_TASK:>
Description:
def load(filename, loader=None, implicit_tuple=True, env={}, schema=None):
"""Load and evaluate a GCL expression from a file.""" |
with open(filename, 'r') as f:
return loads(f.read(),
filename=filename,
loader=loader,
implicit_tuple=implicit_tuple,
env=env,
schema=schema) |
<SYSTEM_TASK:>
Increases linearly and then stays flat
<END_TASK>
<USER_TASK:>
Description:
def linear_scheduler_up(init_value, target_value, duration):
""" Increases linearly and then stays flat """ |
value = init_value
t = 0
while True:
yield value
t += 1
if t < duration:
value = init_value + t * (target_value - init_value) / duration
else:
value = target_value |
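A short sketch of consuming the generator (float arguments avoid Python 2 integer division in the ramp):
import itertools
sched = linear_scheduler_up(0., 1., duration=4)
values = list(itertools.islice(sched, 6))
# values == [0.0, 0.25, 0.5, 0.75, 1.0, 1.0]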
<SYSTEM_TASK:>
Increases linearly to target_value, stays at target_value until
<END_TASK>
<USER_TASK:>
Description:
def linear_scheduler_up_down(init_value, target_value, final_value,
duration_up, t_decrease, duration_down):
""" Increases linearly to target_value, stays at target_value until
t_decrease and then decreases linearly
""" |
value = init_value
t = 0
while True:
yield value
t += 1
if t < duration_up:
value = init_value + t * (target_value - init_value) / \
float(duration_up)
elif t > t_decrease:
value = target_value - (t - t_decrease) * \
(target_value - final_value) / \
float(duration_down)
else:
value = target_value |
<SYSTEM_TASK:>
Loads a YAML configuration from a string or file-like object.
<END_TASK>
<USER_TASK:>
Description:
def load(stream, overrides=None, **kwargs):
"""
Loads a YAML configuration from a string or file-like object.
Parameters
----------
stream : str or object
Either a string containing valid YAML or a file-like object
supporting the .read() interface.
overrides : dict, optional
A dictionary containing overrides to apply. The location of
the override is specified in the key as a dot-delimited path
to the desired parameter, e.g. "model.corruptor.corruption_level".
Returns
-------
graph : dict or object
The dictionary or object (if the top-level element specified a
Python object to instantiate).
Notes
-----
Other keyword arguments are passed on to `yaml.load`.
""" |
global is_initialized
if not is_initialized:
initialize()
if isinstance(stream, basestring):
string = stream
else:
string = '\n'.join(stream.readlines())
# processed_string = preprocess(string)
proxy_graph = yaml.load(string, **kwargs)
from . import init
init_dict = proxy_graph.get('init', {})
init(**init_dict)
if overrides is not None:
handle_overrides(proxy_graph, overrides)
return instantiate_all(proxy_graph) |
<SYSTEM_TASK:>
Convenience function for loading a YAML configuration from a file.
<END_TASK>
<USER_TASK:>
Description:
def load_path(path, overrides=None, **kwargs):
"""
Convenience function for loading a YAML configuration from a file.
Parameters
----------
path : str
The path to the file to load on disk.
overrides : dict, optional
A dictionary containing overrides to apply. The location of
the override is specified in the key as a dot-delimited path
to the desired parameter, e.g. "model.corruptor.corruption_level".
Returns
-------
graph : dict or object
The dictionary or object (if the top-level element specified a
Python object to instantiate).
Notes
-----
Other keyword arguments are passed on to `yaml.load`.
""" |
f = open(path, 'r')
content = ''.join(f.readlines())
f.close()
if not isinstance(content, str):
raise AssertionError("Expected content to be of type str but it is "+str(type(content)))
return load(content, **kwargs) |
<SYSTEM_TASK:>
Handle any overrides for this model configuration.
<END_TASK>
<USER_TASK:>
Description:
def handle_overrides(graph, overrides):
"""
Handle any overrides for this model configuration.
Parameters
----------
graph : dict or object
A dictionary (or an ObjectProxy) containing the object graph
loaded from a YAML file.
overrides : dict
A dictionary containing overrides to apply. The location of
the override is specified in the key as a dot-delimited path
to the desired parameter, e.g. "model.corruptor.corruption_level".
""" |
for key in overrides:
levels = key.split('.')
part = graph
for lvl in levels[:-1]:
try:
part = part[lvl]
except KeyError:
raise KeyError("'%s' override failed at '%s'" % (key, lvl))
try:
part[levels[-1]] = overrides[key]
except KeyError:
raise KeyError("'%s' override failed at '%s'" % (key, levels[-1])) |
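Illustrative call (the dotted key follows the convention from the docstring; the graph here is a plain nested dict):
graph = {'model': {'corruptor': {'corruption_level': 0.5}}}
handle_overrides(graph, {'model.corruptor.corruption_level': 0.1})
# graph['model']['corruptor']['corruption_level'] is now 0.1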
<SYSTEM_TASK:>
Instantiate all ObjectProxy objects in a nested hierarchy.
<END_TASK>
<USER_TASK:>
Description:
def instantiate_all(graph):
"""
Instantiate all ObjectProxy objects in a nested hierarchy.
Parameters
----------
graph : dict or object
A dictionary (or an ObjectProxy) containing the object graph
loaded from a YAML file.
Returns
-------
graph : dict or object
The dictionary or object resulting after the recursive instantiation.
""" |
def should_instantiate(obj):
classes = [ObjectProxy, dict, list]
return True in [isinstance(obj, cls) for cls in classes]
if not isinstance(graph, list):
for key in graph:
if should_instantiate(graph[key]):
graph[key] = instantiate_all(graph[key])
if hasattr(graph, 'keys'):
for key in graph.keys():
if should_instantiate(key):
new_key = instantiate_all(key)
graph[new_key] = graph[key]
del graph[key]
if isinstance(graph, ObjectProxy):
graph = graph.instantiate()
if isinstance(graph, list):
for i, elem in enumerate(graph):
if should_instantiate(elem):
graph[i] = instantiate_all(elem)
return graph |
<SYSTEM_TASK:>
Constructor function passed to PyYAML telling it how to construct
<END_TASK>
<USER_TASK:>
Description:
def multi_constructor(loader, tag_suffix, node):
"""
Constructor function passed to PyYAML telling it how to construct
objects from argument descriptions. See PyYAML documentation for
details on the call signature.
""" |
yaml_src = yaml.serialize(node)
mapping = loader.construct_mapping(node)
if '.' not in tag_suffix:
classname = tag_suffix
rval = ObjectProxy(classname, mapping, yaml_src)
else:
classname = try_to_import(tag_suffix)
rval = ObjectProxy(classname, mapping, yaml_src)
return rval |
<SYSTEM_TASK:>
Constructor function passed to PyYAML telling it how to load
<END_TASK>
<USER_TASK:>
Description:
def multi_constructor_pkl(loader, tag_suffix, node):
"""
Constructor function passed to PyYAML telling it how to load
objects from paths to .pkl files. See PyYAML documentation for
details on the call signature.
""" |
mapping = loader.construct_yaml_str(node)
if tag_suffix != "" and tag_suffix != u"":
raise AssertionError('Expected tag_suffix to be "" but it is "'+tag_suffix+'"')
rval = ObjectProxy(None, {}, yaml.serialize(node))
rval.instance = serial.load(mapping)
return rval |
<SYSTEM_TASK:>
Instantiate this object with the supplied parameters in `self.kwds`,
<END_TASK>
<USER_TASK:>
Description:
def instantiate(self):
"""
Instantiate this object with the supplied parameters in `self.kwds`,
or if already instantiated, return the cached instance.
""" |
if self.instance is None:
self.instance = checked_call(self.cls, self.kwds)
#endif
try:
self.instance.yaml_src = self.yaml_src
except AttributeError:
pass
return self.instance |
<SYSTEM_TASK:>
Propagate forward through the layer.
<END_TASK>
<USER_TASK:>
Description:
def feed_forward(self, input_data, prediction=False):
"""Propagate forward through the layer.
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
prediction : bool, optional
Whether to use prediction model. Only relevant when using
dropout. If true, then weights are multiplied by
1 - dropout if the layer uses dropout.
**Returns:**
activations : ``GPUArray``
The activations of the output units.
""" |
if input_data.shape[1] != self.W.shape[0]:
raise ValueError('Number of outputs from previous layer (%d) '
'does not match number of inputs to this layer (%d)' %
(input_data.shape[1], self.W.shape[0]))
activations = linalg.dot(input_data, self.W)
activations = add_vec_to_mat(activations, self.b, inplace=True)
return activations |
<SYSTEM_TASK:>
Return an item without validating the schema.
<END_TASK>
<USER_TASK:>
Description:
def get_no_validate(self, key):
"""Return an item without validating the schema.""" |
x, env = self.get_thunk_env(key)
# Check if this is a Thunk that needs to be lazily evaluated before we
# return it.
if isinstance(x, framework.Thunk):
x = framework.eval(x, env)
return x |
<SYSTEM_TASK:>
Return an environment that will look up in current_scope for keys in
<END_TASK>
<USER_TASK:>
Description:
def env(self, current_scope):
"""Return an environment that will look up in current_scope for keys in
this tuple, and the parent env otherwise.
""" |
return self.__env_cache.get(
current_scope.ident,
framework.Environment, current_scope,
names=self.keys(),
parent=framework.Environment({'self': current_scope}, parent=self.__parent_env)) |
<SYSTEM_TASK:>
Return the thunk AND environment for validating it in for the given key.
<END_TASK>
<USER_TASK:>
Description:
def get_thunk_env(self, k):
"""Return the thunk AND environment for validating it in for the given key.
There might be different envs in case the thunk comes from a different (composed) tuple. If the thunk needs its
environment bound on retrieval, that will be done here.
""" |
if k not in self.__items:
raise exceptions.EvaluationError('Unknown key: %r in tuple %r' % (k, self))
x = self.__items[k]
env = self.env(self)
# Bind this to the tuple's parent environment
if isinstance(x, framework.BindableThunk):
return x.bind(self.__parent_env), env
return x, env |
<SYSTEM_TASK:>
Return the evaluated schema expression from a subkey.
<END_TASK>
<USER_TASK:>
Description:
def get_schema_spec(self, key):
"""Return the evaluated schema expression from a subkey.""" |
member_node = self._ast_node.member.get(key, None)
if not member_node:
return schema.AnySchema()
s = framework.eval(member_node.member_schema, self.env(self))
if not isinstance(s, schema.Schema):
raise ValueError('Node %r with schema node %r should evaluate to Schema, got %r' % (member_node, member_node.member_schema, s))
return s |
<SYSTEM_TASK:>
Return the names of fields that are required according to the schema.
<END_TASK>
<USER_TASK:>
Description:
def get_required_fields(self):
"""Return the names of fields that are required according to the schema.""" |
return [m.name for m in self._ast_node.members if m.member_schema.required] |
<SYSTEM_TASK:>
Return the AST node for the given member, from the first tuple that serves it.
<END_TASK>
<USER_TASK:>
Description:
def get_member_node(self, key):
"""Return the AST node for the given member, from the first tuple that serves it.""" |
for tup, _ in self.lookups:
if key in tup:
return tup.get_member_node(key)
raise RuntimeError('Key not found in composite tuple: %r' % key) |
<SYSTEM_TASK:>
Return a list of keys that are exportable from this tuple.
<END_TASK>
<USER_TASK:>
Description:
def exportable_keys(self):
"""Return a list of keys that are exportable from this tuple.
Returns all keys that are not private in any of the tuples.
""" |
keys = collections.defaultdict(list)
for tup in self._tuples:
for key, private in tup._keys_and_privacy().items():
keys[key].append(private)
return [k for k, ps in keys.items() if not any(ps)] |
<SYSTEM_TASK:>
Check the call signature against a dictionary of proposed arguments,
<END_TASK>
<USER_TASK:>
Description:
def check_call_arguments(to_call, kwargs):
"""
Check the call signature against a dictionary of proposed arguments,
raising an informative exception in the case of mismatch.
Parameters
----------
to_call : class or callable
Function or class to examine (in the case of classes, the
constructor call signature is analyzed)
kwargs : dict
Dictionary mapping parameter names (including positional
arguments) to proposed values.
""" |
if 'self' in kwargs.keys():
raise TypeError("Your dictionary includes an entry for 'self', "
"which is just asking for trouble")
orig_to_call = getattr(to_call, '__name__', str(to_call))
if not isinstance(to_call, types.FunctionType):
if hasattr(to_call, '__init__'):
to_call = to_call.__init__
elif hasattr(to_call, '__call__'):
to_call = to_call.__call__
args, varargs, keywords, defaults = inspect.getargspec(to_call)
if any(not isinstance(arg, str) for arg in args):
raise TypeError('%s uses argument unpacking, which is deprecated and '
'unsupported by this pylearn2' % orig_to_call)
if varargs is not None:
raise TypeError('%s has a variable length argument list, but '
'this is not supported by config resolution' %
orig_to_call)
if keywords is None:
bad_keywords = [arg_name for arg_name in kwargs.keys()
if arg_name not in args]
if len(bad_keywords) > 0:
bad = ', '.join(bad_keywords)
args = [ arg for arg in args if arg != 'self' ]
if len(args) == 0:
matched_str = '(It does not support any keywords, actually)'
else:
matched = [ match(keyword, args) for keyword in bad_keywords ]
matched_str = 'Did you mean %s?' % (', '.join(matched))
raise TypeError('%s does not support the following '
'keywords: %s. %s' %
(orig_to_call, bad, matched_str))
if defaults is None:
num_defaults = 0
else:
num_defaults = len(defaults)
required = args[:len(args) - num_defaults]
missing = [arg for arg in required if arg not in kwargs]
if len(missing) > 0:
#iff the im_self (or __self__) field is present, this is a
# bound method, which has 'self' listed as an argument, but
# which should not be supplied by kwargs
is_bound = hasattr(to_call, 'im_self') or hasattr(to_call, '__self__')
if len(missing) > 1 or missing[0] != 'self' or not is_bound:
if 'self' in missing:
missing.remove('self')
missing = ', '.join([str(m) for m in missing])
raise TypeError('%s did not get these expected '
'arguments: %s' % (orig_to_call, missing)) |
<SYSTEM_TASK:>
Return label and probability estimated.
<END_TASK>
<USER_TASK:>
Description:
def predict(rf_model, features):
"""
Return label and probability estimated.
Parameters
----------
rf_model : sklearn.ensemble.RandomForestClassifier
The UPSILoN random forests model.
features : array_like
A list of features estimated by UPSILoN.
Returns
-------
label : str
A predicted label (i.e. class).
probability : float
Class probability.
flag : int
Classification flag.
""" |
import numpy as np
from upsilon.extract_features.feature_set import get_feature_set
feature_set = get_feature_set()
# Grab only necessary features.
cols = [feature for feature in features if feature in feature_set]
cols = sorted(cols)
filtered_features = []
for i in range(len(cols)):
filtered_features.append(features[cols[i]])
filtered_features = np.array(filtered_features).reshape(1, -1)
# Classify.
classes = rf_model.classes_
# Note that we're classifying a single source, so [0] needs to be added.
probabilities = rf_model.predict_proba(filtered_features)[0]
# Classification flag.
flag = 0
if features['period_SNR'] < 20. or is_period_alias(features['period']):
flag = 1
# Return class, probability, and flag.
max_index = np.where(probabilities == np.max(probabilities))
return classes[max_index][0], probabilities[max_index][0], flag |
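A hedged usage sketch. Loading the bundled classifier with upsilon.load_rf_model() is an assumed convention, and the feature values are placeholders; in practice the dictionary must contain every feature name returned by get_feature_set() so the reshaped array matches the model.

import upsilon

# Assumed API for loading the packaged random forests model.
rf_model = upsilon.load_rf_model()

# Placeholder feature dictionary; real values come from the UPSILoN
# feature-extraction step and include all features in get_feature_set().
features = {'period': 0.52, 'period_SNR': 35.0, 'amplitude': 0.45}

label, probability, flag = predict(rf_model, features)
print(label, probability, flag)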
<SYSTEM_TASK:>
Extract superdark data from ``DARKFILE`` or ``DRKCFILE``.
<END_TASK>
<USER_TASK:>
Description:
def extract_dark(prihdr, scihdu):
"""Extract superdark data from ``DARKFILE`` or ``DRKCFILE``.
Parameters
----------
prihdr : obj
FITS primary header HDU.
scihdu : obj
Extension HDU of the science image.
This is only used to extract subarray data.
Returns
-------
dark : ndarray or `None`
Superdark, if any. Subtract this to apply ``DARKCORR``.
""" |
if prihdr.get('PCTECORR', 'OMIT') == 'COMPLETE':
darkfile = prihdr.get('DRKCFILE', 'N/A')
else:
darkfile = prihdr.get('DARKFILE', 'N/A')
if darkfile == 'N/A':
return None
darkfile = from_irafpath(darkfile)
ampstring = prihdr['CCDAMP']
# Calculate DARKTIME
exptime = prihdr.get('EXPTIME', 0.0)
flashdur = prihdr.get('FLASHDUR', 0.0)
darktime = exptime + flashdur
if exptime > 0: # Not BIAS
darktime += 3.0
with fits.open(darkfile) as hdudark:
if ampstring == 'ABCD':
dark = np.concatenate(
(hdudark['sci', 1].data,
hdudark['sci', 2].data[::-1, :]), axis=1)
elif ampstring in ('A', 'B', 'AB'):
dark = extract_ref(scihdu, hdudark['sci', 2])
else:
dark = extract_ref(scihdu, hdudark['sci', 1])
dark = dark * darktime
return dark |
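A usage sketch, assuming a local ACS/WFC raw image; the filename is a placeholder and the superdark named in its DARKFILE/DRKCFILE keyword is assumed to be reachable (for example through the jref path that from_irafpath resolves).

from astropy.io import fits

with fits.open('jabc01abq_raw.fits') as pf:   # placeholder filename
    prihdr = pf[0].header
    scihdu = pf['sci', 1]
    dark = extract_dark(prihdr, scihdu)

if dark is not None:
    print('Superdark: mean={0:.3f}, max={1:.3f}'.format(dark.mean(), dark.max()))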
<SYSTEM_TASK:>
Extract postflash data from ``FLSHFILE``.
<END_TASK>
<USER_TASK:>
Description:
def extract_flash(prihdr, scihdu):
"""Extract postflash data from ``FLSHFILE``.
Parameters
----------
prihdr : obj
FITS primary header HDU.
scihdu : obj
Extension HDU of the science image.
This is only used to extract subarray data.
Returns
-------
flash : ndarray or `None`
Postflash, if any. Subtract this to apply ``FLSHCORR``.
""" |
flshfile = prihdr.get('FLSHFILE', 'N/A')
flashsta = prihdr.get('FLASHSTA', 'N/A')
flashdur = prihdr.get('FLASHDUR', 0.0)
if flshfile == 'N/A' or flashdur <= 0:
return None
if flashsta != 'SUCCESSFUL':
warnings.warn('Flash status is {0}'.format(flashsta),
AstropyUserWarning)
flshfile = from_irafpath(flshfile)
ampstring = prihdr['CCDAMP']
with fits.open(flshfile) as hduflash:
if ampstring == 'ABCD':
flash = np.concatenate(
(hduflash['sci', 1].data,
hduflash['sci', 2].data[::-1, :]), axis=1)
elif ampstring in ('A', 'B', 'AB'):
flash = extract_ref(scihdu, hduflash['sci', 2])
else:
flash = extract_ref(scihdu, hduflash['sci', 1])
flash = flash * flashdur
return flash |
<SYSTEM_TASK:>
Extract flatfield data from ``PFLTFILE``.
<END_TASK>
<USER_TASK:>
Description:
def extract_flatfield(prihdr, scihdu):
"""Extract flatfield data from ``PFLTFILE``.
Parameters
----------
prihdr : obj
FITS primary header HDU.
scihdu : obj
Extension HDU of the science image.
This is only used to extract subarray data.
Returns
-------
invflat : ndarray or `None`
Inverse flatfield, if any. Multiply this to apply ``FLATCORR``.
""" |
for ff in ['DFLTFILE', 'LFLTFILE']:
vv = prihdr.get(ff, 'N/A')
if vv != 'N/A':
warnings.warn('{0}={1} is not accounted for'.format(ff, vv),
AstropyUserWarning)
flatfile = prihdr.get('PFLTFILE', 'N/A')
if flatfile == 'N/A':
return None
flatfile = from_irafpath(flatfile)
ampstring = prihdr['CCDAMP']
with fits.open(flatfile) as hduflat:
if ampstring == 'ABCD':
invflat = np.concatenate(
(1 / hduflat['sci', 1].data,
1 / hduflat['sci', 2].data[::-1, :]), axis=1)
elif ampstring in ('A', 'B', 'AB'):
invflat = 1 / extract_ref(scihdu, hduflat['sci', 2])
else:
invflat = 1 / extract_ref(scihdu, hduflat['sci', 1])
return invflat |
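The three extractors above can be combined. Below is a rough sketch, reusing the same placeholder filename, of applying them to one science extension; it ignores bias/overscan handling, unit conversion, and the exact CALACS ordering, so treat it as an illustration rather than a substitute for the pipeline.

from astropy.io import fits

with fits.open('jabc01abq_raw.fits') as pf:   # placeholder filename
    prihdr = pf[0].header
    scihdu = pf['sci', 1]
    data = scihdu.data.astype('float64')

    dark = extract_dark(prihdr, scihdu)
    flash = extract_flash(prihdr, scihdu)
    invflat = extract_flatfield(prihdr, scihdu)

# Subtract superdark and postflash, then multiply by the inverse flat,
# skipping any component whose reference file was not defined.
if dark is not None:
    data = data - dark
if flash is not None:
    data = data - flash
if invflat is not None:
    data = data * invflat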
<SYSTEM_TASK:>
Extract section of the reference image that
<END_TASK>
<USER_TASK:>
Description:
def extract_ref(scihdu, refhdu):
"""Extract section of the reference image that
corresponds to the given science image.
This only returns a view, not a copy of the
reference image's array.
Parameters
----------
scihdu, refhdu : obj
Extension HDU's of the science and reference image,
respectively.
Returns
-------
refdata : array-like
Section of the relevant reference image.
Raises
------
NotImplementedError
Either science or reference data are binned.
ValueError
Extracted section size mismatch.
""" |
same_size, rx, ry, x0, y0 = find_line(scihdu, refhdu)
# Use the whole reference image
if same_size:
return refhdu.data
# Binned data
if rx != 1 or ry != 1:
raise NotImplementedError(
'Either science or reference data are binned')
# Extract a view of the sub-section
ny, nx = scihdu.data.shape
refdata = refhdu.data[y0:y0+ny, x0:x0+nx]
if refdata.shape != (ny, nx):
raise ValueError('Extracted reference image is {0} but science image '
'is {1}'.format(refdata.shape, (ny, nx)))
return refdata |
<SYSTEM_TASK:>
Obtain bin factors and corner location to extract
<END_TASK>
<USER_TASK:>
Description:
def find_line(scihdu, refhdu):
"""Obtain bin factors and corner location to extract
and bin the appropriate subset of a reference image to
match a science image.
If the science image has zero offset and is the same size and
binning as the reference image, ``same_size`` will be set to
`True`. Otherwise, the values of ``rx``, ``ry``, ``x0``, and
``y0`` will be assigned.
Normally the science image will be binned the same or more
than the reference image. In that case, ``rx`` and ``ry``
will be the bin size of the science image divided by the
bin size of the reference image.
If the binning of the reference image is greater than the
binning of the science image, the ratios (``rx`` and ``ry``)
of the bin sizes will be the reference image bin size divided by
the science image bin size. This is not necessarily an error.
.. note:: Translated from ``calacs/lib/findbin.c``.
Parameters
----------
scihdu, refhdu : obj
Extension HDU's of the science and reference image,
respectively.
Returns
-------
same_size : bool
`True` if zero offset and same size and binning.
rx, ry : int
Ratio of bin sizes.
x0, y0 : int
Location of start of subimage in reference image.
Raises
------
ValueError
Science and reference data size mismatch.
""" |
sci_bin, sci_corner = get_corner(scihdu.header)
ref_bin, ref_corner = get_corner(refhdu.header)
# We can use the reference image directly, without binning
# and without extracting a subset.
if (sci_corner[0] == ref_corner[0] and sci_corner[1] == ref_corner[1] and
sci_bin[0] == ref_bin[0] and sci_bin[1] == ref_bin[1] and
scihdu.data.shape[1] == refhdu.data.shape[1]):
same_size = True
rx = 1
ry = 1
x0 = 0
y0 = 0
# Reference image is binned more than the science image.
elif ref_bin[0] > sci_bin[0] or ref_bin[1] > sci_bin[1]:
same_size = False
rx = ref_bin[0] / sci_bin[0]
ry = ref_bin[1] / sci_bin[1]
x0 = (sci_corner[0] - ref_corner[0]) / ref_bin[0]
y0 = (sci_corner[1] - ref_corner[1]) / ref_bin[1]
# For subarray input images, whether they are binned or not.
else:
same_size = False
# Ratio of bin sizes.
ratiox = sci_bin[0] / ref_bin[0]
ratioy = sci_bin[1] / ref_bin[1]
if (ratiox * ref_bin[0] != sci_bin[0] or
ratioy * ref_bin[1] != sci_bin[1]):
raise ValueError('Science and reference data size mismatch')
# cshift is the offset in units of unbinned pixels.
# Divide by ref_bin to convert to units of pixels in the ref image.
cshift = (sci_corner[0] - ref_corner[0], sci_corner[1] - ref_corner[1])
xzero = cshift[0] / ref_bin[0]
yzero = cshift[1] / ref_bin[1]
if (xzero * ref_bin[0] != cshift[0] or
yzero * ref_bin[1] != cshift[1]):
warnings.warn('Subimage offset not divisible by bin size',
AstropyUserWarning)
rx = ratiox
ry = ratioy
x0 = xzero
y0 = yzero
# Ensure integer index
x0 = int(x0)
y0 = int(y0)
return same_size, rx, ry, x0, y0 |
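A worked example with in-memory HDUs (sizes and offsets invented for illustration): an unbinned 512x512 subarray whose origin sits 100 unbinned pixels from the reference-frame origin in X should come back with unit bin ratios and a simple corner offset.

import numpy as np
from astropy.io import fits

# Full-frame, unbinned reference image at the origin (LTV/LTM default to 0/1).
refhdu = fits.ImageHDU(data=np.zeros((2048, 4144), dtype=np.float32))

# Unbinned 512x512 subarray; LTV1 = -100 places its origin 100 pixels
# inside the reference frame.
scihdu = fits.ImageHDU(data=np.zeros((512, 512), dtype=np.float32))
scihdu.header['LTV1'] = -100.0

same_size, rx, ry, x0, y0 = find_line(scihdu, refhdu)
print(same_size, rx, ry, x0, y0)   # -> False, rx = ry = 1, x0 = 100, y0 = 0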
<SYSTEM_TASK:>
Obtain bin and corner information for a subarray.
<END_TASK>
<USER_TASK:>
Description:
def get_corner(hdr, rsize=1):
"""Obtain bin and corner information for a subarray.
``LTV1``, ``LTV2``, ``LTM1_1``, and ``LTM2_2`` keywords
are extracted from the given extension header and converted
to bin and corner values (0-indexed).
``LTV1`` for the CCD uses the beginning of the illuminated
portion as the origin, not the beginning of the overscan region.
Thus, the computed X-corner has the same origin as ``LTV1``,
which is what we want, but it differs from the ``CENTERA1``
header keyword, which has the beginning of the overscan region
as origin.
.. note:: Translated from ``calacs/lib/getcorner.c``.
Parameters
----------
hdr : obj
Extension header.
rsize : int, optional
Size of reference pixel in units of high-res pixels.
Returns
-------
bin : tuple of int
Pixel size in X and Y.
corner : tuple of int
Corner of subarray in X and Y.
""" |
ltm, ltv = get_lt(hdr)
return from_lt(rsize, ltm, ltv) |
<SYSTEM_TASK:>
Obtain the LTV and LTM keyword values.
<END_TASK>
<USER_TASK:>
Description:
def get_lt(hdr):
"""Obtain the LTV and LTM keyword values.
Note that this returns the values just as read from the header,
which means in particular that the LTV values are for one-indexed
pixel coordinates.
LTM keywords are the diagonal elements of MWCS linear
transformation matrix, while LTV's are MWCS linear transformation
vector (1-indexed).
.. note:: Translated from ``calacs/lib/getlt.c``.
Parameters
----------
hdr : obj
Extension header.
Returns
-------
ltm, ltv : tuple of float
``(LTM1_1, LTM2_2)`` and ``(LTV1, LTV2)``.
Values are ``(1, 1)`` and ``(0, 0)`` if not found,
to accommodate reference files with missing info.
Raises
------
ValueError
Invalid LTM* values.
""" |
ltm = (hdr.get('LTM1_1', 1.0), hdr.get('LTM2_2', 1.0))
if ltm[0] <= 0 or ltm[1] <= 0:
raise ValueError('(LTM1_1, LTM2_2) = {0} is invalid'.format(ltm))
ltv = (hdr.get('LTV1', 0.0), hdr.get('LTV2', 0.0))
return ltm, ltv |
<SYSTEM_TASK:>
Compute the corner location and pixel size in units
<END_TASK>
<USER_TASK:>
Description:
def from_lt(rsize, ltm, ltv):
"""Compute the corner location and pixel size in units
of unbinned pixels.
.. note:: Translated from ``calacs/lib/fromlt.c``.
Parameters
----------
rsize : int
Reference pixel size. Usually 1.
ltm, ltv : tuple of float
See :func:`get_lt`.
Returns
-------
bin : tuple of int
Pixel size in X and Y.
corner : tuple of int
Corner of subarray in X and Y.
""" |
dbinx = rsize / ltm[0]
dbiny = rsize / ltm[1]
dxcorner = (dbinx - rsize) - dbinx * ltv[0]
dycorner = (dbiny - rsize) - dbiny * ltv[1]
# Round off to the nearest integer.
bin = (_nint(dbinx), _nint(dbiny))
corner = (_nint(dxcorner), _nint(dycorner))
return bin, corner |
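A small worked example with invented keyword values: an unbinned readout whose origin sits 24 unbinned pixels inside the reference frame.

ltm = (1.0, 1.0)      # LTM1_1, LTM2_2: no binning
ltv = (-24.0, 0.0)    # LTV1, LTV2 (1-indexed MWCS offsets)
bin_size, corner = from_lt(1, ltm, ltv)
print(bin_size)   # (1, 1)
print(corner)     # (24, 0)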
<SYSTEM_TASK:>
Check if the supplied parameters are in the
<END_TASK>
<USER_TASK:>
Description:
def check_oscntab(oscntab, ccdamp, xsize, ysize, leading, trailing):
"""Check if the supplied parameters are in the
``OSCNTAB`` reference file.
.. note:: Even if an entry does not exist in ``OSCNTAB``,
as long as the subarray does not have any overscan,
it should not be a problem for CALACS.
.. note:: This function does not check the virtual bias rows.
Parameters
----------
oscntab : str
Path to the ``OSCNTAB`` reference file being checked against.
ccdamp : str
Amplifier(s) used to read out the CCDs.
xsize : int
Number of columns in the readout.
ysize : int
Number of rows in the readout.
leading : int
Number of columns in the bias section ("TRIMX1" to be trimmed off
by ``BLEVCORR``) on the A/C amplifiers side of the CCDs.
trailing : int
Number of columns in the bias section ("TRIMX2" to be trimmed off
by ``BLEVCORR``) on the B/D amplifiers side of the CCDs.
Returns
-------
supported : bool
Result of test if input parameters are in ``OSCNTAB``.
""" |
tab = Table.read(oscntab)
ccdamp = ccdamp.lower().rstrip()
for row in tab:
if (row['CCDAMP'].lower().rstrip() in ccdamp and
row['NX'] == xsize and row['NY'] == ysize and
row['TRIMX1'] == leading and row['TRIMX2'] == trailing):
return True
return False |
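A usage sketch; the OSCNTAB filename below is a placeholder (in practice it comes from the OSCNTAB keyword of the primary header, resolved with from_irafpath), and the subarray geometry is invented.

oscntab = 'xa81715gj_osc.fits'   # placeholder OSCNTAB reference file
supported = check_oscntab(oscntab, 'A', 1024, 1024, 19, 0)
print(supported)   # True only if a matching row exists in the table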
<SYSTEM_TASK:>
Check image for bias columns.
<END_TASK>
<USER_TASK:>
Description:
def check_overscan(xstart, xsize, total_prescan_pixels=24,
total_science_pixels=4096):
"""Check image for bias columns.
Parameters
----------
xstart : int
Starting column of the readout in detector coordinates.
xsize : int
Number of columns in the readout.
total_prescan_pixels : int
Total prescan pixels for a single amplifier on a detector.
Default is 24 for WFC.
total_science_pixels : int
Total science pixels across a detector.
Default is 4096 for WFC (across two amplifiers).
Returns
-------
hasoverscan : bool
Indication if there are bias columns in the image.
leading : int
Number of bias columns on the A/C amplifiers
side of the CCDs ("TRIMX1" in ``OSCNTAB``).
trailing : int
Number of bias columns on the B/D amplifiers
side of the CCDs ("TRIMX2" in ``OSCNTAB``).
""" |
hasoverscan = False
leading = 0
trailing = 0
if xstart < total_prescan_pixels:
hasoverscan = True
leading = abs(xstart - total_prescan_pixels)
if (xstart + xsize) > total_science_pixels:
hasoverscan = True
trailing = abs(total_science_pixels -
(xstart + xsize - total_prescan_pixels))
return hasoverscan, leading, trailing |
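Two worked examples using the default WFC geometry; the readout sizes are invented for illustration.

# Subarray starting at detector column 0: it picks up the 24 physical prescan
# columns on the A/C side but has no trailing bias columns.
print(check_overscan(0, 2072))     # (True, 24, 0)

# Subarray fully inside the imaging area: no bias columns at all.
print(check_overscan(1000, 512))   # (False, 0, 0)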
<SYSTEM_TASK:>
Ignore unnecessary actions for static file requests, posts, or ajax
<END_TASK>
<USER_TASK:>
Description:
def process_request(self, request):
"""
Ignore unnecessary actions for static file requests, posts, or ajax
requests. We're only interested in redirecting following a 'natural'
request redirection to the `wagtailadmin_explore_root` or
`wagtailadmin_explore` views.
""" |
referer_url = request.META.get('HTTP_REFERER')
return_to_index_url = request.session.get('return_to_index_url')
try:
if all((
return_to_index_url,
referer_url,
request.method == 'GET',
not request.is_ajax(),
resolve(request.path).url_name in ('wagtailadmin_explore_root',
'wagtailadmin_explore'),
)):
perform_redirection = False
referer_match = resolve(urlparse(referer_url).path)
if all((
referer_match.namespace == 'wagtailadmin_pages',
referer_match.url_name in (
'add',
'edit',
'delete',
'unpublish',
'copy'
),
)):
perform_redirection = True
elif all((
not referer_match.namespace,
referer_match.url_name in (
'wagtailadmin_pages_create',
'wagtailadmin_pages_edit',
'wagtailadmin_pages_delete',
'wagtailadmin_pages_unpublish'
),
)):
perform_redirection = True
if perform_redirection:
del request.session['return_to_index_url']
return HttpResponseRedirect(return_to_index_url)
except Resolver404:
pass
return None |
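A settings sketch showing how a middleware exposing this process_request hook would typically be enabled in a Wagtail project. The dotted path is a placeholder for wherever the class actually lives, and on newer Django versions the class is assumed to subclass django.utils.deprecation.MiddlewareMixin so this old-style hook still runs.

# settings.py (sketch)
MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # ... the rest of the usual Django/Wagtail middleware ...
    'myproject.middleware.ReturnToIndexMiddleware',   # placeholder dotted path
]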