text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Given a minified location, this tries to locate the closest
<END_TASK>
<USER_TASK:>
Description:
def lookup_token(self, line, col):
    """Find the token closest to a minified (line, col) location.

    Returns the converted token on a hit, or ``None`` when the
    coordinates underflow or no token matches.
    """
    # Negative coordinates can never match; ignore them quietly.
    if line < 0 or col < 0:
        return None
    result = _ffi.new('lsm_token_t *')
    found = rustcall(_lib.lsm_view_lookup_token, self._get_ptr(),
                     line, col, result)
    if found:
        return convert_token(result[0])
<SYSTEM_TASK:>
Given a token location and a minified function name and the
<END_TASK>
<USER_TASK:>
Description:
def get_original_function_name(self, line, col, minified_name,
                               minified_source):
    """Resolve the original name of a minified function.

    Given a token location plus the minified function name and the
    minified source file, returns the original function name if it
    can be found in scope, otherwise ``None``.
    """
    # Negative coordinates cannot resolve to anything; ignore quietly.
    if line < 0 or col < 0:
        return None
    encoded_name = minified_name.encode('utf-8')
    name_out = _ffi.new('const char **')
    try:
        length = rustcall(_lib.lsm_view_get_original_function_name,
                          self._get_ptr(), line, col, encoded_name,
                          minified_source, name_out)
        if length > 0:
            return _ffi.unpack(name_out[0], length).decode('utf-8', 'replace')
    except SourceMapError:
        # The underlying rust-sourcemap library is/was known to panic in
        # rare cases (slicing out of range on older rust versions); we
        # swallow the error rather than report it upwards.
        pass
<SYSTEM_TASK:>
Given a source ID this returns the embedded sourcecode if there
<END_TASK>
<USER_TASK:>
Description:
def get_source_contents(self, src_id):
    """Return the embedded sourcecode for ``src_id``, if there is any.

    The sourcecode is returned as UTF-8 bytes for more efficient
    processing; ``None`` is returned when no embedded source exists.
    """
    length_out = _ffi.new('unsigned int *')
    needs_free = _ffi.new('int *')
    buf = rustcall(_lib.lsm_view_get_source_contents,
                   self._get_ptr(), src_id, length_out, needs_free)
    if not buf:
        return None
    try:
        return _ffi.unpack(buf, length_out[0])
    finally:
        # The library reports whether it allocated the buffer for us.
        if needs_free[0]:
            _lib.lsm_buffer_free(buf)
<SYSTEM_TASK:>
Returns the name of the given source.
<END_TASK>
<USER_TASK:>
Description:
def get_source_name(self, src_id):
    """Return the name of the given source, or ``None`` if unknown."""
    size_out = _ffi.new('unsigned int *')
    raw = rustcall(_lib.lsm_view_get_source_name,
                   self._get_ptr(), src_id, size_out)
    return decode_rust_str(raw, size_out[0]) if raw else None
<SYSTEM_TASK:>
Iterates over all source names and IDs.
<END_TASK>
<USER_TASK:>
Description:
def iter_sources(self):
    """Iterate over ``(src_id, source_name)`` pairs for all sources."""
    # ``range`` instead of the Python-2-only ``xrange`` so this works
    # on Python 3 as well (the codebase targets both interpreters).
    for src_id in range(self.get_source_count()):
        yield src_id, self.get_source_name(src_id)
<SYSTEM_TASK:>
Converts the index into a view
<END_TASK>
<USER_TASK:>
Description:
def into_view(self):
    """Convert the index into a view, consuming this index."""
    try:
        ptr = rustcall(_lib.lsm_index_into_view, self._get_ptr())
        return View._from_ptr(ptr)
    finally:
        # The native index is consumed by the call; drop our pointer so
        # it cannot be used (or freed) twice.
        self._ptr = None
<SYSTEM_TASK:>
Apply the given mapping to ``data``, recursively. The return type
<END_TASK>
<USER_TASK:>
Description:
def apply(self, data):
""" Apply the given mapping to ``data``, recursively. The return type
is a tuple of a boolean and the resulting data element. The boolean
indicates whether any values were mapped in the child nodes of the
mapping. It is used to skip optional branches of the object graph. """ |
# Object node: assemble a dict from the results of the child mappings.
if self.visitor.is_object:
obj = {}
# The root object carries the schema reference.
if self.visitor.parent is None:
obj['$schema'] = self.visitor.path
obj_empty = True
for child in self.children:
empty, value = child.apply(data)
# Optional children that produced nothing are skipped entirely.
if empty and child.optional:
continue
# Any non-empty child marks this object as non-empty.
obj_empty = False if not empty else obj_empty
# Repeated array-valued names are merged rather than overwritten.
if child.visitor.name in obj and child.visitor.is_array:
obj[child.visitor.name].extend(value)
else:
obj[child.visitor.name] = value
return obj_empty, obj
elif self.visitor.is_array:
# NOTE(review): here ``self.children`` is used as a single node
# (``.apply``) while the object branch iterates it as a list —
# confirm array nodes store a single child mapping.
empty, value = self.children.apply(data)
return empty, [value]
elif self.visitor.is_value:
# Leaf node: extract a scalar value for this visitor.
return extract_value(self.mapping, self.visitor, data) |
<SYSTEM_TASK:>
Translate text, returns the modified text.
<END_TASK>
<USER_TASK:>
Description:
def translate(self, text):
    """Translate ``text`` and return the modified string."""
    # Start each run with a fresh substitution counter.
    self.count = 0
    # The instance itself serves as the substitution callback for sub().
    pattern = self._make_regex()
    return pattern.sub(self, text)
<SYSTEM_TASK:>
Cluster the embedded coordinates using spectral clustering
<END_TASK>
<USER_TASK:>
Description:
def cluster(self, n, embed_dim=None, algo=spectral.SPECTRAL, method=methods.KMEANS):
"""
Cluster the embedded coordinates using spectral clustering
Parameters
----------
n: int
The number of clusters to return
embed_dim: int
The dimensionality of the underlying coordinates
Defaults to same value as n
algo: enum value (spectral.SPECTRAL | spectral.KPCA | spectral.ZELNIKMANOR)
Type of embedding to use
method: enum value (methods.KMEANS | methods.GMM)
The clustering method to use
Returns
-------
Partition: Partition object describing the data partition
""" |
# Trivial case: a single cluster containing every point.
if n == 1:
return Partition([1] * len(self.get_dm(False)))
# Embedding dimensionality defaults to the number of clusters.
if embed_dim is None:
embed_dim = n
# Select the embedding algorithm and cache the coordinates.
if algo == spectral.SPECTRAL:
self._coords = self.spectral_embedding(embed_dim)
elif algo == spectral.KPCA:
self._coords = self.kpca_embedding(embed_dim)
elif algo == spectral.ZELNIKMANOR:
self._coords = self.spectral_embedding_(embed_dim)
else:
raise OptionError(algo, list(spectral.reverse.values()))
# Cluster the embedded coordinates with the requested method.
if method == methods.KMEANS:
p = self.kmeans(n, self._coords.df.values)
elif method == methods.GMM:
p = self.gmm(n, self._coords.df.values)
elif method == methods.WARD:
# NOTE(review): WARD uses ``self._coords.values`` while KMEANS/GMM
# use ``self._coords.df.values`` — confirm the asymmetry is intended.
linkmat = fastcluster.linkage(self._coords.values, 'ward')
p = _hclust(linkmat, n)
else:
raise OptionError(method, list(methods.reverse.values()))
if self._verbosity > 0:
print('Using clustering method: {}'.format(methods.reverse[method]))
return p |
<SYSTEM_TASK:>
Embed the points using spectral decomposition of the laplacian of
<END_TASK>
<USER_TASK:>
Description:
def spectral_embedding(self, n):
    """
    Embed the points in ``n`` dimensions via spectral decomposition of
    the laplacian of the affinity matrix.

    Parameters
    ----------
    n: int
        The number of dimensions
    """
    # Calls the module-level spectral_embedding helper (same name).
    embedded = spectral_embedding(self._affinity, n)
    return CoordinateMatrix(normalise_rows(embedded))
<SYSTEM_TASK:>
Embed the points using kernel PCA of the affinity matrix
<END_TASK>
<USER_TASK:>
Description:
def kpca_embedding(self, n):
    """
    Embed the points in ``n`` dimensions using kernel PCA of the
    affinity matrix.

    Parameters
    ----------
    n: int
        The number of dimensions
    """
    return self.dm.embedding(n, 'kpca', affinity_matrix=self._affinity)
<SYSTEM_TASK:>
Cluster the embedded coordinates using multidimensional scaling
<END_TASK>
<USER_TASK:>
Description:
def cluster(self, n, embed_dim=None, algo=mds.CLASSICAL, method=methods.KMEANS):
"""
Cluster the embedded coordinates using multidimensional scaling
Parameters
----------
n: int
The number of clusters to return
embed_dim int
The dimensionality of the underlying coordinates
Defaults to same value as n
algo: enum value (mds.CLASSICAL | mds.METRIC)
Type of multidimensional scaling to use for the embedding
method: enum value (methods.KMEANS | methods.GMM)
The clustering method to use
Returns
-------
Partition: Partition object describing the data partition
""" |
# Trivial case: a single cluster containing every point.
if n == 1:
return Partition([1] * len(self.get_dm(False)))
# Embedding dimensionality defaults to the number of clusters.
if embed_dim is None:
embed_dim = n
# Choose classical or metric MDS for the embedding.
if algo == mds.CLASSICAL:
self._coords = self.dm.embedding(embed_dim, 'cmds')
elif algo == mds.METRIC:
self._coords = self.dm.embedding(embed_dim, 'mmds')
else:
raise OptionError(algo, list(mds.reverse.values()))
# Cluster the embedded coordinates with the requested method.
if method == methods.KMEANS:
p = self.kmeans(n, self._coords.values)
elif method == methods.GMM:
p = self.gmm(n, self._coords.values)
elif method == methods.WARD:
linkmat = fastcluster.linkage(self._coords.values, 'ward')
p = _hclust(linkmat, n)
else:
raise OptionError(method, list(methods.reverse.values()))
#if self._verbosity > 0:
#    print('Using clustering method: {}'.format(methods.reverse[method]))
return p |
<SYSTEM_TASK:>
Start a thread logging output from pipe
<END_TASK>
<USER_TASK:>
Description:
def _log_thread(self, pipe, queue):
"""
Start a thread logging output from pipe
""" |
# thread function to log subprocess output (LOG is a queue)
def enqueue_output(out, q):
for line in iter(out.readline, b''):
q.put(line.rstrip())
out.close()
# start thread
t = threading.Thread(target=enqueue_output,
args=(pipe, queue))
t.daemon = True # thread dies with the program
t.start()
self.threads.append(t) |
<SYSTEM_TASK:>
Search for file give in "executable". If it is not found, we try the environment PATH.
<END_TASK>
<USER_TASK:>
Description:
def _search_for_executable(self, executable):
"""
Search for file give in "executable". If it is not found, we try the environment PATH.
Returns either the absolute path to the found executable, or None if the executable
couldn't be found.
""" |
if os.path.isfile(executable):
return os.path.abspath(executable)
else:
envpath = os.getenv('PATH')
if envpath is None:
return
for path in envpath.split(os.pathsep):
exe = os.path.join(path, executable)
if os.path.isfile(exe):
return os.path.abspath(exe) |
<SYSTEM_TASK:>
Template for Tika app commands
<END_TASK>
<USER_TASK:>
Description:
def _command_template(self, switches, objectInput=None):
    """Run the Tika app jar with the given switches.

    Args:
        switches (list): list of switches to Tika app Jar
        objectInput (object): file object/standard input to analyze

    Return:
        Standard output data (unicode Python 2, str Python 3)
    """
    command = ["java", "-jar", self.file_jar, "-eUTF-8"]
    if self.memory_allocation:
        command.append("-Xmx{}".format(self.memory_allocation))
    command.extend(switches)

    stdin = objectInput if objectInput else subprocess.PIPE
    log.debug("Subprocess command: {}".format(", ".join(command)))

    if six.PY2:
        # Python 2 has no subprocess.DEVNULL; route stderr to os.devnull.
        with open(os.devnull, "w") as devnull:
            proc = subprocess.Popen(command,
                                    stdin=stdin,
                                    stdout=subprocess.PIPE,
                                    stderr=devnull)
    else:
        proc = subprocess.Popen(command,
                                stdin=stdin,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.DEVNULL)
    stdoutdata, _ = proc.communicate()
    return stdoutdata.decode("utf-8").strip()
<SYSTEM_TASK:>
Return the content type of passed file or payload.
<END_TASK>
<USER_TASK:>
Description:
def detect_content_type(self, path=None, payload=None, objectInput=None):
    """
    Return the content type of the passed file or payload.

    Args:
        path (string): Path of file to analyze
        payload (string): Payload base64 to analyze
        objectInput (object): file object/standard input to analyze

    Returns:
        tuple of (content type string, given path, analyzed file path)
    """
    # Content-type detection from stdin is unreliable, so refuse it.
    if objectInput:
        message = "Detection content type with file object is not stable."
        log.exception(message)
        raise TikaAppError(message)

    f = file_path(path, payload, objectInput)
    result = self._command_template(["-d", f]).lower()
    return result, path, f
<SYSTEM_TASK:>
Return only the text content of passed file.
<END_TASK>
<USER_TASK:>
Description:
def extract_only_content(self, path=None, payload=None, objectInput=None):
    """
    Return only the text content of the passed file.

    These parameters are in OR. Only one of them can be analyzed.

    Args:
        path (string): Path of file to analyze
        payload (string): Payload base64 to analyze
        objectInput (object): file object/standard input to analyze

    Returns:
        text of file passed (string)
    """
    if objectInput:
        # Streamed input: nothing was written to disk, so there is no
        # temp path to report (and nothing for @clean to remove).
        result = self._command_template(["-t"], objectInput)
        return result, True, None

    f = file_path(path, payload)
    result = self._command_template(["-t", f])
    return result, path, f
<SYSTEM_TASK:>
This function returns a JSON of all contents and
<END_TASK>
<USER_TASK:>
Description:
def extract_all_content(
    self,
    path=None,
    payload=None,
    objectInput=None,
    pretty_print=False,
    convert_to_obj=False,
):
    """
    Return a JSON of all contents and metadata of the passed file.

    Args:
        path (string): Path of file to analyze
        payload (string): Payload base64 to analyze
        objectInput (object): file object/standard input to analyze
        pretty_print (boolean): If True adds newlines and whitespace,
            for better readability
        convert_to_obj (boolean): If True convert JSON in object
    """
    f = file_path(path, payload, objectInput)
    switches = ["-J", "-t"]
    if pretty_print:
        switches.append("-r")
    switches.append(f)
    result = self._command_template(switches)
    if result and convert_to_obj:
        # json.loads always treats its input as text/UTF-8; the
        # ``encoding`` keyword was a no-op and was removed in
        # Python 3.9, so passing it would raise a TypeError there.
        result = json.loads(result)
    return result, path, f
<SYSTEM_TASK:>
This decorator removes the temp file from disk. This is the case where
<END_TASK>
<USER_TASK:>
Description:
def clean(func):
    """
    Decorator that removes the temp file written for payload analysis.

    The wrapped function must return a tuple of (command output, path
    given on the command line, temp-file path used for the payload);
    only the output is passed through to the caller. The temp file is
    deleted only when no explicit path was given.
    """
    def wrapper(*args, **kwargs):
        out, given_path, path = func(*args, **kwargs)
        if not given_path:
            try:
                os.remove(path)
            except OSError:
                # Best effort: the temp file may already be gone.
                pass
        return out
    return wrapper
<SYSTEM_TASK:>
Given a file path, payload or file object, it writes file on disk and
<END_TASK>
<USER_TASK:>
Description:
def file_path(path=None, payload=None, objectInput=None):
    """
    Resolve the input to analyze to a path on disk.

    If ``path`` is given it is used directly; otherwise the payload or
    file object is written to a temp file via ``write_payload``.

    Args:
        path (string): path of real file
        payload(string): payload in base64 of file
        objectInput (object): file object/standard input to analyze

    Returns:
        Path of file
    """
    resolved = path or write_payload(payload, objectInput)
    if not os.path.exists(resolved):
        msg = "File {!r} does not exist".format(resolved)
        log.exception(msg)
        raise TikaAppFilePathError(msg)
    return resolved
<SYSTEM_TASK:>
This function writes a base64 payload or file object on disk.
<END_TASK>
<USER_TASK:>
Description:
def write_payload(payload=None, objectInput=None):
    """
    Write a base64 payload or a file object to a temp file on disk.

    Args:
        payload (string): payload in base64
        objectInput (object): file object/standard input to analyze

    Returns:
        Path of file
    """
    temp = tempfile.mkstemp()[1]
    log.debug("Write payload in temp file {!r}".format(temp))
    with open(temp, 'wb') as f:
        if payload:
            payload = base64.b64decode(payload)
        elif objectInput:
            # Read raw bytes; Python 3 exposes them on stdin via .buffer.
            if six.PY3:
                payload = objectInput.buffer.read()
            else:
                payload = objectInput.read()
        f.write(payload)
    return temp
<SYSTEM_TASK:>
Try to get a unique ID from the object. By default, this will be
<END_TASK>
<USER_TASK:>
Description:
def get_subject(self, data):
    """ Try to get a unique ID from the object. By default this is the
    'id' field of any given object, or the field named by the
    'rdfSubject' property. A random UUID URN is generated as a last
    resort. """
    if not isinstance(data, Mapping):
        return None
    subject_value = data.get(self.subject)
    if subject_value:
        return subject_value
    return uuid.uuid4().urn
<SYSTEM_TASK:>
Recursively generate statements from the data supplied.
<END_TASK>
<USER_TASK:>
Description:
def triplify(self, data, parent=None):
    """ Recursively generate statements from the data supplied. """
    if data is None:
        return
    if self.is_object:
        # Delegate object handling (subject generation, links, props).
        for statement in self._triplify_object(data, parent):
            yield statement
    elif self.is_array:
        # Each array element is triplified against the same parent.
        for element in data:
            for statement in self.items.triplify(element, parent):
                yield statement
    else:
        # Leaf value: stringify according to its detected type.
        # TODO: figure out if I ever want to check for reverse here.
        type_name = typecast.name(data)
        obj = typecast.stringify(type_name, data)
        if obj is not None:
            obj = obj.strip()
        yield (parent, self.predicate, obj, type_name)
<SYSTEM_TASK:>
Create bi-directional statements for object relationships.
<END_TASK>
<USER_TASK:>
Description:
def _triplify_object(self, data, parent):
""" Create bi-directional statements for object relationships. """ |
# Determine (or mint) the subject identifier for this object.
subject = self.get_subject(data)
# Emit a schema/type statement for schema-bearing nodes.
if self.path:
yield (subject, TYPE_SCHEMA, self.path, TYPE_SCHEMA)
# Link the object to its parent (forward, and reverse if configured).
if parent is not None:
yield (parent, self.predicate, subject, TYPE_LINK)
if self.reverse is not None:
yield (subject, self.reverse, parent, TYPE_LINK)
# Recurse into each property, using this object as the new parent.
for prop in self.properties:
for res in prop.triplify(data.get(prop.name), subject):
yield res |
<SYSTEM_TASK:>
Raxml provides an option to fit model params to a tree,
<END_TASK>
<USER_TASK:>
Description:
def _dash_f_e_to_dict(self, info_filename, tree_filename):
    """
    Parse the output of a raxml ``-f e`` run (fitting model params to
    a fixed tree), whose format differs from regular raxml output.
    """
    with open(info_filename) as fl:
        models, likelihood, partition_params = \
            self._dash_f_e_parser.parseFile(fl).asList()
    with open(tree_filename) as fl:
        tree = fl.read()

    result = {'likelihood': likelihood, 'ml_tree': tree, 'partitions': {}}
    for model, params in zip(models, partition_params):
        index, name, _, alpha, rates, freqs = params
        result['partitions'][index] = {
            'alpha': alpha,
            'name': name,
            'rates': rates,
            'frequencies': freqs,
            'model': model,
        }
    return result
<SYSTEM_TASK:>
Parse raxml output and return a dict
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self, info_filename, tree_filename, dash_f_e=False):
    """
    Parse raxml output and return a dict.

    Option dash_f_e=True will parse the output of a raxml -f e run,
    which has different output.
    """
    logger.debug('info_filename: {} {}'
                 .format(info_filename, '(FOUND)' if os.path.exists(info_filename) else '(NOT FOUND)'))
    logger.debug('tree_filename: {} {}'
                 .format(tree_filename, '(FOUND)' if os.path.exists(tree_filename) else '(NOT FOUND)'))
    # Dispatch to the parser matching the raxml invocation style.
    parser = self._dash_f_e_to_dict if dash_f_e else self._to_dict
    return parser(info_filename, tree_filename)
<SYSTEM_TASK:>
Returns a filereader object that can handle gzipped input
<END_TASK>
<USER_TASK:>
Description:
def freader(filename, gz=False, bz=False):
    """ Return a binary file reader, transparently handling gzip or
    bzip2 compressed input (detected by extension or by flag). """
    filecheck(filename)
    if filename.endswith('.gz'):
        gz = True
    elif filename.endswith('.bz2'):
        bz = True
    if gz:
        opener = gzip.open
    elif bz:
        opener = bz2.BZ2File
    else:
        opener = io.open
    return opener(filename, 'rb')
<SYSTEM_TASK:>
Returns a filewriter object that can write plain or gzipped output.
<END_TASK>
<USER_TASK:>
Description:
def fwriter(filename, gz=False, bz=False):
    """ Return a filewriter object that can write plain or compressed
    output. If gzip or bzip2 compression is asked for, the usual
    filename extension is appended when missing. """
    if filename.endswith('.gz'):
        gz = True
    elif filename.endswith('.bz2'):
        bz = True
    if gz:
        if not filename.endswith('.gz'):
            filename += '.gz'
        return gzip.open(filename, 'wb')
    if bz:
        if not filename.endswith('.bz2'):
            filename += '.bz2'
        return bz2.BZ2File(filename, 'w')
    return open(filename, 'w')
<SYSTEM_TASK:>
Returns files matched by all extensions in the extensions list
<END_TASK>
<USER_TASK:>
Description:
def glob_by_extensions(directory, extensions):
    """ Return files in ``directory`` matched by any extension in the
    extensions list. """
    directorycheck(directory)
    matches = []
    for extension in extensions:
        matches.extend(glob.glob('{0}/*.{1}'.format(directory, extension)))
    return matches
<SYSTEM_TASK:>
prints the top `n` lines of a file
<END_TASK>
<USER_TASK:>
Description:
def head(filename, n=10):
    """ Print the top `n` lines of a file. """
    with freader(filename) as reader:
        for _ in range(n):
            print(reader.readline().strip())
<SYSTEM_TASK:>
A constrained and approximation of ILS using nearest-neighbour interchange
<END_TASK>
<USER_TASK:>
Description:
def ils(self, node, sorting_times=None, force_topology_change=True):
"""
A constrained and approximation of ILS using nearest-neighbour interchange
Process
-------
A node with at least three descendents is selected from an ultrametric tree
(node '2', below)
---0--... ---0--... ---0--...
| | | | --1-- |
| R --1-- R | | R
age | | | -2- |
^ | | | | | |
| --1-- -2- | | | |
| | | or | | | or | | |
| | | | | | | | |
| -2- | | | | | | |
| | | | | | | | | |
| A B C C B A A C B
Nodes 'A', 'B' and 'C' are rearranged into one of the three configurations
[(A, B), C], [A, (B, C)], [(A, C), B]
Nodes 1 and 2 are slid further up the tree, but no further than node 0
(this is why it's a constrained version), by an amount drawn from a
truncated exponential distribution.
This is approximately corresponds to the case where A and B failed to
coalesce in the branch 1->2, so they coalesce with C in the branch
0 -> 1 instead
""" |
# node = '2', par = '1', gpar = '0' -- in above diagram
n_2 = node
n_1 = n_2.parent_node
if n_1 == self.tree._tree.seed_node:
# NOTE(review): logger.warn is a deprecated alias of logger.warning.
logger.warn('Node 1 is the root - calling again on child')
# NOTE(review): this passes the *list* from child_nodes() where a
# single node is expected, and there is no ``return`` afterwards,
# so execution falls through with n_1 as the root (making n_0 None
# below) — confirm a ``return self.ils(<child node>)`` was intended.
self.ils(n_2.child_nodes())
n_0 = n_1.parent_node
a, b = node.child_nodes()
c, = node.sister_nodes()
# Record ages before any rearrangement: [a, b, c, node2, node1, node0].
ages = [a.age, b.age, c.age, n_2.age, n_1.age, n_0.age]
# Do topology changes
if force_topology_change:
swap_mode = random.choice([1, 2])
else:
swap_mode = random.choice([0, 1, 2])
if swap_mode == 1:
# Exchange 'a' and 'c'
n_2.remove_child(a)
n_1.remove_child(c)
n_2.add_child(c)
n_1.add_child(a)
elif swap_mode == 2:
# Exchange 'b' and 'c'
n_2.remove_child(b)
n_1.remove_child(c)
n_2.add_child(c)
n_1.add_child(b)
# Do branch length adjustments
# Bounds - between node 0 (upper) and node 1 (lower)
min_unsorted_age = n_1.age
max_unsorted_age = n_0.age
if sorting_times is None:
sorting_times = truncated_exponential(max_unsorted_age-min_unsorted_age,
scale=0.1*(max_unsorted_age-min_unsorted_age),
sample_size=2) # E(t) = n(n-1)/2, n = 3
sorting_times += min_unsorted_age
# NOTE(review): this assignment discards the sorting times drawn just
# above (and any caller-supplied ``sorting_times``) — confirm it is not
# leftover debugging code.
sorting_times = np.array([min_unsorted_age, ages[3]])
# Adjust node 1 edge length
new_n1_age = max(sorting_times)
prev_age = ages[4]
slide = (new_n1_age - prev_age)
# Treat sub-epsilon slides as zero to avoid numerical drift.
if slide < 1e-6:
slide = 0
new_n1_age = prev_age
n_1.edge.length -= slide
n_2.edge.length += slide
# Adjust node 2 edge length
new_n2_age = min(sorting_times)
prev_age = ages[3]
slide = (new_n2_age - prev_age)
if slide < 1e-6:
slide = 0
new_n2_age = prev_age
n_2.edge.length -= slide
# Adjust a, b and c edge lengths
if swap_mode == 0:
a.edge.length = (new_n2_age - ages[0])
b.edge.length = (new_n2_age - ages[1])
c.edge.length = (new_n1_age - ages[2])
elif swap_mode == 1:
a.edge.length = (new_n1_age - ages[0])
b.edge.length = (new_n2_age - ages[1])
c.edge.length = (new_n2_age - ages[2])
else:
a.edge.length = (new_n2_age - ages[0])
b.edge.length = (new_n1_age - ages[1])
c.edge.length = (new_n2_age - ages[2])
# used to be .reindex_taxa() before dendropy 4.
# migrate_taxon_namespace is recommended migrated function,
# but not sure if its even needed anymore.
self.tree._tree.migrate_taxon_namespace(self.tree._tree.taxon_namespace)
self.tree._tree.encode_bipartitions()
self._validate()
logger.debug(self.tree) |
<SYSTEM_TASK:>
Returns a list of the internal edges of the tree.
<END_TASK>
<USER_TASK:>
Description:
def get_inner_edges(self):
    """ Return a list of the internal edges of the tree. """
    return [edge for edge in self._tree.preorder_edge_iter()
            if edge.is_internal() and edge.head_node and edge.tail_node]
<SYSTEM_TASK:>
Returns the intersection of the taxon sets of two Trees
<END_TASK>
<USER_TASK:>
Description:
def intersection(self, other):
    """ Return the intersection of the taxon sets of two Trees. """
    return self.labels & other.labels
<SYSTEM_TASK:>
Return a generator that yields the nodes of the tree in postorder.
<END_TASK>
<USER_TASK:>
Description:
def postorder(self, skip_seed=False):
    """
    Yield the nodes of the tree in postorder.

    The root (seed) node is omitted when ``skip_seed`` is True.
    """
    seed = self._tree.seed_node
    for node in self._tree.postorder_node_iter():
        if not (skip_seed and node is seed):
            yield node
<SYSTEM_TASK:>
Return a generator that yields the nodes of the tree in preorder.
<END_TASK>
<USER_TASK:>
Description:
def preorder(self, skip_seed=False):
    """
    Yield the nodes of the tree in preorder.

    The root (seed) node is omitted when ``skip_seed`` is True.
    """
    seed = self._tree.seed_node
    for node in self._tree.preorder_node_iter():
        if not (skip_seed and node is seed):
            yield node
<SYSTEM_TASK:>
Prunes the Tree to just the taxon set given in `subset`
<END_TASK>
<USER_TASK:>
Description:
def prune_to_subset(self, subset, inplace=False):
    """ Prune the Tree to just the taxon set given in `subset`. """
    if not subset.issubset(self.labels):
        print('"subset" is not a subset')
        return
    target = self if inplace else self.copy()
    target._tree.retain_taxa_with_labels(subset)
    target._tree.encode_bipartitions()
    target._dirty = True
    return target
<SYSTEM_TASK:>
Replaces branch lengths with values drawn from the specified
<END_TASK>
<USER_TASK:>
Description:
def randomise_branch_lengths(
    self,
    i=(1, 1),
    l=(1, 1),
    distribution_func=random.gammavariate,
    inplace=False,
):
    """ Replace branch lengths with values drawn from the specified
    distribution_func. Parameters of the distribution are given in the
    tuples i and l, for interior and leaf nodes respectively. """
    target = self if inplace else self.copy()
    for node in target._tree.preorder_node_iter():
        params = i if node.is_internal() else l
        # Clamp at zero: negative branch lengths are meaningless.
        node.edge.length = max(0, distribution_func(*params))
    target._dirty = True
    return target
<SYSTEM_TASK:>
Shuffles the leaf labels, but doesn't alter the tree structure
<END_TASK>
<USER_TASK:>
Description:
def randomise_labels(
    self,
    inplace=False,
):
    """ Shuffle the leaf labels without altering the tree structure. """
    target = self if inplace else self.copy()
    names = list(target.labels)
    random.shuffle(names)
    for leaf in target._tree.leaf_node_iter():
        leaf.taxon._label = names.pop()
    target._dirty = True
    return target
<SYSTEM_TASK:>
Stores info required to restore rootedness to derooted Tree. Returns
<END_TASK>
<USER_TASK:>
Description:
def reversible_deroot(self):
""" Stores info required to restore rootedness to derooted Tree. Returns
the edge that was originally rooted, the length of e1, and the length
of e2.
Dendropy Derooting Process:
In a rooted tree the root node is bifurcating. Derooting makes it
trifurcating.
Call the two edges leading out of the root node e1 and e2.
Derooting with Tree.deroot() deletes one of e1 and e2 (let's say e2),
and stretches the other to the sum of their lengths. Call this e3.
Rooted tree: Derooted tree:
A A B
|_ B \ /
/ |
/e1 |e3 (length = e1+e2; e2 is deleted)
Root--o ===> |
\e2 Root--o _ C
\ _ C |
| D
D
Reverse this with Tree.reroot_at_edge(edge, length1, length2, ...)
""" |
root_edge = self._tree.seed_node.edge
# Remember the original lengths of the two root-incident edges (e1, e2).
lengths = dict([(edge, edge.length) for edge
in self._tree.seed_node.incident_edges() if edge is not root_edge])
self._tree.deroot()
# After deroot() exactly one of the remembered edge objects survives
# (as e3, stretched to e1+e2); find it by set intersection.
reroot_edge = (set(self._tree.seed_node.incident_edges())
& set(lengths.keys())).pop()
self._tree.encode_bipartitions()
self._dirty = True
# Return (e3, e3's stretch beyond its original length, its original
# length) — the arguments needed by Tree.reroot_at_edge to undo this.
return (reroot_edge, reroot_edge.length - lengths[reroot_edge],
lengths[reroot_edge]) |
<SYSTEM_TASK:>
Uses class LGT to perform random lateral gene transfer on
<END_TASK>
<USER_TASK:>
Description:
def rlgt(self, time=None, times=1,
         disallow_sibling_lgts=False):
    """ Perform `times` random lateral gene transfers on a copy of this
    ultrametric tree, using the LGT class. """
    mutator = LGT(self.copy())
    for _ in range(times):
        mutator.rlgt(time, disallow_sibling_lgts)
    return mutator.tree
<SYSTEM_TASK:>
Multiplies all branch lengths by factor.
<END_TASK>
<USER_TASK:>
Description:
def scale(self, factor, inplace=True):
    """ Multiply all branch lengths by `factor`. """
    target = self if inplace else self.copy()
    target._tree.scale_edges(factor)
    target._dirty = True
    return target
<SYSTEM_TASK:>
Easy names for debugging
<END_TASK>
<USER_TASK:>
Description:
def _name_things(self):
    """ Build human-readable names for nodes and edges (debugging aid). """
    nodes = {None: 'root'}
    for node in self._tree.postorder_node_iter():
        nodes[node] = '.'.join(str(leaf.taxon) for leaf in node.leaf_nodes())
    edges = {}
    for edge in self._tree.preorder_edge_iter():
        edges[edge] = ' ---> '.join([nodes[edge.tail_node], nodes[edge.head_node]])
    # Reverse lookups: name -> object.
    r_edges = {name: edge for edge, name in edges.items()}
    r_nodes = {name: node for node, name in nodes.items()}
    return edges, nodes, r_edges, r_nodes
<SYSTEM_TASK:>
Method that fits a model with a particular estimation routine.
<END_TASK>
<USER_TASK:>
Description:
def fit(self, ini_betas=None, tol=1.0e-6, max_iter=200, solve='iwls'):
"""
Method that fits a model with a particular estimation routine.
Parameters
----------
ini_betas : array
k*1, initial coefficient values, including constant.
Default is None, which calculates initial values during
estimation.
tol: float
Tolerence for estimation convergence.
max_iter : integer
Maximum number of iterations if convergence not
achieved.
solve :string
Technique to solve MLE equations.
'iwls' = iteratively (re)weighted least squares (default)
Returns
-------
GLMResults object wrapping the fitted parameters, predictions
and weights.
""" |
# Record the fitting configuration for later inspection.
self.fit_params['ini_betas'] = ini_betas
self.fit_params['tol'] = tol
self.fit_params['max_iter'] = max_iter
self.fit_params['solve'] = solve
if solve.lower() == 'iwls':
params, predy, w, n_iter = iwls(
self.y, self.X, self.family, self.offset, self.y_fix, ini_betas, tol, max_iter)
self.fit_params['n_iter'] = n_iter
# NOTE(review): 'iwls' is the only solver handled; for any other value
# of ``solve`` the names used below would be undefined — confirm the
# return is meant to sit inside the 'iwls' branch.
return GLMResults(self, params.flatten(), predy, w) |
<SYSTEM_TASK:>
Inverse of the logit transform
<END_TASK>
<USER_TASK:>
Description:
def inverse(self, z):
    """
    Inverse of the logit transform

    Parameters
    ----------
    z : array-like
        The value of the logit transform at `p`

    Returns
    -------
    p : array
        Probabilities

    Notes
    -----
    g^(-1)(z) = exp(z)/(1+exp(z)), computed as 1/(1+exp(-z))
    """
    return 1. / (1. + np.exp(-np.asarray(z)))
<SYSTEM_TASK:>
Inverse of the power transform link function
<END_TASK>
<USER_TASK:>
Description:
def inverse(self, z):
    """
    Inverse of the power transform link function

    Parameters
    ----------
    z : array-like
        Value of the transformed mean parameters at `p`

    Returns
    -------
    p : array
        Mean parameters

    Notes
    -----
    g^(-1)(z) = z**(1/power)
    """
    reciprocal_power = 1. / self.power
    return np.power(z, reciprocal_power)
<SYSTEM_TASK:>
Derivative of the power transform
<END_TASK>
<USER_TASK:>
Description:
def deriv(self, p):
    """
    Derivative of the power transform

    Parameters
    ----------
    p : array-like
        Mean parameters

    Returns
    --------
    g'(p) : array
        Derivative of power transform of `p`

    Notes
    -----
    g'(p) = power * p**(power - 1)
    """
    exponent = self.power - 1
    return self.power * np.power(p, exponent)
<SYSTEM_TASK:>
Second derivative of the power transform
<END_TASK>
<USER_TASK:>
Description:
def deriv2(self, p):
    """
    Second derivative of the power transform

    Parameters
    ----------
    p : array-like
        Mean parameters

    Returns
    --------
    g''(p) : array
        Second derivative of the power transform of `p`

    Notes
    -----
    g''(p) = power * (power - 1) * p**(power - 2)
    """
    coefficient = self.power * (self.power - 1)
    return coefficient * np.power(p, self.power - 2)
<SYSTEM_TASK:>
Derivative of the inverse of the power transform
<END_TASK>
<USER_TASK:>
Description:
def inverse_deriv(self, z):
    """
    Derivative of the inverse of the power transform

    Parameters
    ----------
    z : array-like
        `z` is usually the linear predictor for a GLM or GEE model.

    Returns
    -------
    g^(-1)'(z) : array
        The value of the derivative of the inverse of the power
        transform function
    """
    exponent = (1 - self.power) / self.power
    return np.power(z, exponent) / self.power
<SYSTEM_TASK:>
Second derivative of the Cauchy link function.
<END_TASK>
<USER_TASK:>
Description:
def deriv2(self, p):
    """
    Second derivative of the Cauchy link function.

    Parameters
    ----------
    p: array-like
        Probabilities

    Returns
    -------
    g''(p) : array
        Value of the second derivative of Cauchy link function at `p`
    """
    angle = np.pi * (p - 0.5)
    return 2 * np.pi ** 2 * np.sin(angle) / np.cos(angle) ** 3
<SYSTEM_TASK:>
Second derivative of the C-Log-Log ink function
<END_TASK>
<USER_TASK:>
Description:
def deriv2(self, p):
    """
    Second derivative of the C-Log-Log link function

    Parameters
    ----------
    p : array-like
        Mean parameters

    Returns
    -------
    g''(p) : array
        The second derivative of the CLogLog link function
    """
    p = self._clean(p)
    log_term = np.log(1 - p)
    # g''(p) = -(1 + 1/log(1-p)) / ((1-p)**2 * log(1-p))
    return -(1 + 1 / log_term) / ((1 - p) ** 2 * log_term)
<SYSTEM_TASK:>
query the einfo endpoint
<END_TASK>
<USER_TASK:>
Description:
def einfo(self, db=None):
    """query the einfo endpoint

    :param db: string (optional)
    :rtype: EInfo or EInfoDB object

    With db=None the reply is the list of databases, returned as an
    EInfo object (which has a databases() method). Otherwise the reply
    is information about the named database, returned as an EInfoDB
    object (version 2.0 data is requested automatically).
    """
    if db is None:
        return EInfoResult(self._qs.einfo()).dblist
    request = {'db': db, 'version': '2.0'}
    return EInfoResult(self._qs.einfo(request)).dbinfo
<SYSTEM_TASK:>
query the efetch endpoint
<END_TASK>
<USER_TASK:>
Description:
def efetch(self, db, id):
    """query the efetch endpoint

    :param db: string -- Entrez database name (matched case-insensitively),
        e.g. 'gene', 'nuccore', 'protein', 'pubmed', 'snp', 'pmc'
    :param id: identifier (converted with str()) to fetch from `db`
    :rtype: database-specific result object wrapping the reply XML
    :raises EutilsError: if `db` is not one of the supported databases
    """ |
    db = db.lower()
    xml = self._qs.efetch({'db': db, 'id': str(id)})
    doc = le.XML(xml)
    if db in ['gene']:
        return EntrezgeneSet(doc)
    if db in ['nuccore', 'nucest', 'protein']:
        # TODO: GBSet is misnamed; it should be GBSeq and get the GBSeq XML node as root (see gbset.py)
        return GBSet(doc)
    if db in ['pubmed']:
        return PubmedArticleSet(doc)
    if db in ['snp']:
        # ExchangeSet takes the raw XML string rather than the parsed doc
        return ExchangeSet(xml)
    if db in ['pmc']:
        return PubmedCentralArticleSet(doc)
    raise EutilsError('database {db} is not currently supported by eutils'.format(db=db)) |
<SYSTEM_TASK:>
Blit the current button's appearance to the surface object.
<END_TASK>
<USER_TASK:>
Description:
def draw(self, surfaceObj):
    """Blit the current button's appearance to the surface object.

    The surface chosen reflects the button state, in priority order:
    pressed, then hovered, then normal. Nothing is drawn while the
    button is invisible.
    """ |
    if self._visible:
        if self.buttonDown:
            surfaceObj.blit(self.surfaceDown, self._rect)
        elif self.mouseOverButton:
            surfaceObj.blit(self.surfaceHighlight, self._rect)
        else:
            surfaceObj.blit(self.surfaceNormal, self._rect) |
<SYSTEM_TASK:>
Helper method to set the link for a family.
<END_TASK>
<USER_TASK:>
Description:
def _setlink(self, link):
    """
    Helper method to set the link for a family.

    Raises a ValueError exception if the link is not available. Note that
    the error message might not be that informative because it tells you
    that the link should be in the base class for the link function.

    See glm.GLM for a list of appropriate links for each family but note
    that not all of these are currently available.
    """
    # TODO: change the links class attribute in the families to hold
    # meaningful information instead of a list of links instances such as
    # [<statsmodels.family.links.Log object at 0x9a4240c>, ...]
    # for Poisson...
    self._link = link
    if not isinstance(link, L.Link):
        raise TypeError("The input should be a valid Link object.")
    if hasattr(self, "links"):
        # BUG FIX: the previous `validlink = link in self.links` identity
        # test was dead code -- it was immediately overwritten. A link is
        # valid when it is an instance of one of the listed link classes.
        validlink = any(isinstance(link, _) for _ in self.links)
        if not validlink:
            errmsg = "Invalid link for family, should be in %s. (got %s)"
            raise ValueError(errmsg % (repr(self.links), link))
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def weights(self, mu):
r"""
Weights for IRLS steps
Parameters
----------
mu : array-like
The transformed mean response variable in the exponential family
Returns
-------
w : array
The weights for the IRLS steps
""" |
return 1. / (self.link.deriv(mu)**2 * self.variance(mu)) |
<SYSTEM_TASK:>
Gaussian deviance residuals
<END_TASK>
<USER_TASK:>
Description:
def resid_dev(self, endog, mu, scale=1.):
    """
    Gaussian deviance residuals

    Parameters
    -----------
    endog : array-like
        Endogenous response variable
    mu : array-like
        Fitted mean response variable
    scale : float, optional
        An optional argument to divide the residuals by scale. The default
        is 1.

    Returns
    -------
    resid_dev : array
        Deviance residuals: (endog - mu) / sqrt(Var(mu)) / scale
    """
    raw_resid = endog - mu
    return raw_resid / np.sqrt(self.variance(mu)) / scale
<SYSTEM_TASK:>
Gaussian deviance function
<END_TASK>
<USER_TASK:>
Description:
def deviance(self, endog, mu, freq_weights=1., scale=1.):
    """
    Gaussian deviance function

    Computed as sum(freq_weights * (endog - mu)**2) / scale.

    Parameters
    ----------
    endog : array-like
        Endogenous response variable
    mu : array-like
        Fitted mean response variable
    freq_weights : array-like
        1d array of frequency weights. The default is 1.
    scale : float, optional
        An optional scale argument. The default is 1.

    Returns
    -------
    deviance : float
        The deviance function at (endog,mu,freq_weights,scale).
    """
    squared_resid = (endog - mu)**2
    return np.sum(freq_weights * squared_resid) / scale
<SYSTEM_TASK:>
The log-likelihood in terms of the fitted mean response.
<END_TASK>
<USER_TASK:>
Description:
def loglike(self, endog, mu, freq_weights=1., scale=1.):
    """
    The log-likelihood in terms of the fitted mean response.

    Parameters
    ----------
    endog : array-like
        Endogenous response variable
    mu : array-like
        Fitted mean response variable
    freq_weights : array-like
        1d array of frequency weights. The default is 1.
    scale : float, optional
        Scales the loglikelihood function. The default is 1.

    Returns
    -------
    llf : float
        The value of the loglikelihood function evaluated at
        (endog,mu,freq_weights,scale) as defined below.
    """ |
    # Identity link (Power with power == 1) reduces to classical OLS.
    if isinstance(self.link, L.Power) and self.link.power == 1:
        # This is just the loglikelihood for classical OLS
        nobs2 = endog.shape[0] / 2.
        SSR = np.sum((endog-self.fitted(mu))**2, axis=0)
        llf = -np.log(SSR) * nobs2
        llf -= (1+np.log(np.pi/nobs2))*nobs2
        return llf
    else:
        # General exponential-family form; note freq_weights are applied
        # only in this branch, not in the OLS shortcut above.
        return np.sum(freq_weights * ((endog * mu - mu**2/2)/scale -
                      endog**2/(2 * scale) - .5*np.log(2 * np.pi * scale))) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def deviance(self, endog, mu, freq_weights=1., scale=1.):
r"""
Gamma deviance function
Parameters
-----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
freq_weights : array-like
1d array of frequency weights. The default is 1.
scale : float, optional
An optional scale argument. The default is 1.
Returns
-------
deviance : float
Deviance function as defined below
""" |
endog_mu = self._clean(endog/mu)
return 2*np.sum(freq_weights*((endog-mu)/mu-np.log(endog_mu))) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def resid_dev(self, endog, mu, scale=1.):
r"""
Gamma deviance residuals
Parameters
-----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale. The default
is 1.
Returns
-------
resid_dev : array
Deviance residuals as defined below
""" |
endog_mu = self._clean(endog / mu)
return np.sign(endog - mu) * np.sqrt(-2 * (-(endog - mu)/mu +
np.log(endog_mu))) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def resid_dev(self, endog, mu, scale=1.):
r"""
Binomial deviance residuals
Parameters
-----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale. The default
is 1.
Returns
-------
resid_dev : array
Deviance residuals as defined below
""" |
mu = self.link._clean(mu)
if np.shape(self.n) == () and self.n == 1:
one = np.equal(endog, 1)
return np.sign(endog-mu)*np.sqrt(-2 *
np.log(one * mu + (1 - one) *
(1 - mu)))/scale
else:
return (np.sign(endog - mu) *
np.sqrt(2 * self.n *
(endog * np.log(endog/mu + 1e-200) +
(1 - endog) * np.log((1 - endog)/(1 - mu) + 1e-200)))/scale) |
<SYSTEM_TASK:>
Find an entry by exact title.
<END_TASK>
<USER_TASK:>
Description:
def find_by_title(self, title):
    """Find an entry by exact title.

    :raise: EntryNotFoundError
    """
    match = next((entry for entry in self.entries if entry.title == title),
                 None)
    if match is None:
        raise EntryNotFoundError("Entry not found for title: %s" % title)
    return match
<SYSTEM_TASK:>
Find an entry by fuzzy match.
<END_TASK>
<USER_TASK:>
Description:
def fuzzy_search_by_title(self, title, ignore_groups=None):
    """Find an entry by fuzzy match.

    This will check things such as:

        * case insensitive matching
        * typo checks
        * prefix matches

    If the ``ignore_groups`` argument is provided, then any matching
    entries in the ``ignore_groups`` list will not be returned. This
    argument can be used to filter out groups you are not interested in.

    Returns a list of matches (an empty list is returned if no matches are
    found).
    """
    entries = []
    # Lowercase once up front; every matching tier below needs it.
    # (Previously title.lower() was recomputed for every entry.)
    title_lower = title.lower()
    # Exact matches trump
    for entry in self.entries:
        if entry.title == title:
            entries.append(entry)
    if entries:
        return self._filter_entries(entries, ignore_groups)
    # Case insensitive matches next.
    for entry in self.entries:
        if entry.title.lower() == title_lower:
            entries.append(entry)
    if entries:
        return self._filter_entries(entries, ignore_groups)
    # Subsequence/prefix matches next.
    for entry in self.entries:
        if self._is_subsequence(title_lower, entry.title.lower()):
            entries.append(entry)
    if entries:
        return self._filter_entries(entries, ignore_groups)
    # Finally close matches that might have misspellings.
    entry_map = {entry.title.lower(): entry for entry in self.entries}
    matches = difflib.get_close_matches(
        title_lower, entry_map.keys(), cutoff=0.7)
    if matches:
        return self._filter_entries(
            [entry_map[name] for name in matches], ignore_groups)
    return []
<SYSTEM_TASK:>
Parse the haproxy config file
<END_TASK>
<USER_TASK:>
Description:
def build_configuration(self):
    """Parse the haproxy config file

    Raises:
        Exception: when there are unsupported section

    Returns:
        config.Configuration: haproxy config object
    """ |
    configuration = config.Configuration()
    pegtree = pegnode.parse(self.filestring)
    # Dispatch on the PEG node type of each top-level section; nodes of
    # any other type (blank/comment regions) are skipped.
    for section_node in pegtree:
        if isinstance(section_node, pegnode.GlobalSection):
            # `globall` -- presumably spelled to avoid the `global` keyword
            configuration.globall = self.build_global(section_node)
        elif isinstance(section_node, pegnode.FrontendSection):
            configuration.frontends.append(
                self.build_frontend(section_node))
        elif isinstance(section_node, pegnode.DefaultsSection):
            configuration.defaults.append(
                self.build_defaults(section_node))
        elif isinstance(section_node, pegnode.ListenSection):
            configuration.listens.append(
                self.build_listen(section_node))
        elif isinstance(section_node, pegnode.UserlistSection):
            configuration.userlists.append(
                self.build_userlist(section_node))
        elif isinstance(section_node, pegnode.BackendSection):
            configuration.backends.append(
                self.build_backend(section_node))
    return configuration |
<SYSTEM_TASK:>
parse `config_block` in each section
<END_TASK>
<USER_TASK:>
Description:
def __build_config_block(self, config_block_node):
    """parse `config_block` in each section

    Each recognized line type is converted to its config.* object via the
    matching builder; blank and comment lines are dropped.

    Args:
        config_block_node (TreeNode): Description

    Returns:
        [line_node1, line_node2, ...]
    """ |
    node_lists = []
    for line_node in config_block_node:
        if isinstance(line_node, pegnode.ConfigLine):
            node_lists.append(self.__build_config(line_node))
        elif isinstance(line_node, pegnode.OptionLine):
            node_lists.append(self.__build_option(line_node))
        elif isinstance(line_node, pegnode.ServerLine):
            node_lists.append(
                self.__build_server(line_node))
        elif isinstance(line_node, pegnode.BindLine):
            node_lists.append(
                self.__build_bind(line_node))
        elif isinstance(line_node, pegnode.AclLine):
            node_lists.append(
                self.__build_acl(line_node))
        elif isinstance(line_node, pegnode.BackendLine):
            node_lists.append(
                self.__build_usebackend(line_node))
        elif isinstance(line_node, pegnode.UserLine):
            node_lists.append(
                self.__build_user(line_node))
        elif isinstance(line_node, pegnode.GroupLine):
            node_lists.append(
                self.__build_group(line_node))
        else:
            # may blank_line, comment_line
            pass
    return node_lists |
<SYSTEM_TASK:>
parse `defaults` sections, and return a config.Defaults
<END_TASK>
<USER_TASK:>
Description:
def build_defaults(self, defaults_node):
    """parse `defaults` sections, and return a config.Defaults

    Args:
        defaults_node (TreeNode): Description

    Returns:
        config.Defaults: an object
    """ |
    # the section name comes from the `defaults` header line
    proxy_name = defaults_node.defaults_header.proxy_name.text
    config_block_lines = self.__build_config_block(
        defaults_node.config_block)
    return config.Defaults(
        name=proxy_name,
        config_block=config_block_lines) |
<SYSTEM_TASK:>
parse `userlist` sections, and return a config.Userlist
<END_TASK>
<USER_TASK:>
Description:
def build_userlist(self, userlist_node):
    """parse `userlist` sections, and return a config.Userlist

    Args:
        userlist_node (TreeNode): Description

    Returns:
        config.Userlist: an object
    """ |
    proxy_name = userlist_node.userlist_header.proxy_name.text
    config_block_lines = self.__build_config_block(
        userlist_node.config_block)
    return config.Userlist(
        name=proxy_name,
        config_block=config_block_lines) |
<SYSTEM_TASK:>
parse `listen` sections, and return a config.Listen
<END_TASK>
<USER_TASK:>
Description:
def build_listen(self, listen_node):
    """parse `listen` sections, and return a config.Listen

    Args:
        listen_node (TreeNode): Description

    Raises:
        Exception: when neither the header nor any `bind` line provides
            a host and port

    Returns:
        config.Listen: an object
    """ |
    proxy_name = listen_node.listen_header.proxy_name.text
    service_address_node = listen_node.listen_header.service_address
    # parse the config block
    config_block_lines = self.__build_config_block(
        listen_node.config_block)
    # parse host and port
    host, port = '', ''
    if isinstance(service_address_node, pegnode.ServiceAddress):
        host = service_address_node.host.text
        port = service_address_node.port.text
    else:
        # use `bind` in config lines to fill in host and port
        # just use the first
        for line in config_block_lines:
            if isinstance(line, config.Bind):
                host, port = line.host, line.port
                break
        else:
            # for/else: reached only when no Bind line was found
            raise Exception(
                'Not specify host and port in `listen` definition')
    return config.Listen(
        name=proxy_name, host=host, port=port,
        config_block=config_block_lines) |
<SYSTEM_TASK:>
parse `frontend` sections, and return a config.Frontend
<END_TASK>
<USER_TASK:>
Description:
def build_frontend(self, frontend_node):
    """parse `frontend` sections, and return a config.Frontend

    Mirrors the structure of build_listen: host/port come from the
    section header, falling back to the first `bind` line.

    Args:
        frontend_node (TreeNode): Description

    Raises:
        Exception: when neither the header nor any `bind` line provides
            a host and port

    Returns:
        config.Frontend: an object
    """ |
    proxy_name = frontend_node.frontend_header.proxy_name.text
    service_address_node = frontend_node.frontend_header.service_address
    # parse the config block
    config_block_lines = self.__build_config_block(
        frontend_node.config_block)
    # parse host and port
    host, port = '', ''
    if isinstance(service_address_node, pegnode.ServiceAddress):
        host = service_address_node.host.text
        port = service_address_node.port.text
    else:
        # use `bind` in config lines to fill in host and port
        # just use the first
        for line in config_block_lines:
            if isinstance(line, config.Bind):
                host, port = line.host, line.port
                break
        else:
            # for/else: reached only when no Bind line was found
            raise Exception(
                'Not specify host and port in `frontend` definition')
    return config.Frontend(
        name=proxy_name, host=host, port=port,
        config_block=config_block_lines) |
<SYSTEM_TASK:>
Trying to compile and return the compiled template code.
<END_TASK>
<USER_TASK:>
Description:
def from_string(self, template_code):
    """
    Trying to compile and return the compiled template code.

    :raises: TemplateSyntaxError if there's a syntax error in
    the template.
    :param template_code: Textual template source.
    :return: Returns a compiled Mako template.
    """ |
    try:
        return self.template_class(self.engine.from_string(template_code))
    except mako_exceptions.SyntaxException as exc:
        # translate Mako's syntax error into Django's so callers can
        # handle template errors uniformly
        raise TemplateSyntaxError(exc.args) |
<SYSTEM_TASK:>
Render the template with a given context. Here we're adding
<END_TASK>
<USER_TASK:>
Description:
def render(self, context=None, request=None):
    """
    Render the template with a given context. Here we're adding
    some context variables that are required for all templates in
    the system like the static url and the CSRF tokens, etc.

    :param context: It must be a dict if provided
    :param request: It must be a django.http.HttpRequest if provided
    :return: A rendered template
    """ |
    if context is None:
        context = {}
    context['static'] = static
    context['url'] = self.get_reverse_url()
    if request is not None:
        # As Django doesn't have a global request object,
        # it's useful to put it in the context.
        context['request'] = request
        # Passing the CSRF token is mandatory.
        context['csrf_input'] = csrf_input_lazy(request)
        context['csrf_token'] = csrf_token_lazy(request)
    try:
        return self.template.render(**context)
    except Exception as e:
        # Build a Django-style `template_debug` payload from Mako's
        # RichTraceback so Django's debug page can show the failure.
        traceback = RichTraceback()
        source = traceback.source
        if not source:
            # There's no template source lines then raise
            raise e
        source = source.split('\n')
        line = traceback.lineno
        # window of up to 9 source lines centred on the failing line
        top = max(0, line - 4)
        bottom = min(len(source), line + 5)
        source_lines = [(i + 1, source[i]) for i in range(top, bottom)]
        e.template_debug = {
            # NOTE(review): records[5][4] assumes a fixed RichTraceback
            # record layout -- confirm against mako's documentation.
            'name': traceback.records[5][4],
            'message': '{}: {}'.format(
                traceback.errorname, traceback.message),
            'source_lines': source_lines,
            'line': line,
            'during': source_lines[line - top - 1][1],
            'total': bottom - top,
            'bottom': bottom,
            'top': top + 1,
            # mako's RichTraceback doesn't return column number
            'before': '',
            'after': '',
        }
        raise e |
<SYSTEM_TASK:>
Returns the confidence interval of the fitted parameters.
<END_TASK>
<USER_TASK:>
Description:
def conf_int(self, alpha=.05, cols=None, method='default'):
    """
    Returns the confidence interval of the fitted parameters.

    Parameters
    ----------
    alpha : float, optional
        The significance level for the confidence interval.
        ie., The default `alpha` = .05 returns a 95% confidence
        interval.
    cols : array-like, optional
        `cols` specifies which confidence intervals to return
    method: string
        Not Implemented Yet
        Method to estimate the confidence_interval.
        "Default" : uses self.bse which is based on inverse Hessian
        for MLE.
        "hjjh" :
        "jac" :
        "boot-bse"
        "boot_quant"
        "profile"

    Returns
    --------
    conf_int : array
        Each row contains [lower, upper] limits of the confidence
        interval for the corresponding parameter. The first column
        contains all lower, the second column contains all upper
        limits.

    Examples
    --------
    >>> import libpysal as ps
    >>> from spglm.glm import GLM
    >>> import numpy as np
    >>> db = ps.io.open(ps.examples.get_path('columbus.dbf'),'r')
    >>> y = np.array(db.by_col("HOVAL")).reshape((-1,1))
    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X.append(db.by_col("CRIME"))
    >>> X = np.array(X).T
    >>> model = GLM(y, X)
    >>> results = model.fit()
    >>> results.conf_int()
    array([[ 20.57281401,  72.28355135],
           [ -0.42138121,   1.67934915],
           [ -0.84292086,  -0.12685622]])

    Notes
    -----
    The confidence interval is based on the standard normal distribution.
    Models wish to use a different distribution should overwrite this
    method.
    """ |
    bse = self.bse
    # Choose the reference distribution: Student's t when the model asks
    # for small-sample inference, standard normal otherwise.
    if self.use_t:
        dist = stats.t
        df_resid = getattr(self, 'df_resid_inference', self.df_resid)
        q = dist.ppf(1 - alpha / 2, df_resid)
    else:
        dist = stats.norm
        q = dist.ppf(1 - alpha / 2)
    if cols is None:
        lower = self.params - q * bse
        upper = self.params + q * bse
    else:
        # restrict the interval to the requested parameter columns
        cols = np.asarray(cols)
        lower = self.params[cols] - q * bse[cols]
        upper = self.params[cols] + q * bse[cols]
    return np.asarray(lzip(lower, upper)) |
<SYSTEM_TASK:>
Returns md5 hash of a string.
<END_TASK>
<USER_TASK:>
Description:
def MD5Hash(password):
    """
    Returns md5 hash of a string.

    @param password (string) - String to be hashed.

    @return (string) - Md5 hash of password.
    """
    # The `md5` module is deprecated (since Python 2.5) and removed in
    # Python 3; hashlib provides the same digest. Text input is encoded
    # as UTF-8 first; byte strings are hashed as-is (matches md5.new).
    import hashlib
    if not isinstance(password, bytes):
        password = password.encode('utf-8')
    return hashlib.md5(password).hexdigest()
<SYSTEM_TASK:>
Set verbosity of the SenseApi object.
<END_TASK>
<USER_TASK:>
Description:
def setVerbosity(self, verbose):
    """
    Set verbosity of the SenseApi object.

    @param verbose (boolean) - True or False

    @return (boolean) - Boolean indicating whether setVerbosity succeeded
    """
    # Reject anything that does not compare equal to True or False.
    if verbose not in (True, False):
        return False
    self.__verbose__ = verbose
    return True
<SYSTEM_TASK:>
Set server to interact with.
<END_TASK>
<USER_TASK:>
Description:
def setServer(self, server):
    """
    Set server to interact with.

    @param server (string) - 'live' for live server, 'dev' for test
        server, 'rc' for release candidate

    @return (boolean) - Boolean indicating whether setServer succeeded
    """
    if server == 'live':
        self.__server__ = server
        self.__server_url__ = 'api.sense-os.nl'
        self.setUseHTTPS()
        return True
    elif server == 'dev':
        self.__server__ = server
        self.__server_url__ = 'api.dev.sense-os.nl'
        # the dev server doesn't support https
        self.setUseHTTPS(False)
        return True
    elif server == 'rc':
        self.__server__ = server
        self.__server_url__ = 'api.rc.dev.sense-os.nl'
        self.setUseHTTPS(False)
        # BUG FIX: this branch previously fell through without a return,
        # yielding None instead of True on success.
        return True
    else:
        return False
<SYSTEM_TASK:>
Retrieve all the user's own sensors by iterating over the SensorsGet function
<END_TASK>
<USER_TASK:>
Description:
def getAllSensors(self):
    """
    Retrieve all the user's own sensors by iterating over the SensorsGet function

    @return (list) - Array of sensors, or None if any page request failed
    """ |
    j = 0
    sensors = []
    # page through the API 1000 sensors at a time; a short page marks
    # the end of the result set
    parameters = {'page':0, 'per_page':1000, 'owned':1}
    while True:
        parameters['page'] = j
        if self.SensorsGet(parameters):
            s = json.loads(self.getResponse())['sensors']
            sensors.extend(s)
        else:
            # if any of the calls fails, we cannot be sure about the sensors in CommonSense
            return None
        if len(s) < 1000:
            break
        j += 1
    return sensors |
<SYSTEM_TASK:>
Find a sensor in the provided list of sensors
<END_TASK>
<USER_TASK:>
Description:
def findSensor(self, sensors, sensor_name, device_type = None):
    """
    Find a sensor in the provided list of sensors

    @param sensors (list) - List of sensors to search in
    @param sensor_name (string) - Name of sensor to find
    @param device_type (string) - Device type of sensor to find, can be None

    @return (string) - sensor_id of sensor or None if not found
    """
    for sensor in sensors:
        if sensor['name'] != sensor_name:
            continue
        # only require a device_type match when one was requested
        if device_type is None or sensor['device_type'] == device_type:
            return sensor['id']
    return None
<SYSTEM_TASK:>
Authenticate using a username and password.
<END_TASK>
<USER_TASK:>
Description:
def AuthenticateSessionId(self, username, password):
    """
    Authenticate using a username and password.
    The SenseApi object will store the obtained session_id internally until a call to LogoutSessionId is performed.

    @param username (string) - CommonSense username
    @param password (string) - MD5Hash of CommonSense password

    @return (bool) - Boolean indicating whether AuthenticateSessionId was successful
    """ |
    self.__setAuthenticationMethod__('authenticating_session_id')
    parameters = {'username':username, 'password':password}
    if self.__SenseApiCall__("/login.json", "POST", parameters = parameters):
        try:
            response = json.loads(self.__response__)
        except:
            # reply was not valid JSON
            self.__setAuthenticationMethod__('not_authenticated')
            self.__error__ = "notjson"
            return False
        try:
            self.__session_id__ = response['session_id']
            self.__setAuthenticationMethod__('session_id')
            return True
        except:
            # valid JSON, but no session_id field in the reply
            self.__setAuthenticationMethod__('not_authenticated')
            self.__error__ = "no session_id"
            return False
    else:
        self.__setAuthenticationMethod__('not_authenticated')
        self.__error__ = "api call unsuccessful"
        return False |
<SYSTEM_TASK:>
Logout the current session_id from CommonSense
<END_TASK>
<USER_TASK:>
Description:
def LogoutSessionId(self):
    """
    Logout the current session_id from CommonSense

    @return (bool) - Boolean indicating whether LogoutSessionId was successful
    """
    if not self.__SenseApiCall__('/logout.json', 'POST'):
        self.__error__ = "api call unsuccessful"
        return False
    # the stored session is no longer valid after a successful logout
    self.__setAuthenticationMethod__('not_authenticated')
    return True
<SYSTEM_TASK:>
Authenticate using Oauth
<END_TASK>
<USER_TASK:>
Description:
def AuthenticateOauth (self, oauth_token_key, oauth_token_secret, oauth_consumer_key, oauth_consumer_secret):
    """
    Authenticate using Oauth

    @param oauth_token_key (string) - A valid oauth token key obtained from CommonSense
    @param oauth_token_secret (string) - A valid oauth token secret obtained from CommonSense
    @param oauth_consumer_key (string) - A valid oauth consumer key obtained from CommonSense
    @param oauth_consumer_secret (string) - A valid oauth consumer secret obtained from CommonSense

    @return (boolean) - Boolean indicating whether the provided credentials were successfully authenticated
    """ |
    self.__oauth_consumer__ = oauth.OAuthConsumer(str(oauth_consumer_key), str(oauth_consumer_secret))
    self.__oauth_token__ = oauth.OAuthToken(str(oauth_token_key), str(oauth_token_secret))
    self.__authentication__ = 'oauth'
    # verify the credentials by fetching the current user
    if self.__SenseApiCall__('/users/current.json', 'GET'):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False |
<SYSTEM_TASK:>
Delete a sensor from CommonSense.
<END_TASK>
<USER_TASK:>
Description:
def SensorsDelete(self, sensor_id):
    """
    Delete a sensor from CommonSense.

    @param sensor_id (int) - Sensor id of sensor to delete from CommonSense.

    @return (bool) - Boolean indicating whether SensorsDelete was successful.
    """
    if not self.__SenseApiCall__('/sensors/{0}.json'.format(sensor_id), 'DELETE'):
        self.__error__ = "api call unsuccessful"
        return False
    return True
<SYSTEM_TASK:>
Retrieve sensors with their metatags.
<END_TASK>
<USER_TASK:>
Description:
def SensorsMetatagsGet(self, parameters, namespace = None):
    """
    Retrieve sensors with their metatags.

    @param namespace (string) - Namespace for which to retrieve the metatags.
    @param parameters (dictionary) - Dictionary containing further parameters.

    @return (bool) - Boolean indicating whether SensorsMetatagsGet was successful
    """ |
    # default namespace is used when none is supplied
    ns = "default" if namespace is None else namespace
    parameters['namespace'] = ns
    if self.__SenseApiCall__('/sensors/metatags.json', 'GET', parameters = parameters):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False |
<SYSTEM_TASK:>
Retrieve sensors in a group with their metatags.
<END_TASK>
<USER_TASK:>
Description:
def GroupSensorsMetatagsGet(self, group_id, parameters, namespace = None):
    """
    Retrieve sensors in a group with their metatags.

    @param group_id (int) - Group id for which to retrieve metatags.
    @param namespace (string) - Namespace for which to retrieve the metatags.
    @param parameters (dictionary) - Dictionary containing further parameters.

    @return (bool) - Boolean indicating whether GroupSensorsMetatagsGet was successful
    """ |
    # default namespace is used when none is supplied
    ns = "default" if namespace is None else namespace
    parameters['namespace'] = ns
    if self.__SenseApiCall__('/groups/{0}/sensors/metatags.json'.format(group_id), 'GET', parameters = parameters):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False |
<SYSTEM_TASK:>
Retrieve the metatags of a sensor.
<END_TASK>
<USER_TASK:>
Description:
def SensorMetatagsGet(self, sensor_id, namespace = None):
    """
    Retrieve the metatags of a sensor.

    @param sensor_id (int) - Id of the sensor to retrieve metatags from
    @param namespace (string) - Namespace for which to retrieve metatags.

    @return (bool) - Boolean indicating whether SensorMetatagsGet was successful
    """
    # default namespace is used when none is supplied
    ns = "default" if namespace is None else namespace
    url = '/sensors/{0}/metatags.json'.format(sensor_id)
    if not self.__SenseApiCall__(url, 'GET', parameters = {'namespace': ns}):
        self.__error__ = "api call unsuccessful"
        return False
    return True
<SYSTEM_TASK:>
Attach metatags to a sensor for a specific namespace
<END_TASK>
<USER_TASK:>
Description:
def SensorMetatagsPost(self, sensor_id, metatags, namespace = None):
    """
    Attach metatags to a sensor for a specific namespace

    @param sensor_id (int) - Id of the sensor to attach metatags to
    @param namespace (string) - Namespace for which to attach metatags
    @param metatags (dictionary) - Metatags to attach to the sensor

    @return (bool) - Boolean indicating whether SensorMetatagsPost was successful
    """ |
    ns = "default" if namespace is None else namespace
    # the namespace travels in the query string; the metatags form the body
    if self.__SenseApiCall__("/sensors/{0}/metatags.json?namespace={1}".format(sensor_id, ns), "POST", parameters = metatags):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False |
<SYSTEM_TASK:>
Find sensors in a group based on a number of filters on metatags
<END_TASK>
<USER_TASK:>
Description:
def GroupSensorsFind(self, group_id, parameters, filters, namespace = None):
    """
    Find sensors in a group based on a number of filters on metatags

    @param group_id (int) - Id of the group in which to find sensors
    @param namespace (string) - Namespace to use in filtering on metatags
    @param parameters (dictionary) - Dictionary containing additional parameters
    @param filters (dictionary) - Dictionary containing the filters on metatags

    @return (bool) - Boolean indicating whether GroupSensorsFind was successful
    """ |
    ns = "default" if namespace is None else namespace
    parameters['namespace'] = ns
    # parameters go in the query string (urlencode with doseq=True to
    # expand list values); the filters form the POST body
    if self.__SenseApiCall__("/groups/{0}/sensors/find.json?{1}".format(group_id, urllib.urlencode(parameters, True)), "POST", parameters = filters):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False |
<SYSTEM_TASK:>
Find the distinct values of a metatag name in a certain namespace
<END_TASK>
<USER_TASK:>
Description:
def MetatagDistinctValuesGet(self, metatag_name, namespace = None):
    """
    Find the distinct values of a metatag name in a certain namespace

    @param metatag_name (string) - Name of the metatag for which to find the distinct values
    @param namespace (string) - Namespace in which to find the distinct values

    @return (bool) - Boolean indicating whether MetatagDistinctValuesGet was successful
    """
    ns = "default" if namespace is None else namespace
    # BUG FIX: the URL's {0} placeholder was never filled in, so the
    # literal string "{0}" was sent to the server instead of the name.
    if self.__SenseApiCall__("/metatag_name/{0}/distinct_values.json".format(metatag_name), "GET", parameters = {'namespace': ns}):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False
<SYSTEM_TASK:>
Delete a sensor datum from a specific sensor in CommonSense.
<END_TASK>
<USER_TASK:>
Description:
def SensorDataDelete(self, sensor_id, data_id):
    """
    Delete a sensor datum from a specific sensor in CommonSense.

    @param sensor_id (int) - Sensor id of the sensor to delete data from
    @param data_id (int) - Id of the data point to delete

    @return (bool) - Boolean indicating whether SensorDataDelete was successful.
    """
    if self.__SenseApiCall__('/sensors/{0}/data/{1}.json'.format(sensor_id, data_id), 'DELETE'):
        return True
    else:
        # BUG FIX: was `self.__error_` (single trailing underscore), which
        # stored the message under the wrong attribute name so the error
        # was never visible via the usual __error__ attribute.
        self.__error__ = "api call unsuccessful"
        return False
<SYSTEM_TASK:>
Post sensor data to multiple sensors in CommonSense simultaneously.
<END_TASK>
<USER_TASK:>
Description:
def SensorsDataPost(self, parameters):
    """
    Post sensor data to multiple sensors in CommonSense simultaneously.

    @param parameters (dictionary) - Data to post to the sensors.
            @note - http://www.sense-os.nl/59?nodeId=59&selectedId=11887

    @return (bool) - Boolean indicating whether SensorsDataPost was successful.
    """
    if not self.__SenseApiCall__('/sensors/data.json', 'POST', parameters = parameters):
        self.__error__ = "api call unsuccessful"
        return False
    return True
<SYSTEM_TASK:>
Delete a service from CommonSense.
<END_TASK>
<USER_TASK:>
Description:
def ServicesDelete (self, sensor_id, service_id):
    """
    Delete a service from CommonSense.

    @param sensor_id (int) - Sensor id of the sensor the service is connected to.
    @param service_id (int) - Sensor id of the service to delete.

    @return (bool) - Boolean indicating whether ServicesDelete was successful.
    """ |
    if self.__SenseApiCall__('/sensors/{0}/services/{1}.json'.format(sensor_id, service_id), 'DELETE'):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False |
<SYSTEM_TASK:>
Indicate whether a math service should use the original timestamps of the incoming data, or let CommonSense timestamp the aggregated data.
<END_TASK>
<USER_TASK:>
Description:
def ServicesSetUseDataTimestamp(self, sensor_id, service_id, parameters):
    """
    Indicate whether a math service should use the original timestamps of the incoming data, or let CommonSense timestamp the aggregated data.

    @param sensor_id (int) - Sensor id of the sensor the service is connected to.
    @param service_id (int) - Service id of the service for which to set the expression.
    @param parameters (dictionary) - Parameters to set the expression of the math service.
            @note - http://www.sense-os.nl/85?nodeId=85&selectedId=11887

    @return (bool) - Boolean indicating whether ServicesSetUseDataTimestamp was successful.
    """ |
    if self.__SenseApiCall__('/sensors/{0}/services/{1}/SetUseDataTimestamp.json'.format(sensor_id, service_id), 'POST', parameters = parameters):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False |
<SYSTEM_TASK:>
Create a user
<END_TASK>
<USER_TASK:>
Description:
def CreateUser(self, parameters):
    """
    Create a user

    This method creates a user and returns the user object and session

    @param parameters (dictionary) - Parameters according to which to create the user.

    @return (bool) - Boolean indicating whether CreateUser was successful.
    """
    # BUG FIX: removed leftover debug `print` statements that echoed the
    # user parameters (including credentials) to stdout.
    if self.__SenseApiCall__('/users.json', 'POST', parameters = parameters):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False
<SYSTEM_TASK:>
Update the current user.
<END_TASK>
<USER_TASK:>
Description:
def UsersUpdate (self, user_id, parameters):
    """
    Update the current user.

    @param user_id (int) - id of the user to be updated
    @param parameters (dictionary) - user object to update the user with

    @return (bool) - Boolean indicating whether UsersUpdate was successful.
    """ |
    # note: `parameters` is passed positionally here, unlike the other
    # methods which use the `parameters =` keyword
    if self.__SenseApiCall__('/users/{0}.json'.format(user_id), 'PUT', parameters):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False |
<SYSTEM_TASK:>
Change the password for the current user
<END_TASK>
<USER_TASK:>
Description:
def UsersChangePassword(self, current_password, new_password):
    """
    Change the password for the current user.
    @param current_password (string) - md5 hash of the current password of the user
    @param new_password (string) - md5 hash of the new password of the user (make sure to doublecheck!)
    @return (bool) - Boolean indicating whether ChangePassword was successful.
    """
    payload = {"current_password": current_password, "new_password": new_password}
    if not self.__SenseApiCall__('/change_password', "POST", payload):
        # Record the failure so the caller can inspect it.
        self.__error__ = "api call unsuccessful"
        return False
    return True
<SYSTEM_TASK:>
Delete a user.
<END_TASK>
<USER_TASK:>
Description:
def UsersDelete(self, user_id):
    """
    Delete a user.
    @param user_id (int) - Id of the user to delete.
    @return (bool) - Boolean indicating whether UsersDelete was successful.
    """
    if not self.__SenseApiCall__('/users/{user_id}.json'.format(user_id = user_id), 'DELETE'):
        # Record the failure so the caller can inspect it.
        self.__error__ = "api call unsuccessful"
        return False
    return True
<SYSTEM_TASK:>
Delete an event-notification from CommonSense.
<END_TASK>
<USER_TASK:>
Description:
def EventsNotificationsDelete(self, event_notification_id):
    """
    Delete an event-notification from CommonSense.
    @param event_notification_id (int) - Id of the event-notification to delete.
    @return (bool) - Boolean indicating whether EventsNotificationsDelete was successful.
    """
    endpoint = '/events/notifications/{0}.json'.format(event_notification_id)
    if not self.__SenseApiCall__(endpoint, 'DELETE'):
        # Record the failure so the caller can inspect it.
        self.__error__ = "api call unsuccessful"
        return False
    return True
<SYSTEM_TASK:>
Delete a trigger from CommonSense.
<END_TASK>
<USER_TASK:>
Description:
def TriggersDelete(self, trigger_id):
    """
    Delete a trigger from CommonSense.
    @param trigger_id (int) - Trigger id of the trigger to delete.
    @return (bool) - Boolean indicating whether TriggersDelete was successful.
    """
    # NOTE(review): this endpoint has no '.json' suffix, unlike the other
    # delete wrappers in this class -- confirm against the API docs.
    endpoint = '/triggers/{0}'.format(trigger_id)
    if not self.__SenseApiCall__(endpoint, 'DELETE'):
        # Record the failure so the caller can inspect it.
        self.__error__ = "api call unsuccessful"
        return False
    return True
<SYSTEM_TASK:>
Disconnect a notification from a sensor-trigger combination.
<END_TASK>
<USER_TASK:>
Description:
def SensorsTriggersNotificationsDelete(self, sensor_id, trigger_id, notification_id):
    """
    Disconnect a notification from a sensor-trigger combination.
    @param sensor_id (int) - Sensor id of the sensor-trigger combination.
    @param trigger_id (int) - Trigger id of the sensor-trigger combination.
    @param notification_id (int) - Notification id of the notification to disconnect.
    @return (bool) - Boolean indicating whether SensorsTriggersNotificationsDelete was successful.
    """
    if self.__SenseApiCall__('/sensors/{0}/triggers/{1}/notifications/{2}.json'.format(sensor_id, trigger_id, notification_id), 'DELETE'):
        return True
    else:
        # __SenseApiCall__ failed; record the error for the caller to inspect.
        self.__error__ = "api call unsuccessful"
        return False
<SYSTEM_TASK:>
Connect a notification to a sensor-trigger combination.
<END_TASK>
<USER_TASK:>
Description:
def SensorsTriggersNotificationsPost(self, sensor_id, trigger_id, parameters):
    """
    Connect a notification to a sensor-trigger combination.
    @param sensor_id (int) - Sensor id of the sensor-trigger combination.
    @param trigger_id (int) - Trigger id of the sensor-trigger combination.
    @param parameters (dictionary) - Dictionary containing the notification to connect.
    @return (bool) - Boolean indicating whether SensorsTriggersNotificationsPost was successful.
    """
    endpoint = '/sensors/{0}/triggers/{1}/notifications.json'.format(sensor_id, trigger_id)
    if not self.__SenseApiCall__(endpoint, 'POST', parameters = parameters):
        # Record the failure so the caller can inspect it.
        self.__error__ = "api call unsuccessful"
        return False
    return True
<SYSTEM_TASK:>
Delete a notification from CommonSense.
<END_TASK>
<USER_TASK:>
Description:
def NotificationsDelete(self, notification_id):
    """
    Delete a notification from CommonSense.
    @param notification_id (int) - Notification id of the notification to delete.
    @return (bool) - Boolean indicating whether NotificationsDelete was successful.
    """
    endpoint = '/notifications/{0}.json'.format(notification_id)
    if not self.__SenseApiCall__(endpoint, 'DELETE'):
        # Record the failure so the caller can inspect it.
        self.__error__ = "api call unsuccessful"
        return False
    return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.