Unnamed: 0 (int64, 0 to 389k) | code (string, lengths 26 to 79.6k) | docstring (string, lengths 1 to 46.9k) |
---|---|---|
1,100 | def findall(dir = os.curdir):
all_files = []
for base, dirs, files in os.walk(dir, followlinks=True):
if base==os.curdir or base.startswith(os.curdir+os.sep):
base = base[2:]
if base:
files = [os.path.join(base, f) for f in files]
all_files.extend(filter(os.path.isfile, files))
return all_files | Find all files under 'dir' and return the list of full filenames
(relative to 'dir'). |
1,101 | def unit_vector_game(n, avoid_pure_nash=False, random_state=None):
random_state = check_random_state(random_state)
payoff_arrays = (np.zeros((n, n)), random_state.random_sample((n, n)))
if not avoid_pure_nash:
ones_ind = random_state.randint(n, size=n)
payoff_arrays[0][ones_ind, np.arange(n)] = 1
else:
if n == 1:
raise ValueError()
maxes = payoff_arrays[1].max(axis=0)
is_suboptimal = payoff_arrays[1] < maxes
nums_suboptimal = is_suboptimal.sum(axis=1)
while (nums_suboptimal==0).any():
payoff_arrays[1][:] = random_state.random_sample((n, n))
payoff_arrays[1].max(axis=0, out=maxes)
np.less(payoff_arrays[1], maxes, out=is_suboptimal)
is_suboptimal.sum(axis=1, out=nums_suboptimal)
for i in range(n):
one_ind = random_state.randint(n)
while not is_suboptimal[i, one_ind]:
one_ind = random_state.randint(n)
payoff_arrays[0][one_ind, i] = 1
g = NormalFormGame(
[Player(payoff_array) for payoff_array in payoff_arrays]
)
return g | Return a NormalFormGame instance of the 2-player game "unit vector
game" (Savani and von Stengel, 2016). Payoffs for player 1 are
chosen randomly from the [0, 1) range. For player 0, each column
contains exactly one 1 payoff and the rest is 0.
Parameters
----------
n : scalar(int)
Number of actions.
avoid_pure_nash : bool, optional(default=False)
If True, player 0's payoffs will be placed in order to avoid
pure Nash equilibria. (If necessary, the payoffs for player 1
are redrawn so as not to have a dominant action.)
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
g : NormalFormGame
Examples
--------
>>> g = unit_vector_game(4, random_state=1234)
>>> g.players[0]
Player([[ 1., 0., 1., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., 0., 0.],
[ 0., 1., 0., 0.]])
>>> g.players[1]
Player([[ 0.19151945, 0.62210877, 0.43772774, 0.78535858],
[ 0.77997581, 0.27259261, 0.27646426, 0.80187218],
[ 0.95813935, 0.87593263, 0.35781727, 0.50099513],
[ 0.68346294, 0.71270203, 0.37025075, 0.56119619]])
With `avoid_pure_nash=True`:
>>> g = unit_vector_game(4, avoid_pure_nash=True, random_state=1234)
>>> g.players[0]
Player([[ 1., 1., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 1., 1.],
[ 0., 0., 0., 0.]])
>>> g.players[1]
Player([[ 0.19151945, 0.62210877, 0.43772774, 0.78535858],
[ 0.77997581, 0.27259261, 0.27646426, 0.80187218],
[ 0.95813935, 0.87593263, 0.35781727, 0.50099513],
[ 0.68346294, 0.71270203, 0.37025075, 0.56119619]])
>>> pure_nash_brute(g)
[] |
1,102 | def contains(self, key):
if self._jconf is not None:
return self._jconf.contains(key)
else:
return key in self._conf | Does this configuration contain a given key? |
1,103 | def get_queryset(self):
kwargs = {}
if self.ends_at:
kwargs.update({'%s__lt' % self.date_field: self.ends_at})
return super(BeforeMixin, self).get_queryset().filter(**kwargs) | Implements before date filtering on ``date_field`` |
1,104 | def remove_unreferenced_items(self, stale_cts):
stale_ct_ids = list(stale_cts.keys())
parent_types = (ContentItem.objects.order_by()
.exclude(polymorphic_ctype__in=stale_ct_ids)
.values_list('parent_type', flat=True).distinct())
num_unreferenced = 0
for ct_id in parent_types:
parent_ct = ContentType.objects.get_for_id(ct_id)
unreferenced_items = (ContentItem.objects
.filter(parent_type=ct_id)
.order_by(, ))
if parent_ct.model_class() is not None:
unreferenced_items = unreferenced_items.exclude(
parent_id__in=parent_ct.get_all_objects_for_this_type()
)
if unreferenced_items:
for item in unreferenced_items:
self.stdout.write(
"- {cls}
cls=item.__class__.__name__, id=item.pk,
app_label=parent_ct.app_label, model=parent_ct.model
))
num_unreferenced += 1
if not self.dry_run and self.remove_unreferenced:
item.delete()
if not num_unreferenced:
self.stdout.write("No unreferenced items found.")
else:
self.stdout.write("{0} unreferenced items found.".format(num_unreferenced))
if not self.remove_unreferenced:
self.stdout.write("Re-run this command with --remove-unreferenced to remove these items") | See if there are items that no longer point to an existing parent. |
1,105 | def get_organizer(self, id, **data):
return self.get("/organizers/{0}/".format(id), data=data) | GET /organizers/:id/
Gets an :format:`organizer` by ID as ``organizer``. |
1,106 | def _copy_from(self, node,
copy_leaves=True,
overwrite=False,
with_links=True):
def _copy_skeleton(node_in, node_out):
new_annotations = node_out.v_annotations
node_in._annotations = new_annotations
node_in.v_comment = node_out.v_comment
def _add_leaf(leaf):
leaf_full_name = leaf.v_full_name
try:
found_leaf = self.f_get(leaf_full_name,
with_links=False,
shortcuts=False,
auto_load=False)
if overwrite:
found_leaf.__setstate__(leaf.__getstate__())
return found_leaf
except AttributeError:
pass
if copy_leaves is True or (copy_leaves == 'explored' and
leaf.v_is_parameter and leaf.v_explored):
new_leaf = self.f_add_leaf(cp.copy(leaf))
else:
new_leaf = self.f_add_leaf(leaf)
if new_leaf.v_is_parameter and new_leaf.v_explored:
self._explored_parameters[new_leaf.v_full_name] = new_leaf
return new_leaf
def _add_group(group):
group_full_name = group.v_full_name
try:
found_group = self.f_get(group_full_name,
with_links=False,
shortcuts=False,
auto_load=False)
if overwrite:
_copy_skeleton(found_group, group)
return found_group
except AttributeError:
pass
new_group = self.f_add_group(group_full_name)
_copy_skeleton(new_group, group)
return new_group
is_run = self._is_run
self._is_run = False
try:
if node.v_is_leaf:
return _add_leaf(node)
elif node.v_is_group:
other_root = node.v_root
if other_root is self:
raise RuntimeError()
result = _add_group(node)
nodes_iterator = node.f_iter_nodes(recursive=True, with_links=with_links)
has_links = []
if node._links:
has_links.append(node)
for child in nodes_iterator:
if child.v_is_leaf:
_add_leaf(child)
else:
_add_group(child)
if child._links:
has_links.append(child)
if with_links:
for current in has_links:
mine = self.f_get(current.v_full_name, with_links=False,
shortcuts=False, auto_load=False)
my_link_set = set(mine._links.keys())
other_link_set = set(current._links.keys())
new_links = other_link_set - my_link_set
for link in new_links:
where_full_name = current._links[link].v_full_name
mine.f_add_link(link, where_full_name)
return result
else:
raise RuntimeError()
except Exception:
self._is_run = is_run | Pass a ``node`` to insert the full tree to the trajectory.
Considers all links in the given node!
Ignores nodes already found in the current trajectory.
:param node: The node to insert
:param copy_leaves:
If leaves should be **shallow** copied or simply referred to by both trees.
**Shallow** copying is established using the copy module.
Accepts the setting ``'explored'`` to only copy explored parameters.
Note that ``v_full_copy`` determines how these will be copied.
:param overwrite:
If existing elements should be overwritten. Requires ``__getstate__`` and
``__setstate__`` being implemented in the leaves.
:param with_links: If links should be ignored or followed and copied as well
:return: The corresponding (new) node in the tree. |
1,107 | def clear_modules(self):
for aModule in compat.itervalues(self.__moduleDict):
aModule.clear()
self.__moduleDict = dict() | Clears the modules snapshot. |
1,108 | def add(self, name, path):
if not (os.path.exists(path)):
raise ValueError("Workspace path `%s` doesn't exists." % path)
if (self.exists(name)):
raise ValueError("Workspace `%s` already exists." % name)
self.config["workspaces"][name] = {"path": path, "repositories": {}}
self.config.write() | Add a workspace entry in user config file. |
1,109 | def read_raw_parser_conf(data: str) -> dict:
config = configparser.ConfigParser(allow_no_value=True)
config.read_string(data)
try:
_data: dict = dict(config["commitizen"])
if "files" in _data:
files = _data["files"]
_f = json.loads(files)
_data.update({"files": _f})
return _data
except KeyError:
return {} | We expect to have a section like this
```
[commitizen]
name = cz_jira
files = [
"commitizen/__version__.py",
"pyproject.toml"
] # this tab at the end is important
``` |
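A minimal usage sketch of `read_raw_parser_conf`; the section contents below are illustrative, not taken from any real project:
>>> raw = '[commitizen]\nname = cz_jira\nfiles = ["a.py", "b.toml"]\n'
>>> read_raw_parser_conf(raw)
{'name': 'cz_jira', 'files': ['a.py', 'b.toml']}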
1,110 | async def close(self):
if self._closed:
return
self._check_init()
self._closing = True
warning_callback = None
try:
warning_callback = self._loop.call_later(
60, self._warn_on_long_close)
release_coros = [
ch.wait_until_released() for ch in self._holders]
await asyncio.gather(*release_coros, loop=self._loop)
close_coros = [
ch.close() for ch in self._holders]
await asyncio.gather(*close_coros, loop=self._loop)
except Exception:
self.terminate()
raise
finally:
if warning_callback is not None:
warning_callback.cancel()
self._closed = True
self._closing = False | Attempt to gracefully close all connections in the pool.
Wait until all pool connections are released, close them and
shut down the pool. If any error (including cancellation) occurs
in ``close()`` the pool will terminate by calling
:meth:`Pool.terminate() <pool.Pool.terminate>`.
It is advisable to use :func:`python:asyncio.wait_for` to set
a timeout.
.. versionchanged:: 0.16.0
``close()`` now waits until all pool connections are released
before closing them and the pool. Errors raised in ``close()``
will cause immediate pool termination. |
1,111 | def solvent_per_layer(self):
if self._solvent_per_layer:
return self._solvent_per_layer
assert not (self.solvent_per_lipid is None and self.n_solvent is None)
if self.solvent_per_lipid is not None:
assert self.n_solvent is None
self._solvent_per_layer = self.n_lipids_per_layer * self.solvent_per_lipid
elif self.n_solvent is not None:
assert self.solvent_per_lipid is None
self._solvent_per_layer = self.n_solvent / 2
return self._solvent_per_layer | Determine the number of solvent molecules per single layer. |
1,112 | def bulk_insert(self, rows, return_model=False):
if self.conflict_target or self.conflict_action:
compiler = self._build_insert_compiler(rows)
objs = compiler.execute_sql(return_id=True)
if return_model:
return [self.model(**dict(r, **k)) for r, k in zip(rows, objs)]
else:
return [dict(r, **k) for r, k in zip(rows, objs)]
return super().bulk_create([self.model(**fields) for fields in rows]) | Creates multiple new records in the database.
This allows specifying custom conflict behavior using .on_conflict().
If no special behavior was specified, this uses the normal Django create(..)
Arguments:
rows:
An array of dictionaries, where each dictionary
describes the fields to insert.
return_model (default: False):
If model instances should be returned rather than
just dicts.
Returns:
A list of either the dicts of the rows inserted (including the pk), or
the model instances of the rows inserted (with defaults for any fields not specified) |
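A hedged usage sketch; `MyModel` is a hypothetical model, and the `on_conflict`/`ConflictAction` names assume the django-postgres-extra manager this method appears to belong to (an assumption, not confirmed by the snippet):
>>> from psqlextra.types import ConflictAction
>>> rows = [{'name': 'alice'}, {'name': 'bob'}]
>>> objs = (MyModel.objects
...         .on_conflict(['name'], ConflictAction.UPDATE)
...         .bulk_insert(rows, return_model=True))
Without a prior .on_conflict() call, the method simply falls back to Django's bulk_create().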
1,113 | def add(self, iocb):
if _debug: IOGroup._debug("add %r", iocb)
self.ioMembers.append(iocb)
self.ioState = PENDING
self.ioComplete.clear()
iocb.add_callback(self.group_callback) | Add an IOCB to the group; you can also add other groups. |
1,114 | def labels(self):
return [
name for name in os.listdir(self.root)
if os.path.isdir(os.path.join(self.root, name))
] | Return the unique labels assigned to the documents. |
1,115 | def add_url(self, url, description=None):
url = {
'value': url,
}
if description:
url['description'] = description
self._append_to('urls', url) | Add a personal website.
Args:
:param url: url to the person's website.
:type url: string
:param description: short description of the website.
:type description: string |
1,116 | def encode_all_features(dataset, vocabulary):
def my_fn(features):
ret = {}
for k, v in features.items():
v = vocabulary.encode_tf(v)
v = tf.concat([tf.to_int64(v), [1]], 0)
ret[k] = v
return ret
return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) | Encode all features.
Args:
dataset: a tf.data.Dataset
vocabulary: a vocabulary.Vocabulary
Returns:
a tf.data.Dataset |
1,117 | def create(cls, env, filenames, trim=False):
import_graph = cls(env)
for filename in filenames:
import_graph.add_file_recursive(os.path.abspath(filename), trim)
import_graph.build()
return import_graph | Create and return a final graph.
Args:
env: An environment.Environment object
filenames: A list of filenames
trim: Whether to trim the dependencies of builtin and system files.
Returns:
An immutable ImportGraph with the recursive dependencies of all the
files in filenames |
1,118 | def check_exists(path, type='file'):
if type == 'file':
if not os.path.isfile(path):
raise RuntimeError('%s is not a file or does not exist' % path)
else:
if not os.path.isdir(path):
raise RuntimeError('%s is not a folder or does not exist' % path)
return True | Check if a file or a folder exists |
1,119 | def api_walk(uri, per_page=100, key="login"):
page = 1
result = []
while True:
response = get_json(uri + "?page=%d&per_page=%d" % (page, per_page))
if len(response) == 0:
break
else:
page += 1
for r in response:
if key == USER_LOGIN:
result.append(user_login(r))
else:
result.append(r[key])
return list(set(result)) | For a GitHub URI, walk all the pages until there's no more content |
1,120 | def terminate_ex(self, nodes, threads=False, attempts=3):
while nodes and attempts > 0:
if threads:
nodes = self.terminate_with_threads(nodes)
else:
nodes = self.terminate(nodes)
if nodes:
logger.info("Attempt to terminate the remaining instances once more.")
attempts -= 1
return nodes | Wrapper method for terminate.
:param nodes: Nodes to be destroyed.
:type nodes: ``list``
:param attempts: The amount of attempts for retrying to terminate failed instances.
:type attempts: ``int``
:param threads: Whether to use the threaded approach or not.
:type threads: ``bool`` |
1,121 | def main(self):
while True:
while self.ready:
task, a = self.ready.popleft()
self.run_task(task, *a)
if self.scheduled:
timeout = self.scheduled.timeout()
if timeout < 0:
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
if not self.registered:
time.sleep(timeout)
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
else:
timeout = -1
if not self.registered:
self.stopped.send(True)
return
events = None
try:
events = self.poll.poll(timeout=timeout)
except IOError:
pass
if events:
self.spawn(self.dispatch_events, events) | Scheduler steps:
- run ready until exhaustion
- if there's something scheduled
- run overdue scheduled immediately
- or if there's nothing registered, sleep until next scheduled
and then go back to ready
- if there's nothing registered and nothing scheduled, we've
deadlocked, so stop
- poll on registered, with timeout of next scheduled, if something
is scheduled |
1,122 | def page(self, number):
number = self.validate_number(number)
bottom = (number - 1) * self.per_page
top = bottom + self.per_page
page_items = self.object_list[bottom:top]
if not page_items:
if number == 1 and self.allow_empty_first_page:
pass
else:
raise EmptyPage()
return InfinitePage(page_items, number, self) | Returns a Page object for the given 1-based page number. |
1,123 | async def update_data_status(self, **kwargs):
await self._send_manager_command(ExecutorProtocol.UPDATE, extra_fields={
ExecutorProtocol.UPDATE_CHANGESET: kwargs
}) | Update (PATCH) Data object.
:param kwargs: The dictionary of
:class:`~resolwe.flow.models.Data` attributes to be changed. |
1,124 | def get_neighbor_ip(ip_addr, cidr="30"):
our_octet = None
neighbor_octet = None
try:
ip_addr_split = ip_addr.split(".")
max_counter = 0
if int(cidr) == 30:
ranger = 4
elif int(cidr) == 31:
ranger = 2
while max_counter < 256:
try:
if int(ip_addr_split[3]) >= max_counter and int(ip_addr_split[3]) < (max_counter + ranger):
if ranger == 4:
our_octet = max_counter + 1
neighbor_octet = max_counter + 2
break
elif ranger == 2:
our_octet = max_counter
neighbor_octet = max_counter + 1
break
max_counter += ranger
except UnboundLocalError:
print("The mask between the neighbors must be 30, or 31")
exit("BAD NEIGHBOR MASK")
if int(ip_addr_split[3]) == our_octet:
our_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], our_octet)
neighbor_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], neighbor_octet)
elif int(ip_addr_split[3]) == neighbor_octet:
neighbor_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], our_octet)
our_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], neighbor_octet)
else:
our_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], our_octet)
neighbor_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], neighbor_octet)
return our_ip_addr, neighbor_ip_addr
except IndexError:
LOGGER.critical('Invalid IPv4 address {item} for CIDR /{cidr}'.format(item=ip_addr,
cidr=cidr))
raise IndexError("You have entered invalid input, you must enter a ipv4 address") | Function to figure out the IP's between neighbors address
Args:
ip_addr: Unicast IP address in the following format 192.168.1.1
cidr: CIDR value of 30, or 31
Returns: returns Our IP and the Neighbor IP in a tuple |
1,125 | def get_state_variable_from_storage(
self, address: str, params: Optional[List[str]] = None
) -> str:
params = params or []
(position, length, mappings) = (0, 1, [])
try:
if params[0] == "mapping":
if len(params) < 3:
raise CriticalError("Invalid number of parameters.")
position = int(params[1])
position_formatted = utils.zpad(utils.int_to_big_endian(position), 32)
for i in range(2, len(params)):
key = bytes(params[i], "utf8")
key_formatted = utils.rzpad(key, 32)
mappings.append(
int.from_bytes(
utils.sha3(key_formatted + position_formatted),
byteorder="big",
)
)
length = len(mappings)
if length == 1:
position = mappings[0]
else:
if len(params) >= 4:
raise CriticalError("Invalid number of parameters.")
if len(params) >= 1:
position = int(params[0])
if len(params) >= 2:
length = int(params[1])
if len(params) == 3 and params[2] == "array":
position_formatted = utils.zpad(
utils.int_to_big_endian(position), 32
)
position = int.from_bytes(
utils.sha3(position_formatted), byteorder="big"
)
except ValueError:
raise CriticalError(
"Invalid storage index. Please provide a numeric value."
)
outtxt = []
try:
if length == 1:
outtxt.append(
"{}: {}".format(
position, self.eth.eth_getStorageAt(address, position)
)
)
else:
if len(mappings) > 0:
for i in range(0, len(mappings)):
position = mappings[i]
outtxt.append(
"{}: {}".format(
hex(position),
self.eth.eth_getStorageAt(address, position),
)
)
else:
for i in range(position, position + length):
outtxt.append(
"{}: {}".format(
hex(i), self.eth.eth_getStorageAt(address, i)
)
)
except FileNotFoundError as e:
raise CriticalError("IPC error: " + str(e))
except ConnectionError:
raise CriticalError(
"Could not connect to RPC server. "
"Make sure that your node is running and that RPC parameters are set correctly."
)
return "\n".join(outtxt) | Get variables from the storage
:param address: The contract address
:param params: The list of parameters
param types: [position, length] or ["mapping", position, key1, key2, ... ]
or [position, length, array]
:return: The corresponding storage slot and its value |
1,126 | def parse_argv(self, argv=None, location=''):
if argv is None:
argv = list(sys.argv)
argv.pop(0)
self._parse_options(argv, location)
self._parse_positional_arguments(argv) | Parse command line arguments.
argv <list str> or None:
The argument list to parse. None means use a copy of sys.argv. argv[0] is
ignored.
location = '' <str>:
A user friendly string describing where the parser got this
data from. '' means use "Command line." if args == None, and
"Builtin default." otherwise. |
1,127 | async def close_authenticator_async(self):
_logger.info("Shutting down CBS session on connection: %r.", self._connection.container_id)
try:
self._cbs_auth.destroy()
_logger.info("Auth closed, destroying session on connection: %r.", self._connection.container_id)
await self._session.destroy_async()
finally:
_logger.info("Finished shutting down CBS session on connection: %r.", self._connection.container_id) | Close the CBS auth channel and session asynchronously. |
1,128 | def formula_dual(input_formula: str) -> str:
conversion_dictionary = {
'&': '|',
'|': '&',
'true': 'false',
'false': 'true'
}
return re.sub(
'|'.join(re.escape(key) for key in conversion_dictionary.keys()),
lambda k: conversion_dictionary[k.group(0)], input_formula) | Returns the dual of the input formula.
The dual operation on formulas in :math:`B^+(X)` is defined as:
the dual :math:`\overline{θ}` of a formula :math:`θ` is obtained from θ by
switching :math:`∧` and :math:`∨`, and
by switching :math:`true` and :math:`false`.
:param str input_formula: original string.
:return: *(str)*, dual of input formula. |
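A hedged usage sketch, assuming the stripped conversion table maps `&` to `|`, `|` to `&`, and `true`/`false` to each other as the docstring describes:
>>> formula_dual("a & (b | true)")
'a | (b & false)'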
1,129 | def get_total_contributors(self, repo):
repo_contributors = 0
for contributor in repo.iter_contributors():
repo_contributors += 1
self.unique_contributors[contributor.id].append(repo.name)
self.contributors_json[repo.name].append(contributor.to_json())
return repo_contributors | Retrieves the number of contributors to a repo in the organization.
Also adds to unique contributor list. |
1,130 | def wait_until_done(self, timeout=None):
start = datetime.now()
if not self.__th:
raise IndraDBRestResponseError("There is no thread waiting to "
"complete.")
self.__th.join(timeout)
now = datetime.now()
dt = now - start
if self.__th.is_alive():
logger.warning("Timed out after %0.3f seconds waiting for "
"statement load to complete." % dt.total_seconds())
ret = False
else:
logger.info("Waited %0.3f seconds for statements to finish loading."
% dt.total_seconds())
ret = True
return ret | Wait for the background load to complete. |
1,131 | def manage_recurring_payments_profile_status(self, profileid, action,
note=None):
args = self._sanitize_locals(locals())
if not note:
del args['note']
return self._call('ManageRecurringPaymentsProfileStatus', **args) | Shortcut to the ManageRecurringPaymentsProfileStatus method.
``profileid`` is the same profile id used for getting profile details.
``action`` should be either 'Cancel', 'Suspend', or 'Reactivate'.
``note`` is optional and is visible to the user. It contains the
reason for the change in status. |
1,132 | def set_write_bit(fn):
fn = fs_encode(fn)
if not os.path.exists(fn):
return
file_stat = os.stat(fn).st_mode
os.chmod(fn, file_stat | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
if not os.path.isdir(fn):
for path in [fn, os.path.dirname(fn)]:
try:
os.chflags(path, 0)
except AttributeError:
pass
return None
for root, dirs, files in os.walk(fn, topdown=False):
for dir_ in [os.path.join(root, d) for d in dirs]:
set_write_bit(dir_)
for file_ in [os.path.join(root, f) for f in files]:
set_write_bit(file_) | Set read-write permissions for the current user on the target path. Fail silently
if the path doesn't exist.
:param str fn: The target filename or path
:return: None |
1,133 | def distance_to_interval(self, start, end):
if self.start > end:
return self.start - end
elif self.end < start:
return start - self.end
else:
return 0 | Find the distance between intervals [start1, end1] and [start2, end2].
If the intervals overlap then the distance is 0. |
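A small illustration of the overlap rule; `Interval` stands for a hypothetical wrapper exposing `start`, `end` and this method:
>>> Interval(5, 10).distance_to_interval(12, 20)   # disjoint intervals: gap of 2
2
>>> Interval(5, 10).distance_to_interval(8, 20)    # overlapping intervals: distance 0
0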
1,134 | def expanded_counts_map(self):
if self.hpx._ipix is None:
return self.counts
output = np.zeros(
(self.counts.shape[0], self.hpx._maxpix), self.counts.dtype)
for i in range(self.counts.shape[0]):
output[i][self.hpx._ipix] = self.counts[i]
return output | return the full counts map |
1,135 | def output(self, resource):
@wraps(resource)
def wrapper(*args, **kwargs):
rv = resource(*args, **kwargs)
rv = self.responder(rv)
return rv
return wrapper | Wrap a resource (as a flask view function).
This is for cases where the resource does not directly return
a response object. Now everything should be a Response object.
:param resource: The resource as a flask view function |
1,136 | def record_launch(self, queue_id):
self.launches.append(
AttrDict(queue_id=queue_id, mpi_procs=self.mpi_procs, omp_threads=self.omp_threads,
mem_per_proc=self.mem_per_proc, timelimit=self.timelimit))
return len(self.launches) | Save submission |
1,137 | def safe_print(msg):
try:
print(msg)
except UnicodeEncodeError:
try:
encoded = msg.encode(sys.stdout.encoding, "replace")
decoded = encoded.decode(sys.stdout.encoding, "replace")
print(decoded)
except (UnicodeDecodeError, UnicodeEncodeError):
print(u"[ERRO] An unexpected error happened while printing to stdout.")
print(u"[ERRO] Please check that your file/string encoding matches the shell encoding.")
print(u"[ERRO] If possible, set your shell encoding to UTF-8 and convert any files with legacy encodings.") | Safely print a given Unicode string to stdout,
possibly replacing characters non-printable
in the current stdout encoding.
:param string msg: the message |
1,138 | def data_size(self, live_data=None):
if live_data is not None:
warnings.warn("The keyword argument is deprecated.",
DeprecationWarning)
output = self.nodetool()[0]
return _get_load_from_info_output(output) | Uses `nodetool info` to get the size of a node's data in KB. |
1,139 | def make_simple_equity_info(sids,
start_date,
end_date,
symbols=None,
names=None,
exchange='TEST'):
num_assets = len(sids)
if symbols is None:
symbols = list(ascii_uppercase[:num_assets])
else:
symbols = list(symbols)
if names is None:
names = [str(s) + " INC." for s in symbols]
return pd.DataFrame(
{
'symbol': symbols,
'start_date': pd.to_datetime([start_date] * num_assets),
'end_date': pd.to_datetime([end_date] * num_assets),
'asset_name': list(names),
'exchange': exchange,
},
index=sids,
columns=(
'start_date',
'end_date',
'symbol',
'exchange',
'asset_name',
),
) | Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`.
Parameters
----------
sids : array-like of int
start_date : pd.Timestamp, optional
end_date : pd.Timestamp, optional
symbols : list, optional
Symbols to use for the assets.
If not provided, symbols are generated from the sequence 'A', 'B', ...
names : list, optional
Names to use for the assets.
If not provided, names are generated by adding " INC." to each of the
symbols (which might also be auto-generated).
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets. |
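A hedged usage sketch, assuming the column names shown in the reconstructed dictionary above (they may differ from the original source):
>>> import pandas as pd
>>> frame = make_simple_equity_info(
...     sids=[1, 2, 3],
...     start_date=pd.Timestamp('2020-01-02'),
...     end_date=pd.Timestamp('2020-12-31'),
... )
>>> list(frame['symbol'])
['A', 'B', 'C']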
1,140 | def set_usage_rights_courses(self, file_ids, course_id, usage_rights_use_justification, folder_ids=None, publish=None, usage_rights_legal_copyright=None, usage_rights_license=None):
path = {}
data = {}
params = {}
path["course_id"] = course_id
data["file_ids"] = file_ids
if folder_ids is not None:
data["folder_ids"] = folder_ids
if publish is not None:
data["publish"] = publish
self._validate_enum(usage_rights_use_justification, ["own_copyright", "used_by_permission", "fair_use", "public_domain", "creative_commons"])
data["usage_rights[use_justification]"] = usage_rights_use_justification
if usage_rights_legal_copyright is not None:
data["usage_rights[legal_copyright]"] = usage_rights_legal_copyright
if usage_rights_license is not None:
data["usage_rights[license]"] = usage_rights_license
self.logger.debug("PUT /api/v1/courses/{course_id}/usage_rights with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/usage_rights".format(**path), data=data, params=params, single_item=True) | Set usage rights.
Sets copyright and license information for one or more files |
1,141 | def tcounts(self):
df = pd.DataFrame([[t.name(), t.size()] for t in self.tables()], columns=["name", "size"])
df.index = df.name
return df | :return: a data frame containing the names and sizes for all tables |
1,142 | def _npy2fits(d, table_type='binary', write_bitcols=False):
npy_dtype = d[1][1:]
if npy_dtype[0] == 'S' or npy_dtype[0] == 'U':
name, form, dim = _npy_string2fits(d, table_type=table_type)
else:
name, form, dim = _npy_num2fits(
d, table_type=table_type, write_bitcols=write_bitcols)
return name, form, dim | d is the full element from the descr |
1,143 | def delay_on(self):
for retry in (True, False):
try:
self._delay_on, value = self.get_attr_int(self._delay_on, 'delay_on')
return value
except OSError:
if retry:
self._delay_on = None
else:
raise | The `timer` trigger will periodically change the LED brightness between
0 and the current brightness setting. The `on` time can
be specified via `delay_on` attribute in milliseconds. |
1,144 | def import_from_api(request):
if request.method == 'POST':
form = ImportFromAPIForm(request.POST)
if form.is_valid():
base_url = re.sub(r'\/$', '', form.cleaned_data['source_site_base_url'])
import_url = (
base_url + reverse('wagtailimportexport:export', args=[form.cleaned_data['source_page_id']])
)
r = requests.get(import_url)
import_data = r.json()
parent_page = form.cleaned_data['parent_page']
try:
page_count = import_pages(import_data, parent_page)
except LookupError as e:
messages.error(request, _(
"Import failed: %(reason)s") % {'reason': e}
)
else:
messages.success(request, ungettext(
"%(count)s page imported.",
"%(count)s pages imported.",
page_count) % {'count': page_count}
)
return redirect('wagtailadmin_explore', parent_page.pk)
else:
form = ImportFromAPIForm()
return render(request, 'wagtailimportexport/import_from_api.html', {
'form': form,
}) | Import a part of a source site's page tree via a direct API request from
this Wagtail Admin to the source site
The source site's base url and the source page id of the point in the
tree to import defined what to import and the destination parent page
defines where to import it to. |
1,145 | async def setex(self, name, time, value):
if isinstance(time, datetime.timedelta):
time = time.seconds + time.days * 24 * 3600
return await self.execute_command('SETEX', name, time, value) | Set the value of key ``name`` to ``value`` that expires in ``time``
seconds. ``time`` can be represented by an integer or a Python
timedelta object. |
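A minimal usage sketch; `redis` stands for a hypothetical connected async client instance exposing this method, and the key/value are illustrative:
>>> import datetime
>>> await redis.setex('session:42', datetime.timedelta(minutes=5), 'payload')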
1,146 | def copy(self):
dup = type(self)()
dup._indices = OrderedDict(
(k, list(v)) for k,v in six.iteritems(self._indices)
)
dup._lines = self._lines.copy()
return dup | Create a copy of the mapping, including formatting information |
1,147 | def set_figure(self, figure, handle=None):
self.figure = figure
self.bkimage = None
self._push_handle = handle
wd = figure.plot_width
ht = figure.plot_height
self.configure_window(wd, ht)
doc = curdoc()
self.logger.info(str(dir(doc)))
self.logger.info("figure set") | Call this with the Bokeh figure object. |
1,148 | def get(key, default=None):
val = os.environ.get(key, default)
if val == 'True':
val = True
elif val == 'False':
val = False
return val | Retrieves env vars and makes Python boolean replacements |
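A quick sketch of the boolean replacement, assuming the 'True'/'False' string comparisons reconstructed above; the variable names are illustrative:
>>> import os
>>> os.environ['FEATURE_FLAG'] = 'True'
>>> get('FEATURE_FLAG')
True
>>> get('MISSING_KEY', default='fallback')
'fallback'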
1,149 | def substitution(self, substitution):
if isinstance(substitution, list):
for s in substitution:
self.add_substitution(s)
else:
self.add_substitution(substitution) | Add substitutions to the email
:param value: Add substitutions to the email
:type value: Substitution, list(Substitution) |
1,150 | def check_requirements(dist, attr, value):
try:
list(pkg_resources.parse_requirements(value))
except (TypeError, ValueError) as error:
tmpl = (
"{attr!r} must be a string or list of strings "
"containing valid project/version requirement specifiers; {error}"
)
raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) | Verify that install_requires is a valid requirements list |
1,151 | def _check_algorithm_values(item):
problems = []
for k, v in item.get("algorithm", {}).items():
if v is True and k not in ALG_ALLOW_BOOLEANS:
problems.append("%s set as true" % k)
elif v is False and (k not in ALG_ALLOW_BOOLEANS and k not in ALG_ALLOW_FALSE):
problems.append("%s set as false" % k)
if len(problems) > 0:
raise ValueError("Incorrect settings in section for %s:\n%s"
"\nSee configuration documentation for supported options:\n%s\n"
% (item["description"], "\n".join(problems), ALG_DOC_URL)) | Check for misplaced inputs in the algorithms.
- Identify incorrect boolean values where a choice is required. |
1,152 | def load(self, table_names=None, table_schemas=None, table_rowgens=None):
if table_schemas is not None:
table_schemas = self._check_case_dict(table_schemas, warn=True)
for schema_key, schema_value in table_schemas.items():
table_schemas[schema_key] = self._check_columns(schema_value, add_id=True)
elif not self.dynamic_schema:
raise ValueError()
if table_rowgens is not None:
table_rowgens = self._check_case_dict(table_rowgens, warn=True)
if table_names is None:
if table_schemas is not None:
table_names = list(table_schemas.keys())
self._print(
% table_names)
elif table_rowgens is not None:
table_names = list(table_rowgens.keys())
self._print(
% table_names)
else:
req_params =
raise ValueError(
% req_params)
table_names = self._check_table_names(table_names, warn=True)
self._print( % table_names)
for tname in table_names:
if table_schemas is not None and tname in table_schemas:
self._schemas[tname] = list(table_schemas[tname])
if table_rowgens is not None and tname in table_rowgens:
self._rowgens[tname] = table_rowgens[tname]
with self._lock:
for tname in table_names:
tname = self._check_case_str(tname, warn=True)
tpath = os.path.join(self.root_dir, self.name, tname + '.csv')
if os.path.isfile(tpath):
if self.auto_load:
dataframe = read_csv(tpath, dtype=str)
self._db[tname] = dataframe
schema = self._check_columns(dataframe.columns.tolist())
self._schemas[tname] = schema
elif self.persistent:
raise ValueError(
% tname)
elif table_schemas is not None and tname in self._schemas:
self._db[tname] = DataFrame(columns=self._schemas[tname], dtype=str)
elif self.dynamic_schema:
self._print( % tname)
self._db[tname] = DataFrame(columns=self._blank_schema, dtype=str)
self._schemas[tname] = list(self._blank_schema)
else:
raise ValueError(
% tname) | Initiates the tables, schemas and record generators for this database.
Parameters
----------
table_names : list of str, str or None
List of tables to load into this database. If `auto_load` is true, inserting a record
into a new table not provided here will automatically create that table.
table_schemas : dict of <table_name, column_list> or None
Dictionary with each table name as a key and a list of its columns as value. Any keys
present here but not present in `table_names` will also trigger table creation, so
table names provided in both parameters are redundant but harmless.
table_rowgens: dict of <table_name, function> or None
For all tables present in the keys of the provided dictionary, when an insert operation
occurs, the corresponding function is called. The function must return a dictionary and
is used as a "base record" which is complemented by the actual record being inserted.
For example, when a table has a rowgen like `lambda: {"Timestamp": time.ctime()}` and
a record like `{"Name": "John"}` is inserted, the database will then contain a record
like `{"Timestamp": "Sun Jan 10 08:36:12 2016", "Name": "John"}`. |
1,153 | def filter_reads(self, input_bam, output_bam, metrics_file, paired=False, cpus=16, Q=30):
nodups = re.sub("\.bam$", "", output_bam) + ".nodups.nofilter.bam"
cmd1 = self.tools.sambamba + " markdup -t {0} -r --compression-level=0 {1} {2} 2> {3}".format(cpus, input_bam, nodups, metrics_file)
cmd2 = self.tools.sambamba + .format(cpus)
if paired:
cmd2 +=
else:
cmd2 +=
cmd2 += .format(Q)
cmd2 += .format(nodups)
cmd2 += self.tools.sambamba + " sort -t {0} /dev/stdin -o {1}".format(cpus, output_bam)
cmd3 = "if [[ -s {0} ]]; then rm {0}; fi".format(nodups)
cmd4 = "if [[ -s {0} ]]; then rm {0}; fi".format(nodups + ".bai")
return [cmd1, cmd2, cmd3, cmd4] | Remove duplicates, filter for >Q, remove multiple mapping reads.
For paired-end reads, keep only proper pairs. |
1,154 | def _from_dict(cls, _dict):
args = {}
if 'matching_results' in _dict:
args['matching_results'] = _dict.get('matching_results')
if 'hits' in _dict:
args['hits'] = [
QueryResult._from_dict(x) for x in (_dict.get('hits'))
]
return cls(**args) | Initialize a TopHitsResults object from a json dictionary. |
1,155 | def call_rpc(*inputs, **kwargs):
rpc_executor = kwargs['rpc_executor']
output = []
try:
value = inputs[1].pop()
addr = value.value >> 16
rpc_id = value.value & 0xFFFF
reading_value = rpc_executor.rpc(addr, rpc_id)
output.append(IOTileReading(0, 0, reading_value))
except (HardwareError, StreamEmptyError):
pass
for input_x in inputs:
input_x.skip_all()
return output | Call an RPC based on the encoded value read from input b.
The response of the RPC must be a 4 byte value that is used as
the output of this call. The encoded RPC must be a 32 bit value
encoded as "BBH":
B: ignored, should be 0
B: the address of the tile that we should call
H: The id of the RPC to call
All other readings are then skipped so that there are no
readings in any input queue when this function returns
Returns:
list(IOTileReading) |
1,156 | def init(plugin_manager, _, _2, _3):
page_pattern_course = r
page_pattern_scoreboard = r
plugin_manager.add_page(page_pattern_course, ScoreBoardCourse)
plugin_manager.add_page(page_pattern_scoreboard, ScoreBoard)
plugin_manager.add_hook('course_menu', course_menu)
plugin_manager.add_hook('task_menu', task_menu) | Init the plugin.
Available configuration in configuration.yaml:
::
- plugin_module: "inginious.frontend.plugins.scoreboard"
Available configuration in course.yaml:
::
- scoreboard: #you can define multiple scoreboards
- content: "taskid1" #creates a scoreboard for taskid1
name: "Scoreboard task 1"
- content: ["taskid2", "taskid3"] #creates a scoreboard for taskid2 and taskid3 (sum of both score is taken as overall score)
name: "Scoreboard for task 2 and 3"
- content: {"taskid4": 2, "taskid5": 3} #creates a scoreboard where overall score is 2*score of taskid4 + 3*score of taskid5
name: "Another scoreboard"
reverse: True #reverse the score (less is better) |
1,157 | def cite(self, max_authors=5):
citation_data = {
'title': self.title,
'authors': self.authors_et_al(max_authors),
'year': self.year,
'journal': self.journal,
'volume': self.volume,
'issue': self.issue,
'pages': self.pages,
}
citation = "{authors} ({year}). {title} {journal}".format(
**citation_data)
if self.volume and self.issue and self.pages:
citation += " {volume}({issue}): {pages}.".format(**citation_data)
elif self.volume and self.issue:
citation += " {volume}({issue}).".format(**citation_data)
elif self.volume and self.pages:
citation += " {volume}: {pages}.".format(**citation_data)
elif self.volume:
citation += " {volume}.".format(**citation_data)
elif self.pages:
citation += " {pages}.".format(**citation_data)
else:
citation += "."
return citation | Return string with a citation for the record, formatted as:
'{authors} ({year}). {title} {journal} {volume}({issue}): {pages}.' |
1,158 | def news(symbol, count=10, token='', version=''):
_raiseIfNotStr(symbol)
return _getJson('stock/' + symbol + '/news/last/' + str(count), token, version) | News about company
https://iexcloud.io/docs/api/#news
Continuous
Args:
symbol (string); Ticker to request
count (int): limit number of results
token (string); Access token
version (string); API version
Returns:
dict: result |
1,159 | def plot(self, flip=False, ax_channels=None, ax=None, *args, **kwargs):
if ax == None:
ax = pl.gca()
kwargs.setdefault('color', 'black')
if ax_channels is not None:
flip = self._find_orientation(ax_channels)
if not flip:
a1 = ax.axes.axvline(self.vert[0], *args, **kwargs)
a2 = ax.axes.axhline(self.vert[1], *args, **kwargs)
else:
a1 = ax.axes.axvline(self.vert[1], *args, **kwargs)
a2 = ax.axes.axhline(self.vert[0], *args, **kwargs)
return (a1, a2) | {_gate_plot_doc} |
1,160 | def pow(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"pow", other, axis=axis, level=level, fill_value=fill_value
) | Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied. |
1,161 | def from_string(cls, string):
cls.TYPE.setParseAction(cls.make)
try:
return cls.TYPE.parseString(string, parseAll=True)[0]
except ParseException:
log.error("Failed to parse ".format(string))
raise | Parse ``string`` into a CPPType instance |
1,162 | def cublasSger(handle, m, n, alpha, x, incx, y, incy, A, lda):
status = _libcublas.cublasSger_v2(handle,
m, n,
ctypes.byref(ctypes.c_float(alpha)),
int(x), incx,
int(y), incy, int(A), lda)
cublasCheckStatus(status) | Rank-1 operation on real general matrix. |
1,163 | def set_quota_volume(name, path, size, enable_quota=False):
cmd = 'volume quota {0}'.format(name)
if path:
cmd += ' limit-usage {0}'.format(path)
if size:
cmd += ' {0}'.format(size)
if enable_quota:
if not enable_quota_volume(name):
pass
if not _gluster(cmd):
return False
return True | Set quota to glusterfs volume.
name
Name of the gluster volume
path
Folder path for restriction in volume ("/")
size
Hard-limit size of the volume (MB/GB)
enable_quota
Enable quota before set up restriction
CLI Example:
.. code-block:: bash
salt '*' glusterfs.set_quota_volume <volume> <path> <size> enable_quota=True |
1,164 | def follow_bytes(self, s, index):
"Follows transitions."
for ch in s:
index = self.follow_char(int_from_byte(ch), index)
if index is None:
return None
return index | Follows transitions. |
1,165 | def removeTab(self, index):
curr_index = self.currentIndex()
items = list(self.items())
item = items[index]
item.close()
if index <= curr_index:
self._currentIndex -= 1 | Removes the tab at the inputed index.
:param index | <int> |
1,166 | def simxStart(connectionAddress, connectionPort, waitUntilConnected, doNotReconnectOnceDisconnected, timeOutInMs, commThreadCycleInMs):
if (sys.version_info[0] == 3) and (type(connectionAddress) is str):
connectionAddress=connectionAddress.encode()
return c_Start(connectionAddress, connectionPort, waitUntilConnected, doNotReconnectOnceDisconnected, timeOutInMs, commThreadCycleInMs) | Please have a look at the function description/documentation in the V-REP user manual |
1,167 | def hasReaders(self, ulBuffer):
fn = self.function_table.hasReaders
result = fn(ulBuffer)
return result | inexpensively checks for readers to allow writers to fast-fail potentially expensive copies and writes. |
1,168 | def apply_mask(self, x=None):
if x is None:
return np.delete(np.arange(len(self.time)), self.mask)
else:
return np.delete(x, self.mask, axis=0) | Returns the outlier mask, an array of indices corresponding to the
non-outliers.
:param numpy.ndarray x: If specified, returns the masked version of \
:py:obj:`x` instead. Default :py:obj:`None` |
1,169 | def command_x(self, x, to=None):
if to is None:
ActionChains(self.driver) \
.send_keys([Keys.COMMAND, x, Keys.COMMAND]) \
.perform()
else:
self.send_keys(to, [Keys.COMMAND, x, Keys.COMMAND]) | Sends a character to the currently active element with Command
pressed. This method takes care of pressing and releasing
Command. |
1,170 | def crop_frequencies(self, low=None, high=None, copy=False):
if low is not None:
low = units.Quantity(low, self._default_yunit)
if high is not None:
high = units.Quantity(high, self._default_yunit)
if low is not None and low == self.f0:
low = None
elif low is not None and low < self.f0:
warnings.warn('Spectrogram.crop_frequencies given low frequency cutoff '
'below f0 of the input Spectrogram. Low frequency crop '
'will have no effect.')
if high is not None and high.value == self.band[1]:
high = None
elif high is not None and high.value > self.band[1]:
warnings.warn('Spectrogram.crop_frequencies given high frequency cutoff '
'above the upper bound of the input Spectrogram. High '
'frequency crop will have no effect.')
if low is None:
idx0 = None
else:
idx0 = int(float(low.value - self.f0.value) // self.df.value)
if high is None:
idx1 = None
else:
idx1 = int(float(high.value - self.f0.value) // self.df.value)
if copy:
return self[:, idx0:idx1].copy()
return self[:, idx0:idx1] | Crop this `Spectrogram` to the specified frequencies
Parameters
----------
low : `float`
lower frequency bound for cropped `Spectrogram`
high : `float`
upper frequency bound for cropped `Spectrogram`
copy : `bool`
if `False` return a view of the original data, otherwise create
a fresh memory copy
Returns
-------
spec : `Spectrogram`
A new `Spectrogram` with a subset of data from the frequency
axis |
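A hedged usage sketch; `spec` stands for an existing `Spectrogram` instance whose frequency axis covers the requested band:
>>> cropped = spec.crop_frequencies(low=10, high=100, copy=True)
>>> view = spec.crop_frequencies(low=10, high=100)  # default copy=False returns a view of the original data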
1,171 | def format_choices(self):
ce = enumerate(self.choices)
f = lambda i, c: % (c, i+1)
toks = [f(i,c) for i, c in ce] + []
return .join(toks) | Return the choices in string form. |
1,172 | def set_current_thumbnail(self, thumbnail):
self.current_thumbnail = thumbnail
self.figure_viewer.load_figure(
thumbnail.canvas.fig, thumbnail.canvas.fmt)
for thumbnail in self._thumbnails:
thumbnail.highlight_canvas(thumbnail == self.current_thumbnail) | Set the currently selected thumbnail. |
1,173 | def text_entry(self):
allowed_sequences = set([, , ])
sys.stdout.write()
sys.stdout.flush()
cur_column -= 1
else:
self.roku.literal(val)
sys.stdout.write(val)
cur_column += 1
sys.stdout.flush()
sys.stdout.write(self.term.clear_bol)
sys.stdout.write(self.term.move(self.term.height, 0))
sys.stdout.flush() | Relay literal text entry from user to Roku until
<Enter> or <Esc> pressed. |
1,174 | def get_dict(self):
return {'hostname': self.get_name(),
'address': self.get_address(),
'protocol': self.get_protocol(),
'port': self.get_tcp_port()} | Returns a dict containing the host's attributes. The following
keys are contained:
- hostname
- address
- protocol
- port
:rtype: dict
:return: The resulting dictionary. |
1,175 | def clear_graph(identifier=None):
graph = get_graph()
if identifier:
graph.destroy(identifier)
try:
graph.close()
except:
warn("Unable to close the Graph") | Clean up a graph by removing it
:param identifier: Root identifier of the graph
:return: |
1,176 | def local_global_attention(x,
self_attention_bias,
hparams,
q_padding="LEFT",
kv_padding="LEFT"):
with tf.variable_scope("self_local_global_att"):
[x_global, x_local] = tf.split(x, 2, axis=-1)
split_hidden_size = int(hparams.hidden_size / 2)
split_heads = int(hparams.num_heads / 2)
if self_attention_bias is not None:
self_attention_bias = get_self_attention_bias(x)
y_global = common_attention.multihead_attention(
x_global,
None,
self_attention_bias,
hparams.attention_key_channels or split_hidden_size,
hparams.attention_value_channels or split_hidden_size,
split_hidden_size,
split_heads,
hparams.attention_dropout,
q_filter_width=hparams.q_filter_width,
kv_filter_width=hparams.kv_filter_width,
q_padding=q_padding,
kv_padding=kv_padding,
name="global_self_att")
y_local = common_attention.multihead_attention(
x_local,
None,
None,
hparams.attention_key_channels or split_hidden_size,
hparams.attention_value_channels or split_hidden_size,
split_hidden_size,
split_heads,
hparams.attention_dropout,
attention_type="local_masked",
block_length=hparams.block_length,
block_width=hparams.block_width,
q_filter_width=hparams.q_filter_width,
kv_filter_width=hparams.kv_filter_width,
q_padding=q_padding,
kv_padding=kv_padding,
name="local_self_att")
y = tf.concat([y_global, y_local], axis=-1)
return y | Local and global 1d self attention. |
1,177 | def compute(self, inputs, outputs):
if "resetIn" in inputs:
assert len(inputs["resetIn"]) == 1
if inputs["resetIn"][0] != 0:
self._tm.reset()
outputs["activeCells"][:] = 0
outputs["nextPredictedCells"][:] = 0
outputs["predictedActiveCells"][:] = 0
outputs["winnerCells"][:] = 0
return
activeColumns = inputs["activeColumns"].nonzero()[0]
if "apicalInput" in inputs:
apicalInput = inputs["apicalInput"].nonzero()[0]
else:
apicalInput = np.empty(0, dtype="uint32")
if "apicalGrowthCandidates" in inputs:
apicalGrowthCandidates = inputs["apicalGrowthCandidates"].nonzero()[0]
else:
apicalGrowthCandidates = apicalInput
self._tm.compute(activeColumns, apicalInput, apicalGrowthCandidates,
self.learn)
outputs["activeCells"][:] = 0
outputs["activeCells"][self._tm.getActiveCells()] = 1
outputs["nextPredictedCells"][:] = 0
outputs["nextPredictedCells"][
self._tm.getNextPredictedCells()] = 1
outputs["predictedActiveCells"][:] = 0
outputs["predictedActiveCells"][
self._tm.getPredictedActiveCells()] = 1
outputs["winnerCells"][:] = 0
outputs["winnerCells"][self._tm.getWinnerCells()] = 1 | Run one iteration of TM's compute. |
1,178 | def root_manifest_id(self, root_manifest_id):
if root_manifest_id is not None and len(root_manifest_id) > 32:
raise ValueError("Invalid value for `root_manifest_id`, length must be less than or equal to `32`")
self._root_manifest_id = root_manifest_id | Sets the root_manifest_id of this UpdateCampaignPutRequest.
:param root_manifest_id: The root_manifest_id of this UpdateCampaignPutRequest.
:type: str |
1,179 | def list_namespaced_stateful_set(self, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_stateful_set_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_stateful_set_with_http_info(namespace, **kwargs)
return data | list_namespaced_stateful_set # noqa: E501
list or watch objects of kind StatefulSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_stateful_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1StatefulSetList
If the method is called asynchronously,
returns the request thread. |
1,180 | def create_wsgi_request(event, server_name=):
path = urllib.url2pathname(event[])
script_name = (
event[][].endswith() and
event[][] or ).encode()
query = event[]
query_string = query and urllib.urlencode(query) or ""
body = event[] and event[].encode() or
environ = {
: ,
: path.encode(),
: query_string.encode(),
: event[
][][].encode(),
: event[].encode(),
: script_name,
: server_name.encode(),
: .encode(),
: u.encode(),
: sys.stderr,
: StringIO(body),
: False,
: False,
: False,
: u.encode(),
: (1, 0),
}
headers = event[]
if event[] in ("POST", "PUT", "PATCH"):
if in headers:
environ[] = headers[]
environ[] = str(len(body))
for header in list(event[].keys()):
wsgi_name = "HTTP_" + header.upper().replace(, )
environ[wsgi_name] = headers[header].encode()
if script_name:
path_info = environ[]
if script_name in path_info:
environ[].replace(script_name, )
remote_user = None
if event[].get():
remote_user = event[
][].get()
elif event[].get():
remote_user = event[][].get()
if remote_user:
environ[] = remote_user
environ[] = event[]
environ[] = event[]
return environ | Create a wsgi environment from an apigw request. |
1,181 | def flatten(text):
lines = text.split("\n")
tokens = []
for l in lines:
if len(l) == 0:
continue
l = l.replace("\t", " ")
tokens += list(filter(lambda x: len(x) > 0, l.split(" "))) + [""]
capturing = False
captured = []
flattened = []
while len(tokens) > 0:
tok = tokens.pop(0)
if not capturing and len(tok) == 0:
if len(captured) > 0:
flattened.append(" ".join(captured))
captured = []
continue
if tok.startswith("("):
tok = tok.lstrip("(")
capturing = True
if capturing and tok.endswith(")"):
tok = tok.rstrip(")")
capturing = False
captured.append(tok)
return "\n".join(flattened) | Flatten the text:
* make sure each record is on one line.
* remove parentheses |
1,182 | def wishart_pairwise_pvals(self, axis=0):
if axis != 0:
raise NotImplementedError("Pairwise comparison only implemented for colums")
return WishartPairwiseSignificance.pvals(self, axis=axis) | Return square symmetric matrix of pairwise column-comparison p-values.
Square, symmetric matrix along *axis* of pairwise p-values for the
null hypothesis that col[i] = col[j] for each pair of columns.
*axis* (int): axis along which to perform comparison. Only columns (0)
are implemented currently. |
1,183 | def parse_binary_descriptor(bindata):
func_names = {0: , 1: ,
2: , 3: ,
4: , 5: ,
6: , 7: }
if len(bindata) != 20:
raise ArgumentError("Invalid binary node descriptor with incorrect size", size=len(bindata), expected=20, bindata=bindata)
a_trig, b_trig, stream_id, a_id, b_id, proc, a_cond, b_cond, trig_combiner = struct.unpack("<LLHHHBBBB2x", bindata)
node_stream = DataStream.FromEncoded(stream_id)
if a_id == 0xFFFF:
raise ArgumentError("Invalid binary node descriptor with invalid first input", input_selector=a_id)
a_selector = DataStreamSelector.FromEncoded(a_id)
a_trigger = _process_binary_trigger(a_trig, a_cond)
b_selector = None
b_trigger = None
if b_id != 0xFFFF:
b_selector = DataStreamSelector.FromEncoded(b_id)
b_trigger = _process_binary_trigger(b_trig, b_cond)
if trig_combiner == SGNode.AndTriggerCombiner:
comb = '&&'
elif trig_combiner == SGNode.OrTriggerCombiner:
comb = '||'
else:
raise ArgumentError("Invalid trigger combiner in binary node descriptor", combiner=trig_combiner)
if proc not in func_names:
raise ArgumentError("Unknown processing function", function_id=proc, known_functions=func_names)
func_name = func_names[proc]
if b_selector is None:
return '({0} {1}) => {2} using {3}'.format(a_selector, a_trigger, node_stream, func_name)
return '({0} {1} {2} {3} {4}) => {5} using {6}'.format(a_selector, a_trigger, comb,
b_selector, b_trigger,
node_stream, func_name) | Convert a binary node descriptor into a string descriptor.
Binary node descriptor are 20-byte binary structures that encode all
information needed to create a graph node. They are used to communicate
that information to an embedded device in an efficent format. This
function exists to turn such a compressed node description back into
an understandable string.
Args:
bindata (bytes): The raw binary structure that contains the node
description.
Returns:
str: The corresponding string description of the same sensor_graph node |
1,184 | def split_header(fp):
body_start, header_ended = 0, False
lines = []
for line in fp:
        if line.startswith('#') and not header_ended:  # '#' header prefix assumed; original literal lost
body_start += 1
else:
header_ended = True
lines.append(line)
return lines[:body_start], lines[body_start:] | Read file pointer and return pair of lines lists:
first - header, second - the rest. |
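A small usage sketch, assuming the '#' header prefix reconstructed above:

    import io

    fp = io.StringIO("# title\n# author\nfirst body line\nsecond body line\n")
    header, body = split_header(fp)
    # header -> ['# title\n', '# author\n']
    # body   -> ['first body line\n', 'second body line\n']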
1,185 | def moment_sequence(self):
A, C, G, H = self.A, self.C, self.G, self.H
mu_x, Sigma_x = self.mu_0, self.Sigma_0
while 1:
mu_y = G.dot(mu_x)
if H is None:
Sigma_y = G.dot(Sigma_x).dot(G.T)
else:
Sigma_y = G.dot(Sigma_x).dot(G.T) + H.dot(H.T)
yield mu_x, mu_y, Sigma_x, Sigma_y
mu_x = A.dot(mu_x)
Sigma_x = A.dot(Sigma_x).dot(A.T) + C.dot(C.T) | r"""
Create a generator to calculate the population mean and
variance-covariance matrix for both :math:`x_t` and :math:`y_t`
starting at the initial condition (self.mu_0, self.Sigma_0).
Each iteration produces a 4-tuple of items (mu_x, mu_y, Sigma_x,
Sigma_y) for the next period.
Yields
------
mu_x : array_like(float)
An n x 1 array representing the population mean of x_t
mu_y : array_like(float)
A k x 1 array representing the population mean of y_t
Sigma_x : array_like(float)
An n x n array representing the variance-covariance matrix
of x_t
Sigma_y : array_like(float)
A k x k array representing the variance-covariance matrix
of y_t |
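A usage sketch, assuming the surrounding class is quantecon's LinearStateSpace; the numbers describe an arbitrary scalar AR(1) state with observation y_t = 2 x_t:

    from itertools import islice
    import numpy as np
    import quantecon as qe

    A, C, G = [[0.9]], [[1.0]], [[2.0]]
    lss = qe.LinearStateSpace(A, C, G, mu_0=np.array([1.0]))
    for mu_x, mu_y, Sigma_x, Sigma_y in islice(lss.moment_sequence(), 3):
        print(mu_x.item(), mu_y.item(), Sigma_x.item(), Sigma_y.item())
    # mu_x decays like 0.9**t from 1.0, and Sigma_x follows Sigma' = 0.81*Sigma + 1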
1,186 | def example_yaml(cls, skip=()):
return cls.example_instance(skip=skip).to_yaml(skip=skip) | Generate an example yaml string for a Serializable subclass.
If traits have been tagged with an `example` value, then we use that
value. Otherwise we fall back to the default_value for the instance. |
1,187 | def get_available_tokens(self, count=10, token_length=15, **kwargs):
token_buffer = int(math.ceil(count * .05))
if token_buffer < 5:
token_buffer = 5
available = set([])
while True:
tokens = [random_alphanum(length=token_length)
for t in range(count + token_buffer)]
            db_tokens = self.filter(token__in=tokens).values_list('token',
                                                                  flat=True)
available.update(set(tokens).difference(db_tokens))
if len(available) >= count:
return list(available)[:count] | Gets a list of available tokens.
:param count: the number of tokens to return.
:param token_length: the length of the tokens. The higher the number
the easier it will be to return a list. If token_length == 1
there's a strong probability that not enough unused tokens will exist in
the db. |
1,188 | def _bulk_to_linear(M, N, L, qubits):
"Converts a list of chimera coordinates to linear indices."
return [2 * L * N * x + 2 * L * y + L * u + k for x, y, u, k in qubits] | Converts a list of chimera coordinates to linear indices. |
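A quick check of the index formula for a small C(2, 2, 4) Chimera graph; M is part of the signature but unused by the formula, and coordinates are (x, y, u, k):

    qubits = [(0, 0, 0, 0), (0, 0, 1, 3), (1, 0, 0, 0)]
    print(_bulk_to_linear(2, 2, 4, qubits))
    # [0, 7, 16]   i.e. 2*L*N*x + 2*L*y + L*u + k with L=4, N=2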
1,189 | def _get_user_agent():
client = "{0}/{1}".format(__name__.split(".")[0], ver.__version__)
python_version = "Python/{v.major}.{v.minor}.{v.micro}".format(
v=sys.version_info
)
system_info = "{0}/{1}".format(platform.system(), platform.release())
user_agent_string = " ".join([python_version, client, system_info])
return user_agent_string | Construct the user-agent header with the package info,
Python version and OS version.
Returns:
The user agent string.
e.g. 'Python/3.6.7 slack/2.0.0 Darwin/17.7.0' |
1,190 | def channels_set_topic(self, room_id, topic, **kwargs):
return self.__call_api_post('channels.setTopic', roomId=room_id, topic=topic, kwargs=kwargs) | Sets the topic for the channel. |
1,191 | def dateindex(self, col: str):
df = self._dateindex(col)
if df is None:
self.err("Can not create date index")
return
self.df = df
self.ok("Added a datetime index from column", col) | Set a datetime index from a column
:param col: column name where to index the date from
:type col: str
:example: ``ds.dateindex("mycol")`` |
1,192 | def _parse_textgroup_wrapper(self, cts_file):
try:
return self._parse_textgroup(cts_file)
except Exception as E:
self.logger.error("Error parsing %s ", cts_file)
if self.RAISE_ON_GENERIC_PARSING_ERROR:
raise E | Wraps with a Try/Except the textgroup parsing from a cts file
:param cts_file: Path to the CTS File
:type cts_file: str
:return: CtsTextgroupMetadata |
1,193 | def _gradient_penalty(self, real_samples, fake_samples, kwargs):
import torch
from torch.autograd import Variable, grad
real_samples = real_samples.view(fake_samples.shape)
subset_size = real_samples.shape[0]
real_samples = real_samples[:subset_size]
fake_samples = fake_samples[:subset_size]
alpha = torch.rand(subset_size)
if self.use_cuda:
alpha = alpha.cuda()
alpha = alpha.view((-1,) + ((1,) * (real_samples.dim() - 1)))
interpolates = alpha * real_samples + ((1 - alpha) * fake_samples)
interpolates = Variable(interpolates, requires_grad=True)
if self.use_cuda:
interpolates = interpolates.cuda()
d_output = self.critic(interpolates, **kwargs)
        grad_outputs = torch.ones(d_output.size())
        if self.use_cuda:
            grad_outputs = grad_outputs.cuda()
        gradients = grad(
            outputs=d_output,
            inputs=interpolates,
            grad_outputs=grad_outputs,
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
return ((gradients.norm(2, dim=1) - 1) ** 2).mean() * 10 | Compute the norm of the gradients for each sample in a batch, and
penalize anything on either side of unit norm |
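The penalty above is the standard WGAN-GP term. A minimal self-contained sketch of the same idea, using a toy critic and the torch.autograd.grad API without any CUDA handling:

    import torch

    critic = torch.nn.Linear(8, 1)      # stand-in critic network
    real = torch.randn(16, 8)
    fake = torch.randn(16, 8)

    alpha = torch.rand(16, 1)
    interpolates = (alpha * real + (1 - alpha) * fake).requires_grad_(True)
    d_out = critic(interpolates)
    grads = torch.autograd.grad(outputs=d_out, inputs=interpolates,
                                grad_outputs=torch.ones_like(d_out),
                                create_graph=True)[0]
    # penalize per-sample gradient L2 norms that deviate from 1
    penalty = ((grads.norm(2, dim=1) - 1) ** 2).mean() * 10
    print(penalty.item())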
1,194 | def min(self):
res = self._qexec("min(%s)" % self._name)
if len(res) > 0:
self._min = res[0][0]
return self._min | :returns the minimum of the column |
1,195 | def unit(w, sparsity):
w_shape = common_layers.shape_list(w)
count = tf.to_int32(w_shape[-1] * sparsity)
mask = common_layers.unit_targeting(w, count)
return (1 - mask) * w | Unit-level magnitude pruning. |
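The helper relies on tensor2tensor's common_layers.unit_targeting for the mask; a plain-NumPy sketch of the same unit-level (column) magnitude-pruning idea, with made-up shapes:

    import numpy as np

    def unit_prune(w, sparsity):
        # zero the fraction `sparsity` of output units (last-axis columns)
        # with the smallest L2 norm
        count = int(w.shape[-1] * sparsity)
        norms = np.linalg.norm(w.reshape(-1, w.shape[-1]), axis=0)
        drop = np.argsort(norms)[:count]
        pruned = w.copy()
        pruned[..., drop] = 0.0
        return pruned

    w = np.random.randn(4, 8)
    print(np.count_nonzero(unit_prune(w, 0.5) == 0))  # 16: four pruned columns of four rows each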
1,196 | def new(image):
pointer = vips_lib.vips_region_new(image.pointer)
if pointer == ffi.NULL:
        raise Error('unable to make region')
return pyvips.Region(pointer) | Make a region on an image.
Returns:
A new :class:`.Region`.
Raises:
:class:`.Error` |
1,197 | def pipeline_launchpad(job, fastqs, univ_options, tool_options):
    univ_options['patient'] = fastqs['patient_id']  # key names assumed; original literals lost
ncpu = cpu_count()
    tool_options['star']['n'] = tool_options['bwa']['n'] = tool_options['phlat']['n'] = \
        tool_options['rsem']['n'] = ncpu / 3  # tool keys assumed; original literals lost
sample_prep = job.wrapJobFn(prepare_samples, fastqs, univ_options, disk=)
cutadapt = job.wrapJobFn(run_cutadapt, sample_prep.rv(), univ_options, tool_options[],
cores=1, disk=)
star = job.wrapJobFn(run_star, cutadapt.rv(), univ_options, tool_options[],
cores=tool_options[][], memory=, disk=).encapsulate()
bwa_tumor = job.wrapJobFn(run_bwa, sample_prep.rv(), , univ_options,
tool_options[], cores=tool_options[][],
disk=).encapsulate()
bwa_normal = job.wrapJobFn(run_bwa, sample_prep.rv(), , univ_options,
tool_options[], cores=tool_options[][],
disk=).encapsulate()
phlat_tumor_dna = job.wrapJobFn(run_phlat, sample_prep.rv(), , univ_options,
tool_options[], cores=tool_options[][],
disk=)
phlat_normal_dna = job.wrapJobFn(run_phlat, sample_prep.rv(), , univ_options,
tool_options[], cores=tool_options[][],
disk=)
phlat_tumor_rna = job.wrapJobFn(run_phlat, sample_prep.rv(), , univ_options,
tool_options[], cores=tool_options[][],
disk=)
fastq_deletion = job.wrapJobFn(delete_fastqs, sample_prep.rv())
rsem = job.wrapJobFn(run_rsem, star.rv(), univ_options, tool_options[],
cores=tool_options[][], disk=)
mhc_pathway_assessment = job.wrapJobFn(assess_mhc_genes, rsem.rv(), phlat_tumor_rna.rv(),
univ_options, tool_options[])
fusions = job.wrapJobFn(run_fusion_caller, star.rv(), univ_options, )
Sradia = job.wrapJobFn(spawn_radia, star.rv(), bwa_tumor.rv(),
bwa_normal.rv(), univ_options, tool_options[]).encapsulate()
Mradia = job.wrapJobFn(merge_radia, Sradia.rv())
Smutect = job.wrapJobFn(spawn_mutect, bwa_tumor.rv(), bwa_normal.rv(), univ_options,
tool_options[]).encapsulate()
Mmutect = job.wrapJobFn(merge_mutect, Smutect.rv())
indels = job.wrapJobFn(run_indel_caller, bwa_tumor.rv(), bwa_normal.rv(), univ_options,
)
merge_mutations = job.wrapJobFn(run_mutation_aggregator, fusions.rv(), Mradia.rv(),
Mmutect.rv(), indels.rv(), univ_options)
snpeff = job.wrapJobFn(run_snpeff, merge_mutations.rv(), univ_options, tool_options[],
disk=)
transgene = job.wrapJobFn(run_transgene, snpeff.rv(), univ_options, tool_options[],
disk=)
merge_phlat = job.wrapJobFn(merge_phlat_calls, phlat_tumor_dna.rv(), phlat_normal_dna.rv(),
phlat_tumor_rna.rv(), disk=)
spawn_mhc = job.wrapJobFn(spawn_antigen_predictors, transgene.rv(), merge_phlat.rv(),
univ_options, (tool_options[],
tool_options[])).encapsulate()
merge_mhc = job.wrapJobFn(merge_mhc_peptide_calls, spawn_mhc.rv(), transgene.rv(), disk=)
rank_boost = job.wrapJobFn(boost_ranks, rsem.rv(), merge_mhc.rv(), transgene.rv(), univ_options,
tool_options[], disk=)
job.addChild(sample_prep)
sample_prep.addChild(cutadapt)
sample_prep.addChild(bwa_tumor)
sample_prep.addChild(bwa_normal)
sample_prep.addChild(phlat_tumor_dna)
sample_prep.addChild(phlat_normal_dna)
sample_prep.addChild(phlat_tumor_rna)
cutadapt.addChild(star)
star.addChild(rsem)
star.addChild(fusions)
star.addChild(Sradia)
bwa_tumor.addChild(Sradia)
bwa_normal.addChild(Sradia)
bwa_tumor.addChild(Smutect)
bwa_normal.addChild(Smutect)
bwa_tumor.addChild(indels)
bwa_normal.addChild(indels)
phlat_tumor_dna.addChild(merge_phlat)
phlat_normal_dna.addChild(merge_phlat)
phlat_tumor_rna.addChild(merge_phlat)
sample_prep.addChild(fastq_deletion)
cutadapt.addChild(fastq_deletion)
bwa_normal.addChild(fastq_deletion)
bwa_tumor.addChild(fastq_deletion)
phlat_normal_dna.addChild(fastq_deletion)
phlat_tumor_dna.addChild(fastq_deletion)
phlat_tumor_rna.addChild(fastq_deletion)
Sradia.addChild(Mradia)
Smutect.addChild(Mmutect)
fusions.addChild(merge_mutations)
Mradia.addChild(merge_mutations)
Mmutect.addChild(merge_mutations)
indels.addChild(merge_mutations)
merge_mutations.addChild(snpeff)
snpeff.addChild(transgene)
merge_phlat.addChild(spawn_mhc)
transgene.addChild(spawn_mhc)
spawn_mhc.addFollowOn(merge_mhc)
rsem.addChild(rank_boost)
merge_mhc.addChild(rank_boost)
phlat_tumor_rna.addChild(mhc_pathway_assessment)
rsem.addChild(mhc_pathway_assessment)
return None | The precision immuno pipeline begins at this module. The DAG can be viewed in Flowchart.txt
This module corresponds to node 0 on the tree |
1,198 | def todict(self, exclude_cache=False):
odict = {}
for field, value in self.fieldvalue_pairs(exclude_cache=exclude_cache):
value = field.serialise(value)
if value:
odict[field.name] = value
        if self._dbdata and 'id' in self._dbdata:
            odict['__dbdata__'] = {'id': self._dbdata['id']}  # '__dbdata__' key assumed
        return odict | Return a dictionary of serialised scalar fields for pickling.
If the *exclude_cache* flag is ``True``, fields with :attr:`Field.as_cache`
attribute set to ``True`` will be excluded. |
1,199 | def parse_xmlsec_output(output):
for line in output.splitlines():
        if line == 'OK':
            return True
        elif line == 'FAIL':
            raise XmlsecError(output)
raise XmlsecError(output) | Parse the output from xmlsec to try to find out if the
command was successful or not.
:param output: The output from Popen
:return: A boolean; True if the command was a success otherwise False |