Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k) |
---|---|---|
25,100 |
def sync(self, ws_name):
path = self.config["workspaces"][ws_name]["path"]
repositories = self.config["workspaces"][ws_name]["repositories"]
logger = logging.getLogger(__name__)
color = Color()
for r in os.listdir(path):
try:
repo = Repository(os.path.join(path, r))
except RepositoryError:
continue
else:
repositories[r] = repo.path
for repo_name, path in repositories.items():
logger.info(color.colored(
" - %s" % repo_name, "blue"))
self.config["workspaces"][ws_name]["repositories"]
self.config.write()
|
Synchronise workspace's repositories.
|
25,101 |
def set_windows_env_var(key, value):
if not isinstance(key, text_type):
raise TypeError("%r not of type %r" % (key, text_type))
if not isinstance(value, text_type):
raise TypeError("%r not of type %r" % (value, text_type))
status = winapi.SetEnvironmentVariableW(key, value)
if status == 0:
raise ctypes.WinError()
|
Set an env var.
Raises:
WindowsError
|
25,102 |
def x(self, x):
if x is None:
return None
if self._force_vertical:
return super(HorizontalLogView, self).x(x)
return super(XLogView, self).y(x)
|
Project x as y
|
25,103 |
def log_predictive_density(self, x_test, y_test, Y_metadata=None):
mu_star, var_star = self._raw_predict(x_test)
return self.likelihood.log_predictive_density(y_test, mu_star, var_star, Y_metadata=Y_metadata)
|
Calculation of the log predictive density
.. math::
p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*},\sigma^{2}_{*})
:param x_test: test locations (x_{*})
:type x_test: (Nx1) array
:param y_test: test observations (y_{*})
:type y_test: (Nx1) array
:param Y_metadata: metadata associated with the test points
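A minimal usage sketch (an assumption, not taken from this snippet): it presumes GPy is installed and that this method is called on a fitted GP model such as GPy.models.GPRegression.
```python
import numpy as np
import GPy  # assumed dependency

# Toy 1-D regression problem.
X = np.random.rand(50, 1)
Y = np.sin(6 * X) + 0.1 * np.random.randn(50, 1)

model = GPy.models.GPRegression(X, Y)
model.optimize()

x_test = np.linspace(0, 1, 10)[:, None]
y_test = np.sin(6 * x_test)
# One log predictive density value per test point.
log_density = model.log_predictive_density(x_test, y_test)
```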
|
25,104 |
def locator(self, value):
self._locator = value
self._latitude, self._longitude = utils.from_grid_locator(value)
|
Update the locator, and trigger a latitude and longitude update.
Args:
value (str): New Maidenhead locator string
|
25,105 |
def is_depfilter_handler(class_, filter_name, filter_):
try:
handlers = get_magic_attr(filter_)
except AttributeError:
return False
return _depfilter_spec(class_, filter_name) in handlers
|
Return true if `filter_` has been decorated with :func:`depfilter` for the
given filter and class.
|
25,106 |
def _BuildHttpRoutingMap(self, router_cls):
if not issubclass(router_cls, api_call_router.ApiCallRouter):
raise ValueError("Router has to be an instance of ApiCallRouter.")
routing_map = routing.Map()
for _, metadata in iteritems(router_cls.GetAnnotatedMethods()):
for http_method, path, unused_options in metadata.http_methods:
routing_map.add(
routing.Rule(path, methods=[http_method], endpoint=metadata))
routing_map.add(
routing.Rule(
path.replace("/api/", "/api/v2/"),
methods=[http_method],
endpoint=metadata))
return routing_map
|
Builds a werkzeug routing map out of a given router class.
|
25,107 |
def convert_job(row: list) -> dict:
state = row[-2]
start_time_raw = row[-4]
end_time_raw = row[-3]
if state not in (, ):
start_time = datetime.strptime(start_time_raw, )
if state != :
end_time = datetime.strptime(end_time_raw, )
else:
end_time = None
else:
start_time = end_time = None
job_name = row[1]
step_name, step_context = job_name.rstrip().rstrip().rsplit(, 1)
return {
: int(row[0]),
: job_name,
: step_name,
: step_context,
: state,
: start_time,
: end_time,
: time_to_sec(row[-5]),
: time_to_sec(row[-6]),
: state == ,
}
|
Convert sacct row to dict.
|
25,108 |
def getListForEvent(self, event=None):
names = list(self.guestlistname_set.annotate(
guestType=Case(
When(notes__isnull=False, then=F()),
default=Value(ugettext()),
output_field=models.CharField()
)
).values(,,))
components = self.guestlistcomponent_set.all()
filters = Q(pk__isnull=True)
for component in components:
if event and self.appliesToEvent(event):
filters = filters | self.getComponentFilters(component,event=event)
else:
filters = filters | self.getComponentFilters(component,dateTime=timezone.now())
if self.includeStaff and event and self.appliesToEvent(event):
filters = filters | Q(eventstaffmember__event=event)
names += list(StaffMember.objects.filter(filters).annotate(
guestType=Case(
When(eventstaffmember__event=event, then=Concat(Value(), )),
default=Value(ugettext()),
output_field=models.CharField()
)
).distinct().values(,,))
if self.includeRegistrants and event and self.appliesToEvent(event):
names += list(Registration.objects.filter(eventregistration__event=event).annotate(
guestType=Value(_(),output_field=models.CharField())
).values(,,))
return names
|
Get the list of names associated with a particular event.
|
25,109 |
def add(setname=None, entry=None, family=, **kwargs):
if not setname:
return
if not entry:
return
setinfo = _find_set_info(setname)
if not setinfo:
return .format(setname)
settype = setinfo[]
cmd = .format(entry)
if in kwargs:
if not in setinfo[]:
return .format(setname)
if in kwargs or in kwargs:
if not in setinfo[]:
return .format(setname)
if in kwargs:
if not in setinfo[]:
return .format(setname)
if not in entry:
cmd = .format(cmd, kwargs[])
if set([, , ]) & set(kwargs):
if not in setinfo[]:
return .format(setname)
for item in _ADD_OPTIONS[settype]:
if item in kwargs:
cmd = .format(cmd, item, kwargs[item])
current_members = _find_set_members(setname)
if cmd in current_members:
return .format(cmd, setname)
cmd = .format(_ipset_cmd(), setname, cmd)
out = __salt__[](cmd, python_shell=False)
if not out:
return
return .format(out)
|
Append an entry to the specified set.
CLI Example:
.. code-block:: bash
salt '*' ipset.add setname 192.168.1.26
salt '*' ipset.add setname 192.168.0.3,AA:BB:CC:DD:EE:FF
|
25,110 |
def _connect(self):
self.logger.info("Attempting connection to %s:%s", self.server[0], self.server[1])
try:
self._open_socket()
peer = self.sock.getpeername()
self.logger.info("Connected to %s", str(peer))
self.sock.setblocking(1)
self.sock.settimeout(5)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
banner = self.sock.recv(512)
if is_py3:
banner = banner.decode()
if banner[0] == "#":
self.logger.debug("Banner: %s", banner.rstrip())
else:
raise ConnectionError("invalid banner from server")
except ConnectionError as e:
self.logger.error(str(e))
self.close()
raise
except (socket.error, socket.timeout) as e:
self.close()
self.logger.error("Socket error: %s" % str(e))
if str(e) == "timed out":
raise ConnectionError("no banner from server")
else:
raise ConnectionError(e)
self._connected = True
|
Attempts connection to the server
|
25,111 |
def cli_tempurl(context, method, path, seconds=None, use_container=False):
with contextlib.nested(
context.io_manager.with_stdout(),
context.client_manager.with_client()) as (fp, client):
method = method.upper()
path = path.lstrip('/')
seconds = seconds if seconds is not None else 3600
if '/' not in path:
raise ReturnCode(
% path)
if use_container:
key_type = 'container'
container = path.split('/', 1)[0]
status, reason, headers, contents = \
client.head_container(container)
else:
key_type = 'account'
status, reason, headers, contents = \
client.head_account()
if status // 100 != 2:
raise ReturnCode(
%
(key_type.title(), status, reason))
key = headers.get( % key_type)
if not key:
raise ReturnCode(
%
(key_type.title(), key_type))
url = client.storage_url + '/' + path
fp.write(generate_temp_url(method, url, seconds, key))
fp.write('\n')
fp.flush()
|
Generates a TempURL and sends that to the context.io_manager's
stdout.
See :py:mod:`swiftly.cli.tempurl` for context usage information.
See :py:class:`CLITempURL` for more information.
:param context: The :py:class:`swiftly.cli.context.CLIContext` to
use.
:param method: The method for the TempURL (GET, PUT, etc.)
:param path: The path the TempURL should direct to.
:param seconds: The number of seconds the TempURL should be good
for. Default: 3600
:param use_container: If True, will create a container level TempURL
using X-Container-Meta-Temp-Url-Key instead of
X-Account-Meta-Temp-Url-Key.
|
25,112 |
def batch_accumulate(max_batch_size, a_generator, cooperator=None):
if cooperator:
own_cooperate = cooperator.cooperate
else:
own_cooperate = cooperate
spigot = ValueBucket()
items = stream_tap((spigot,), a_generator)
d = own_cooperate(i_batch(max_batch_size, items)).whenDone()
d.addCallback(accumulation_handler, spigot)
return d
|
Start a Deferred whose callBack arg is a deque of the accumulation
of the values yielded from a_generator which is iterated over
in batches the size of max_batch_size.
It should be more efficient to iterate over the generator in
batches and still provide enough speed for non-blocking execution.
:param max_batch_size: The number of iterations of the generator
to consume at a time.
:param a_generator: An iterator which yields some not None values.
:return: A Deferred to which the next callback will be called with
the yielded contents of the generator function.
|
25,113 |
def _set_dense_defaults_and_eval(kwargs):
kwargs[] = kwargs.get(, )
kwargs[] = kwargs.get(, )
kwargs[] = kwargs.get(, False)
kwargs[] = kwargs.get(, False)
kwargs[] = kwargs.get(, )
kwargs[] = kwargs.get(, )
for key, val in kwargs.iteritems():
try:
kwargs[key] = eval(val)
except:
kwargs[key] = val
return kwargs
|
Sets default values in kwargs if kwargs are not already given.
Evaluates all values using eval
Parameters
-----------
kwargs : dict
Dictionary of dense specific keyword args
Returns
-------
: dict
Default, evaluated dictionary
|
25,114 |
def submit(self, coro, callback=None):
callback = callback or self.default_callback
if self.async_running:
return self.run_coroutine_threadsafe(coro, callback=callback)
else:
return NewTask(coro, loop=self.loop, callback=callback)
|
Submit a coro as NewTask to self.loop without loop.frequency control.
::
from torequests.dummy import Loop
import asyncio
loop = Loop()
async def test(i):
result = await asyncio.sleep(1)
return (loop.frequency, i)
coro = test(0)
task = loop.submit(coro)
print(task)
# loop.x can be ignored
loop.x
print(task.x)
# <NewTask pending coro=<test() running at torequests/temp_code.py:58>>
# (Frequency(sem=<0/0>, interval=0, name=loop_sem), 0)
|
25,115 |
def list_all_store_credit_transactions(cls, **kwargs):
kwargs[] = True
if kwargs.get():
return cls._list_all_store_credit_transactions_with_http_info(**kwargs)
else:
(data) = cls._list_all_store_credit_transactions_with_http_info(**kwargs)
return data
|
List StoreCreditTransactions
Return a list of StoreCreditTransactions
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_store_credit_transactions(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[StoreCreditTransaction]
If the method is called asynchronously,
returns the request thread.
|
25,116 |
def _surpress_formatting_errors(fn):
@wraps(fn)
def inner(*args, **kwargs):
try:
return fn(*args, **kwargs)
except ValueError:
return ""
return inner
|
I know this is dangerous and the wrong way to solve the problem, but when
using both row and columns summaries it's easier to just swallow errors
so users can format their tables how they need.
|
25,117 |
def _parse_guild_info(self, info_container):
m = founded_regex.search(info_container.text)
if m:
description = m.group("desc").strip()
self.description = description if description else None
self.world = m.group("world")
self.founded = parse_tibia_date(m.group("date").replace("\xa0", " "))
self.active = "currently active" in m.group("status")
|
Parses the guild's general information and applies the found values.
Parameters
----------
info_container: :class:`bs4.Tag`
The parsed content of the information container.
|
25,118 |
def accept(self, *args):
token = self.peek()
if token is None:
return None
for arg in args:
if token.type == arg:
self.position += 1
return token
return None
|
Consume and return the next token if it has the correct type
Multiple token types (as strings, e.g. 'integer64') can be given
as arguments. If the next token is one of them, consume and return it.
If the token type doesn't match, return None.
|
25,119 |
def simulate(args):
p = OptionParser(simulate.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
idsfile, = args
fp = open(idsfile)
fw = must_open(opts.outfile, "w")
for row in fp:
name, size = row.split()
size = int(size)
simulate_one(fw, name, size)
fp.close()
|
%prog simulate idsfile
Simulate random FASTA file based on idsfile, which is a two-column
tab-separated file with sequence name and size.
|
25,120 |
def _scale_fig_size(figsize, textsize, rows=1, cols=1):
params = mpl.rcParams
rc_width, rc_height = tuple(params["figure.figsize"])
rc_ax_labelsize = params["axes.labelsize"]
rc_titlesize = params["axes.titlesize"]
rc_xt_labelsize = params["xtick.labelsize"]
rc_linewidth = params["lines.linewidth"]
rc_markersize = params["lines.markersize"]
if isinstance(rc_ax_labelsize, str):
rc_ax_labelsize = 15
if isinstance(rc_titlesize, str):
rc_titlesize = 16
if isinstance(rc_xt_labelsize, str):
rc_xt_labelsize = 14
if figsize is None:
width, height = rc_width, rc_height
sff = 1 if (rows == cols == 1) else 1.15
width = width * cols * sff
height = height * rows * sff
else:
width, height = figsize
if textsize is not None:
scale_factor = textsize / rc_xt_labelsize
elif rows == cols == 1:
scale_factor = ((width * height) / (rc_width * rc_height)) ** 0.5
else:
scale_factor = 1
ax_labelsize = rc_ax_labelsize * scale_factor
titlesize = rc_titlesize * scale_factor
xt_labelsize = rc_xt_labelsize * scale_factor
linewidth = rc_linewidth * scale_factor
markersize = rc_markersize * scale_factor
return (width, height), ax_labelsize, titlesize, xt_labelsize, linewidth, markersize
|
Scale figure properties according to rows and cols.
Parameters
----------
figsize : float or None
Size of figure in inches
textsize : float or None
fontsize
rows : int
Number of rows
cols : int
Number of columns
Returns
-------
figsize : float or None
Size of figure in inches
ax_labelsize : int
fontsize for axes label
titlesize : int
fontsize for title
xt_labelsize : int
fontsize for axes ticks
linewidth : int
linewidth
markersize : int
markersize
|
25,121 |
def cluster(dset,min_distance,min_cluster_size,prefix=None):
if prefix is None:
prefix = nl.suffix(dset, '_clust%d' % min_cluster_size)
return available_method()(dset,min_distance,min_cluster_size,prefix)
|
clusters given ``dset`` connecting voxels ``min_distance``mm away with minimum cluster size of ``min_cluster_size``
default prefix is ``dset`` suffixed with ``_clust%d``
|
25,122 |
def conv_lstm_2d(inputs, state, output_channels,
kernel_size=5, name=None, spatial_dims=None):
input_shape = common_layers.shape_list(inputs)
batch_size, input_channels = input_shape[0], input_shape[-1]
if spatial_dims is None:
input_shape = input_shape[1:]
else:
input_shape = spatial_dims + [input_channels]
cell = tf.contrib.rnn.ConvLSTMCell(
2, input_shape, output_channels,
[kernel_size, kernel_size], name=name)
if state is None:
state = cell.zero_state(batch_size, tf.float32)
outputs, new_state = cell(inputs, state)
return outputs, new_state
|
2D Convolutional LSTM.
|
25,123 |
def show_tables():
_State.connection()
_State.reflect_metadata()
metadata = _State.metadata
response = select()
return {row[]: row[] for row in response}
|
Return the names of the tables currently in the database.
|
25,124 |
def partial_transform(self, traj):
ca = [a.index for a in traj.top.atoms if a.name == 'CA']
if len(ca) < 4:
return np.zeros((len(traj), 0), dtype=np.float32)
alpha_indices = np.array(
[(ca[i - 1], ca[i], ca[i + 1], ca[i + 2]) for i in range(1, len(ca) - 2)])
result = md.compute_dihedrals(traj, alpha_indices)
x = []
if self.atom_indices is None:
self.atom_indices = np.vstack(alpha_indices)
if self.sincos:
x.extend([np.cos(result), np.sin(result)])
else:
x.append(result)
return np.hstack(x)
|
Featurize an MD trajectory into a vector space via calculation
of dihedral (torsion) angles of alpha carbon backbone
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
|
25,125 |
def _InitializeURL(self, upload_url, current_content_length):
if current_content_length != 0:
return upload_url
headers = {
: ,
: 0,
:
}
req = urllib2.Request(upload_url, data={}, headers=headers)
resp = self._url_opener.open(req)
return resp.headers[]
|
Ensures that the URL used to upload operations is properly initialized.
Args:
upload_url: a string url.
current_content_length: an integer identifying the current content length
of data uploaded to the Batch Job.
Returns:
An initialized string URL, or the provided string URL if the URL has
already been initialized.
|
25,126 |
def generate_template(template_name, **context):
context.update(href=href, format_datetime=format_datetime)
return template_loader.load(template_name).generate(**context)
|
Load and generate a template.
|
25,127 |
def _sample_action_fluent(self,
name: str,
dtype: tf.DType,
size: Sequence[int],
constraints: Dict[str, Constraints],
default_value: tf.Tensor,
prob: float) -> tf.Tensor:
shape = [self.batch_size] + list(size)
if dtype == tf.float32:
bounds = constraints.get(name)
if bounds is None:
low, high = -self.MAX_REAL_VALUE, self.MAX_REAL_VALUE
dist = tf.distributions.Uniform(low=low, high=high)
sampled_fluent = dist.sample(shape)
else:
low, high = bounds
batch = (low is not None and low.batch) or (high is not None and high.batch)
low = tf.cast(low.tensor, tf.float32) if low is not None else -self.MAX_REAL_VALUE
high = tf.cast(high.tensor, tf.float32) if high is not None else self.MAX_REAL_VALUE
dist = tf.distributions.Uniform(low=low, high=high)
if batch:
sampled_fluent = dist.sample()
elif isinstance(low, tf.Tensor) or isinstance(high, tf.Tensor):
if (low+high).shape.as_list() == list(size):
sampled_fluent = dist.sample([self.batch_size])
else:
raise ValueError()
else:
sampled_fluent = dist.sample(shape)
elif dtype == tf.int32:
logits = [1.0] * self.MAX_INT_VALUE
dist = tf.distributions.Categorical(logits=logits, dtype=tf.int32)
sampled_fluent = dist.sample(shape)
elif dtype == tf.bool:
probs = 0.5
dist = tf.distributions.Bernoulli(probs=probs, dtype=tf.bool)
sampled_fluent = dist.sample(shape)
select_default = tf.distributions.Bernoulli(prob, dtype=tf.bool).sample(self.batch_size)
action_fluent = tf.where(select_default, default_value, sampled_fluent)
return action_fluent
|
Samples the action fluent with given `name`, `dtype`, and `size`.
With probability `prob` it chooses the action fluent `default_value`,
with probability 1-`prob` it samples the fluent w.r.t. its `constraints`.
Args:
name (str): The name of the action fluent.
dtype (tf.DType): The data type of the action fluent.
size (Sequence[int]): The size and shape of the action fluent.
constraints (Dict[str, Tuple[Optional[TensorFluent], Optional[TensorFluent]]]): The bounds for each action fluent.
default_value (tf.Tensor): The default value for the action fluent.
prob (float): A probability measure.
Returns:
tf.Tensor: A tensor for sampling the action fluent.
|
25,128 |
def add_value(self, value, row, col):
self.__values[(row, col)] = value
|
Adds a single value (cell) to a worksheet at (row, col).
Return the (row, col) where the value has been put.
:param value: Value to write to the sheet.
:param row: Row where the value should be written.
:param col: Column where the value should be written.
|
25,129 |
def assign(pid_type, pid_value, status, object_type, object_uuid, overwrite):
from .models import PersistentIdentifier
obj = PersistentIdentifier.get(pid_type, pid_value)
if status is not None:
obj.status = status
obj.assign(object_type, object_uuid, overwrite=overwrite)
db.session.commit()
click.echo(obj.status)
|
Assign persistent identifier.
|
25,130 |
def list_records_for_project(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
data = list_records_for_project_raw(id, name, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data)
|
List all BuildRecords for a given Project
|
25,131 |
def object_build(self, node, obj):
if obj in self._done:
return self._done[obj]
self._done[obj] = node
for name in dir(obj):
try:
member = getattr(obj, name)
except AttributeError:
_build_from_function(node, name, member, self._module)
else:
attach_dummy_node(node, name, member)
return None
|
recursive method which creates a partial ast from real objects
(only functions, classes, and methods are handled)
|
25,132 |
def wrap_cell(entity, json_obj, mapping, table_view=False):
html_class =
out =
if entity[]:
out = ", ".join( map(lambda x: num2name(x, entity, mapping), json_obj.get(entity[], [])) )
elif entity[]:
out = html_formula(json_obj[ entity[] ]) if entity[] in json_obj else
elif entity[] == :
html_class =
out = json_obj.get()
if out is None: out =
elif entity[] == :
html_class =
out = "%6.5f" % json_obj[] if json_obj[] else
elif entity[] == :
out = "%4.2f" % json_obj[] if json_obj[] in [2, 3] else
else:
out = num2name(json_obj.get(entity[]), entity, mapping) or
if table_view:
return + str(entity[]) + html_class + + str(out) +
elif html_class:
return + html_class + + str(out) +
return str(out)
|
Cell wrappers
for customizing the GUI data table
TODO : must coincide with hierarchy!
TODO : simplify this!
|
25,133 |
def collection(self, collection_name):
path = .format(collection_name)
url = self._build_url(path)
LOGGER.debug(.format(url))
response = requests.get(url, headers=REQUEST_HEADERS).json()
return response
|
implements Requirement 15 (/req/core/sfc-md-op)
@type collection_name: string
@param collection_name: name of collection
@returns: feature collection metadata
|
25,134 |
def multiget_slice(self, keys, column_parent, predicate, consistency_level):
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_multiget_slice(keys, column_parent, predicate, consistency_level)
return d
|
Performs a get_slice for column_parent and predicate for the given keys in parallel.
Parameters:
- keys
- column_parent
- predicate
- consistency_level
|
25,135 |
def article(word, function=INDEFINITE, gender=MALE, role=SUBJECT):
return function == DEFINITE \
and definite_article(word, gender, role) \
or indefinite_article(word, gender, role)
|
Returns the indefinite (ein) or definite (der/die/das/die) article for the given word.
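A hedged usage sketch, assuming the constants in the signature are importable from pattern.de alongside article():
```python
from pattern.de import article, DEFINITE, INDEFINITE, MALE, SUBJECT  # assumed imports

article("Hund", function=INDEFINITE, gender=MALE, role=SUBJECT)  # 'ein'
article("Hund", function=DEFINITE, gender=MALE, role=SUBJECT)    # 'der'
```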
|
25,136 |
def tag(self, text):
if self.search_method == 'ahocorasick':
events = self._find_events_ahocorasick(text.text)
elif self.search_method == 'naive':
events = self._find_events_naive(text.text)
events = self._resolve_conflicts(events)
self._event_intervals(events, text)
if self.return_layer:
return events
else:
text[self.layer_name] = events
|
Retrieves list of events in the text.
Parameters
----------
text: Text
The text to search for events.
Returns
-------
list of events sorted by start, end
|
25,137 |
def produce_frequency_explorer(corpus,
category,
category_name=None,
not_category_name=None,
term_ranker=termranking.AbsoluteFrequencyRanker,
alpha=0.01,
use_term_significance=False,
term_scorer=None,
not_categories=None,
grey_threshold=1.96,
y_axis_values=None,
frequency_transform=lambda x: scale(np.log(x) - np.log(1)),
**kwargs):
if not_categories is None:
not_categories = [c for c in corpus.get_categories() if c != category]
if term_scorer is None:
term_scorer = LogOddsRatioUninformativeDirichletPrior(alpha)
my_term_ranker = term_ranker(corpus)
if kwargs.get(, False):
my_term_ranker.use_non_text_features()
term_freq_df = my_term_ranker.get_ranks() + 1
freqs = term_freq_df[[c + for c in [category] + not_categories]].sum(axis=1).values
x_axis_values = [round_downer(10 ** x) for x
in np.linspace(0, np.log(freqs.max()) / np.log(10), 5)]
x_axis_values = [x for x in x_axis_values if x > 1 and x <= freqs.max()]
frequencies_log_scaled = frequency_transform(freqs)
if not in kwargs:
kwargs[] = get_term_scorer_scores(category,
corpus,
kwargs.get(, False),
not_categories,
kwargs.get(, False),
term_ranker,
term_scorer,
kwargs.get(, False))
def y_axis_rescale(coords):
return ((coords - 0.5) / (np.abs(coords - 0.5).max()) + 1) / 2
def round_to_1(x):
if x == 0:
return 0
return round(x, -int(np.floor(np.log10(abs(x)))))
if y_axis_values is None:
max_score = np.floor(np.max(kwargs[]) * 100) / 100
min_score = np.ceil(np.min(kwargs[]) * 100) / 100
if min_score < 0 and max_score > 0:
central = 0
else:
central = 0.5
y_axis_values = [x for x in [min_score, central, max_score]
if x >= min_score and x <= max_score]
scores_scaled_for_charting = scale_neg_1_to_1_with_zero_mean_abs_max(kwargs[])
if use_term_significance:
kwargs[] = term_scorer
kwargs[] = kwargs.get(, term_scorer.get_name())
kwargs[] = kwargs.get(, % (grey_threshold, grey_threshold))
return produce_scattertext_explorer(corpus,
category=category,
category_name=category_name,
not_category_name=not_category_name,
x_coords=frequencies_log_scaled,
y_coords=scores_scaled_for_charting,
original_x=freqs,
original_y=kwargs[],
x_axis_values=x_axis_values,
y_axis_values=y_axis_values,
rescale_x=scale,
rescale_y=y_axis_rescale,
sort_by_dist=False,
term_ranker=term_ranker,
not_categories=not_categories,
x_label=kwargs.get(, ),
**kwargs)
|
Produces a Monroe et al. style visualization, with the x-axis being the log frequency
Parameters
----------
corpus : Corpus
Corpus to use.
category : str
Name of category column as it appears in original data frame.
category_name : str or None
Name of category to use. E.g., "5-star reviews."
Defaults to category
not_category_name : str or None
Name of everything that isn't in category. E.g., "Below 5-star reviews".
Defaults to "Not " + category_name
term_ranker : TermRanker
TermRanker class for determining term frequency ranks.
alpha : float, default = 0.01
Uniform dirichlet prior for p-value calculation
use_term_significance : bool, True by default
Use term scorer
term_scorer : TermSignificance
Subclass of TermSignificance to use as for scores and significance
not_categories : list
All categories other than category by default. Documents labeled
with remaining category.
grey_threshold : float
Score to grey points. Default is 1.96
y_axis_values : list
Custom y-axis values. Defaults to linspace
frequency_transform : lambda, default lambda x: scale(np.log(x) - np.log(1))
Takes a vector of frequencies and returns their x-axis scale.
Remaining arguments are from `produce_scattertext_explorer`.
Returns
-------
str, html of visualization
|
25,138 |
def get_region (self, rs,cs, re,ce):
rs = constrain (rs, 1, self.rows)
re = constrain (re, 1, self.rows)
cs = constrain (cs, 1, self.cols)
ce = constrain (ce, 1, self.cols)
if rs > re:
rs, re = re, rs
if cs > ce:
cs, ce = ce, cs
sc = []
for r in range (rs, re+1):
line = u''
for c in range (cs, ce + 1):
ch = self.get_abs (r,c)
line = line + ch
sc.append (line)
return sc
|
This returns a list of lines representing the region.
|
25,139 |
def key_value_pairs(self):
self.op_data = []
hdrs = self.ip_data[0]
for row in self.ip_data[1:]:
id_col = row[0]
for col_num, col in enumerate(row):
self.op_data.append([id_col, hdrs[col_num], col])
|
convert list to key value pairs
This should also create unique id's to allow for any
dataset to be transposed, and then later manipulated
r1c1,r1c2,r1c3
r2c1,r2c2,r2c3
should be converted to
ID COLNUM VAL
r1c1,
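A worked illustration of the transposition (a standalone sketch that mirrors the loop above; the input table is made up for the example):
```python
ip_data = [
    ["ID", "colA", "colB"],   # header row
    ["r1", 1, 2],
    ["r2", 3, 4],
]
op_data = []
hdrs = ip_data[0]
for row in ip_data[1:]:
    id_col = row[0]
    for col_num, col in enumerate(row):
        op_data.append([id_col, hdrs[col_num], col])
# op_data == [['r1', 'ID', 'r1'], ['r1', 'colA', 1], ['r1', 'colB', 2],
#             ['r2', 'ID', 'r2'], ['r2', 'colA', 3], ['r2', 'colB', 4]]
```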
|
25,140 |
def to_automaton_(f, labels:Set[Symbol]=None):
nnf = f.to_nnf()
if labels is None:
labels = nnf.find_labels()
alphabet = powerset(labels)
initial_state = MacroState({nnf})
final_states = {MacroState()}
delta = set()
d = f.delta(PLFalseInterpretation(), epsilon=True)
if d.truth(d):
final_states.add(initial_state)
states = {MacroState(), initial_state}
states_changed, delta_changed = True, True
while states_changed or delta_changed:
states_changed, delta_changed = False, False
for actions_set in alphabet:
states_list = list(states)
for q in states_list:
delta_formulas = [f.delta(actions_set) for f in q]
atomics = [s for subf in delta_formulas for s in find_atomics(subf)]
symbol2formula = {Symbol(str(f)): f for f in atomics if f != PLTrue() and f != PLFalse()}
formula2atomic_formulas = {
f: PLAtomic(Symbol(str(f)))
if f != PLTrue() and f != PLFalse()
else f for f in atomics
}
transformed_delta_formulas = [_transform_delta(f, formula2atomic_formulas) for f in delta_formulas]
if len(transformed_delta_formulas) == 0:
conjunctions = PLTrue()
elif len(transformed_delta_formulas) == 1:
conjunctions = transformed_delta_formulas[0]
else:
conjunctions = PLAnd(transformed_delta_formulas)
models = frozenset(conjunctions.minimal_models(Alphabet(symbol2formula)))
if len(models) == 0:
continue
for min_model in models:
q_prime = MacroState(
{symbol2formula[s] for s in min_model.true_propositions})
len_before = len(states)
states.add(q_prime)
if len(states) == len_before + 1:
states_list.append(q_prime)
states_changed = True
len_before = len(delta)
delta.add((q, actions_set, q_prime))
if len(delta) == len_before + 1:
delta_changed = True
if len(q_prime) == 0:
final_states.add(q_prime)
else:
subf_deltas = [subf.delta(PLFalseInterpretation(), epsilon=True) for subf in q_prime]
if len(subf_deltas)==1:
q_prime_delta_conjunction = subf_deltas[0]
else:
q_prime_delta_conjunction = PLAnd(subf_deltas)
if q_prime_delta_conjunction.truth(PLFalseInterpretation()):
final_states.add(q_prime)
alphabet = PythomataAlphabet({PLInterpretation(set(sym)) for sym in alphabet})
delta = frozenset((i, PLInterpretation(set(a)), o) for i, a, o in delta)
nfa = NFA.fromTransitions(
alphabet=alphabet,
states=frozenset(states),
initial_state=initial_state,
accepting_states=frozenset(final_states),
transitions=delta
)
return nfa
|
DEPRECATED
From a LDLfFormula, build the automaton.
:param f: a LDLfFormula;
:param labels: a set of Symbol, the fluents of our domain. If None, retrieve them from the formula;
:param determinize: True if you need to determinize the NFA, obtaining a DFA;
:param minimize: True if you need to minimize the DFA (if determinize is False this flag has no effect.)
:return: a NFA or a DFA which accepts the same traces that makes the formula True.
|
25,141 |
def Compile(self, filter_implementation):
operator = self.operator.lower()
if operator in (, ):
method =
elif operator in (, ):
method =
else:
raise errors.ParseError(
.format(operator))
args = [x.Compile(filter_implementation) for x in self.args]
return getattr(filter_implementation, method)(*args)
|
Compile the binary expression into a filter object.
|
25,142 |
def _read(self):
with open(self.path, ) as file_handle:
content = file_handle.read()
return compat.unicode(content)
|
Open the file and return its contents.
|
25,143 |
def record_variant_id(record):
if record.ID:
return record.ID
else:
return record.CHROM + + str(record.POS)
|
Get variant ID from pyvcf.model._Record
|
25,144 |
def list(self,table, **kparams):
result = self.table_api_get(table, **kparams)
return self.to_records(result, table)
|
get a collection of records by table name.
returns a dict (the json map) for python 3.4
|
25,145 |
def build_expressions(verb):
def partial(func, col, *args, **kwargs):
def new_func(gdf):
return func(gdf[col], *args, **kwargs)
return new_func
def make_statement(func, col):
if isinstance(func, str):
expr = .format(func, col)
elif callable(func):
expr = partial(func, col, *verb.args, **verb.kwargs)
else:
raise TypeError("{} is not a function".format(func))
return expr
def func_name(func):
if isinstance(func, str):
return func
try:
return func.__name__
except AttributeError:
return
if isinstance(verb.functions, (tuple, list)):
names = (func_name(func) for func in verb.functions)
names_and_functions = zip(names, verb.functions)
else:
names_and_functions = verb.functions.items()
columns = Selector.get(verb)
postfixes = []
stmts = []
for name, func in names_and_functions:
postfixes.append(name)
for col in columns:
stmts.append(make_statement(func, col))
if not stmts:
stmts = columns
add_postfix = (isinstance(verb.functions, dict) or
len(verb.functions) > 1)
if add_postfix:
fmt = .format
new_columns = [fmt(c, p) for p in postfixes for c in columns]
else:
new_columns = columns
expressions = [Expression(stmt, col)
for stmt, col in zip(stmts, new_columns)]
return expressions, new_columns
|
Build expressions for helper verbs
Parameters
----------
verb : verb
A verb with a *functions* attribute.
Returns
-------
out : tuple
(List of Expressions, New columns). The expressions and the
new columns in which the results of those expressions will
be stored. Even when a result will be stored in a column with
an existing label, that column is still considered new,
i.e. an expression ``x='x+1'`` will create a new column `x`
to replace an old column `x`.
|
25,146 |
def Ainv(self):
if getattr(self, '_Ainv', None) is None:
self._Ainv = self.Solver(self.A, 13)
self._Ainv.run_pardiso(12)
return self._Ainv
|
Returns a Solver instance
|
25,147 |
def invite(self, username):
url = self._build_url(, username, base_url=self._api)
return self._json(self._put(url), 200)
|
Invite the user to join this team.
This returns a dictionary like so::
{'state': 'pending', 'url': 'https://api.github.com/teams/...'}
:param str username: (required), user to invite to join this team.
:returns: dictionary
|
25,148 |
def get_all_instances(sql, class_type, *args, **kwargs):
records = CoyoteDb.get_all_records(sql, *args, **kwargs)
instances = [CoyoteDb.get_object_from_dictionary_representation(
dictionary=record, class_type=class_type) for record in records]
for instance in instances:
instance._query = sql
return instances
|
Returns a list of instances of class_type populated with attributes from the DB record
@param sql: Sql statement to execute
@param class_type: The type of class to instantiate and populate with DB record
@return: Return a list of instances with attributes set to values from DB
|
25,149 |
def pack_rgb(rgb):
orig_shape = None
if isinstance(rgb, np.ndarray):
assert rgb.shape[-1] == 3
orig_shape = rgb.shape[:-1]
else:
assert len(rgb) == 3
rgb = np.array(rgb)
rgb = rgb.astype(int).reshape((-1, 3))
packed = (rgb[:, 0] << 16 |
rgb[:, 1] << 8 |
rgb[:, 2])
if orig_shape is None:
return packed
else:
return packed.reshape(orig_shape)
|
Packs 24-bit RGB triples into single integers;
works on both arrays and tuples.
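A short worked example, assuming pack_rgb as defined above (note that a plain tuple comes back as a length-1 array):
```python
import numpy as np

pack_rgb((255, 0, 0))                 # array([16711680])  == 0xFF0000
rgb = np.array([[255, 0, 0],
                [0, 255, 0]])
pack_rgb(rgb)                         # array([16711680, 65280]), shape (2,) preserved
```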
|
25,150 |
def make_format(format_spec):
fill = ''
align = ''
zero = ''
width = format_spec.width
if format_spec.align:
align = format_spec.align[0]
if format_spec.fill:
fill = format_spec.fill[0]
if format_spec.zero:
zero = '0'
precision_part = ""
if format_spec.precision:
precision_part = ".%s" % format_spec.precision
return "%s%s%s%s%s%s" % (fill, align, zero, width,
precision_part, format_spec.type)
|
Build format string from a format specification.
:param format_spec: Format specification (as FormatSpec object).
:return: Composed format (as string).
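A hedged sketch of how the pieces compose; FormatSpec here is a stand-in namedtuple, since the real FormatSpec type comes from the surrounding parser module:
```python
from collections import namedtuple

FormatSpec = namedtuple("FormatSpec", "fill align zero width precision type")  # stand-in type

spec = FormatSpec(fill="", align=">", zero=0, width="8", precision="2", type="f")
make_format(spec)   # '>8.2f' (assuming empty-string defaults for fill/align/zero)
```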
|
25,151 |
def url(self):
return get_url(CDN_URL, handle=self.handle, security=self.security)
|
Returns the URL for the instance, which can be used
to retrieve, delete, and overwrite the file. If security is enabled, signature and policy parameters will
be included.
*returns* [String]
```python
filelink = client.upload(filepath='/path/to/file')
filelink.url
# https://cdn.filestackcontent.com/FILE_HANDLE
```
|
25,152 |
def _handle_invalid_read_response(self, res, expected_len):
if not res:
reset(self.stick)
raise pyhsm.exception.YHSM_Error( \
% (pyhsm.defines.cmd2str(self.command)) )
self.stick.write(, )
res2 = self.stick.read(50)
lines = res2.split()
for this in lines:
if re.match(, this):
raise pyhsm.exception.YHSM_Error()
raise pyhsm.exception.YHSM_Error( \
% (self.stick.device, res.encode()))
|
This function is called when we do not get the expected frame header in
response to a command. Probable reason is that we are not talking to a
YubiHSM in HSM mode (might be a modem, or a YubiHSM in configuration mode).
Throws a hopefully helpful exception.
|
25,153 |
def confirm_build(build_url, keeper_token):
data = {
'uploaded': True
}
r = requests.patch(
build_url,
auth=(keeper_token, ''),
json=data)
if r.status_code != 200:
raise KeeperError(r)
|
Confirm a build upload is complete.
Wraps ``PATCH /builds/{build}``.
Parameters
----------
build_url : `str`
URL of the build resource. Given a build resource, this URL is
available from the ``self_url`` field.
keeper_token : `str`
Auth token (`ltdconveyor.keeper.get_keeper_token`).
Raises
------
ltdconveyor.keeper.KeeperError
Raised if there is an error communicating with the LTD Keeper API.
|
25,154 |
def open_stored_file(value, url):
upload = None
result = deserialize_upload(value, url)
filename = result[]
storage_class = result[]
if storage_class and filename:
storage = storage_class()
if storage.exists(filename):
upload = storage.open(filename)
upload.name = os.path.basename(filename)
return upload
|
Deserialize value for a given upload url and return open file.
Returns None if deserialization fails.
|
25,155 |
def add(self, method, pattern, callback):
pat_type, pat = self._normalize_pattern(pattern)
if pat_type == 'literal':
self._literal[method][pat] = callback
elif pat_type == 'wildcard':
self._wildcard[method].append(WildcardRoute(pat, callback))
else:
self._regex[method].append(RegexRoute(pat, callback))
|
Add a route.
Arguments:
method (str): HTTP method, e.g. GET, POST, etc.
pattern (str): Pattern that request paths must match.
callback (str): Route handler that is invoked when a request
path matches the *pattern*.
|
25,156 |
def _process_out_of_bounds(self, value, start, end):
"Clips out of bounds values"
if isinstance(value, np.datetime64):
v = dt64_to_dt(value)
if isinstance(start, (int, float)):
start = convert_timestamp(start)
if isinstance(end, (int, float)):
end = convert_timestamp(end)
s, e = start, end
if isinstance(s, np.datetime64):
s = dt64_to_dt(s)
if isinstance(e, np.datetime64):
e = dt64_to_dt(e)
else:
v, s, e = value, start, end
if v < s:
value = start
elif v > e:
value = end
return value
|
Clips out of bounds values
|
25,157 |
def core_periphery_dir(W, gamma=1, C0=None, seed=None):
flag = False
ixes = np.arange(n)
Ct = C.copy()
while len(ixes) > 0:
Qt = np.zeros((n,))
ctix, = np.where(Ct)
nctix, = np.where(np.logical_not(Ct))
q0 = (np.sum(B[np.ix_(ctix, ctix)]) -
np.sum(B[np.ix_(nctix, nctix)]))
Qt[ctix] = q0 - 2 * np.sum(B[ctix, :], axis=1)
Qt[nctix] = q0 + 2 * np.sum(B[nctix, :], axis=1)
max_Qt = np.max(Qt[ixes])
u, = np.where(np.abs(Qt[ixes]-max_Qt) < 1e-10)
u = u[rng.randint(len(u))]
Ct[ixes[u]] = np.logical_not(Ct[ixes[u]])
ixes = np.delete(ixes, u)
if max_Qt - q > 1e-10:
flag = True
C = Ct.copy()
cix, = np.where(C)
ncix, = np.where(np.logical_not(C))
q = (np.sum(B[np.ix_(cix, cix)]) -
np.sum(B[np.ix_(ncix, ncix)]))
cix, = np.where(C)
ncix, = np.where(np.logical_not(C))
q = np.sum(B[np.ix_(cix, cix)]) - np.sum(B[np.ix_(ncix, ncix)])
return C, q
|
The optimal core/periphery subdivision is a partition of the network
into two nonoverlapping groups of nodes, a core group and a periphery
group. The number of core-group edges is maximized, and the number of
within periphery edges is minimized.
The core-ness is a statistic which quantifies the goodness of the
optimal core/periphery subdivision (with arbitrary relative value).
The algorithm uses a variation of the Kernighan-Lin graph partitioning
algorithm to optimize a core-structure objective described in
Borgatti & Everett (2000) Soc Networks 21:375-395
See Rubinov, Ypma et al. (2015) PNAS 112:10032-7
Parameters
----------
W : NxN np.ndarray
directed connection matrix
gamma : core-ness resolution parameter
Default value = 1
gamma > 1 detects small core, large periphery
0 < gamma < 1 detects large core, small periphery
C0 : NxN np.ndarray
Initial core structure
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
|
25,158 |
def copyidfobject(self, idfobject):
return addthisbunch(self.idfobjects,
self.model,
self.idd_info,
idfobject, self)
|
Add an IDF object to the IDF.
Parameters
----------
idfobject : EpBunch object
The IDF object to copy. This usually comes from another idf file,
or it can be used to copy within this idf file.
|
25,159 |
def matrix_multiply(m1, m2):
mm = [[0.0 for _ in range(len(m2[0]))] for _ in range(len(m1))]
for i in range(len(m1)):
for j in range(len(m2[0])):
for k in range(len(m2)):
mm[i][j] += float(m1[i][k] * m2[k][j])
return mm
|
Matrix multiplication (iterative algorithm).
The running time of the iterative matrix multiplication algorithm is :math:`O(n^{3})`.
:param m1: 1st matrix with dimensions :math:`(n \times p)`
:type m1: list, tuple
:param m2: 2nd matrix with dimensions :math:`(p \times m)`
:type m2: list, tuple
:return: resultant matrix with dimensions :math:`(n \times m)`
:rtype: list
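A small worked example, assuming matrix_multiply as defined above:
```python
m1 = [[1, 2],
      [3, 4]]
m2 = [[5, 6],
      [7, 8]]
matrix_multiply(m1, m2)   # [[19.0, 22.0], [43.0, 50.0]]
```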
|
25,160 |
def download(self, torrent_id, directory, filename) :
return self.call( % torrent_id, params={ : filename, : directory})
|
Download a torrent
|
25,161 |
def read_file(self, filename):
logger.info("Reading file: %s", format_path(filename))
contents = self.context.read_file(filename)
num_lines = len(contents.splitlines())
logger.debug("Read %s from %s.",
pluralize(num_lines, 'line'),
format_path(filename))
return contents.rstrip()
|
Read a text file and provide feedback to the user.
:param filename: The pathname of the file to read (a string).
:returns: The contents of the file (a string).
|
25,162 |
def main():
args = parse_command_line()
print(HEADER.format(ip.__version__))
np.random.seed(args.rseed)
if os.path.exists(ip.__debugflag__):
os.remove(ip.__debugflag__)
if args.debug:
print("\n ** Enabling debug mode ** ")
ip._debug_on()
if args.json:
data = ipa.tetrad(name=args.name, workdir=args.workdir, load=True)
if args.force:
data._refresh()
else:
data.run(force=args.force, ipyclient=ipyclient)
|
main function
|
25,163 |
def answer(request):
if request.method == :
return render(request, , {}, help_text=answer.__doc__)
elif request.method == :
practice_filter = get_filter(request)
practice_context = PracticeContext.objects.from_content(practice_filter)
saved_answers = _save_answers(request, practice_context, True)
return render_json(request, saved_answers, status=200, template=)
else:
return HttpResponseBadRequest("method {} is not allowed".format(request.method))
|
Save the answer.
GET parameters:
html:
turn on the HTML version of the API
BODY
json in following format:
{
"answer": #answer, -- for one answer
"answers": [#answer, #answer, #answer ...] -- for multiple answers
}
answer = {
"answer_class": str, -- class of answer to save (e.g., flashcard_answer)
"response_time": int, -- response time in milliseconds
"meta": "str" -- optional information
"time_gap": int -- waiting time in frontend in seconds
... -- other fields depending on answer type
(see from_json method of Django model class)
}
|
25,164 |
def grating(period,
number_of_teeth,
fill_frac,
width,
position,
direction,
lda=1,
sin_theta=0,
focus_distance=-1,
focus_width=-1,
evaluations=99,
layer=0,
datatype=0):
if focus_distance < 0:
path = gdspy.L1Path(
(position[0] - 0.5 * width,
position[1] + 0.5 * (number_of_teeth - 1 + fill_frac) * period),
,
period * fill_frac, [width], [],
number_of_teeth,
period,
layer=layer,
datatype=datatype)
else:
neff = lda / float(period) + sin_theta
qmin = int(focus_distance / float(period) + 0.5)
path = gdspy.Path(period * fill_frac, position)
max_points = 199 if focus_width < 0 else 2 * evaluations
c3 = neff**2 - sin_theta**2
w = 0.5 * width
for q in range(qmin, qmin + number_of_teeth):
c1 = q * lda * sin_theta
c2 = (q * lda)**2
path.parametric(
lambda t: (width * t - w, (c1 + neff * numpy.sqrt(c2 - c3 * (
width * t - w)**2)) / c3),
number_of_evaluations=evaluations,
max_points=max_points,
layer=layer,
datatype=datatype)
path.x = position[0]
path.y = position[1]
if focus_width >= 0:
path.polygons[0] = numpy.vstack(
(path.polygons[0][:evaluations, :],
([position] if focus_width == 0 else
[(position[0] + 0.5 * focus_width, position[1]),
(position[0] - 0.5 * focus_width, position[1])])))
path.fracture()
if direction == :
return path.rotate(0.5 * numpy.pi, position)
elif direction == :
return path.rotate(-0.5 * numpy.pi, position)
elif direction == :
return path.rotate(numpy.pi, position)
else:
return path
|
Straight or focusing grating.
period : grating period
number_of_teeth : number of teeth in the grating
fill_frac : filling fraction of the teeth (w.r.t. the period)
width : width of the grating
position : grating position (feed point)
direction : one of {'+x', '-x', '+y', '-y'}
lda : free-space wavelength
sin_theta : sine of incidence angle
focus_distance : focus distance (negative for straight grating)
focus_width : if non-negative, the focusing area is included in
the result (usually for negative resists) and this
is the width of the waveguide connecting to the
grating
evaluations : number of evaluations of `path.parametric`
layer : GDSII layer number
datatype : GDSII datatype number
Return `PolygonSet`
|
25,165 |
def from_value(self, instance, value):
try:
parsed = self.type_.parse(value)
except (TypeError, ValueError):
if self.erroneous_as_absent:
return False
raise
self._set_from_recv(instance, parsed)
return True
|
Convert the given value using the set `type_` and store it into
`instance`'s attribute.
|
25,166 |
def notify_event(self, session_info, topic):
try:
self.event_bus.sendMessage(topic, items=session_info)
except AttributeError:
msg = "Could not publish {} event".format(topic)
raise AttributeError(msg)
|
:type identifiers: SimpleIdentifierCollection
|
25,167 |
def get_resource_listing(url, offset, limit, properties):
query = [
QPARA_OFFSET + '=' + str(offset),
QPARA_LIMIT + '=' + str(limit)
]
if not properties is None:
if len(properties) > 0:
query.append(QPARA_ATTRIBUTES + '=' + ','.join(properties))
url = url + '?' + '&'.join(query)
json_obj = JsonResource(url).json
resources = []
for element in json_obj[]:
resource = ResourceHandle(element)
if not properties is None:
resource.properties = {}
for prop in properties:
if prop in element:
resource.properties[prop] = element[prop]
resources.append(resource)
return resources
|
Generic method to retrieve a resource listing from a SCO-API. Takes the
resource-specific API listing Url as argument.
Parameters
----------
url : string
Resource listing Url for a SCO-API
offset : int, optional
Starting offset for returned list items
limit : int, optional
Limit the number of items in the result
properties : List(string)
List of additional object properties to be included for items in
the result
Returns
-------
List(ResourceHandle)
List of resource handle (one per subject in the object listing)
|
25,168 |
def getDefaultApplicationForMimeType(self, pchMimeType, pchAppKeyBuffer, unAppKeyBufferLen):
fn = self.function_table.getDefaultApplicationForMimeType
result = fn(pchMimeType, pchAppKeyBuffer, unAppKeyBufferLen)
return result
|
return the app key that will open this mime type
|
25,169 |
def point_cloud(df, columns=[0, 1, 2]):
df = df if isinstance(df, pd.DataFrame) else pd.DataFrame(df)
if not all(c in df.columns for c in columns):
columns = list(df.columns)[:3]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
Axes3D.scatter(*[df[columns[i]] for i in range(3)], zdir='z', s=20, c=None, depthshade=True)
return ax
|
3-D Point cloud for plotting things like mesh models of horses ;)
|
25,170 |
def sheet2matrixidx(self,x,y):
r,c = self.sheet2matrix(x,y)
r = np.floor(r)
c = np.floor(c)
if hasattr(r, 'astype'):
return r.astype(int), c.astype(int)
else:
return int(r),int(c)
|
Convert a point (x,y) in sheet coordinates to the integer row
and column index of the matrix cell in which that point falls,
given a bounds and density. Returns (row,column).
Note that if coordinates along the right or bottom boundary
are passed into this function, the returned matrix coordinate
of the boundary will be just outside the matrix, because the
right and bottom boundaries are exclusive.
Valid for scalar or array x and y.
|
25,171 |
def plot(self, axis=None, node_size=40, node_color='k',
node_alpha=0.8, edge_alpha=0.5, edge_cmap='viridis_r',
edge_linewidth=2, vary_line_width=True, colorbar=True):
try:
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
except ImportError:
raise ImportError()
if self._data.shape[0] > 32767:
warn()
return None
if axis is None:
axis = plt.gca()
if self._data.shape[1] > 2:
if self._data.shape[1] > 32:
data_for_projection = PCA(n_components=32).fit_transform(self._data)
else:
data_for_projection = self._data.copy()
projection = TSNE().fit_transform(data_for_projection)
else:
projection = self._data.copy()
if vary_line_width:
line_width = edge_linewidth * (np.log(self._mst.T[2].max() / self._mst.T[2]) + 1.0)
else:
line_width = edge_linewidth
line_coords = projection[self._mst[:, :2].astype(int)]
line_collection = LineCollection(line_coords, linewidth=line_width,
cmap=edge_cmap, alpha=edge_alpha)
line_collection.set_array(self._mst[:, 2].T)
axis.add_artist(line_collection)
axis.scatter(projection.T[0], projection.T[1], c=node_color, alpha=node_alpha, s=node_size)
axis.set_xticks([])
axis.set_yticks([])
if colorbar:
cb = plt.colorbar(line_collection)
cb.ax.set_ylabel()
return axis
|
Plot the minimum spanning tree (as projected into 2D by t-SNE if required).
Parameters
----------
axis : matplotlib axis, optional
The axis to render the plot to
node_size : int, optional
The size of nodes in the plot (default 40).
node_color : matplotlib color spec, optional
The color to render nodes (default black).
node_alpha : float, optional
The alpha value (between 0 and 1) to render nodes with
(default 0.8).
edge_cmap : matplotlib colormap, optional
The colormap to color edges by (varying color by edge
weight/distance). Can be a cmap object or a string
recognised by matplotlib. (default `viridis_r`)
edge_alpha : float, optional
The alpha value (between 0 and 1) to render edges with
(default 0.5).
edge_linewidth : float, optional
The linewidth to use for rendering edges (default 2).
vary_line_width : bool, optional
Edge width is proportional to (log of) the inverse of the
mutual reachability distance. (default True)
colorbar : bool, optional
Whether to draw a colorbar. (default True)
Returns
-------
axis : matplotlib axis
The axis used to render the plot.
|
25,172 |
def readBoolean(self):
byte = self.stream.read(1)
if byte == '\x00':
return False
elif byte == '\x01':
return True
else:
raise ValueError("Error reading boolean")
|
Read C{Boolean}.
@raise ValueError: Error reading Boolean.
@rtype: C{bool}
@return: A Boolean value, C{True} if the byte
is nonzero, C{False} otherwise.
|
25,173 |
def available(self, **kwargs):
uri = "%s/%s" % (self.uri, "available")
response, instance = self.request("GET", uri, params=kwargs)
return instance
|
Find available dedicated numbers to buy. Returns dictionary like this:
::
{
"numbers": [
"12146124143",
"12172100315",
"12172100317",
"12172100319",
"12172100321",
"12172100323",
"12172100325",
"12172100326",
"12172100327",
"12172100328"
],
"price": 2.4
}
:Example:
numbers = client.numbers.available(country="US")
:param str country: Dedicated number country. Required.
:param str prefix: Desired number prefix. Should include country code (i.e. 447 for GB)
|
25,174 |
def parse():
parser = argparse.ArgumentParser(
description=,
formatter_class=argparse.RawTextHelpFormatter
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
,
,
help=,
metavar=,
)
group.add_argument(
,
,
help=,
metavar=,
)
parser.add_argument(
,
,
help=,
metavar=,
required=False,
)
parser.add_argument(
,
,
help=,
required=False,
action=,
)
parser.add_argument(
,
,
help=,
required=False,
action=,
)
parser.add_argument(
,
,
help=,
required=False,
action=,
)
parser.add_argument(
,
,
help=,
required=False,
action=,
)
parser.add_argument(
,
,
help=,
required=False,
action=,
)
parser.add_argument(
,
,
help=,
required=False,
metavar=,
)
return vars(parser.parse_args())
|
parse arguments supplied by cmd-line
|
25,175 |
def tplot_restore(filename):
if not (os.path.isfile(filename)):
print("Not a valid file name")
return
if filename.endswith():
temp_tplot = readsav(filename)
for i in range(len(temp_tplot[])):
data_name = temp_tplot[][i][0].decode("utf-8")
temp_x_data = temp_tplot[][i][1][0][0]
if len(temp_tplot[][i][1][0][2].shape) == 2:
temp_y_data = np.transpose(temp_tplot[][i][1][0][2])
else:
temp_y_data = temp_tplot[][i][1][0][2]
if len(temp_tplot[][i][1][0]) > 4:
temp_v_data = temp_tplot[][i][1][0][4]
if (temp_x_data.dtype.byteorder == ):
temp_x_data = temp_x_data.byteswap().newbyteorder()
if (temp_y_data.dtype.byteorder == ):
temp_y_data = temp_y_data.byteswap().newbyteorder()
if (temp_v_data.dtype.byteorder == ):
temp_v_data = temp_v_data.byteswap().newbyteorder()
store_data(data_name, data={:temp_x_data, :temp_y_data, :temp_v_data})
else:
if (temp_x_data.dtype.byteorder == ):
temp_x_data = temp_x_data.byteswap().newbyteorder()
if (temp_y_data.dtype.byteorder == ):
temp_y_data = temp_y_data.byteswap().newbyteorder()
store_data(data_name, data={:temp_x_data, :temp_y_data})
if temp_tplot[][i][3].dtype.names is not None:
for option_name in temp_tplot[][i][3].dtype.names:
options(data_name, option_name, temp_tplot[][i][3][option_name][0])
data_quants[data_name].trange = temp_tplot[][i][4].tolist()
data_quants[data_name].dtype = temp_tplot[][i][5]
data_quants[data_name].create_time = temp_tplot[][i][6]
for option_name in temp_tplot[][0][0].dtype.names:
if option_name == :
tplot_options(, temp_tplot[][0][0][option_name][0])
if option_name == :
tplot_options(, temp_tplot[][0][0][option_name][0])
if option_name == :
tplot_options(, temp_tplot[][0][0][option_name][0])
if in temp_tplot[][0][1].tolist():
for option_name in temp_tplot[][0][1][][0].dtype.names:
if option_name == :
tplot_options(, temp_tplot[][0][1][][0][option_name][0])
else:
temp = pickle.load(open(filename,"rb"))
num_data_quants = temp[0]
for i in range(0, num_data_quants):
data_quants[temp[i+1].name] = temp[i+1]
tplot_opt_glob = temp[num_data_quants+1]
return
|
This function will restore tplot variables that have been saved with the "tplot_save" command.
.. note::
This function is compatible with the IDL tplot_save routine.
If you have a ".tplot" file generated from IDL, this procedure will restore the data contained in the file.
Not all plot options will transfer over at this time.
Parameters:
filename : str
The file name and full path generated by the "tplot_save" command.
Returns:
None
Examples:
>>> # Restore the saved data from the tplot_save example
>>> import pytplot
>>> pytplot.restore('C:/temp/variable1.pytplot')
|
25,176 |
def _using_stdout(self):
if WINDOWS and colorama:
return self.stream.wrapped is sys.stdout
return self.stream is sys.stdout
|
Return whether the handler is using sys.stdout.
|
25,177 |
def open(self, mode):
if mode == 'w':
return self.format.pipe_writer(AtomicFtpFile(self._fs, self.path))
elif mode == 'r':
temp_dir = os.path.join(tempfile.gettempdir(), )
self.__tmp_path = temp_dir + + self.path.lstrip() + % random.randrange(0, 1e10)
self._fs.get(self.path, self.__tmp_path)
return self.format.pipe_reader(
FileWrapper(io.BufferedReader(io.FileIO(self.__tmp_path, )))
)
else:
raise Exception("mode must be 'r' or 'w' (got: %s)" % mode)
|
Open the FileSystem target.
This method returns a file-like object which can either be read from or written to depending
on the specified mode.
:param mode: the mode `r` opens the FileSystemTarget in read-only mode, whereas `w` will
open the FileSystemTarget in write mode. Subclasses can implement
additional options.
:type mode: str
|
25,178 |
def get_bounds(self, bin_num):
min_bound = (self.bin_size * bin_num) + self.min_value
max_bound = min_bound + self.bin_size
return self.Bounds(min_bound, max_bound)
|
Get the bounds of a bin, given its index `bin_num`.
:returns: a `Bounds` namedtuple with properties min and max
respectively.
|
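A minimal, self-contained sketch of the binning arithmetic the method implies; the `Histogram` wrapper and its attribute names are assumptions for illustration:
from collections import namedtuple

Bounds = namedtuple('Bounds', ['min', 'max'])

class Histogram:
    # Hypothetical container exposing the attributes get_bounds relies on.
    Bounds = Bounds

    def __init__(self, min_value, bin_size):
        self.min_value = min_value
        self.bin_size = bin_size

    def get_bounds(self, bin_num):
        min_bound = (self.bin_size * bin_num) + self.min_value
        max_bound = min_bound + self.bin_size
        return self.Bounds(min_bound, max_bound)

h = Histogram(min_value=0.0, bin_size=2.5)
print(h.get_bounds(3))  # Bounds(min=7.5, max=10.0)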
25,179 |
def compute_venn3_subsets(a, b, c):
if not (type(a) == type(b) == type(c)):
raise ValueError("All arguments must be of the same type")
set_size = len if type(a) != Counter else lambda x: sum(x.values())
return (set_size(a - (b | c)),
set_size(b - (a | c)),
set_size((a & b) - c),
set_size(c - (a | b)),
set_size((a & c) - b),
set_size((b & c) - a),
set_size(a & b & c))
|
Given three set or Counter objects, computes the sizes of (a & ~b & ~c, ~a & b & ~c, a & b & ~c, ....),
as needed by the subsets parameter of venn3 and venn3_circles.
Returns the result as a tuple.
>>> compute_venn3_subsets(set([1,2,3]), set([2,3,4]), set([3,4,5,6]))
(1, 0, 1, 2, 0, 1, 1)
>>> compute_venn3_subsets(Counter([1,2,3]), Counter([2,3,4]), Counter([3,4,5,6]))
(1, 0, 1, 2, 0, 1, 1)
>>> compute_venn3_subsets(Counter([1,1,1]), Counter([1,1,1]), Counter([1,1,1,1]))
(0, 0, 0, 1, 0, 0, 3)
>>> compute_venn3_subsets(Counter([1,1,2,2,3,3]), Counter([2,2,3,3,4,4]), Counter([3,3,4,4,5,5,6,6]))
(2, 0, 2, 4, 0, 2, 2)
>>> compute_venn3_subsets(Counter([1,2,3]), Counter([2,2,3,3,4,4]), Counter([3,3,4,4,4,5,5,6]))
(1, 1, 1, 4, 0, 3, 1)
>>> compute_venn3_subsets(set([]), set([]), set([]))
(0, 0, 0, 0, 0, 0, 0)
>>> compute_venn3_subsets(set([1]), set([]), set([]))
(1, 0, 0, 0, 0, 0, 0)
>>> compute_venn3_subsets(set([]), set([1]), set([]))
(0, 1, 0, 0, 0, 0, 0)
>>> compute_venn3_subsets(set([]), set([]), set([1]))
(0, 0, 0, 1, 0, 0, 0)
>>> compute_venn3_subsets(Counter([]), Counter([]), Counter([1]))
(0, 0, 0, 1, 0, 0, 0)
>>> compute_venn3_subsets(set([1]), set([1]), set([1]))
(0, 0, 0, 0, 0, 0, 1)
>>> compute_venn3_subsets(set([1,3,5,7]), set([2,3,6,7]), set([4,5,6,7]))
(1, 1, 1, 1, 1, 1, 1)
>>> compute_venn3_subsets(Counter([1,3,5,7]), Counter([2,3,6,7]), Counter([4,5,6,7]))
(1, 1, 1, 1, 1, 1, 1)
>>> compute_venn3_subsets(Counter([1,3,5,7]), set([2,3,6,7]), set([4,5,6,7]))
Traceback (most recent call last):
...
ValueError: All arguments must be of the same type
|
25,180 |
def off_coordinator(self, year):
try:
oc_anchor = self._year_info_pq(year, 'Offensive Coordinator')('a')
if oc_anchor:
    return oc_anchor.attr['href']
except ValueError:
return None
|
Returns the coach ID for the team's OC in a given year.
:year: An int representing the year.
:returns: A string containing the coach ID of the OC.
|
25,181 |
def load_hdf5(path):
with h5py.File(path, 'r') as f:
    is_sparse = f['issparse'][...]
    if is_sparse:
        shape = tuple(f['shape'][...])
        data = f['data'][...]
        indices = f['indices'][...]
        indptr = f['indptr'][...]
        X = sparse.csr_matrix((data, indices, indptr), shape=shape)
    else:
        X = f['X'][...]
    y = f['y'][...]
return X, y
|
Load data from a HDF5 file.
Args:
path (str): A path to the HDF5 format file containing data.
dense (boolean): An optional variable indicating if the return matrix
should be dense. By default, it is false.
Returns:
Data matrix X and target vector y
|
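For reference, a hedged sketch of writing a file in the layout load_hdf5 appears to expect; the dataset names mirror the variables in the loader and are an assumption, not a documented format:
import h5py
import numpy as np
from scipy import sparse

def save_hdf5_sketch(X, y, path):
    # Assumed layout: dataset names match those read back by load_hdf5 above.
    with h5py.File(path, 'w') as f:
        is_sparse = sparse.issparse(X)
        f.create_dataset('issparse', data=is_sparse)
        if is_sparse:
            X = X.tocsr()
            f.create_dataset('shape', data=np.array(X.shape))
            f.create_dataset('data', data=X.data)
            f.create_dataset('indices', data=X.indices)
            f.create_dataset('indptr', data=X.indptr)
        else:
            f.create_dataset('X', data=np.asarray(X))
        f.create_dataset('y', data=np.asarray(y))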
25,182 |
def get_id(self, name, recurse=True):
self._dlog("getting id {}".format(name))
var = self._search("vars", name, recurse)
return var
|
Get the first id matching ``name``. Will either be a local
or a var.
:name: TODO
:returns: TODO
|
25,183 |
def etree_to_dict(source):
def etree_to_dict_recursive(parent):
children = parent.getchildren()
if children:
d = {}
identical_children = False
for child in children:
if not identical_children:
if child.tag in d:
identical_children = True
l = [{key: d[key]} for key in d]
l.append({child.tag: etree_to_dict_recursive(child)})
del d
else:
d.update({child.tag: etree_to_dict_recursive(child)})
else:
l.append({child.tag: etree_to_dict_recursive(child)})
return (d if not identical_children else l)
else:
return parent.text
if hasattr(source, 'getroot'):
source = source.getroot()
if hasattr(source, 'tag'):
return {source.tag: etree_to_dict_recursive(source)}
else:
raise TypeError("Requires an Element or an ElementTree.")
|
Recursively convert an etree representation of an XML tree into a dict/list representation.
Args:
source -- An etree Element or ElementTree.
Returns:
A dictionary representing source's XML structure, where tags with multiple identical children
contain a list of all their children's dictionaries.
>>> etree_to_dict(ET.fromstring('<content><id>12</id><title/></content>'))
{'content': {'id': '12', 'title': None}}
>>> etree_to_dict(ET.fromstring('<content><list><li>foo</li><li>bar</li></list></content>'))
{'content': {'list': [{'li': 'foo'}, {'li': 'bar'}]}}
|
25,184 |
def _load_packets(file_h, header, layers=0):
pkts = []
hdrp = ctypes.pointer(header)
while True:
pkt = _read_a_packet(file_h, hdrp, layers)
if pkt:
pkts.append(pkt)
else:
break
return pkts
|
Read packets from the capture file. Expects the file handle to point to
the location immediately after the header (24 bytes).
|
25,185 |
def run_hive(args, check_return_code=True):
cmd = load_hive_cmd() + args
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if check_return_code and p.returncode != 0:
raise HiveCommandError("Hive command: {0} failed with error code: {1}".format(" ".join(cmd), p.returncode),
stdout, stderr)
return stdout.decode()
|
Runs `hive` from the command line, passing in the given args, and
returning stdout.
With the Apache release of Hive, some of the table existence checks
(which are done using DESCRIBE) do not exit with a return code of 0,
so we need an option to ignore the return code and just return stdout for parsing.
|
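A brief usage sketch of the option described above; the table name is illustrative, and the "does not exist" check is one plausible way to parse the output:
# Existence check: ignore the non-zero exit code and inspect stdout instead.
stdout = run_hive(['-e', 'DESCRIBE some_table'], check_return_code=False)
table_exists = 'does not exist' not in stdout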
25,186 |
def delNode(self, address):
if address in self.nodes:
del self.nodes[address]
self.poly.delNode(address)
|
Just send it along if requested, should be able to delete the node even if it isn't
in our config anywhere. Usually used for normalization.
|
25,187 |
def enable_contactgroup_host_notifications(self, contactgroup):
for contact_id in contactgroup.get_contacts():
self.enable_contact_host_notifications(self.daemon.contacts[contact_id])
|
Enable host notifications for a contactgroup
Format of the line that triggers function call::
ENABLE_CONTACTGROUP_HOST_NOTIFICATIONS;<contactgroup_name>
:param contactgroup: contactgroup to enable
:type contactgroup: alignak.objects.contactgroup.Contactgroup
:return: None
|
25,188 |
def _remove_non_serializable_store_entries(store: Store) -> dict:
cleaned_store_data = {}
for key, value in store.items():
if Script._is_serializable(key) and Script._is_serializable(value):
cleaned_store_data[key] = value
else:
_logger.info("Skip non-serializable item in the local script store. Key: '{}', Value: '{}'. "
"This item cannot be saved and therefore will be lost when autokey quits.".format(
key, value
))
return cleaned_store_data
|
Copy all serializable data into a new dict, and skip the rest.
This makes sure to keep the items during runtime, even if the user edits and saves the script.
|
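The Script._is_serializable helper is not shown in this entry; a minimal sketch of one possible criterion (a JSON round-trip, which is an assumption, not the library's documented behaviour):
import json

def _is_serializable(value) -> bool:
    # Hypothetical criterion: keep an item only if it survives JSON encoding.
    try:
        json.dumps(value)
        return True
    except (TypeError, ValueError):
        return False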
25,189 |
def _start_http_session(self):
api_logger.debug("Starting new HTTP session...")
self.session = FuturesSession(executor=self.executor, max_workers=self.max_workers)
self.session.headers.update({"User-Agent": self.user_agent})
if self.username and self.password:
api_logger.debug("Requests will use authorization.")
self.session.auth = HTTPBasicAuth(self.username, self.password)
|
Start a new requests HTTP session, clearing cookies and session data.
:return: None
|
25,190 |
def port_profile_qos_profile_qos_flowcontrol_flowcontrolglobal_tx(self, **kwargs):
config = ET.Element("config")
port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
qos_profile = ET.SubElement(port_profile, "qos-profile")
qos = ET.SubElement(qos_profile, "qos")
flowcontrol = ET.SubElement(qos, "flowcontrol")
flowcontrolglobal = ET.SubElement(flowcontrol, "flowcontrolglobal")
tx = ET.SubElement(flowcontrolglobal, "tx")
tx.text = kwargs.pop('tx')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
25,191 |
def get_many(d, required=[], optional=[], one_of=[]):
d = d or {}
r = [d[k] for k in required]
r += [d.get(k) for k in optional]
if one_of:
for k in (k for k in one_of if k in d):
return r + [d[k]]
raise KeyError("Missing a one_of value.")
return r
|
Returns a predictable number of elements out of ``d`` in a list for auto-expanding.
Keys in ``required`` will raise KeyError if not found in ``d``.
Keys in ``optional`` will return None if not found in ``d``.
Keys in ``one_of`` will raise KeyError if none exist, otherwise return the first in ``d``.
Example::
uid, action, limit, offset = get_many(request.params, required=['uid', 'action'], optional=['limit', 'offset'])
Note: This function has been added to the webhelpers package.
|
25,192 |
def connect(self, target, acceptor, wrapper=None):
if not self.running:
raise ValueError("TendrilManager not running")
fam = utils.addr_info(target)
if self.addr_family != fam:
raise ValueError("address family mismatch")
|
Initiate a connection from the tendril manager's endpoint.
Once the connection is completed, a Tendril object will be
created and passed to the given acceptor.
:param target: The target of the connection attempt.
:param acceptor: A callable which will initialize the state of
the new Tendril object.
:param wrapper: A callable taking, as its first argument, a
socket.socket object. The callable must
return a valid proxy for the socket.socket
object, which will subsequently be used to
communicate on the connection.
For passing extra arguments to the acceptor or the wrapper,
see the ``TendrilPartial`` class; for chaining together
multiple wrappers, see the ``WrapperChain`` class.
|
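A hedged sketch of the calling pattern the docstring describes; `manager` is assumed to be a running TendrilManager, and the address, acceptor body, and bound argument are illustrative:
def acceptor(tendril, label):
    # Illustrative: stash application state on the freshly created Tendril.
    tendril.application = {'label': label}

# Extra acceptor arguments are bound with TendrilPartial, as the docstring notes.
manager.connect(('example.com', 4443), TendrilPartial(acceptor, 'client-1'))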
25,193 |
def find_codon_mismatches(sbjct_start, sbjct_seq, qry_seq):
mis_matches = []
codon_offset = (sbjct_start-1) % 3
i_start = 0
if codon_offset != 0:
i_start = 3 - codon_offset
sbjct_start = sbjct_start + i_start
sbjct_seq = sbjct_seq[i_start:]
qry_seq = qry_seq[i_start:]
codon_no = int((sbjct_start-1) / 3)
q_shift = 0
s_shift = 0
mut_no = 0
indel_no = 0
indels = get_indels(sbjct_seq, qry_seq, sbjct_start)
for index in range(0, len(sbjct_seq), 3):
codon_no += 1
s_i = index + s_shift
q_i = index + q_shift
sbjct_codon = sbjct_seq[s_i:s_i+3]
qry_codon = qry_seq[q_i:q_i+3]
if len(sbjct_seq[s_i:].replace("-","")) + len(qry_seq[q_i:].replace("-","")) < 6:
break
if sbjct_codon.upper() != qry_codon.upper():
if "-" in sbjct_codon or "-" in qry_codon:
try:
indel_data = indels[indel_no]
except IndexError:
print(sbjct_codon, qry_codon)
print(indels)
print(gene, indel_data, indel_no)
mut = indel_data[0]
codon_no_indel = indel_data[1]
seq_pos = indel_data[2] + sbjct_start - 1
indel = indel_data[3]
indel_no +=1
if mut == "ins":
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], 3)
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], int(math.floor(len(sbjct_rf_indel)/3) *3))
else:
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], 3)
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], int(math.floor(len(qry_rf_indel)/3) *3))
mut_name, aa_ref, aa_alt = name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, sbjct_start - 1)
shift_diff_before = abs(s_shift - q_shift)
s_shift += len(sbjct_rf_indel) - 3
q_shift += len(qry_rf_indel) - 3
shift_diff = abs(s_shift - q_shift)
if shift_diff_before != 0 and shift_diff %3 == 0:
if s_shift > q_shift:
nucs_needed = int((len(sbjct_rf_indel)/3) *3) + shift_diff
pre_qry_indel = qry_rf_indel
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], nucs_needed)
q_shift += len(qry_rf_indel) - len(pre_qry_indel)
elif q_shift > s_shift:
nucs_needed = int((len(qry_rf_indel)/3)*3) + shift_diff
pre_sbjct_indel = sbjct_rf_indel
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], nucs_needed)
s_shift += len(sbjct_rf_indel) - len(pre_sbjct_indel)
mut_name, aa_ref, aa_alt = name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, sbjct_start - 1)
if "Frameshift" in mut_name:
mut_name = mut_name.split("-")[0] + "- Frame restored"
mis_matches += [[mut, codon_no_indel, seq_pos, indel, mut_name, sbjct_rf_indel, qry_rf_indel, aa_ref, aa_alt]]
no_of_indels = len(re.findall("\-\w", sbjct_rf_indel)) + len(re.findall("\-\w", qry_rf_indel))
if no_of_indels > 1:
for j in range(indel_no, indel_no + no_of_indels - 1):
try:
indel_data = indels[j]
except IndexError:
sys.exit("indel_data list is out of range, bug!")
mut = indel_data[0]
codon_no_indel = indel_data[1]
seq_pos = indel_data[2] + sbjct_start - 1
indel = indel_data[3]
indel_no +=1
mis_matches += [[mut, codon_no_indel, seq_pos, indel, mut_name, sbjct_rf_indel, qry_rf_indel, aa_ref, aa_alt]]
if mut == "del":
codon_no += int((len(sbjct_rf_indel) - 3)/3)
elif sbjct_rf_indel.count("-") == len(sbjct_rf_indel):
codon_no -= 1
else:
mut = "sub"
aa_ref = aa(sbjct_codon)
aa_alt = aa(qry_codon)
if aa_ref != aa_alt:
mut_name = "p." + aa_ref + str(codon_no) + aa_alt
mis_matches += [[mut, codon_no, codon_no, aa_alt, mut_name, sbjct_codon, qry_codon, aa_ref, aa_alt]]
try:
if mis_matches[-1][-1] == "*":
mut_name += " - Premature stop codon"
mis_matches[-1][4] = mis_matches[-1][4].split("-")[0] + " - Premature stop codon"
break
except IndexError:
pass
mis_matches = sorted(mis_matches, key = lambda x:x[1])
return mis_matches
|
This function takes two aligned sequences (subject and query) and
the position on the subject where the alignment starts. The sequences
are compared codon by codon. If a mismatch is found, it is saved in
'mis_matches'. If a gap is found, the function get_inframe_gap is used
to find the indel sequence and keep the sequence in the correct
reading frame. The function name_indel_mutation is used to name indel
mutations and translate the indels to amino acids.
The function returns a list of tuples containing all the information
needed about each mutation in order to look it up in the dict of known
database mutations and to report it in the output files to the user.
|
25,194 |
def _group_groups(perm_list):
perm_list = sorted(perm_list, key=lambda tup: tup[0])
grouped_perms = []
for key, group in groupby(perm_list, lambda tup: (tup[0], tup[1])):
grouped_perms.append((key[0], key[1], [g[2] for g in group]))
return grouped_perms
|
Group permissions by group.
Input is a list of tuples of length 3, where each tuple is in the
following format::
(<group_id>, <group_name>, <single_permission>)
Permissions are regrouped and returned in such a way that there is
only one tuple for each group::
(<group_id>, <group_name>, [<first_permission>, <second_permission>,...])
:param list perm_list: list of tuples of length 3
:return: list of tuples with grouped permissions
:rtype: list
|
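A small usage sketch of the regrouping described above; the ids and permission names are illustrative:
perms = [
    (2, 'editors', 'view'),
    (1, 'admins', 'view'),
    (1, 'admins', 'edit'),
]
print(_group_groups(perms))
# [(1, 'admins', ['view', 'edit']), (2, 'editors', ['view'])]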
25,195 |
def CreateVertices(self, points):
gr = digraph()
for z, x, Q in points:
node = (z, x, Q)
gr.add_nodes([node])
return gr
|
Returns a dictionary object with keys that are 2-tuples representing a point.
|
25,196 |
def intermediate_fluents(self) -> Dict[str, PVariable]:
return { str(pvar): pvar for pvar in self.pvariables if pvar.is_intermediate_fluent() }
|
Returns interm-fluent pvariables.
|
25,197 |
def query_string(context, key, value):
try:
request = context['request']
args = request.GET.copy()
except KeyError:
args = QueryDict().copy()
args[key] = value
return args.urlencode()
|
For adding/replacing a key=value pair to the GET string for a URL.
eg, if we're viewing ?p=3 and we do {% query_string order 'taken' %}
then this returns "p=3&order=taken"
And, if we're viewing ?p=3&order=uploaded and we do the same thing, we get
the same result (ie, the existing "order=uploaded" is replaced).
Expects the request object in context to do the above; otherwise it will
just return a query string with the supplied key=value pair.
|
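A self-contained sketch of how the tag behaves, using a fake request object; the query values are illustrative and the exact parameter order in the output may differ:
from django.conf import settings

settings.configure()  # minimal config so QueryDict can be used standalone
from django.http import QueryDict

class FakeRequest:
    # Simulates a request whose GET string is "p=3&order=uploaded".
    GET = QueryDict('p=3&order=uploaded')

context = {'request': FakeRequest()}
print(query_string(context, 'order', 'taken'))  # e.g. "p=3&order=taken"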
25,198 |
def progress(params, rep):
name = params['name']
fullpath = os.path.join(params['path'], params['name'])
logname = os.path.join(fullpath, '%i.log' % rep)
if os.path.exists(logname):
    logfile = open(logname, 'r')
    lines = logfile.readlines()
    logfile.close()
    return int(100 * len(lines) / params['iterations'])
else:
return 0
|
Helper function to calculate the progress made on one experiment.
|
25,199 |
def LoadFromStorage(cls, path=None):
if path is None:
path = os.path.join(os.path.expanduser('~'), 'googleads.yaml')
return cls(**googleads.common.LoadFromStorage(
path, cls._YAML_KEY, cls._REQUIRED_INIT_VALUES,
cls._OPTIONAL_INIT_VALUES))
|
Creates an AdWordsClient with information stored in a yaml file.
Args:
[optional]
path: The path string to the file containing cached AdWords data.
Returns:
An AdWordsClient initialized with the values cached in the file.
Raises:
A GoogleAdsValueError if the given yaml file does not contain the
information necessary to instantiate a client object - either a
required key was missing or an OAuth2 key was missing.
|
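Typical usage per the docstring; with no path argument the client is built from the googleads.yaml file in the home directory:
# Reads OAuth2 and client settings from ~/googleads.yaml by default.
adwords_client = AdWordsClient.LoadFromStorage()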