Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k)
---|---|---|
21,700 | def _filter_fields(self, filter_function):
fields = []
if self.parent_type:
fields.extend(self.parent_type._filter_fields(filter_function))
fields.extend(filter(filter_function, self.fields))
return fields | Utility to iterate through all fields (super types first) of a type.
:param filter_function: A function that takes in a Field object. If it returns
True, the field is part of the generated output. If False, it is
omitted. |
21,701 | def submit(self, stanza):
body = _encode(**stanza)
self.service.post(self.path, body=body)
return self | Adds keys to the current configuration stanza as a
dictionary of key-value pairs.
:param stanza: A dictionary of key-value pairs for the stanza.
:type stanza: ``dict``
:return: The :class:`Stanza` object. |
21,702 | def remove_blocked_work_units(self, work_spec_name, work_unit_names):
return self._remove_some_work_units(
work_spec_name, work_unit_names, suffix=_BLOCKED) | Remove some work units in the blocked list.
If `work_unit_names` is :const:`None` (which must be passed
explicitly), all pending work units in `work_spec_name` are
removed; otherwise only the specific named work units will be.
Note that none of the "remove" functions will restart blocked
work units, so if you have called
e.g. :meth:`remove_available_work_units` for a predecessor
job, you may need to also call this method for its successor.
:param str work_spec_name: name of the work spec
:param list work_unit_names: names of the work units, or
:const:`None` for all in `work_spec_name`
:return: number of work units removed |
21,703 | def _deposit_withdraw(self, type, amount, coinbase_account_id):
# Dict-key literals reconstructed from the parameter names; the endpoint
# argument to _post was lost in extraction, and passing `type` here is an
# assumption.
data = {
'type': type,
'amount': amount,
'coinbase_account_id': coinbase_account_id
}
return self._post(type, data=data) | `<https://docs.exchange.coinbase.com/#depositwithdraw>`_ |
21,704 | def render_view(view_name, **args):
try:
root_xml = get_view_root(view_name)
return render(root_xml, **args)
except CoreError as error:
error.add_view_info(ViewInfo(view_name, None))
raise
except:
info = exc_info()
error = ViewError('Unknown error during view rendering', ViewInfo(view_name, None))  # message literal lost in extraction; placeholder text
error.add_cause(info[1])
raise error from info[1] | Process view and return root Node |
21,705 | def restrict(self, point):
items = [f.restrict(point) for f in self._items]
return self.__class__(items, self.shape, self.ftype) | Apply the ``restrict`` method to all functions.
Returns a new farray. |
21,706 | def stop(self, stop_I):
return pd.read_sql_query("SELECT * FROM stops WHERE stop_I={stop_I}".format(stop_I=stop_I), self.conn) | Get all stop data as a pandas DataFrame for an individual stop.
Parameters
----------
stop_I : int
stop index
Returns
-------
stop: pandas.DataFrame |
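The query above splices stop_I into the SQL with str.format; pandas also supports parameterized queries, which avoid quoting problems. A self-contained sketch against an in-memory SQLite table (illustration values):

import sqlite3
import pandas as pd

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE stops (stop_I INTEGER, name TEXT)")
conn.execute("INSERT INTO stops VALUES (1, 'Central')")
# a ? placeholder lets the driver handle quoting/escaping
df = pd.read_sql_query("SELECT * FROM stops WHERE stop_I = ?", conn, params=(1,))
print(df)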
21,707 | def _check_markers(task_ids, offset=10):
shuffle(task_ids)
has_errors = False
for index in xrange(0, len(task_ids), offset):
keys = [ndb.Key(FuriousAsyncMarker, id)
for id in task_ids[index:index + offset]]
markers = ndb.get_multi(keys)
if not all(markers):
logging.debug("Not all Asyncs fail? Check the success property on the
has_errors = not all((marker.success for marker in markers))
return True, has_errors | Returns a flag for markers being found for the task_ids. If all task ids
have markers True will be returned. Otherwise it will return False as soon
as a None result is hit. |
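A minimal, stand-alone sketch of the same fixed-size batching idiom used above (the IDs and batch size are made-up illustration values):

task_ids = ['task-%d' % n for n in range(25)]
offset = 10
for index in range(0, len(task_ids), offset):
    batch = task_ids[index:index + offset]
    print(len(batch))  # 10, 10, 5: the last batch holds the remainder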
21,708 | def get_differentially_expressed_genes(self, diff_type: str) -> VertexSeq:
if diff_type == "up":
diff_expr = self.graph.vs.select(up_regulated_eq=True)
elif diff_type == "down":
diff_expr = self.graph.vs.select(down_regulated_eq=True)
else:
diff_expr = self.graph.vs.select(diff_expressed_eq=True)
return diff_expr | Get the differentially expressed genes based on diff_type.
:param str diff_type: Differential expression type chosen by the user; all, down, or up.
:return: A VertexSeq of differentially expressed genes. |
21,709 | def stats(self):
printDebug("Classes.....: %d" % len(self.classes))
printDebug("Properties..: %d" % len(self.properties)) | shotcut to pull out useful info for interactive use |
21,710 | def create_doc_jar(self, target, open_jar, version):
javadoc = self._java_doc(target)
scaladoc = self._scala_doc(target)
if javadoc or scaladoc:
jar_path = self.artifact_path(open_jar, version, suffix='-javadoc')  # suffix literal lost in extraction; '-javadoc' assumed
with self.open_jar(jar_path, overwrite=True, compressed=True) as open_jar:
def add_docs(docs):
if docs:
for basedir, doc_files in docs.items():
for doc_file in doc_files:
open_jar.write(os.path.join(basedir, doc_file), doc_file)
add_docs(javadoc)
add_docs(scaladoc)
return jar_path
else:
return None | Returns a doc jar if either scala or java docs are available for the given target. |
21,711 | def find_last(fileobj, serial):
try:
fileobj.seek(-256*256, 2)
except IOError:
fileobj.seek(0)
data = fileobj.read()
try:
index = data.rindex(b"OggS")
except ValueError:
raise error("unable to find final Ogg header")
bytesobj = cBytesIO(data[index:])
best_page = None
try:
page = OggPage(bytesobj)
except error:
pass
else:
if page.serial == serial:
if page.last:
return page
else:
best_page = page
else:
best_page = None
fileobj.seek(0)
try:
page = OggPage(fileobj)
while not page.last:
page = OggPage(fileobj)
while page.serial != serial:
page = OggPage(fileobj)
best_page = page
return page
except error:
return best_page
except EOFError:
return best_page | Find the last page of the stream 'serial'.
If the file is not multiplexed this function is fast. If it is,
it must read the whole stream.
This finds the last page in the actual file object, or the last
page in the stream (with eos set), whichever comes first. |
21,712 | def _verify_subnet_association(route_table_desc, subnet_id):
# The docstring leaked into the code column here and the association-walking
# lines were mangled; the block below is reconstructed from the remnants.
if route_table_desc:
if 'associations' in route_table_desc:
for association in route_table_desc['associations']:
if association['subnet_id'] == subnet_id:
return True
return False | Helper function to verify a subnet's route table association
route_table_desc
the description of a route table, as returned from boto_vpc.describe_route_table
subnet_id
the subnet id to verify
.. versionadded:: 2016.11.0 |
21,713 | def update_fluent_cached_urls(item, dry_run=False):
change_report = []
if hasattr(item, 'translations'):  # attribute literal lost in extraction; 'translations' inferred from the loop below
for translation in item.translations.all():
old_url = translation._cached_url
item._update_cached_url(translation)
change_report.append(
(translation, 'url', old_url, translation._cached_url))  # change-type label literal lost in extraction; placeholder
if not dry_run:
translation.save()
if not dry_run:
item._expire_url_caches()
if item.is_draft:
children = [child for child in item.children.all()
if child.is_draft]
else:
children = [child for child in item.get_draft().children.all()
if child.is_published]
for child in children:
update_fluent_cached_urls(child, dry_run=dry_run)
return change_report | Regenerate the cached URLs for an item's translations. This is a fiddly
business: we use "hidden" methods instead of the public ones to avoid
unnecessary and unwanted slug changes to ensure uniqueness, the logic for
which doesn't work with our publishing. |
21,714 | def get_all(self, name, failobj=None):
values = []
name = name.lower()
for k, v in self._headers:
if k.lower() == name:
values.append(self.policy.header_fetch_parse(k, v))
if not values:
return failobj
return values | Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original
message, and may contain duplicates. Any fields deleted and
re-inserted are always appended to the header list.
If no such fields exist, failobj is returned (defaults to None). |
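This mirrors the stdlib email API; a quick runnable demonstration with email.message.Message:

from email.message import Message

msg = Message()
msg["Received"] = "from a.example"
msg["Received"] = "from b.example"
print(msg.get_all("received"))       # ['from a.example', 'from b.example']
print(msg.get_all("x-missing", []))  # [] (the failobj default)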
21,715 | def iteration(self):
i = 0
conv = np.inf
old_conv = -np.inf
conv_list = []
m = self.original
if isinstance(self.original, pd.DataFrame):
ipfn_method = self.ipfn_df
elif isinstance(self.original, np.ndarray):
ipfn_method = self.ipfn_np
self.original = self.original.astype('float64')  # dtype literal lost in extraction; float64 assumed
else:
print('Data input instance not recognized')  # message literal lost in extraction; placeholder
sys.exit(0)
while ((i <= self.max_itr and conv > self.conv_rate) and
(i <= self.max_itr and abs(conv - old_conv) > self.rate_tolerance)):
old_conv = conv
m, conv = ipfn_method(m, self.aggregates, self.dimensions, self.weight_col)
conv_list.append(conv)
i += 1
converged = 1
if i <= self.max_itr:
if not conv > self.conv_rate:
print('ipfn converged: convergence rate below threshold')  # message literal lost in extraction; placeholder
elif not abs(conv - old_conv) > self.rate_tolerance:
print('ipfn converged: convergence rate not updating or below rate tolerance')  # placeholder
else:
print('Maximum iterations reached')  # placeholder
converged = 0
if self.verbose == 0:
return m
elif self.verbose == 1:
return m, converged
elif self.verbose == 2:
return m, converged, pd.DataFrame({'iteration': range(i), 'conv': conv_list}).set_index('iteration')  # key literals lost in extraction; reconstructed
else:
print('verbose must be 0, 1 or 2')  # message literal lost in extraction; placeholder
sys.exit(0) | Runs the ipfn algorithm. Automatically detects whether it is working with a numpy ndarray or a pandas DataFrame. |
21,716 | def content():
table_of_contents = m.Message()
message = m.Message()
_create_section_header(
message,
table_of_contents,
'section-id',  # section-id literal lost in extraction; placeholder
tr(),
heading_level=1)
_create_section_header(
message,
table_of_contents,
'section-id',  # section-id literal lost in extraction; placeholder
tr(),
heading_level=2)
message.add(m.Paragraph(definitions.messages.disclaimer()))
_create_section_header(
message,
table_of_contents,
'section-id',  # section-id literal lost in extraction; placeholder
tr(),
heading_level=2)
bullets = m.BulletedList()
for item in definitions.limitations():
bullets.add(item)
message.add(bullets)
_create_section_header(
message,
table_of_contents,
'section-id',  # section-id literal lost in extraction; placeholder
tr(),
heading_level=1)
last_group = None
table = None
# Dict-key and string literals in this block were lost in extraction; the
# keys below ('group', 'key', 'description', 'citations', 'text', 'link')
# and the style/class strings are reconstructed placeholders.
for key, value in list(definitions.concepts.items()):
current_group = value['group']
if current_group != last_group:
if last_group is not None:
message.add(table)
_create_section_header(
message,
table_of_contents,
current_group.replace(' ', '-'),
current_group,
heading_level=2)
table = _start_glossary_table(current_group)
last_group = current_group
row = m.Row()
term = value['key'].replace('_', ' ').title()
description = m.Message(value['description'])
for citation in value['citations']:
if citation['text'] in [None, '']:
continue
if citation['link'] in [None, '']:
description.add(m.Paragraph(citation['text']))
else:
description.add(m.Paragraph(
m.Link(citation['link'], citation['text'])))
row.add(m.Cell(term))
row.add(m.Cell(description))
url = _definition_icon_url(value)
if url:
row.add(m.Cell(m.Image(url, **MEDIUM_ICON_STYLE)))
else:
row.add(m.Cell())
table.add(row)
# A statement ending in ')))' was truncated here during extraction.
table = m.Table(style_class='table')  # style literal lost in extraction; placeholder
row = m.Row()
row.add(m.Cell(tr(), header=True))
row.add(m.Cell(tr(), header=True))
table.add(row)
for item in html_frame_elements:
row = m.Row()
row.add(m.Cell(item['name']))  # key literals lost in extraction; placeholders
row.add(m.Cell(item['description']))
table.add(row)
message.add(table)
_create_section_header(
message,
table_of_contents,
'section-id',  # section-id literal lost in extraction; placeholder
tr(),
heading_level=1)
message.add(developer_help())
full_message = m.Message()
style = SECTION_STYLE
style['element_id'] = 'help'  # key and value literals lost in extraction; placeholders
header = m.Heading(tr(), **style)
full_message.add(header)
full_message.add(table_of_contents)
full_message.add(message)
return full_message | Helper method that returns just the content.
This method was added so that the text could be reused in the
dock_help module.
.. versionadded:: 4.0.0
:returns: A message object without brand element.
:rtype: safe.messaging.message.Message |
21,717 | def patched(attrs, updates):
orig = patch(attrs, updates.items())
try:
yield orig
finally:
patch(attrs, orig.items()) | A context in which some attributes temporarily have a modified value. |
21,718 | def add_cnt_64bit(self,oid,value,label=None):
# type-string literal lost in extraction; 'Counter64' is an assumed placeholder
self.add_oid_entry(oid, 'Counter64', int(value) % 18446744073709551615, label=label) | Short helper to add a 64-bit counter value to the MIB subtree. |
21,719 | def _write_jpy_config(target_dir=None, install_dir=None):
if not target_dir:
target_dir = _build_dir()
# The option-string literals in this argument list were lost in extraction;
# the names below are placeholders (jvm_dll_file and jdk_home_dir are
# presumably module-level globals).
args = [sys.executable,
os.path.join(target_dir, 'jpyutil.py'),
'--jvm_dll', jvm_dll_file,
'--java_home', jdk_home_dir,
'--log_level', 'DEBUG',
'--req_java',
]
if install_dir:
args.append('--install_dir')
args.append(install_dir)
log.info('Writing jpy configuration: target_dir=%s, install_dir=%s' % (target_dir, install_dir))  # message literal lost in extraction; placeholder
return subprocess.call(args) | Write out a well-formed jpyconfig.properties file for easier Java
integration in a given location. |
21,720 | def _new_output_char(self, char):
self.text.config(state=tkinter.NORMAL)
self.text.insert("end", char)
self.text.see("end")
self.text.config(state=tkinter.DISABLED) | insert in text field |
21,721 | def process_upload(photo_list, form, parent_object, user, status=''):
status += "beginning upload processing. Gathering and normalizing fields....<br>"
for upload_file in photo_list:
upload_file.name = upload_file.name.lower().replace(' ', '_')  # replace args lost in extraction; space-to-underscore assumed
upload_name = upload_file.name
status += 'Processing {}...<br>'.format(upload_name)  # message literal lost in extraction; placeholder
if upload_name.endswith('.jpg') or upload_name.endswith('.jpeg'):  # extension literals inferred from "Found jpg" below
status += "Found jpg. Attempting to save... <br>"
try:
dupe = ArticleImage.objects.get(photo__contains=upload_name, article=parent_object)
except ObjectDoesNotExist:
dupe = None
if not dupe:
try:
upload = ArticleImage(
article=parent_object,
photo=upload_file
)
upload.save()
status += "Saved and uploaded jpg."
except Exception as error:
status += "Error saving image: {}".format(error)
time.sleep(1)
return status | Helper function that actually processes and saves the upload(s).
Segregated out for readability. |
21,722 | def get_urls(self):
urls = self.get_subfields("856", "u", i1="4", i2="2")
return map(lambda x: x.replace("&amp;", "&"), urls) | Content of field ``856u42``. Typically a URL pointing to the producer's
homepage.
Returns:
list: List of URLs defined by producer. |
21,723 | def set_console(self, console):
self.console = console
# command literal lost in extraction; the hypervisor command below is a reconstruction
yield from self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=self.console)) | Sets the TCP console port.
:param console: console port (integer) |
21,724 | def send_message(
self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
payload = str(request) + self.delimiter
self.socket.send(payload.encode(self.encoding))
response = bytes()
decoded = None
while True:
response += self.socket.recv(1024)
decoded = response.decode(self.encoding)
if len(decoded) < self.delimiter_length:
continue
elif decoded[-self.delimiter_length :] == self.delimiter:
break
assert decoded is not None
return Response(decoded[: -self.delimiter_length]) | Transport the message to the server and return the response.
Args:
request: The JSON-RPC request string.
response_expected: Whether the request expects a response.
Returns:
A Response object. |
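A stand-alone sketch of the delimiter-framing loop above, fed from fake socket chunks instead of a live connection (values are made up):

delimiter = "\n"
chunks = [b'{"result"', b': 42}\n']  # simulated socket.recv() pieces
response = bytes()
for chunk in chunks:
    response += chunk
    decoded = response.decode("utf-8")
    if decoded[-len(delimiter):] == delimiter:
        break
print(decoded[:-len(delimiter)])  # {"result": 42}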
21,725 | def switch_off(self, *args):
if self.off_check(*args):
return self._switch.switch(False)
else:
return False | Sets the state of the switch to False if off_check() returns True,
given the arguments provided in args.
:param args: variable-length positional arguments passed through to off_check()
:return: Boolean. Returns True if the operation is successful |
21,726 | def run_command(command):
process = subprocess.Popen(
shlex.split(command),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
output, stderr = [stream.decode(sys.getdefaultencoding(), 'ignore')
for stream in process.communicate()]
if process.returncode != 0:
raise AirflowConfigException(
"Cannot execute {}. Error code is: {}. Output: {}, Stderr: {}"
.format(command, process.returncode, output, stderr)
)
return output | Runs command and returns stdout |
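The shlex.split call is what lets a quoted argument survive as a single element; a quick stdlib demonstration:

import shlex
print(shlex.split('grep -r "hello world" .'))
# ['grep', '-r', 'hello world', '.'] -- the quoted phrase stays one argument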
21,727 | def eventFilter( self, object, event ):
if ( event.type() == event.KeyPress ):
if ( event.key() in (Qt.Key_Return, Qt.Key_Enter) ):
self.addQuery()
return True
return False | Filters the object for particular events.
:param object | <QObject>
event | <QEvent>
:return <bool> | consumed |
21,728 | def show_hydrophobic(self):
hydroph = self.plcomplex.hydrophobic_contacts
if not len(hydroph.bs_ids) == 0:
# Selection-name and atom-selection literals in this function were lost in
# extraction; the strings below are reconstructed placeholders.
self.select_by_ids('Hydrophobic-P', hydroph.bs_ids, restrict=self.protname)
self.select_by_ids('Hydrophobic-L', hydroph.lig_ids, restrict=self.ligname)
for i in hydroph.pairs_ids:
cmd.select('tmp_bs', 'id %i & %s' % (i[0], self.protname))
cmd.select('tmp_lig', 'id %i & %s' % (i[1], self.ligname))
cmd.distance('Hydrophobic', 'tmp_bs', 'tmp_lig')
if self.object_exists('Hydrophobic'):
cmd.set('dash_gap', 0.5, 'Hydrophobic')
cmd.set('dash_color', 'grey50', 'Hydrophobic')
else:
cmd.select('Hydrophobic-P', 'None') | Visualizes hydrophobic contacts. |
21,729 | def changed(self, message=None, *args):
if message is not None:
self.logger.debug('%s: %s', self._repr(), message % args)  # format literal lost in extraction; placeholder
self.logger.debug('%s: changed', self._repr())  # format literal lost in extraction; placeholder
if self.parent is not None:
self.parent.changed()
elif isinstance(self, Mutable):
super(TrackedObject, self).changed() | Marks the object as changed.
If a `parent` attribute is set, the `changed()` method on the parent
will be called, propagating the change notification up the chain.
The message (if provided) will be debug logged. |
21,730 | def allreduce(self, x, mesh_axes, reduction_fn_string):
if not mesh_axes:
return x
x = x.to_laid_out_tensor()
if reduction_fn_string == "SUM":
group_assignment = self._create_group_assignment(mesh_axes)
group_size = len(group_assignment[0])
tf_in = x.one_slice
dtype = tf_in.dtype
if dtype == tf.float32:
cast_to_float32 = False
elif dtype == tf.bfloat16:
cast_to_float32 = (
group_size > self._allreduce_in_bfloat16_max_group_size)
else:
tf.logging.info("Casting %s to float32 for allreduce" % tf_in.dtype)
cast_to_float32 = True
if cast_to_float32:
tf_in = tf.cast(tf_in, tf.float32)
tf_out = tpu_ops.cross_replica_sum(tf_in, group_assignment)
if cast_to_float32:
tf_out = tf.cast(tf_out, dtype)
return self.LaidOutTensor([tf_out])
else:
for axis in mesh_axes:
x = self.allconcat(x, axis, 0, stack=True)
x = self.LaidOutTensor(
[mtf.reduction_fn(reduction_fn_string)(x.one_slice, 0)])
return x | Grouped allreduce, (summed across the given dimensions).
Args:
x: a LaidOutTensor
mesh_axes: a list of integers
reduction_fn_string: "SUM"
Returns:
a LaidOutTensor
Raises:
ValueError: if the reduction is not yet implemented. |
21,731 | def intersection(self, *args):
if type(self.obj[0]) is int:
a = self.obj
else:
a = tuple(self.obj[0])
setobj = set(a)
for i, v in enumerate(args):
setobj = setobj & set(args[i])
return self._wrap(list(setobj)) | Produce an array that contains every item shared between all the
passed-in arrays. |
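The same idea with plain builtin sets (illustration values):

a, b, c = [1, 2, 3, 4], [2, 3, 5], [3, 2, 9]
result = set(a) & set(b) & set(c)
print(result)  # {2, 3}: items shared by every input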
21,732 | def sample_storage_size(self):
try:
coll_stats = self.database.command('collstats', 'samples')  # command/collection literals lost in extraction; placeholders
sample_storage_size = coll_stats['size'] / 1024.0 / 1024.0  # 'size' key assumed
return sample_storage_size
except pymongo.errors.OperationFailure:
return 0 | Get the storage size of the samples storage collection. |
21,733 | def place_oceans_at_map_borders(world):
ocean_border = int(min(30, max(world.width / 5, world.height / 5)))
def place_ocean(x, y, i):
# layer-key literal lost in extraction; 'elevation' inferred from the docstring
world.layers['elevation'].data[y, x] = \
(world.layers['elevation'].data[y, x] * i) / ocean_border
for x in range(world.width):
for i in range(ocean_border):
place_ocean(x, i, i)
place_ocean(x, world.height - i - 1, i)
for y in range(world.height):
for i in range(ocean_border):
place_ocean(i, y, i)
place_ocean(world.width - i - 1, y, i) | Lower the elevation near the border of the map |
21,734 | def move(self, source_path, destination_path):
self.copy(source_path, destination_path)
self.remove(source_path) | Rename/move an object from one GCS location to another. |
21,735 | def _get_dS2S(self, imt_per):
if imt_per == 0:
dS2S = 0.05
elif 0 < imt_per < 0.15:
dS2S = self._interp_function(-0.15, 0.05, 0.15, 0, imt_per)
elif 0.15 <= imt_per < 0.45:
dS2S = self._interp_function(0.4, -0.15, 0.45, 0.15, imt_per)
elif 0.45 <= imt_per < 3.2:
dS2S = 0.4
elif 3.2 <= imt_per < 5:
dS2S = self._interp_function(0.08, 0.4, 5, 3.2, imt_per)
elif 5 <= imt_per <= 10:
dS2S = 0.08
else:
dS2S = 0
return dS2S | Table 4 of 2013 report |
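_interp_function is presumably a linear interpolation between (x0, y0) and (x1, y1); a minimal stand-alone equivalent for one of the branches above:

def interp(y1, y0, x1, x0, x):
    # straight-line interpolation, matching the argument order used above
    return y0 + (y1 - y0) * (x - x0) / (x1 - x0)

print(interp(0.4, -0.15, 0.45, 0.15, 0.30))  # 0.125, midway up the ramp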
21,736 | def init_environment():
base_path = os.path.abspath(os.path.dirname(__file__))
env_path = '{}/.env'.format(base_path)  # path-template literal lost in extraction; '.env' inferred from the docstring
if os.path.exists(env_path):
with open(env_path) as f:
lines = f.readlines()
for line in lines:
var = line.strip().split('=')  # separator literal lost in extraction; '=' inferred from the VAR=VALUE format
if len(var) == 2:
os.environ[var[0]] = var[1] | Make variables assigned in .env available via
os.environ.get('VAR_NAME') |
21,737 | def one_thread_per_process():
try:
import mkl
is_mkl = True
except ImportError:
is_mkl = False
if is_mkl:
n_threads = mkl.get_max_threads()
mkl.set_num_threads(1)
try:
yield
finally:
mkl.set_num_threads(n_threads)
else:
yield | Return a context manager where only one thread is allocated to a process.
This function is intended to be used as a with statement like::
>>> with one_thread_per_process():
... do_something() # one thread per process
Notes:
This function only works when MKL (Intel Math Kernel Library)
is installed and used in, for example, NumPy and SciPy.
Otherwise this function does nothing. |
21,738 | def b_operator(self, P):
A, B, Q, R, beta = self.A, self.B, self.Q, self.R, self.beta
S1 = Q + beta * dot(B.T, dot(P, B))
S2 = beta * dot(B.T, dot(P, A))
S3 = beta * dot(A.T, dot(P, A))
F = solve(S1, S2) if not self.pure_forecasting else np.zeros(
(self.k, self.n))
new_P = R - dot(S2.T, F) + S3
return F, new_P | r"""
The B operator, mapping P into
.. math::
B(P) := R - \beta^2 A'PB(Q + \beta B'PB)^{-1}B'PA + \beta A'PA
and also returning
.. math::
F := (Q + \beta B'PB)^{-1} \beta B'PA
Parameters
----------
P : array_like(float, ndim=2)
A matrix that should be n x n
Returns
-------
F : array_like(float, ndim=2)
The F matrix as defined above
new_p : array_like(float, ndim=2)
The matrix P after applying the B operator |
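A worked numeric example of the B(P) update with small made-up matrices, mirroring the S1/S2/S3 steps in the code:

import numpy as np
from numpy.linalg import solve

A = np.array([[1.0, 0.1], [0.0, 1.0]])
B = np.array([[0.0], [1.0]])
Q = np.array([[1.0]])
R = np.eye(2)
beta, P = 0.95, np.eye(2)

S1 = Q + beta * B.T @ P @ B   # Q + beta B'PB
S2 = beta * B.T @ P @ A       # beta B'PA
S3 = beta * A.T @ P @ A       # beta A'PA
F = solve(S1, S2)             # (Q + beta B'PB)^{-1} beta B'PA
new_P = R - S2.T @ F + S3
print(F.shape, new_P.shape)   # (1, 2) (2, 2)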
21,739 | def is_ext_pack_usable(self, name):
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
usable = self._call("isExtPackUsable",
in_p=[name])
return usable | Check if the given extension pack is loaded and usable.
in name of type str
The name of the extension pack to check for.
return usable of type bool
Is the given extension pack loaded and usable. |
21,740 | def register_widgets():
Page.create_content_type(
ApplicationWidget, APPLICATIONS=settings.APPLICATION_CHOICES)
for _optgroup, _widgets in six.iteritems(settings.WIDGETS):
optgroup = _optgroup if _optgroup != 'default' else None  # sentinel literal lost in extraction; placeholder
for widget in _widgets:
kwargs = {'optgroup': optgroup}  # key literal lost in extraction; 'optgroup' assumed
if isinstance(widget, six.string_types):
try:
WidgetCls = get_class_from_string(widget)
except:
exc_info = sys.exc_info()
raise six.reraise(*exc_info)
elif isinstance(widget, tuple):
try:
WidgetCls = get_class_from_string(widget[0])
if len(widget) > 1:
kwargs.update(widget[1])
except Exception as e:
raise Exception('Cannot register widget %s: %s' % (widget, e))  # message literal lost; 'mod' was undefined in the extracted code
else:
WidgetCls = widget
Page.create_content_type(
WidgetCls, **kwargs) | Register all collected widgets from settings
WIDGETS = [('mymodule.models.MyWidget', {'mykwargs': 'mykwarg'})]
WIDGETS = ['mymodule.models.MyWidget', MyClass] |
21,741 | def mkdir(self, req, parent, name, mode):
self.reply_err(req, errno.EROFS) | Create a directory
Valid replies:
reply_entry
reply_err |
21,742 | def addLayer(self,layer,z=-1):
if not isinstance(layer,Layer):
raise TypeError("layer must be an instance of Layer!")
if z==-1:
self.layers.append(layer)
else:
self.layers.insert(z,layer) | Adds a new layer to the stack, optionally at the specified z-value.
``layer`` must be an instance of Layer or subclasses.
``z`` can be used to override the index of the layer in the stack. Defaults to ``-1`` for appending. |
21,743 | def future(self, rev=None):
if rev is not None:
self.seek(rev)
return WindowDictFutureView(self._future) | Return a Mapping of items after the given revision.
Default revision is the last one looked up. |
21,744 | def get_design_run_results(self, data_view_id, run_uuid):
url = routes.get_data_view_design_results(data_view_id, run_uuid)
response = self._get(url).json()
result = response["data"]
return DesignResults(
best_materials=result.get("best_material_results"),
next_experiments=result.get("next_experiment_results")
) | Retrieves the results of an existing design run
:param data_view_id: The ID number of the data view to which the
run belongs, as a string
:type data_view_id: str
:param run_uuid: The UUID of the design run to retrieve results from
:type run_uuid: str
:return: A :class:`DesignResults` object |
21,745 | def user_parse(data):
# All key literals in this generator were lost in extraction; the names
# below are reconstructed placeholders.
user_ = data.get('user', {})
yield 'id', data.get('user_id') or user_.get('id')
yield 'username', user_.get('profile', {}).get('username')
first_name, _, last_name = data.get(
'user', {}).get('name', '').partition(' ')
yield 'first_name', first_name
yield 'last_name', last_name | Parse information from the provider. |
21,746 | def save_as_plt(self, fname, pixel_array=None, vmin=None, vmax=None,
cmap=None, format=None, origin=None):
from matplotlib.backends.backend_agg \
import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from pylab import cm
if pixel_array is None:
pixel_array = self.numpy
if cmap is None:
cmap = cm.bone
fig = Figure(figsize=pixel_array.shape[::-1], dpi=1, frameon=False)
canvas = FigureCanvas(fig)
fig.figimage(pixel_array, cmap=cmap, vmin=vmin,
vmax=vmax, origin=origin)
fig.savefig(fname, dpi=1, format=format)
return True | This method saves the image from a numpy array using matplotlib
:param fname: Location and name of the image file to be saved.
:param pixel_array: Numpy pixel array, i.e. ``numpy()`` return value
:param vmin: matplotlib vmin
:param vmax: matplotlib vmax
:param cmap: matplotlib color map
:param format: matplotlib format
:param origin: matplotlib origin
This method will return True if successful |
21,747 | def tell(self):
if self._shifts:
t = self._file.tell()
if t == self._shifts[0]:
return 0
elif t == self._shifts[-1]:
return len(self._shifts) - 1
elif t in self._shifts:
return bisect_left(self._shifts, t)
else:
return bisect_left(self._shifts, t) - 1
raise self._implement_error | :return: number of records processed from the original file |
21,748 | def identify_phase(T, P, Tm=None, Tb=None, Tc=None, Psat=None):
# Return-value string literals were stripped in extraction (leaving the
# residue 'rslgl'); they are restored here following the docstring's rules.
if Tm and T <= Tm:
return 's'
elif Tc and T >= Tc:
return 'g'
elif Psat:
if P <= Psat:
return 'g'
elif P > Psat:
return 'l'
elif Tb:
if 9E4 < P < 1.1E5:
if T < Tb:
return 'l'
else:
return 'g'
elif P > 1.1E5 and T <= Tb:
return 'l'
else:
return None
else:
return None | Determines the phase of a one-species chemical system according to
basic rules, using whatever information is available. Considers only the
phases liquid, solid, and gas; does not consider two-phase
scenarios, such as occur between phase boundaries.
* If the melting temperature is known and the temperature is under or equal
to it, consider it a solid.
* If the critical temperature is known and the temperature is greater or
equal to it, consider it a gas.
* If the vapor pressure at `T` is known and the pressure is under or equal
to it, consider it a gas. If the pressure is greater than the vapor
pressure, consider it a liquid.
* If the melting temperature, critical temperature, and vapor pressure are
not known, attempt to use the boiling point to provide phase information.
If the pressure is between 90 kPa and 110 kPa (approximately normal),
consider it a liquid if it is under the boiling temperature and a gas if
above the boiling temperature.
* If the pressure is above 110 kPa and the boiling temperature is known,
consider it a liquid if the temperature is under the boiling temperature.
* Return None otherwise.
Parameters
----------
T : float
Temperature, [K]
P : float
Pressure, [Pa]
Tm : float, optional
Normal melting temperature, [K]
Tb : float, optional
Normal boiling point, [K]
Tc : float, optional
Critical temperature, [K]
Psat : float, optional
Vapor pressure of the fluid at `T`, [Pa]
Returns
-------
phase : str
Either 's', 'l', 'g', or None if the phase cannot be determined
Notes
-----
No special attention is paid to any phase transition. For the case where
the melting point is not provided, the possibility of the fluid being solid
is simply ignored.
Examples
--------
>>> identify_phase(T=280, P=101325, Tm=273.15, Psat=991)
'l' |
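Two more calls exercising the boiling-point fallback, assuming the function above is in scope:

print(identify_phase(T=350, P=101325, Tb=373.15))  # 'l': ~1 atm and below Tb
print(identify_phase(T=400, P=101325, Tb=373.15))  # 'g': ~1 atm and above Tb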
21,749 | def string(self):
if isinstance(self.expr, TexCmd) and len(self.expr.args) == 1:
return self.expr.args[0].value | This is valid if and only if
1. the expression is a :class:`.TexCmd` AND
2. the command has only one argument.
:rtype: Union[None,str]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''\textbf{Hello}''')
>>> soup.textbf.string
'Hello'
>>> soup.textbf.string = 'Hello World'
>>> soup.textbf.string
'Hello World'
>>> soup.textbf
\textbf{Hello World} |
21,750 | def reference_index(self, ref_id):
try:
indexes = range(self.reference_count())
return next(i for i in indexes if self.reference_id(i) == ref_id)
except StopIteration as e:
raise ReferenceNotFoundError("ID: " + ref_id) from e | Return the first reference with this ID. |
21,751 | def untrace_class(cls):
for name, method in inspect.getmembers(cls, inspect.ismethod):
untrace_method(cls, method)
for name, function in inspect.getmembers(cls, inspect.isfunction):
untrace_method(cls, function)
for name, accessor in inspect.getmembers(cls, lambda x: type(x) is property):
untrace_property(cls, accessor)
set_untraced(cls)
return True | Untraces given class.
:param cls: Class to untrace.
:type cls: object
:return: Definition success.
:rtype: bool |
21,752 | def _get_color(self, age):
if age == self.tree.age:
return self.leaf_color
color = self.stem_color
tree = self.tree
if len(color) == 3:
return color
diff = [color[i+3]-color[i] for i in range(3)]
per_age = [diff[i]/(tree.age-1) for i in range(3)]
return tuple([int(color[i]+per_age[i]*age) for i in range(3)]) | Get the fill color depending on age.
Args:
age (int): The age of the branch/es
Returns:
tuple: (r, g, b) |
21,753 | def find_ge(self, dt):
i = bisect_left(self.dates, dt)
if i != len(self.dates):
return i
raise RightOutOfBound | Building block of all searches. Find the index
corresponding to the leftmost value greater or equal to *dt*.
If *dt* is greater than the
:func:`dynts.TimeSeries.end` a :class:`dynts.exceptions.RightOutOfBound`
exception will raise.
*dt* must be a python datetime.date instance. |
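A stand-alone sketch of the same leftmost greater-or-equal search with bisect_left (dates are made up):

import datetime as dt
from bisect import bisect_left

dates = [dt.date(2020, 1, 1), dt.date(2020, 1, 3), dt.date(2020, 1, 7)]
i = bisect_left(dates, dt.date(2020, 1, 4))
print(i, dates[i])  # 2 2020-01-07: first date >= Jan 4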
21,754 | def get_refinement_options(self):
domain = self.get_domain()
for upper_value in self.upper:
for suc in domain.successors(upper_value):
yield suc | Returns possible specializations for the upper values in the taxonomy |
21,755 | def data(self):
if self.detached:
return None
bio = Membio()
if not libcrypto.CMS_verify(self.ptr, None, None, None, bio.bio,
Flags.NO_VERIFY):
raise CMSError("extract data")
return str(bio) | Returns signed data if present in the message |
21,756 | def set_cpus(self, cpus=0):
from multiprocessing import cpu_count
max_cpus = cpu_count()
if not 0 < cpus < max_cpus:
cpus = max_cpus
self.add_option("--cpus", default=cpus, type="int",
help="Number of CPUs to use, 0=unlimited [default: %default]") | Add --cpus options to specify how many threads to use. |
21,757 | def xinfo_help(self):
fut = self.execute(b'XINFO', b'HELP')  # byte-string literals lost in extraction; reconstructed from the docstring
return wait_convert(fut, lambda l: b'\n'.join(l)) | Retrieve help regarding the ``XINFO`` sub-commands |
21,758 | def lists(self, uid=0, **kwargs):
lists = Lists(self.base_uri, self.auth)
return self.get_subresource_instances(uid, instance=lists,
resource="lists", params=kwargs) | Returns a list of :class:`List` objects (lists which Contact belongs to) and a pager dict.
:Example:
lists, pager = client.contacts.lists(uid=1901010)
:param int uid: The unique id of the Contact to update. Required.
:param int page: Fetch specified results page. Default=1
:param int limit: How many results on page. Default=10 |
21,759 | def device_from_request(request):
from yacms.conf import settings
try:
for (device, _) in settings.DEVICE_USER_AGENTS:
if device == request.COOKIES["yacms-device"]:
return device
except KeyError:
try:
user_agent = request.META["HTTP_USER_AGENT"].lower()
except KeyError:
pass
else:
try:
user_agent = user_agent.decode("utf-8")
for (device, ua_strings) in settings.DEVICE_USER_AGENTS:
for ua_string in ua_strings:
if ua_string.lower() in user_agent:
return device
except (AttributeError, UnicodeDecodeError, UnicodeEncodeError):
pass
return "" | Determine's the device name from the request by first looking for an
overridding cookie, and if not found then matching the user agent.
Used at both the template level for choosing the template to load and
also at the cache level as a cache key prefix. |
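A minimal stand-alone version of the user-agent matching fallback (the device table and UA string are made up):

DEVICE_USER_AGENTS = [("mobile", ("iphone", "android")), ("tablet", ("ipad",))]
user_agent = "Mozilla/5.0 (iPhone; CPU iPhone OS 15_0)".lower()
device = ""
for name, ua_strings in DEVICE_USER_AGENTS:
    if any(s in user_agent for s in ua_strings):
        device = name
        break
print(device)  # mobile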
21,760 | def options_response(env):
status_headers = StatusAndHeaders('200 Ok', [  # header literals lost in extraction; typical OPTIONS values assumed
('Content-Type', 'text/plain'),
('Content-Length', '0'),
])
response = WbResponse(status_headers)
response.add_access_control_headers(env=env)
return response | Construct WbResponse for OPTIONS based on the WSGI env dictionary
:param dict env: The WSGI environment dictionary
:return: The WBResponse for the options request
:rtype: WbResponse |
21,761 | def _read_mode_acopt(self, size, kind):
temp = self._read_unpack(size)
algo = chksum_opt.get(temp)
data = dict(
kind=kind,
length=size,
ac=algo,
)
return data | Read Alternate Checksum Request option.
Positional arguments:
size - int, length of option
kind - int, 14 (Alt-Chksum Request)
Returns:
* dict -- extracted Alternate Checksum Request (CHKSUM-REQ) option
Structure of TCP CHKSUM-REQ [RFC 1146][RFC 6247]:
+----------+----------+----------+
| Kind=14 | Length=3 | chksum |
+----------+----------+----------+
Octets Bits Name Description
0 0 tcp.chksumreq.kind Kind (14)
1 8 tcp.chksumreq.length Length (3)
2 16 tcp.chksumreq.ac Checksum Algorithm |
21,762 | def is_known_type(self, type_name):
type_name = str(type_name)
if type_name in self.known_types:
return True
return False | Check if type is known to the type system.
Returns:
bool: True if the type is a known instantiated simple type, False otherwise |
21,763 | def build_verified_certificate_chain(self, received_certificate_chain: List[Certificate]) -> List[Certificate]:
if not self._is_certificate_chain_order_valid(received_certificate_chain):
raise InvalidCertificateChainOrderError()
verified_certificate_chain = []
anchor_cert = None
for cert in received_certificate_chain:
anchor_cert = self._get_certificate_with_subject(cert.issuer)
verified_certificate_chain.append(cert)
if anchor_cert:
verified_certificate_chain.append(anchor_cert)
break
if anchor_cert is None:
raise AnchorCertificateNotInTrustStoreError()
return verified_certificate_chain | Try to figure out the verified chain by finding the anchor/root CA the received chain chains up to in the
trust store.
This will not clean the certificate chain if additional/invalid certificates were sent and the signatures and
fields (notBefore, etc.) are not verified. |
21,764 | def _multi_call(function, contentkey, *args, **kwargs):
ret = function(*args, **kwargs)
position = ret.get('position')  # key literal lost in extraction; 'position' per the docstring
while position:
more = function(*args, position=position, **kwargs)
ret[contentkey].extend(more[contentkey])
position = more.get('position')
return ret.get(contentkey) | Retrieve full list of values for the contentkey from a boto3 ApiGateway
client function that may be paged via 'position' |
21,765 | def render_search(self, ctx, data):
if self.username is None:
return
translator = self._getViewerPrivateApplication()
searchAggregator = translator.getPageComponents().searchAggregator
if searchAggregator is None or not searchAggregator.providers():
return
# slot-name literal lost in extraction; 'search-action' is a placeholder
return ctx.tag.fillSlots(
'search-action', translator.linkTo(searchAggregator.storeID)) | Render some UI for performing searches, if we know about a search
aggregator. |
21,766 | def event_transition(self, event_cls, event_type,
ion_type=None, value=None, annotations=None, depth=None, whence=None):
if annotations is None:
annotations = self.annotations
if annotations is None:
annotations = ()
if not (event_type is IonEventType.CONTAINER_START) and \
annotations and (self.limit - self.queue.position) != 0:
raise IonException()
if depth is None:
depth = self.depth
if whence is None:
whence = self.whence
return Transition(
event_cls(event_type, ion_type, value, self.field_name, annotations, depth),
whence
) | Returns an ion event event_transition that yields to another co-routine.
If ``annotations`` is not specified, then the ``annotations`` are the annotations of this
context.
If ``depth`` is not specified, then the ``depth`` is depth of this context.
If ``whence`` is not specified, then ``whence`` is the whence of this context. |
21,767 | def p_members(self, p):
if len(p) == 1:
p[0] = list()
else:
p[1].append(p[2])
p[0] = p[1] | members :
| members member VALUE_SEPARATOR
| members member |
21,768 | def top_charts(self):
response = self._call(mc_calls.BrowseTopChart)
top_charts = response.body
return top_charts | Get a listing of the default top charts. |
21,769 | def get_db_mutations(mut_db_path, gene_list, res_stop_codons):
try:
drugfile = open(mut_db_path, "r")
except:
sys.exit("Wrong path: %s"%(mut_db_path))
known_mutations = dict()
drug_genes = dict()
known_stop_codon = dict()
indelflag = False
stopcodonflag = False
for line in drugfile:
if line.startswith("
if "indel" in line.lower():
indelflag = True
elif "stop codon" in line.lower():
stopcodonflag = True
else:
stopcodonflag = False
continue
if line.strip() == "":
continue
mutation = [data.strip() for data in line.strip().split("\t")]
assert len(mutation) == 9, "mutation overview file (%s) must have 9 columns, %s"%(mut_db_path, mutation)
gene_ID = mutation[0]
if gene_ID in gene_list:
gene_name = mutation[1]
no_of_mut = int(mutation[2])
mut_pos = int(mutation[3])
ref_codon = mutation[4]
ref_aa = mutation[5]
alt_aa = mutation[6].split(",")
res_drug = mutation[7].replace("\t", " ")
pmid = mutation[8].split(",")
if ("*" in alt_aa and res_stop_codons != ) or (res_stop_codons == and stopcodonflag == True):
if gene_ID not in known_stop_codon:
known_stop_codon[gene_ID] = {"pos": [], "drug": res_drug}
known_stop_codon[gene_ID]["pos"].append(mut_pos)
drug_lst = res_drug.split(",")
for drug in drug_lst:
drug = drug.upper()
if drug not in drug_genes:
drug_genes[drug] = []
if gene_ID not in drug_genes[drug]:
drug_genes[drug].append(gene_ID)
mut_info = dict()
for i in range(len(alt_aa)):
try:
mut_info[alt_aa[i]] = {"gene_name": gene_name, "drug": res_drug, "pmid": pmid[i]}
except IndexError:
mut_info[alt_aa[i]] = {"gene_name": gene_name, "drug": res_drug, "pmid": "-"}
if no_of_mut != 1:
print("More than one mutation is needed, this is not implemented", mutation)
if gene_ID not in known_mutations:
known_mutations[gene_ID] = {"sub" : dict(), "ins" : dict(), "del" : dict()}
if indelflag == False:
mutation_type = "sub"
else:
mutation_type = ref_aa
if mut_pos not in known_mutations[gene_ID][mutation_type]:
known_mutations[gene_ID][mutation_type][mut_pos] = dict()
for aa in alt_aa:
known_mutations[gene_ID][mutation_type][mut_pos][aa] = mut_info[aa]
drugfile.close()
for gene in gene_list:
if gene not in known_mutations:
known_mutations[gene] = {"sub" : dict(), "ins" : dict(), "del" : dict()}
return known_mutations, drug_genes, known_stop_codon | This function opens the file resistenss-overview.txt, and reads the
content into a dict of dicts. The dict will contain information about
all known mutations given in the database. This dict is returned. |
21,770 | def com_google_fonts_check_metadata_canonical_filename(font_metadata,
canonical_filename,
is_variable_font):
if is_variable_font:
valid_varfont_suffixes = [
("Roman-VF", "Regular"),
("Italic-VF", "Italic"),
]
for valid_suffix, style in valid_varfont_suffixes:
if style in canonical_filename:
canonical_filename = valid_suffix.join(canonical_filename.split(style))
if canonical_filename != font_metadata.filename:
yield FAIL, ("METADATA.pb: filename field (\"{}\")"
" does not match "
"canonical name \"{}\".".format(font_metadata.filename,
canonical_filename))
else:
yield PASS, "Filename in METADATA.pb is set canonically." | METADATA.pb: Filename is set canonically? |
21,771 | def un_camel_case(text):
r
revtext = text[::-1]
pieces = []
while revtext:
match = CAMEL_RE.match(revtext)
if match:
pieces.append(match.group(1))
revtext = revtext[match.end():]
else:
pieces.append(revtext)
revtext = ''
# join/strip/replace literals lost in extraction; reconstructed to be
# consistent with the doctests in the docstring
revstr = ' '.join(piece.strip(' _') for piece in pieces
if piece.strip(' _'))
return revstr[::-1].replace('- ', '-') |
Splits apart words that are written in CamelCase.
Bugs:
- Non-ASCII characters are treated as lowercase letters, even if they are
actually capital letters.
Examples:
>>> un_camel_case('1984ZXSpectrumGames')
'1984 ZX Spectrum Games'
>>> un_camel_case('aaAa aaAaA 0aA AAAa!AAA')
'aa Aa aa Aa A 0a A AA Aa! AAA'
>>> un_camel_case('MotörHead')
'Mot\xf6r Head'
>>> un_camel_case('MSWindows3.11ForWorkgroups')
'MS Windows 3.11 For Workgroups'
This should not significantly affect text that is not camel-cased:
>>> un_camel_case('ACM_Computing_Classification_System')
'ACM Computing Classification System'
>>> un_camel_case('Anne_Blunt,_15th_Baroness_Wentworth')
'Anne Blunt, 15th Baroness Wentworth'
>>> un_camel_case('Hindi-Urdu')
'Hindi-Urdu' |
21,772 | def get_gravityspy_triggers(tablename, engine=None, **kwargs):
from sqlalchemy.engine import create_engine
from sqlalchemy.exc import ProgrammingError
if engine is None:
conn_kw = {}
for key in ('db', 'host', 'user', 'passwd'):  # key literals lost in extraction; typical connection keys assumed
try:
conn_kw[key] = kwargs.pop(key)
except KeyError:
pass
engine = create_engine(get_connection_str(**conn_kw))
try:
return GravitySpyTable(fetch(engine, tablename, **kwargs))
except ProgrammingError as exc:
if 'relation "%s" does not exist' % tablename in str(exc):  # message literal lost in extraction; reconstructed
msg = exc.args[0]
msg = msg.replace(
'does not exist',  # replacement literals lost in extraction; placeholders
'does not exist, acceptable tables are:\n%s' % '\n'.join(engine.table_names()))
exc.args = (msg,)
raise | Fetch data into an `GravitySpyTable`
Parameters
----------
table : `str`,
The name of table you are attempting to receive triggers
from.
selection
other filters you would like to supply
underlying reader method for the given format
.. note::
For now it will attempt to automatically connect you
to a specific DB. In the future, this may be an input
argument.
Returns
-------
table : `GravitySpyTable` |
21,773 | def count_genomic_region_plot(self):
keys = self.snpeff_section_totals['# Count by genomic region']  # section-key literal lost in extraction; placeholder
sorted_keys = sorted(keys, reverse=True, key=keys.get)
pkeys = OrderedDict()
for k in sorted_keys:
pkeys[k] = {'name': k.replace('_', ' ').title().replace('Utr', 'UTR')}  # literals reconstructed; placeholders
pconfig = {
'id': 'snpeff_count_genomic_region',  # plot-config literals lost in extraction; placeholders
'title': 'SnpEff: Counts by Genomic Region',
'ylab': '# Counts',
'logswitch': True
}
return bargraph.plot(self.snpeff_data, pkeys, pconfig) | Generate the SnpEff Counts by Genomic Region plot |
21,774 | def sync(remote='origin', branch='master'):  # default literals lost in extraction; typical git defaults assumed
pull(branch, remote)
push(branch, remote)
print(cyan("Git Synced!")) | git pull and push commit |
21,775 | def save_files(self, selections) -> None:
try:
currentpath = self.currentpath
selections = selectiontools.Selections(selections)
for selection in selections:
if selection.name == 'complete':  # sentinel literal lost in extraction; placeholder
continue
path = os.path.join(currentpath, selection.name + '.py')  # extension literal lost in extraction; placeholder
selection.save_networkfile(filepath=path)
except BaseException:
objecttools.augment_excmessage(  # message literal lost in extraction; placeholder
'While trying to save selections %s into network files' % selections) | Save the |Selection| objects contained in the given |Selections|
instance to separate network files. |
21,776 | def get_apps_menu(self):
menu = {}
for model, model_admin in self.admin_site._registry.items():
if hasattr(model_admin, 'app_config'):  # attribute literal lost in extraction; inferred from the usage below
if model_admin.app_config.has_menu_permission(obj=self.user):
menu.update({
'app_' + model_admin.app_config.name: {  # key-prefix literal lost in extraction; placeholder
'name': model_admin.app_config.verbose_name,  # menu-key literals lost in extraction; placeholders
'items': model_admin.app_config.init_menu(),
'icon': model_admin.app_config.icon}})
return menu | Temporary code; will change to apps.get_app_configs() in Django 1.7.
Generate an initial menu list using the registered AppsConfig. |
21,777 | def network_interfaces_list(resource_group, **kwargs):
result = {}
netconn = __utils__['azurearm.get_client']('network', **kwargs)  # util-key and client-name literals lost in extraction; reconstructed
try:
nics = __utils__['azurearm.paged_object_to_list'](
netconn.network_interfaces.list(
resource_group_name=resource_group
)
)
for nic in nics:
result[nic['name']] = nic
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result | .. versionadded:: 2019.2.0
List all network interfaces within a resource group.
:param resource_group: The resource group name to list network
interfaces within.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.network_interfaces_list testgroup |
21,778 | def _build_crawlid_info(self, master, dict):
# All dict-key and format-string literals in this function were lost in
# extraction; the names below are reconstructed and should be treated as
# assumptions.
master['total_pending'] = 0
master['total_domains'] = 0
master['appid'] = dict['appid']
master['crawlid'] = dict['crawlid']
master['spiderid'] = dict['spiderid']
master['domains'] = {}
timeout_key = 'timeout:{sid}:{aid}:{cid}'.format(sid=dict['spiderid'],
aid=dict['appid'],
cid=dict['crawlid'])
if self.redis_conn.exists(timeout_key):
master['expires'] = self.redis_conn.get(timeout_key)
match_string = '{sid}:*:queue'.format(sid=dict['spiderid'])
for key in self.redis_conn.scan_iter(match=match_string):
domain = key.split(":")[1]
sortedDict = self._get_bin(key)
for score in sortedDict:
for item in sortedDict[score]:
if 'meta' in item:
item = item['meta']
if item['appid'] == dict['appid'] and item['crawlid'] == dict['crawlid']:
if domain not in master['domains']:
master['domains'][domain] = {}
master['domains'][domain]['total'] = 0
master['domains'][domain]['high_priority'] = -9999
master['domains'][domain]['low_priority'] = 9999
master['total_domains'] = master['total_domains'] + 1
master['domains'][domain]['total'] = master['domains'][domain]['total'] + 1
if item['priority'] > master['domains'][domain]['high_priority']:
master['domains'][domain]['high_priority'] = item['priority']
if item['priority'] < master['domains'][domain]['low_priority']:
master['domains'][domain]['low_priority'] = item['priority']
master['total_pending'] = master['total_pending'] + 1
return master | Builds the crawlid info object
@param master: the master dict
@param dict: the dict object received
@return: the crawlid info object |
21,779 | def write_message(self, status=messages.INFO, message=None):
if not message:
message = u"%s saved" % self.object
messages.add_message(self.request, status, message)
return message | Writes a message to django's messaging framework and
returns the written message.
:param status: The message status level. Defaults to \
messages.INFO.
:param message: The message to write. If not given, \
defaults to appending 'saved' to the unicode representation \
of `self.object`. |
21,780 | def bitwise_or(self, t):
s = self
result_interval = list()
for u in s._ssplit():
for v in t._ssplit():
w = u.bits
if u.is_integer:
s_t = StridedInterval._ntz(v.stride)
elif v.is_integer:
s_t = StridedInterval._ntz(u.stride)
else:
s_t = min(StridedInterval._ntz(u.stride), StridedInterval._ntz(v.stride))
if u.is_integer and u.lower_bound == 0:
new_stride = v.stride
elif v.is_integer and v.lower_bound == 0:
new_stride = u.stride
else:
new_stride = 2 ** s_t
mask = (1 << s_t) - 1
r = (u.lower_bound & mask) | (v.lower_bound & mask)
m = (2 ** w) - 1
low_bound = WarrenMethods.min_or(u.lower_bound & (~mask & m), u.upper_bound & (~mask & m), v.lower_bound & (~mask & m), v.upper_bound & (~mask & m), w)
upper_bound = WarrenMethods.max_or(u.lower_bound & (~mask & m), u.upper_bound & (~mask & m), v.lower_bound & (~mask & m), v.upper_bound & (~mask & m), w)
if low_bound == upper_bound:
new_stride = 0
new_interval = StridedInterval(lower_bound=((low_bound & (~mask & m)) | r), upper_bound=((upper_bound & (~mask & m)) | r), bits=w, stride=new_stride)
result_interval.append(new_interval)
return StridedInterval.least_upper_bound(*result_interval).normalize() | Binary operation: logical or
:param b: The other operand
:return: self | b |
21,781 | def format_latex(self, strng):
# The regex pattern and replacement literals were lost in extraction; the
# versions below are reconstructions and should be treated as approximations.
escape_re = re.compile(r'(%|_|\$|#|&)', re.MULTILINE)
cmd_name_re = re.compile(r'^(%s.*?):' % ESC_MAGIC,
re.MULTILINE)
cmd_re = re.compile(r'(?P<cmd>%s.+?\b)(?!\}\}:)' % ESC_MAGIC,
re.MULTILINE)
par_re = re.compile(r'\\$', re.MULTILINE)
newline_re = re.compile(r'\\n')
strng = cmd_name_re.sub(r'\n\\texttt{\\textsl{\\large \1}}:',
strng)
strng = cmd_re.sub(r'\\texttt{\g<cmd>}', strng)
strng = par_re.sub(r'\\\\', strng)
strng = escape_re.sub(r'\\\1', strng)
strng = newline_re.sub(r'\\textbackslash{}n', strng)
return strng | Format a string for latex inclusion. |
21,782 | def chain_sub_regexes(phrase, *regex_sub_pairs):
for regex, substitution in regex_sub_pairs:
if isinstance(regex, basestring):
regex = re.compile(regex)
phrase = regex.sub(substitution, phrase)
return phrase | Allow for a series of regex substitutions to occur
chain_sub_regexes('test ok', (' ', '_'), ('k$', 'oo'))
# => 'test_ooo' |
21,783 | def do_write(self):
while True:
try:
written = 0
if hasattr(self.fd, 'send'):  # attribute literal lost in extraction; 'send' inferred from the call below
written = self.fd.send(self.buffer)
else:
written = os.write(self.fd.fileno(), self.buffer)
self.buffer = self.buffer[written:]
if self.close_requested and len(self.buffer) == 0:
self.close()
return written
except EnvironmentError as e:
if e.errno not in Stream.ERRNO_RECOVERABLE:
raise e | Flushes as much pending data from the internal write buffer as possible. |
21,784 | def from_cli(opt, length, delta_f, low_frequency_cutoff,
strain=None, dyn_range_factor=1, precision=None):
f_low = low_frequency_cutoff
sample_rate = int((length -1) * 2 * delta_f)
try:
psd_estimation = opt.psd_estimation is not None
except AttributeError:
psd_estimation = False
exclusive_opts = [opt.psd_model, opt.psd_file, opt.asd_file,
psd_estimation]
if sum(map(bool, exclusive_opts)) != 1:
err_msg = "You must specify exactly one of , "
err_msg += ", , "
raise ValueError(err_msg)
if (opt.psd_model or opt.psd_file or opt.asd_file):
if opt.psd_model:
psd = from_string(opt.psd_model, length, delta_f, f_low)
elif opt.psd_file or opt.asd_file:
if opt.asd_file:
psd_file_name = opt.asd_file
else:
psd_file_name = opt.psd_file
if psd_file_name.endswith(('.dat', '.txt')):  # extension literals inferred from the ASD error message below
is_asd_file = bool(opt.asd_file)
psd = from_txt(psd_file_name, length,
delta_f, f_low, is_asd_file=is_asd_file)
elif opt.asd_file:
err_msg = "ASD files are only valid as ASCII files (.dat or "
err_msg += ".txt). Supplied {}.".format(psd_file_name)
raise ValueError(err_msg)  # raise restored; err_msg was otherwise unused
elif psd_file_name.endswith(('.xml', '.xml.gz')):  # extension literals lost in extraction; XML variants assumed
psd = from_xml(psd_file_name, length, delta_f, f_low,
ifo_string=opt.psd_file_xml_ifo_string,
root_name=opt.psd_file_xml_root_name)
kmin = int(low_frequency_cutoff / psd.delta_f)
psd[0:kmin] = psd[kmin]
psd *= dyn_range_factor ** 2
elif psd_estimation:
psd = welch(strain, avg_method=opt.psd_estimation,
seg_len=int(opt.psd_segment_length * sample_rate),
seg_stride=int(opt.psd_segment_stride * sample_rate),
num_segments=opt.psd_num_segments,
require_exact_data_fit=False)
if delta_f != psd.delta_f:
psd = interpolate(psd, delta_f)
else:
if opt.psd_inverse_length:
psd = inverse_spectrum_truncation(psd,
int(opt.psd_inverse_length * sample_rate),
low_frequency_cutoff=f_low)
if hasattr(opt, ) and opt.psd_output:
(psd.astype(float64) / (dyn_range_factor ** 2)).save(opt.psd_output)
if precision is None:
return psd
elif precision == 'single':
return psd.astype(float32)
elif precision == 'double':
return psd.astype(float64)
else:
err_msg = "If provided the precision kwarg must be either 'single' "
err_msg += "or 'double'. You provided %s." % (precision)
raise ValueError(err_msg) | Parses the CLI options related to the noise PSD and returns a
FrequencySeries with the corresponding PSD. If necessary, the PSD is
linearly interpolated to achieve the resolution specified in the CLI.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length,
psd_output).
length : int
The length in samples of the output PSD.
delta_f : float
The frequency step of the output PSD.
low_frequency_cutoff: float
The low frequncy cutoff to use when calculating the PSD.
strain : {None, TimeSeries}
Time series containing the data from which the PSD should be measured,
when psd_estimation is in use.
dyn_range_factor : {1, float}
For PSDs taken from models or text files, if `dyn_range_factor` is
not None, then the PSD is multiplied by `dyn_range_factor` ** 2.
precision : str, choices (None,'single','double')
If not specified, or specified as None, the precision of the returned
PSD will match the precision of the data, if measuring a PSD, or will
match the default precision of the model if using an analytical PSD.
If 'single' the PSD will be converted to float32, if not already in
that precision. If 'double' the PSD will be converted to float64, if
not already in that precision.
Returns
-------
psd : FrequencySeries
The frequency series containing the PSD. |
21,785 | def reboot_adb_server():
_reboot_count = 0
_max_retry = 1
def _reboot():
nonlocal _reboot_count
if _reboot_count >= _max_retry:
raise RuntimeError('adb server restart failed after {} retries'.format(_max_retry))  # message literal lost in extraction; placeholder
_reboot_count += 1
return_code = subprocess.call(['adb', 'devices'], stdout=subprocess.DEVNULL)  # command literals inferred from the docstring
if bool(return_code):
warnings.warn('"adb devices" returned a non-zero exit code')  # message literal lost in extraction; placeholder
raise EnvironmentError('adb server is not available')  # message literal lost in extraction; placeholder
return _reboot | execute 'adb devices' to start adb server |
21,786 | def _square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file,
todo="square"):
ref_file = tz.get_in(("reference", "fasta", "base"), data)
cores = tz.get_in(("config", "algorithm", "num_cores"), data, 1)
resources = config_utils.get_resources("bcbio-variation-recall", data["config"])
memcores = int(math.ceil(float(cores) / 5.0))
jvm_opts = config_utils.adjust_opts(resources.get("jvm_opts", ["-Xms250m", "-Xmx2g"]),
{"algorithm": {"memory_adjust": {"direction": "increase",
"magnitude": memcores}}})
input_file = "%s-inputs.txt" % os.path.splitext(out_file)[0]
with open(input_file, "w") as out_handle:
out_handle.write("\n".join(sorted(list(set(vrn_files)))) + "\n")
if todo == "square":
out_handle.write("\n".join(sorted(list(set(bam_files)))) + "\n")
variantcaller = tz.get_in(("config", "algorithm", "jointcaller"), data).replace("-joint", "")
cmd = ["bcbio-variation-recall", todo] + jvm_opts + broad.get_default_jvm_opts() + \
["-c", cores, "-r", bamprep.region_to_gatk(region)]
if todo == "square":
cmd += ["--caller", variantcaller]
cmd += [out_file, ref_file, input_file]
bcbio_env = utils.get_bcbio_env()
cmd = " ".join(str(x) for x in cmd)
do.run(cmd, "%s in region: %s" % (cmd, bamprep.region_to_gatk(region)), env=bcbio_env)
return out_file | Run squaring or merging analysis using bcbio.variation.recall. |
21,787 | def paste(self):
try:
t = pygame.scrap.get(SCRAP_TEXT)
if t:
self.insert(t)
return True
except:
return False | Insert text from the clipboard at the cursor. |
21,788 | def inall_cmd(argv):
envs = lsenvs()
errors = False
for env in envs:
print("\n%s:" % env)
try:
inve(env, *argv)
except CalledProcessError as e:
errors = True
err(e)
sys.exit(errors) | Run a command in each virtualenv. |
21,789 | def log_error(self, msg, *args):
if self._logger is not None:
self._logger.error(msg, *args)
else:
print(msg % args) | Log an error or print in stdout if no logger. |
21,790 | def program_rtr_nwk_next_hop(self, rout_id, next_hop, cidr):
namespace = self.find_rtr_namespace(rout_id)
if namespace is None:
LOG.error("Unable to find namespace for router %s", rout_id)
return False
args = ['route', 'add', '-net', cidr, 'gw', next_hop]  # argument literals lost in extraction; a typical 'route add' invocation assumed
ret = self.program_rtr(args, rout_id, namespace=namespace)
if not ret:
LOG.error("Program router returned error for %s", rout_id)
return False
return True | Program the next hop for all networks of a tenant. |
21,791 | def to_html(sample, stats_object):
n_obs = stats_object[][]
value_formatters = formatters.value_formatters
row_formatters = formatters.row_formatters
if not isinstance(sample, pd.DataFrame):
raise TypeError("sample must be of type pandas.DataFrame")
if not isinstance(stats_object, dict):
raise TypeError("stats_object must be of type dict. Did you generate this using the pandas_profiling.describe() function?")
if not set({'table', 'variables', 'freq', 'correlations'}).issubset(set(stats_object.keys())):  # key literals lost in extraction; reconstructed placeholders
raise TypeError(
"stats_object badly formatted. Did you generate this using the pandas_profiling.describe() function?")
def fmt(value, name):
if pd.isnull(value):
return ""
if name in value_formatters:
return value_formatters[name](value)
elif isinstance(value, float):
return value_formatters[formatters.DEFAULT_FLOAT_FORMATTER](value)
else:
try:
return unicode(value)
except NameError:
return str(value)
def _format_row(freq, label, max_freq, row_template, n, extra_class=''):
if max_freq != 0:
width = int(freq / max_freq * 99) + 1
else:
width = 1
if width > 20:
label_in_bar = freq
label_after_bar = ""
else:
label_in_bar = " "
label_after_bar = freq
return row_template.render(label=label,
width=width,
count=freq,
percentage='{:.1f}'.format(freq / n * 100),  # format literal lost in extraction; one decimal place assumed
extra_class=extra_class,
label_in_bar=label_in_bar,
label_after_bar=label_after_bar)
def freq_table(freqtable, n, table_template, row_template, max_number_to_print, nb_col=6):
freq_rows_html = u''
if max_number_to_print > n:
max_number_to_print=n
if max_number_to_print < len(freqtable):
freq_other = sum(freqtable.iloc[max_number_to_print:])
min_freq = freqtable.values[max_number_to_print]
else:
freq_other = 0
min_freq = 0
freq_missing = n - sum(freqtable)
max_freq = max(freqtable.values[0], freq_other, freq_missing)
for label, freq in six.iteritems(freqtable.iloc[0:max_number_to_print]):
freq_rows_html += _format_row(freq, label, max_freq, row_template, n)
if freq_other > min_freq:
freq_rows_html += _format_row(freq_other,
"Other values (%s)" % (freqtable.count() - max_number_to_print), max_freq, row_template, n,
extra_class='other')  # class literal lost in extraction; placeholder
if freq_missing > min_freq:
freq_rows_html += _format_row(freq_missing, "(Missing)", max_freq, row_template, n, extra_class='missing')  # class literal lost in extraction; placeholder
return table_template.render(rows=freq_rows_html, varid=hash(idx), nb_col=nb_col)
def extreme_obs_table(freqtable, table_template, row_template, number_to_print, n, ascending=True):
...  # function body (and the remainder of to_html) truncated in the source extraction
| Generate a HTML report from summary statistics and a given sample.
Parameters
----------
sample : DataFrame
the sample you want to print
stats_object : dict
Summary statistics. Should be generated with an appropriate describe() function
Returns
-------
str
containing profile report in HTML format
Notes
-----
* This function has to be refactored since it's huge and it contains inner functions |
21,792 | def get_maya_envpath(self):
opj = os.path.join
ml = self.get_maya_location()
mb = self.get_maya_bin()
msp = self.get_maya_sitepackage_dir()
pyzip = opj(mb, "python27.zip")
pydir = opj(ml, "Python")
pydll = opj(pydir, "DLLs")
pylib = opj(pydir, "lib")
pyplat = opj(pylib, "plat-win")
pytk = opj(pylib, "lib-tk")
path = os.pathsep.join((pyzip, pydll, pylib, pyplat, pytk, mb, pydir, msp))
return path | Return the PYTHONPATH necessary for running mayapy
If you start native mayapy, it will set up these paths.
You might want to prepend this to your path if running from
an external interpreter.
:returns: the PYTHONPATH that is used for running mayapy
:rtype: str
:raises: None |
21,793 | def reciprocal_space(space, axes=None, halfcomplex=False, shift=True,
**kwargs):
if not isinstance(space, DiscreteLp):
raise TypeError('{!r} is not a DiscreteLp instance'.format(space))  # message literal lost in extraction; placeholder
if axes is None:
axes = tuple(range(space.ndim))
axes = normalized_axes_tuple(axes, space.ndim)
if not all(space.is_uniform_byaxis[axis] for axis in axes):
raise ValueError('space is not uniformly discretized in the given axes')  # message literal lost in extraction; placeholder
if halfcomplex and space.field != RealNumbers():
raise ValueError('halfcomplex can only be used with real-valued spaces')  # message literal lost in extraction; placeholder
exponent = kwargs.pop('exponent', None)
if exponent is None:
exponent = conj_exponent(space.exponent)
dtype = kwargs.pop('dtype', None)
if dtype is None:
dtype = complex_dtype(space.dtype)
else:
if not is_complex_floating_dtype(dtype):
raise ValueError('{} is not a complex data type'.format(dtype_repr(dtype)))
impl = kwargs.pop('impl', 'numpy')
recip_grid = reciprocal_grid(space.grid, shift=shift,
halfcomplex=halfcomplex, axes=axes)
non_axes = [i for i in range(space.ndim) if i not in axes]
min_pt = {i: space.min_pt[i] for i in non_axes}
max_pt = {i: space.max_pt[i] for i in non_axes}
if halfcomplex:
max_pt[axes[-1]] = recip_grid.max_pt[axes[-1]]
part = uniform_partition_fromgrid(recip_grid, min_pt, max_pt)
axis_labels = list(space.axis_labels)
for i in axes:
label = axis_labels[i].replace('$', '')
axis_labels[i] = '$\^{{{}}}$'.format(label)
recip_spc = uniform_discr_frompartition(part, exponent=exponent,
dtype=dtype, impl=impl,
axis_labels=axis_labels)
return recip_spc | Return the range of the Fourier transform on ``space``.
Parameters
----------
space : `DiscreteLp`
Real space whose reciprocal is calculated. It must be
uniformly discretized.
axes : sequence of ints, optional
Dimensions along which the Fourier transform is taken.
Default: all axes
halfcomplex : bool, optional
If ``True``, take only the negative frequency part along the last
axis for real input. For ``False``, use the full frequency space.
This option can only be used if ``space`` is a space of
real-valued functions.
shift : bool or sequence of bools, optional
If ``True``, the reciprocal grid is shifted by half a stride in
the negative direction. With a boolean sequence, this option
is applied separately to each axis.
If a sequence is provided, it must have the same length as
``axes`` if supplied. Note that this must be set to ``True``
in the halved axis in half-complex transforms.
Default: ``True``
impl : string, optional
Implementation back-end for the created space.
Default: ``'numpy'``
exponent : float, optional
Create a space with this exponent. By default, the conjugate
exponent ``q = p / (p - 1)`` of the exponent of ``space`` is
used, where ``q = inf`` for ``p = 1`` and vice versa.
dtype : optional
Complex data type of the created space. By default, the
complex counterpart of ``space.dtype`` is used.
Returns
-------
rspace : `DiscreteLp`
Reciprocal of the input ``space``. If ``halfcomplex=True``, the
upper end of the domain (where the half space ends) is chosen to
coincide with the grid node. |
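A short usage sketch with ODL, assuming a real, uniformly discretized space so that the halfcomplex option is allowed:

import odl

space = odl.uniform_discr([0, 0], [1, 1], shape=(16, 16))
rspace = reciprocal_space(space, halfcomplex=True)
# Half-complex keeps roughly half of the last axis: (16, 16) -> (16, 9)
print(rspace.shape)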
21,794 | def handle_get_reseller(self, req):
if not self.is_reseller_admin(req):
return self.denied_response(req)
listing = []
marker = ''
while True:
path = '/v1/%s?format=json&marker=%s' % (quote(self.auth_account),
quote(marker))
resp = self.make_pre_authed_request(
req.environ, 'GET', path).get_response(self.app)
if resp.status_int // 100 != 2:
raise Exception('Could not list main auth account: %s %s' %
(path, resp.status))
sublisting = json.loads(resp.body)
if not sublisting:
break
for container in sublisting:
if container['name'][0] != '.':
listing.append({'name': container['name']})
marker = sublisting[-1]['name'].encode('utf-8')
return Response(body=json.dumps({'accounts': listing}),
content_type=CONTENT_TYPE_JSON) | Handles the GET v2 call for getting general reseller information
(currently just a list of accounts). Can only be called by a
.reseller_admin.
On success, a JSON dictionary will be returned with a single `accounts`
key whose value is list of dicts. Each dict represents an account and
currently only contains the single key `name`. For example::
{"accounts": [{"name": "reseller"}, {"name": "test"},
{"name": "test2"}]}
:param req: The swob.Request to process.
:returns: swob.Response, 2xx on success with a JSON dictionary as
explained above. |
21,795 | def post_load(fn=None, pass_many=False, pass_original=False):
return set_hook(fn, (POST_LOAD, pass_many), pass_original=pass_original) | Register a method to invoke after deserializing an object. The method
receives the deserialized data and returns the processed data.
By default, receives a single datum at a time, transparently handling the ``many``
argument passed to the Schema. If ``pass_many=True``, the raw data
(which may be a collection) and the value for ``many`` is passed.
If ``pass_original=True``, the original data (before deserializing) will be passed as
an additional argument to the method. |
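A small marshmallow 3-style example of the hook in use; User and UserSchema are illustrative:

from marshmallow import Schema, fields, post_load

class User:
    def __init__(self, name):
        self.name = name

class UserSchema(Schema):
    name = fields.Str()

    @post_load
    def make_user(self, data, **kwargs):
        # Turn the deserialized dict into a User instance
        return User(**data)

user = UserSchema().load({"name": "Ada"})  # a User instance, not a dict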
21,796 | def get_commands():
commands = dict((name, 'django.core') for name in find_commands(__path__[0]))
if not settings.configured:
return commands
for app_config in reversed(list(apps.get_app_configs())):
path = os.path.join(app_config.path, 'management')
commands.update(dict((name, app_config.name) for name in find_commands(path)))
return commands | Returns a dictionary mapping command names to their callback applications.
This works by looking for a management.commands package in django.core, and
in each installed application -- if a commands package exists, all commands
in that package are registered.
Core commands are always included. If a settings module has been
specified, user-defined commands will also be included.
The dictionary is in the format {command_name: app_name}. Key-value
pairs from this dictionary can then be used in calls to
load_command_class(app_name, command_name)
If a specific version of a command must be loaded (e.g., with the
startapp command), the instantiated module can be placed in the
dictionary in place of the application name.
The dictionary is cached on the first call and reused on subsequent
calls. |
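A quick sketch of consuming the mapping together with load_command_class, assuming Django settings are already configured:

from django.core.management import get_commands, load_command_class

commands = get_commands()  # e.g. {'migrate': 'django.core', ...}
command = load_command_class(commands["migrate"], "migrate")
print(command.help)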
21,797 | def read_transport_message(self, origin, message_type, timeout=15):
return self.event_handler.wait_for_event((_EventType.Transport, origin, message_type), timeout=timeout) | Blocking read of a transport message that does not indicate a message from the Pebble.
Will block until a message is received, or it times out.
.. warning::
Avoid calling this method from an endpoint callback; doing so is likely to lead to deadlock.
:param origin: The type of :class:`.MessageTarget` that triggers the message.
:param message_type: The class of the message to read from the transport.
:param timeout: The maximum time to wait before raising :exc:`.TimeoutError`.
:return: The object read from the transport; of the same type as passed to ``message_type``. |
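A heavily hedged sketch with libpebble2; MessageTargetPhone and PhoneAppLog below are placeholders for whatever origin target and transport message class your transport actually defines:

from libpebble2.communication import PebbleConnection
from libpebble2.communication.transports.websocket import WebsocketTransport

pebble = PebbleConnection(WebsocketTransport("ws://192.168.0.10:9000"))
pebble.connect()
# Both names below are stand-ins; substitute your transport's real classes.
msg = pebble.read_transport_message(MessageTargetPhone, PhoneAppLog, timeout=15)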
21,798 | def add_dependency(self, depend):
try:
self._add_child(self.depends, self.depends_set, depend)
except TypeError as e:
e = e.args[0]
if SCons.Util.is_List(e):
s = list(map(str, e))
else:
s = str(e)
raise SCons.Errors.UserError("attempted to add a non-Node dependency to %s:\n\t%s is a %s, not a Node" % (str(self), s, type(e))) | Adds dependencies. |
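This method is normally reached through the public SCons API rather than called directly; a sketch of an SConstruct that adds an explicit dependency edge:

# SConstruct
env = Environment()
prog = env.Program("app", "app.c")
# Depends() records the extra edge, ultimately via Node.add_dependency;
# a value that cannot be coerced to a Node raises the UserError above.
env.Depends(prog, "generated/version.h")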
21,799 | def detach(self, force=False):
instance_id = None
if self.attach_data:
instance_id = self.attach_data.instance_id
device = None
if self.attach_data:
device = self.attach_data.device
return self.connection.detach_volume(self.id, instance_id, device, force) | Detach this EBS volume from an EC2 instance.
:type force: bool
:param force: Forces detachment if the previous detachment attempt did
not occur cleanly. This option can lead to data loss or
a corrupted file system. Use this option only as a last
resort to detach a volume from a failed instance. The
instance will not have an opportunity to flush file system
caches nor file system meta data. If you use this option,
you must perform file system check and repair procedures.
:rtype: bool
:return: True if successful |
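A usage sketch with the classic boto 2 EC2 API; the region, volume id, and credentials (assumed to come from the environment) are illustrative:

import boto.ec2

conn = boto.ec2.connect_to_region("us-east-1")
volume = conn.get_all_volumes(["vol-0123456789abcdef0"])[0]
# Force only as a last resort; see the data-loss warning in the docstring
volume.detach(force=True)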