Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
25,500 |
def replace(text, replacements):
p = 0
parts = []
for (start, end, new_text) in sorted(replacements):
parts.append(text[p:start])
parts.append(new_text)
p = end
parts.append(text[p:])
return "".join(parts)
|
Replaces multiple slices of text with new values. This is a convenience method for making code
modifications of ranges e.g. as identified by ``ASTTokens.get_text_range(node)``. Replacements is
an iterable of ``(start, end, new_text)`` tuples.
For example, ``replace("this is a test", [(0, 4, "X"), (8, 9, "THE")])`` produces
``"X is THE test"``.
|
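A quick usage sketch for the `replace` helper above (assuming the function as defined in this row):

```python
# Replace "this" (chars 0-4) with "X" and "a" (chars 8-9) with "THE".
text = "this is a test"
print(replace(text, [(0, 4, "X"), (8, 9, "THE")]))  # -> "X is THE test"
```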
25,501 |
def TNE_metric(bpmn_graph):
events_counts = get_events_counts(bpmn_graph)
return sum(
[count for _, count in events_counts.items()]
)
|
Returns the value of the TNE metric (Total Number of Events of the Model)
for the BPMNDiagramGraph instance.
:param bpmn_graph: an instance of BpmnDiagramGraph representing BPMN model.
|
25,502 |
async def _senddms(self):
    # NOTE: the original config-key string literals were lost in extraction;
    # "senddms" below is a placeholder reconstruction.
    data = self.bot.config.get("meta", {})
    tosend = data.get("senddms", True)
    data["senddms"] = not tosend
    await self.bot.config.put("meta", data)
    await self.bot.responses.toggle(message="Forwarding of DMs to owner has been {status}.", success=data["senddms"])
|
Toggles sending DMs to owner.
|
25,503 |
def update_activity(self, activity_id, activity_name=None, desc=None,
started_on=None, ended_on=None):
put_data = {
"name": activity_name,
"description": desc,
"started_on": started_on,
"ended_on": ended_on
}
return self._put("/activities/" + activity_id, put_data)
|
Send PUT request to /activities/{activity_id} to update the activity metadata.
Raises ValueError if at least one field is not updated.
:param activity_id: str uuid of activity
:param activity_name: str new name of the activity (optional)
:param desc: str description of the activity (optional)
:param started_on: str date the updated activity began on (optional)
:param ended_on: str date the updated activity ended on (optional)
:return: requests.Response containing the successful result
|
25,504 |
def exitDialog(self):
if self.prev_submenu is not None:
self.menu.changeSubMenu(self.prev_submenu)
self.prev_submenu = None
|
Helper method that exits the dialog.
This method will cause the previously active submenu to activate.
|
25,505 |
def configure_logging(self):
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
console = logging.StreamHandler(self.stderr)
console_level = {self.WARNING_LEVEL: logging.WARNING,
self.INFO_LEVEL: logging.INFO,
self.DEBUG_LEVEL: logging.DEBUG,
}.get(self.options.verbose_level, logging.DEBUG)
if console_level == logging.INFO:
console.setLevel(logging.WARNING)
else:
console.setLevel(console_level)
if logging.DEBUG == console_level:
formatter = logging.Formatter(self.DEBUG_MESSAGE_FORMAT)
else:
formatter = logging.Formatter(self.CONSOLE_MESSAGE_FORMAT)
# quiet noisy third-party loggers (the original logger names were lost in
# extraction; "requests" and "urllib3" are placeholder guesses)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
console.setFormatter(formatter)
root_logger.addHandler(console)
return
|
Create logging handlers for any log output.
|
25,506 |
def parse_ssh_destination(destination):
match = _re_ssh.match(destination)
if not match:
raise InvalidDestination("Invalid destination: %s" % destination)
user, password, host, port = match.groups()
info = {}
if user:
info['username'] = user
else:
info['username'] = getpass.getuser()
if password:
info['password'] = password
if port:
info['port'] = int(port)
info['hostname'] = host
return info
|
Parses the SSH destination argument.
|
25,507 |
def make_block_creator(yaml_path, filename=None):
sections, yamlname, docstring = Section.from_yaml(yaml_path, filename)
yamldir = os.path.dirname(yaml_path)
controller_sections = [s for s in sections if s.section == "controllers"]
assert len(controller_sections) == 1, \
"Expected exactly 1 controller, got %s" % (controller_sections,)
controller_section = controller_sections[0]
def block_creator(kwargs):
defines = _create_defines(sections, yamlname, yamldir, kwargs)
controllers, parts = _create_blocks_and_parts(sections, defines)
controller = controller_section.instantiate(defines)
for part in parts:
controller.add_part(part)
controllers.append(controller)
return controllers
creator = creator_with_nice_signature(
block_creator, sections, yamlname, yaml_path, docstring)
return creator
|
Make a collection function that will create a list of blocks
Args:
yaml_path (str): File path to YAML file, or a file in the same dir
filename (str): If given, use this filename as the last element in
the yaml_path (so yaml_path can be __file__)
Returns:
function: A collection function decorated with @takes. This can be
used in other blocks or instantiated by the process. If the
YAML text specified controllers or parts then a block instance
with the given name will be instantiated. If there are any
blocks listed then they will be called. All created blocks
by this or any sub collection will be returned
|
25,508 |
def get_name(self):
pathname = self.get_filename()
if pathname:
modName = self.__filename_to_modname(pathname)
if isinstance(modName, compat.unicode):
try:
modName = modName.encode()
except UnicodeEncodeError:
e = sys.exc_info()[1]
warnings.warn(str(e))
else:
modName = "0x%x" % self.get_base()
return modName
|
@rtype: str
@return: Module name, as used in labels.
@warning: Names are B{NOT} guaranteed to be unique.
If you need unique identification for a loaded module,
use the base address instead.
@see: L{get_label}
|
25,509 |
def circumcircleForTriangle(cls, triangle):
if triangle.isRight:
o = triangle.hypotenuse.midpoint
r = o.distance(triangle.A)
return cls(o, r)
abn = triangle.AB.normal
abn += triangle.AB.midpoint
acn = triangle.AC.normal
acn += triangle.AC.midpoint
o = abn.intersection(acn)
r = o.distance(triangle.A)
return cls(o, r)
|
:param: triangle - Triangle class
:return: Circle class
Returns the circle on which every vertex of the input
triangle lies.
|
25,510 |
def _m2m_rev_field_name(model1, model2):
m2m_field_names = [
rel.get_accessor_name() for rel in model1._meta.get_fields()
if rel.many_to_many
and rel.auto_created
and rel.related_model == model2
]
return m2m_field_names[0]
|
Gets the name of the reverse m2m accessor from `model1` to `model2`
For example, if User has a ManyToManyField connected to Group,
`_m2m_rev_field_name(Group, User)` retrieves the name of the field on
Group that lists a group's Users. (By default, this field is called
`user_set`, but the name can be overridden).
|
25,511 |
def visit_FunctionDef(self, node):
self.symbols.pop(node.name, None)
gsymbols = self.symbols.copy()
for arg in node.args.args:
self.symbols.pop(arg.id, None)
self.generic_visit(node)
self.symbols = gsymbols
return node
|
Update import context using overwriting name information.
Examples
--------
>>> import foo
>>> import bar
>>> def foo(bar):
...     print(bar)
In this case, neither ``bar`` nor ``foo`` can be used inside the ``foo``
function, and from then on ``foo`` is no longer usable.
|
25,512 |
def reverse(self, point, language=None, sensor=False):
# NOTE: parameter-name literals lost in extraction; 'latlng', 'sensor' and
# 'language' follow the Google Maps geocoding API.
params = {
'latlng': point,
'sensor': str(sensor).lower()
}
if language:
params['language'] = language
if not self.premier:
url = self.get_url(params)
else:
url = self.get_signed_url(params)
return self.GetService_url(url)
|
Reverse geocode a point.
Please refer to the Google Maps Web API for the details of the parameters.
|
25,513 |
def floor(self):
return Point(int(math.floor(self.x)), int(math.floor(self.y)))
|
Round `x` and `y` down to integers.
|
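A small usage sketch for `floor`, assuming the surrounding `Point` class stores `x` and `y`:

```python
p = Point(2.7, -1.2)
print(p.floor())  # Point(2, -2): math.floor rounds toward negative infinity
```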
25,514 |
def buffer(self, *args, **kwargs):
self.check_session()
result = self.session.buffer(*args, **kwargs)
return result
|
Buffer documents, in the current session
|
25,515 |
def convert_ram_sdp_ar(ADDR_WIDTH=8, DATA_WIDTH=8):
clk = Signal(bool(0))
we = Signal(bool(0))
addrw = Signal(intbv(0)[ADDR_WIDTH:])
addrr = Signal(intbv(0)[ADDR_WIDTH:])
di = Signal(intbv(0)[DATA_WIDTH:])
do = Signal(intbv(0)[DATA_WIDTH:])
toVerilog(ram_sdp_ar, clk, we, addrw, addrr, di, do)
|
Convert RAM: Simple-Dual-Port, Asynchronous Read
|
25,516 |
def symlink(src, link):
    # NOTE: the body of this function was mangled in extraction; it has been
    # reconstructed from the error strings that survived.
    if sys.getwindowsversion().major < 6:
        raise SaltInvocationError(
            'Symlinks are only supported on Windows Vista or later.')
    if not os.path.exists(src):
        raise SaltInvocationError('The given source path does not exist.')
    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')
    is_dir = os.path.isdir(src)
    try:
        win32file.CreateSymbolicLink(link, src, int(is_dir))
        return True
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Could not create \'{0}\' - [{1}] {2}'.format(
                link,
                exc.winerror,
                exc.strerror
            )
        )
|
Create a symbolic link to a file
This is only supported with Windows Vista or later and must be executed by
a user with the SeCreateSymbolicLink privilege.
The behavior of this function matches the Unix equivalent, with one
exception - invalid symlinks cannot be created. The source path must exist.
If it doesn't, an error will be raised.
Args:
src (str): The path to a file or directory
link (str): The path to the link
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' file.symlink /path/to/file /path/to/link
|
25,517 |
def qa(ctx):
header('Quality report')  # message literal lost in extraction; placeholder
info('Python static analysis')  # message literal lost; placeholder
flake8_results = lrun('flake8', pty=True, warn=True)  # command literal lost; 'flake8' assumed
info('JavaScript static analysis')  # message literal lost; placeholder
eslint_results = lrun('eslint', pty=True, warn=True)  # command literal lost; 'eslint' assumed
if flake8_results.failed or eslint_results.failed:
exit(flake8_results.return_code or eslint_results.return_code)
print(green('OK'))  # message literal lost; 'OK' assumed
|
Run a quality report
|
25,518 |
def graph_from_incidence_matrix(matrix, node_prefix='', directed=False):
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for row in matrix:
nodes = []
c = 1
for node in row:
if node:
nodes.append(c * node)
c += 1
nodes.sort()
if len(nodes) == 2:
graph.add_edge(
Edge(
node_prefix + str(abs(nodes[0])),  # str() added: nodes hold ints
node_prefix + str(nodes[1])))
if not directed:
graph.set_simplify(True)
return graph
|
Creates a basic graph out of an incidence matrix.
The matrix has to be a list of rows of values
representing an incidence matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
|
25,519 |
def lock():
# NOTE: key and message literals were lost in extraction; the 'out'/'message'
# keys follow the salt junos module conventions.
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
try:
conn.cu.lock()
ret['message'] = "Successfully locked the configuration."
except jnpr.junos.exception.LockError as exception:
ret['message'] = 'Could not gain lock due to: "{0}"'.format(exception)
ret['out'] = False
return ret
|
Attempts an exclusive lock on the candidate configuration. This
is a non-blocking call.
.. note::
When locking, it is important to remember to call
:py:func:`junos.unlock <salt.modules.junos.unlock>` once finished. If
locking during orchestration, remember to include a step in the
orchestration job to unlock.
CLI Example:
.. code-block:: bash
salt 'device_name' junos.lock
|
25,520 |
def discard(self, element, multiplicity=None):
_elements = self._elements
if element in _elements:
old_multiplicity = _elements[element]
if multiplicity is None or multiplicity >= old_multiplicity:
del _elements[element]
self._total -= old_multiplicity
elif multiplicity < 0:
raise ValueError("Multiplicity must not be negative")
elif multiplicity > 0:
_elements[element] -= multiplicity
self._total -= multiplicity
return old_multiplicity
else:
return 0
|
Removes the `element` from the multiset.
If multiplicity is ``None``, all occurrences of the element are removed:
>>> ms = Multiset('aab')
>>> ms.discard('a')
2
>>> sorted(ms)
['b']
Otherwise, the multiplicity is subtracted from the one in the multiset and the
old multiplicity is returned:
>>> ms = Multiset('aab')
>>> ms.discard('a', 1)
2
>>> sorted(ms)
['a', 'b']
In contrast to :meth:`remove`, this does not raise an error if the
element is not in the multiset:
>>> ms = Multiset('a')
>>> ms.discard('b')
0
>>> sorted(ms)
['a']
It is also not an error to remove more elements than are in the set:
>>> ms.remove('a', 2)
1
>>> sorted(ms)
[]
Args:
element:
The element to remove from the multiset.
multiplicity:
An optional multiplicity i.e. count of elements to remove.
Returns:
The multiplicity of the element in the multiset before
the removal.
|
25,521 |
def records():
with db.session.begin_nested():
for idx in range(20):
id_ = uuid.uuid4()
# NOTE: the record-field and PID literals were lost in extraction; the
# 'title'-style keys and the 'recid'/'rec' values below are typical
# Invenio conventions, reconstructed as placeholders.
Record.create({
'title': 'Record {}'.format(idx),
'description': 'Description {}'.format(idx),
'type': 'record',
'control_number': idx
}, id_=id_)
PersistentIdentifier.create(
pid_type='recid',
pid_value=idx,
object_type='rec',
object_uuid=id_,
status=PIDStatus.REGISTERED,
)
db.session.commit()
|
Load test data fixture.
|
25,522 |
def retrieve(self, operation, field=None):
obj = self._get(operation, field)
if obj is None:
return Mark(collection=self.collection, operation=operation, field=field)
return Mark.from_dict(self.collection, obj)
|
Retrieve a position in this collection.
:param operation: Name of an operation
:type operation: :class:`Operation`
:param field: Name of field for sort order
:type field: str
:return: The position for this operation
:rtype: Mark
:raises: NoTrackingCollection
|
25,523 |
def convert_quadratic_to_cubic_path(q0, q1, q2):
c0 = q0
c1 = (q0[0] + 2. / 3 * (q1[0] - q0[0]), q0[1] + 2. / 3 * (q1[1] - q0[1]))
c2 = (c1[0] + 1. / 3 * (q2[0] - q0[0]), c1[1] + 1. / 3 * (q2[1] - q0[1]))
c3 = q2
return c0, c1, c2, c3
|
Convert a quadratic Bezier curve through q0, q1, q2 to a cubic one.
|
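The function implements standard Bezier degree elevation: `c1 = (q0 + 2*q1)/3` and `c2 = (2*q1 + q2)/3`. A minimal check, assuming the function above:

```python
q0, q1, q2 = (0.0, 0.0), (3.0, 6.0), (6.0, 0.0)
c0, c1, c2, c3 = convert_quadratic_to_cubic_path(q0, q1, q2)
assert c1 == (2.0, 4.0) and c2 == (4.0, 4.0)  # (q0 + 2*q1)/3 and (2*q1 + q2)/3
```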
25,524 |
def missed_lines(self, filename):
statuses = self.line_statuses(filename)
statuses = extrapolate_coverage(statuses)
return [lno for lno, status in statuses if status is False]
|
Return a list of extrapolated uncovered line numbers for the
file `filename` according to `Cobertura.line_statuses`.
|
25,525 |
def register(self):
user, created = self.Model.create_account(self._json_params)
if user.api_key is None:
raise JHTTPBadRequest('Failed to create account')  # message literal lost in extraction; placeholder
if not created:
raise JHTTPConflict('Account already exists')  # message literal lost; placeholder
self.request._user = user
headers = remember(self.request, user.username)
return JHTTPOk('Registered', headers=headers)  # message literal lost; 'Registered' assumed
|
Register a new user by POSTing all required data.
User's `Authorization` header value is returned in `WWW-Authenticate`
header.
|
25,526 |
def store_equal(self):
with h5py.File(self.database.input, 'r+') as io5:  # file-mode literal lost in extraction; 'r+' assumed
fillsets = io5["quartets"]
if not os.path.exists(self.files.tree):
raise IPyradWarningExit(
"To use sampling method requires a guidetree")
tre = ete3.Tree(self.files.tree)
tre.unroot()
tre.resolve_polytomy(recursive=True)
splits = [([self.samples.index(z.name) for z in i],
[self.samples.index(z.name) for z in j]) \
for (i, j) in tre.get_edges()]
splits = [i for i in splits if all([len(j) > 1 for j in i])]
squarts = self.params.nquartets // len(splits)
saturable = 0
qiters = []
for idx, split in enumerate(splits):
total = n_choose_k(len(split[0]), 2) * n_choose_k(len(split[1]), 2)
if total < squarts*2:
qiter = (i+j for (i, j) in itertools.product(
itertools.combinations(split[0], 2),
itertools.combinations(split[1], 2)))
saturable += 1
else:
qiter = (random_product(split[0], split[1]) for _ \
in range(self.params.nquartets))
qiters.append((idx, qiter))
qitercycle = itertools.cycle(qiters)
sampled = set()
i = 0
empty = set()
edge_targeted = 0
random_targeted = 0
while i < self.params.nquartets:
cycle, qiter = next(qitercycle)
try:
qrtsamp = tuple(sorted(next(qiter)))
if qrtsamp not in sampled:
sampled.add(qrtsamp)
edge_targeted += 1
i += 1
if not i % self._chunksize:
print(min(i, self.params.nquartets))
except StopIteration:
empty.add(cycle)
if len(empty) == saturable:
break
while i <= self.params.nquartets:
newset = tuple(sorted(np.random.choice(
range(len(self.samples)), 4, replace=False)))
if newset not in sampled:
sampled.add(newset)
random_targeted += 1
i += 1
if not i % self._chunksize:
print(min(i, self.params.nquartets))
print(self.params.nquartets)
fillsets[:] = np.array(tuple(sampled))
del sampled
|
Takes a tetrad class object and populates array with random
quartets sampled equally among splits of the tree so that
deep splits are not overrepresented relative to rare splits,
like those near the tips.
|
25,527 |
def stats(self, indices=None):
path = self.conn._make_path(indices, (), "_stats")
return self.conn._send_request('GET', path)  # HTTP-method literal lost in extraction; 'GET' assumed
|
Retrieve the statistic of one or more indices
(See :ref:`es-guide-reference-api-admin-indices-stats`)
:keyword indices: an index or a list of indices
|
25,528 |
def open(filename, frame='unspecified'):  # default literal lost in extraction; 'unspecified' assumed
data = Image.load_data(filename)
return PointCloudImage(data, frame)
|
Creates a PointCloudImage from a file.
Parameters
----------
filename : :obj:`str`
The file to load the data from. Must be one of .png, .jpg,
.npy, or .npz.
frame : :obj:`str`
A string representing the frame of reference in which the new image
lies.
Returns
-------
:obj:`PointCloudImage`
The new PointCloudImage.
|
25,529 |
def _get_result_constructor(self):
if not self._values_list:
return lambda rows: self.model._construct_instance(rows)
elif self._flat_values_list:
return lambda row: row.popitem()[1]
else:
return lambda row: self._get_row_value_list(self._only_fields, row)
|
Returns a function that will be used to instantiate query results
|
25,530 |
def leave_command_mode(self, append_to_history=False):
client_state = self.get_client_state()
client_state.command_buffer.reset(append_to_history=append_to_history)
client_state.prompt_buffer.reset(append_to_history=True)
client_state.prompt_command = ''
client_state.confirm_command = ''
client_state.app.layout.focus_previous()
|
Leave the command/prompt mode.
|
25,531 |
def _merge_common_bands(rasters):
all_bands = IndexedSet([rs.band_names[0] for rs in rasters])
def key(rs):
return all_bands.index(rs.band_names[0])
rasters_final = []
for band_name, rasters_group in groupby(sorted(rasters, key=key), key=key):
rasters_final.append(reduce(_fill_pixels, rasters_group))
return rasters_final
|
Combine the common bands.
|
25,532 |
def load_preprocess_images(image_paths: List[str], image_size: tuple) -> List[np.ndarray]:
image_size = image_size[1:]
images = []
for image_path in image_paths:
images.append(load_preprocess_image(image_path, image_size))
return images
|
Load and pre-process the images specified with absolute paths.
:param image_paths: List of images specified with paths.
:param image_size: Tuple to resize the image to (Channels, Height, Width)
:return: A list of loaded images (numpy arrays).
|
25,533 |
def filter_single_grain(self):
my_index = 0
# NOTE: the column-name literals were lost in extraction; the descdict /
# datadict key names below are placeholders.
my_grains = [['index', 'desc1', 'desc2', 'desc3', 'desc4', 'desc5',
'data1', 'data2', 'data3']]
for it in range(len(self.data)):
my_grains.append([my_index, self.desc[it][self.descdict['desc1']], self.desc[it][self.descdict['desc2']], self.desc[it][self.descdict['desc3']], self.desc[it][self.descdict['desc4']], self.desc[it][self.descdict['desc5']], self.data[it][self.datadict['data1']], self.data[it][self.datadict['data2']], self.data[it][self.datadict['data3']]])
my_index += 1
for prt_line in my_grains:
print(prt_line)
print('Select grains by index (space separated), or press <Enter> to abort:')  # prompt literal lost; placeholder
usr_input = input()
if usr_input == '':
print('No grain selected, aborting.')  # message literal lost; placeholder
return None
elif len(usr_input) == 1:
usr_index = [usr_input]
else:
usr_index = usr_input.split()
for it in range(len(usr_index)):
usr_index[it] = int(usr_index[it])
desc_tmp = np.zeros((len(usr_index), len(self.header_desc)), dtype='<U256')  # dtype literal lost; unicode placeholder
data_tmp = np.zeros((len(usr_index), len(self.header_data)))
style_tmp = np.zeros((len(usr_index), len(self.header_style)), dtype='<U256')
for i in range(len(usr_index)):
for j in range(len(self.header_desc)):
desc_tmp[i][j] = self.desc[usr_index[i]][j]
for k in range(len(self.header_data)):
data_tmp[i][k] = self.data[usr_index[i]][k]
for l in range(len(self.header_style)):
style_tmp[i][l]= self.style[usr_index[i]][l]
self.desc = desc_tmp
self.data = data_tmp
self.style= style_tmp
|
This subroutine is to filter out single grains. It is kind of
useless if you have tons of data still in the list. To work on
there, you have other filters (filter_desc and filter_data)
available! This filter gives an index to every grain, plots
the most important information, and then asks you to pick a
filter. No input necessary, input is given during the routine
|
25,534 |
def mass_3d(self, r, kwargs, bool_list=None):
bool_list = self._bool_list(bool_list)
mass_3d = 0
for i, func in enumerate(self.func_list):
if bool_list[i] is True:
kwargs_i = {k: v for k, v in kwargs[i].items() if k not in ['center_x', 'center_y']}  # excluded-key literals lost; center-position keys assumed
mass_3d_i = func.mass_3d_lens(r, **kwargs_i)
mass_3d += mass_3d_i
return mass_3d
|
computes the mass within a 3d sphere of radius r
:param r: radius (in angular units)
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param bool_list: list of bools that are part of the output
:return: mass (in angular units, modulo epsilon_crit)
|
25,535 |
def name(self):
if self.mako_template.filename:
return os.path.basename(self.mako_template.filename)
return 'string'
|
Returns the name of this template (if created from a file) or "string" if not
|
25,536 |
def _reconstruct_object(typ, obj, axes, dtype):
try:
typ = typ.type
except AttributeError:
pass
res_t = np.result_type(obj.dtype, dtype)
if (not isinstance(typ, partial) and
issubclass(typ, pd.core.generic.PandasObject)):
return typ(obj, dtype=res_t, **axes)
if hasattr(res_t, 'type') and typ == np.bool_ and res_t != np.bool_:
ret_value = res_t.type(obj)
else:
ret_value = typ(obj).astype(res_t)
if len(obj.shape) == 1 and len(obj) == 1:
if not isinstance(ret_value, np.ndarray):
ret_value = np.array([ret_value]).astype(res_t)
return ret_value
|
Reconstruct an object given its type, raw value, and possibly empty
(None) axes.
Parameters
----------
typ : object
A type
obj : object
The value to use in the type constructor
axes : dict
The axes to use to construct the resulting pandas object
Returns
-------
ret : typ
An object of type ``typ`` with the value `obj` and possible axes
`axes`.
|
25,537 |
def _parse(self, r, length):
def bits_left():
return length * 8 - r.get_position()
self.audioObjectType = self._get_audio_object_type(r)
self.samplingFrequency = self._get_sampling_freq(r)
self.channelConfiguration = r.bits(4)
self.sbrPresentFlag = -1
self.psPresentFlag = -1
if self.audioObjectType in (5, 29):
self.extensionAudioObjectType = 5
self.sbrPresentFlag = 1
if self.audioObjectType == 29:
self.psPresentFlag = 1
self.extensionSamplingFrequency = self._get_sampling_freq(r)
self.audioObjectType = self._get_audio_object_type(r)
if self.audioObjectType == 22:
self.extensionChannelConfiguration = r.bits(4)
else:
self.extensionAudioObjectType = 0
if self.audioObjectType in (1, 2, 3, 4, 6, 7, 17, 19, 20, 21, 22, 23):
try:
GASpecificConfig(r, self)
except NotImplementedError:
return
else:
return
if self.audioObjectType in (
17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 39):
epConfig = r.bits(2)
if epConfig in (2, 3):
return
if self.extensionAudioObjectType != 5 and bits_left() >= 16:
syncExtensionType = r.bits(11)
if syncExtensionType == 0x2b7:
self.extensionAudioObjectType = self._get_audio_object_type(r)
if self.extensionAudioObjectType == 5:
self.sbrPresentFlag = r.bits(1)
if self.sbrPresentFlag == 1:
self.extensionSamplingFrequency = \
self._get_sampling_freq(r)
if bits_left() >= 12:
syncExtensionType = r.bits(11)
if syncExtensionType == 0x548:
self.psPresentFlag = r.bits(1)
if self.extensionAudioObjectType == 22:
self.sbrPresentFlag = r.bits(1)
if self.sbrPresentFlag == 1:
self.extensionSamplingFrequency = \
self._get_sampling_freq(r)
self.extensionChannelConfiguration = r.bits(4)
|
Raises BitReaderError
|
25,538 |
def version(self):
if self.find:
return self.meta.sp + split_package(self.find)[1]
return ""
|
Return version from installed packages
|
25,539 |
def _set_logger(self):
self.logger.propagate = False
hdl = logging.StreamHandler()
fmt_str = '%(asctime)s %(name)s %(levelname)s: %(message)s'  # format literal lost in extraction; placeholder
hdl.setFormatter(logging.Formatter(fmt_str))
self.logger.addHandler(hdl)
|
change log format.
|
25,540 |
def GetFileObjectByPathSpec(self, path_spec):
file_entry = self.GetFileEntryByPathSpec(path_spec)
if not file_entry:
return None
return file_entry.GetFileObject()
|
Retrieves a file-like object for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
FileIO: a file-like object or None if not available.
|
25,541 |
def status(self, job_ids):
statuses = []
for job_id in job_ids:
instance = self.client.instances().get(instance=job_id, project=self.project_id, zone=self.zone).execute()
self.resources[job_id]['status'] = translate_table[instance['status']]
statuses.append(translate_table[instance['status']])
return statuses
|
Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Args:
- job_ids (list) : A list of job identifiers
Returns:
- A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',
'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderException or its subclasses
|
25,542 |
def get_connected_devices():
all_daplinks = []
all_interfaces = _get_interfaces()
for interface in all_interfaces:
try:
new_daplink = DAPAccessCMSISDAP(None, interface=interface)
all_daplinks.append(new_daplink)
except DAPAccessIntf.TransferError:
logger = logging.getLogger(__name__)
logger.error('Failed to create DAPAccessCMSISDAP device', exc_info=session.Session.get_current().log_tracebacks)  # message literal lost; placeholder
return all_daplinks
|
Return an array of all mbed boards connected
|
25,543 |
def set_app(name, site, settings=None):
    # NOTE: the key and message string literals in this state were lost in
    # extraction; the 'changes'/'failures'/'comment'/'result' keys below
    # follow the usual salt state conventions and are a reconstruction.
    ret = {'name': name,
           'changes': {},
           'comment': str(),
           'result': None}
    if not settings:
        ret['comment'] = 'No settings to change provided.'
        ret['result'] = True
        return ret
    ret_settings = {
        'changes': {},
        'failures': {},
    }
    current_settings = __salt__['win_iis.get_webapp_settings'](name=name,
                                                               site=site,
                                                               settings=settings.keys())
    for setting in settings:
        if str(settings[setting]) != str(current_settings[setting]):
            ret_settings['changes'][setting] = {'old': current_settings[setting],
                                                'new': settings[setting]}
    if not ret_settings['changes']:
        ret['comment'] = 'Settings already contain the provided values.'
        ret['result'] = True
        return ret
    elif __opts__['test']:
        ret['comment'] = 'Settings will be changed.'
        ret['changes'] = ret_settings
        return ret
    __salt__['win_iis.set_webapp_settings'](name=name, site=site,
                                            settings=settings)
    new_settings = __salt__['win_iis.get_webapp_settings'](name=name, site=site,
                                                           settings=settings.keys())
    for setting in settings:
        if str(settings[setting]) != str(new_settings[setting]):
            ret_settings['failures'][setting] = {'old': current_settings[setting],
                                                 'new': new_settings[setting]}
            ret_settings['changes'].pop(setting, None)
    if ret_settings['failures']:
        ret['comment'] = 'Some settings failed to change.'
        ret['changes'] = ret_settings
        ret['result'] = False
    else:
        ret['comment'] = 'Set settings to contain the provided values.'
        ret['changes'] = ret_settings['changes']
        ret['result'] = True
    return ret
|
.. versionadded:: 2017.7.0
Set the value of the setting for an IIS web application.
.. note::
This function only configures existing app. Params are case sensitive.
:param str name: The IIS application.
:param str site: The IIS site name.
:param str settings: A dictionary of the setting names and their values.
Available settings:
- ``physicalPath`` - The physical path of the webapp
- ``applicationPool`` - The application pool for the webapp
- ``userName`` "connectAs" user
- ``password`` "connectAs" password for user
:rtype: bool
Example of usage:
.. code-block:: yaml
site0-webapp-setting:
win_iis.set_app:
- name: app0
- site: Default Web Site
- settings:
userName: domain\\user
password: pass
physicalPath: c:\inetpub\wwwroot
applicationPool: appPool0
|
25,544 |
def get_mock_personalization_dict():
# NOTE: the dictionary-key literals were lost in extraction; the keys below
# follow the sendgrid-python Personalization field names.
mock_pers = dict()
mock_pers['to_list'] = [To("[email protected]",
"Example User"),
To("[email protected]",
"Example User")]
mock_pers['cc_list'] = [To("[email protected]",
"Example User"),
To("[email protected]",
"Example User")]
mock_pers['bcc_list'] = [To("[email protected]"),
To("[email protected]")]
mock_pers['subject'] = ("Hello World from the Personalized "
"SendGrid Python Library")
mock_pers['headers'] = [Header("X-Test", "test"),
Header("X-Mock", "true")]
mock_pers['substitutions'] = [Substitution("%name%", "Example User"),
Substitution("%city%", "Denver")]
mock_pers['custom_args'] = [CustomArg("user_id", "343"),
CustomArg("type", "marketing")]
mock_pers['send_at'] = 1443636843
return mock_pers
|
Get a dict of personalization mock.
|
25,545 |
def _process_execute_error(self, msg):
content = msg['content']
traceback = ''.join(content['traceback']) + '\n'
if False:  # html styling disabled; ename/ename_styled are not defined in this path
traceback = traceback.replace(ename, ename_styled)
self._append_html(traceback)
else:
self._append_plain_text(traceback)
|
Reimplemented for IPython-style traceback formatting.
|
25,546 |
def released(self, unit, lock, timestamp):
interval = _utcnow() - timestamp
self.msg('Leader released lock {} for {} after {}'.format(lock, unit,
interval))  # message literal lost in extraction; placeholder
|
Called on the leader when it has released a lock.
By default, does nothing but log messages. Override if you
need to perform additional housekeeping when a lock is released,
for example recording timestamps.
|
25,547 |
def p_suffix(self, length=None, elipsis=False):
"Return the rest of the input"
if length is not None:
result = self.input[self.pos:self.pos + length]
if elipsis and len(result) == length:
result += "..."
return result
return self.input[self.pos:]
|
Return the rest of the input
|
25,548 |
def on_message(self, ws, message):
m = json.loads(message)
self.logger.debug(m)
if m.get("s", 0):
self.sequence = m["s"]
if m["op"] == self.DISPATCH:
if m["t"] == "READY":
for channel in m["d"]["private_channels"]:
if len(channel["recipients"]) == 1:
self.channels[channel["id"]] = User(channel["recipients"][0])
self.logger.info("added channel for %s", self.channels[channel["id"]])
self.session = m["d"]["session_id"]
self.con_connect(User(m["d"]["user"]))
elif m["t"] == "GUILD_CREATE":
pass
elif m["t"] == "MESSAGE_CREATE":
self.con_message(Message(m["d"]))
elif m["op"] == self.HELLO:
interval = int(m["d"]["heartbeat_interval"] / 1000)
self.h = Heartbeat(self, interval)
self.h.daemon = True
self.h.start()
elif m["op"] == self.HEARTBEAT_ACK:
pass
else:
self.logger.debug(m)
|
Todo
|
25,549 |
def pause_writing(self):
if not self.is_closing():
self._can_send.clear()
self.transport.pause_reading()
|
Transport calls when the send buffer is full.
|
25,550 |
def abs(x, context=None):
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_abs,
(BigFloat._implicit_convert(x),),
context,
)
|
Return abs(x).
|
25,551 |
def angular_errors(hyp_axes):
ax = N.sqrt(hyp_axes)
return tuple(N.arctan2(ax[-1],ax[:-1]))
|
Minimum and maximum angular errors
corresponding to 1st and 2nd axes
of PCA distribution.
Ordered as [minimum, maximum] angular error.
|
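A usage sketch, assuming `N` is the module's numpy import and `hyp_axes` holds squared axis lengths:

```python
import numpy as N
hyp_axes = N.array([16.0, 4.0, 1.0])  # squared hyperbolic axis lengths
print(angular_errors(hyp_axes))
# -> (arctan2(1, 4), arctan2(1, 2)): smallest axis against each larger one
```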
25,552 |
def add_on_connection_close_callback(self):
self._logger.debug('Adding connection close callback')  # message literal lost; reconstructed from context
self._connection.add_on_close_callback(self.on_connection_closed)
|
Add an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
|
25,553 |
def get_bootstrap_from_recipes(cls, recipes, ctx):
info('Trying to find a bootstrap that matches the given recipes.')  # message literal lost; reconstructed
bootstraps = [cls.get_bootstrap(name, ctx)
for name in cls.list_bootstraps()]
acceptable_bootstraps = []
for bs in bootstraps:
if not bs.can_be_chosen_automatically:
continue
possible_dependency_lists = expand_dependencies(bs.recipe_depends)
for possible_dependencies in possible_dependency_lists:
ok = True
for recipe in possible_dependencies:
recipe = Recipe.get_recipe(recipe, ctx)
if any([conflict in recipes for conflict in recipe.conflicts]):
ok = False
break
for recipe in recipes:
try:
recipe = Recipe.get_recipe(recipe, ctx)
except ValueError:
conflicts = []
else:
conflicts = recipe.conflicts
if any([conflict in possible_dependencies
for conflict in conflicts]):
ok = False
break
if ok and bs not in acceptable_bootstraps:
acceptable_bootstraps.append(bs)
info('Found {} acceptable bootstraps: {}'.format(  # message literal lost; reconstructed
len(acceptable_bootstraps),
[bs.name for bs in acceptable_bootstraps]))
if acceptable_bootstraps:
info('Using the first of these: {}'  # message literal lost; reconstructed
.format(acceptable_bootstraps[0].name))
return acceptable_bootstraps[0]
return None
|
Returns a bootstrap whose recipe requirements do not conflict with
the given recipes.
|
25,554 |
def get_diff_endpoints_from_commit_range(repo, commit_range):
if not commit_range:
raise ValueError('commit_range cannot be empty')  # message literal lost; reconstructed
result = re_find(COMMIT_RANGE_REGEX, commit_range)
if not result:
raise ValueError(
"Expected a commit range of the form 'a..b' or 'a...b', "
'got {!r}'.format(commit_range))
# NOTE: the regex group names were lost in extraction; 'a'/'b'/'dots'
# are reconstructed placeholders.
a, b = result['a'], result['b']
a, b = repo.rev_parse(a), repo.rev_parse(b)
if result['dots'] == '...':
a = one_or_raise(repo.merge_base(a, b))
return a, b
|
Get endpoints of a diff given a commit range
The resulting endpoints can be diffed directly::
a, b = get_diff_endpoints_from_commit_range(repo, commit_range)
a.diff(b)
For details on specifying git diffs, see ``git diff --help``.
For details on specifying revisions, see ``git help revisions``.
Args:
repo (git.Repo): Repo object initialized with project root
commit_range (str): commit range as would be interpreted by ``git
diff`` command. Unfortunately only patterns of the form ``a..b``
and ``a...b`` are accepted. Note that the latter pattern finds the
merge-base of a and b and uses it as the starting point for the
diff.
Returns:
Tuple[git.Commit, git.Commit]: starting commit, ending commit (
inclusive)
Raises:
ValueError: commit_range is empty or ill-formed
See also:
<https://stackoverflow.com/q/7251477>
|
25,555 |
def _subset_by_support(orig_vcf, cmp_calls, data):
cmp_vcfs = [x["vrn_file"] for x in cmp_calls]
out_file = "%s-inensemble.vcf.gz" % utils.splitext_plus(orig_vcf)[0]
if not utils.file_uptodate(out_file, orig_vcf):
with file_transaction(data, out_file) as tx_out_file:
cmd = "bedtools intersect -header -wa -f 0.5 -r -a {orig_vcf} -b "
for cmp_vcf in cmp_vcfs:
cmd += "<(bcftools view -f %s) " % cmp_vcf
cmd += "| bgzip -c > {tx_out_file}"
do.run(cmd.format(**locals()), "Subset calls by those present in Ensemble output")
return vcfutils.bgzip_and_index(out_file, data["config"])
|
Subset orig_vcf to calls also present in any of the comparison callers.
|
25,556 |
def reverse_dependencies(self, ireqs):
ireqs_as_cache_values = [self.as_cache_key(ireq) for ireq in ireqs]
return self._reverse_dependencies(ireqs_as_cache_values)
|
Returns a lookup table of reverse dependencies for all the given ireqs.
Since this is all static, it only works if the dependency cache
contains the complete data, otherwise you end up with a partial view.
This is typically no problem if you use this function after the entire
dependency tree is resolved.
|
25,557 |
def prompt(test_input = None):
if test_input is not None:
if isinstance(test_input, list) and len(test_input):
choice = test_input.pop(0)
elif isinstance(test_input, list):
choice = ''
else:
choice = test_input
else:
try:
choice = raw_input()
except NameError:  # raw_input does not exist on Python 3
choice = input()
return choice
|
Prompt function that works for Python2 and Python3
:param test_input: Value to be returned when testing
:return: Value typed by user (or passed in argument when testing)
|
25,558 |
def drop_genes(self, build=None):
if build:
LOG.info("Dropping the hgnc_gene collection, build %s", build)
self.hgnc_collection.delete_many({'build': build})
else:
LOG.info("Dropping the hgnc_gene collection")
self.hgnc_collection.drop()
|
Delete the genes collection
|
25,559 |
def check_array_struct(array):
try:
arr = np.array(array)
except Exception:
raise HydraError("Array %s is not valid."%(array,))
if type(arr[0]) is list:
raise HydraError("Array %s is not valid."%(array,))
|
Check to ensure arrays are symmetrical, for example:
[[1, 2, 3], [1, 2]] is invalid
|
25,560 |
def stock2fa(stock):
seqs = {}
for line in stock:
# NOTE: string literals lost in extraction; '#' (annotation lines), '//'
# (terminator) and the '/' coordinate separator follow the Stockholm
# format; the re.split pattern is a placeholder.
if line.startswith('#') is False and line.startswith('//') is False and len(line) > 3:
id, seq = line.strip().split()
id = id.rsplit('/', 1)[0]
id = re.split('_', id, 1)[-1]
if id not in seqs:
seqs[id] = []
seqs[id].append(seq)
if line.startswith('//'):
break
return seqs
|
convert stockholm to fasta
|
25,561 |
def has_edge(self, edge):
u, v = edge
return (u, v) in self.edge_properties
|
Return whether an edge exists.
@type edge: tuple
@param edge: Edge.
@rtype: boolean
@return: Truth-value for edge existence.
|
25,562 |
def get(self, column_name):
column_name = column_name.lower()
for c in self.columns:
if c.name == column_name:
return c
return None
|
Retrieve a column from the list with name value :code:`column_name`
:param str column_name: The name of the column to get
:return: :class:`~giraffez.types.Column` with the specified name, or :code:`None` if it does not exist.
|
25,563 |
def unmount(self):
if not self.mounted:
return
cmd = 'fusermount -u %s' % self.mount_point_local  # command literal lost in extraction; fusermount assumed
shell_exec(cmd)
if self.mounted:
self._kill()
shell_exec(cmd)
self._mount_point_local_delete()
|
Unmounts the sftp system if it's currently mounted.
|
25,564 |
def kitchen_merge(backend, source_kitchen, target_kitchen):
click.secho('%s - Merging Kitchen %s into Kitchen %s' % (get_datetime(), source_kitchen, target_kitchen), fg='green')  # message/color literals lost in extraction; reconstructed
check_and_print(DKCloudCommandRunner.merge_kitchens_improved(backend.dki, source_kitchen, target_kitchen))
|
Merge two Kitchens
|
25,565 |
def remove_overlap(self, begin, end=None):
hitlist = self.at(begin) if end is None else self.overlap(begin, end)
for iv in hitlist:
self.remove(iv)
|
Removes all intervals overlapping the given point or range.
Completes in O((r+m)*log n) time, where:
* n = size of the tree
* m = number of matches
* r = size of the search range (this is 1 for a point)
|
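A usage sketch with the `intervaltree` package, which this method appears to belong to:

```python
from intervaltree import IntervalTree

t = IntervalTree.from_tuples([(1, 4), (3, 8), (10, 12)])
t.remove_overlap(2, 5)   # removes (1, 4) and (3, 8)
print(sorted(t))         # only Interval(10, 12) remains
```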
25,566 |
def LookupChain(lookup_func_list):
def MoreFormatters(formatter_name):
for lookup_func in lookup_func_list:
formatter_func = lookup_func(formatter_name)
if formatter_func is not None:
return formatter_func
return MoreFormatters
|
Returns a *function* suitable for passing as the more_formatters argument
to Template.
NOTE: In Java, this would be implemented using the 'Composite' pattern. A
*list* of formatter lookup functions behaves the same as a *single* formatter
lookup function.
Note the distinction between formatter *lookup* functions and formatter
functions here.
|
25,567 |
def wait_for_edge(self, pin, edge):
self.bbio_gpio.wait_for_edge(self.mraa_gpio.Gpio(pin), self._edge_mapping[edge])
|
Wait for an edge. Pin should be type IN. Edge must be RISING,
FALLING or BOTH.
|
25,568 |
def attachviewers(self, profiles):
if self.metadata:
template = None
for profile in profiles:
if isinstance(self, CLAMInputFile):
for t in profile.input:
if self.metadata.inputtemplate == t.id:
template = t
break
elif isinstance(self, CLAMOutputFile) and self.metadata and self.metadata.provenance:
for t in profile.outputtemplates():
if self.metadata.provenance.outputtemplate_id == t.id:
template = t
break
else:
raise NotImplementedError
if template:
break
if template and template.viewers:
for viewer in template.viewers:
self.viewers.append(viewer)
if template and template.converters:
for converter in template.converters:
self.converters.append(converter)
|
Attach viewers *and converters* to file, automatically scan all profiles for outputtemplate or inputtemplate
|
25,569 |
def write_crc32(fo, bytes):
data = crc32(bytes) & 0xFFFFFFFF
fo.write(pack('>I', data))
|
A 4-byte, big-endian CRC32 checksum
|
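A usage sketch, assuming the imports the function needs (`zlib.crc32` and `struct.pack`):

```python
import io
from struct import pack
from zlib import crc32

buf = io.BytesIO()
write_crc32(buf, b"hello")
print(buf.getvalue().hex())  # 4 bytes, big-endian: '3610a686'
```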
25,570 |
def generate_entry_label(entry):
if isinstance(entry, MultiEntry):
return " + ".join([latexify_ion(e.name) for e in entry.entry_list])
else:
return latexify_ion(latexify(entry.name))
|
Generates a label for the pourbaix plotter
Args:
entry (PourbaixEntry or MultiEntry): entry to get a label for
|
25,571 |
def get_atoms(structure, **kwargs):
if not structure.is_ordered:
raise ValueError("ASE Atoms only supports ordered structures")
symbols = [str(site.specie.symbol) for site in structure]
positions = [site.coords for site in structure]
cell = structure.lattice.matrix
return Atoms(symbols=symbols, positions=positions, pbc=True,
cell=cell, **kwargs)
|
Returns ASE Atoms object from pymatgen structure.
Args:
structure: pymatgen.core.structure.Structure
**kwargs: other keyword args to pass into the ASE Atoms constructor
Returns:
ASE Atoms object
|
25,572 |
def _parse_acl_config(self, acl_config):
parsed_acls = dict()
# NOTE: key literals lost in extraction; 'aclList'/'name'/'sequence'/'text'
# follow the Arista eAPI response layout and are reconstructed.
for acl in acl_config['aclList']:
parsed_acls[acl['name']] = set()
for rule in acl['sequence']:
parsed_acls[acl['name']].add(rule['text'])
return parsed_acls
|
Parse configured ACLs and rules
ACLs are returned as a dict of rule sets:
{<eos_acl1_name>: set([<eos_acl1_rules>]),
<eos_acl2_name>: set([<eos_acl2_rules>]),
...,
}
|
25,573 |
def _gen_last_current_relation(self, post_id):
last_post_id = self.get_secure_cookie('last_post_uid')  # cookie-name literal lost in extraction; placeholder
if last_post_id:
last_post_id = last_post_id.decode()
self.set_secure_cookie('last_post_uid', post_id)
if last_post_id and MPost.get_by_uid(last_post_id):
self._add_relation(last_post_id, post_id)
|
Generate the relation for the post and last post viewed.
|
25,574 |
def update_attributes(self, updates):
if not isinstance(updates, dict):
updates = updates.to_dict()
for sdk_key, spec_key in self._get_attributes_map().items():
attr = '_%s' % sdk_key  # format literal lost in extraction; private-attribute prefix assumed
if spec_key in updates and not hasattr(self, attr):
setattr(self, attr, updates[spec_key])
|
Update attributes.
|
25,575 |
def deaccent(text):
norm = unicodedata.normalize("NFD", text)
result = "".join(ch for ch in norm if unicodedata.category(ch) != )
return unicodedata.normalize("NFC", result)
|
Remove accentuation from the given string.
|
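How this works: NFD splits each accented character into a base letter plus combining marks (category `Mn`), the marks are dropped, and NFC recomposes the remainder. A quick sketch:

```python
print(deaccent("Šéf chômage"))  # -> "Sef chomage"
```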
25,576 |
def _handle_aui(self, data):
msg = AUIMessage(data)
self.on_aui_message(message=msg)
return msg
|
Handle AUI messages.
:param data: RF message to parse
:type data: string
:returns: :py:class`~alarmdecoder.messages.AUIMessage`
|
25,577 |
def clear(self):
self.filename = ''
self.filehandler = 0
self.station_name = ''
self.rec_dev_id = ''
self.rev_year = 0000
self.TT = 0
self.A = 0
self.D = 0
self.An = []
self.Ach_id = []
self.Aph = []
self.Accbm = []
self.uu = []
self.a = []
self.b = []
self.skew = []
self.min = []
self.max = []
self.primary = []
self.secondary = []
self.PS = []
self.Dn = []
self.Dch_id = []
self.Dph = []
self.Dccbm = []
self.y = []
self.lf = 0
self.nrates = 0
self.samp = []
self.endsamp = []
self.start = [00,00,0000,00,00,0.0]
self.trigger = [00,00,0000,00,00,0.0]
self.ft = ''
self.timemult = 0.0
self.DatFileContent = ''
|
Clear the internal (private) variables of the class.
|
25,578 |
def process_function(self, call_node, definition):
self.function_call_index += 1
saved_function_call_index = self.function_call_index
def_node = definition.node
saved_variables, first_node = self.save_local_scope(
def_node.lineno,
saved_function_call_index
)
args_mapping, first_node = self.save_def_args_in_temp(
call_node.args,
Arguments(def_node.args),
call_node.lineno,
saved_function_call_index,
first_node
)
self.filenames.append(definition.path)
self.create_local_scope_from_def_args(
call_node.args,
Arguments(def_node.args),
def_node.lineno,
saved_function_call_index
)
function_nodes, first_node = self.visit_and_get_function_nodes(
definition,
first_node
)
self.filenames.pop()
self.restore_saved_local_scope(
saved_variables,
args_mapping,
def_node.lineno
)
self.return_handler(
call_node,
function_nodes,
saved_function_call_index,
first_node
)
self.function_return_stack.pop()
self.function_definition_stack.pop()
return self.nodes[-1]
|
Processes a user defined function when it is called.
Increments self.function_call_index each time it is called, we can refer to it as N in the comments.
Make e.g. save_N_LHS = assignment.LHS for each AssignmentNode. (save_local_scope)
Create e.g. temp_N_def_arg1 = call_arg1_label_visitor.result for each argument.
Visit the arguments if they're calls. (save_def_args_in_temp)
Create e.g. def_arg1 = temp_N_def_arg1 for each argument. (create_local_scope_from_def_args)
Visit and get function nodes. (visit_and_get_function_nodes)
Loop through each save_N_LHS node and create an e.g.
foo = save_1_foo or, if foo was a call arg, foo = arg_mapping[foo]. (restore_saved_local_scope)
Create e.g. ~call_1 = ret_func_foo RestoreNode. (return_handler)
Notes:
Page 31 in the original thesis, but changed a little.
We don't have to return the ~call_1 = ret_func_foo RestoreNode made in return_handler,
because it's the last node anyway, that we return in this function.
e.g. ret_func_foo gets assigned to visit_Return.
Args:
call_node(ast.Call) : The node that calls the definition.
definition(LocalModuleDefinition): Definition of the function being called.
Returns:
Last node in self.nodes, probably the return of the function appended to self.nodes in return_handler.
|
25,579 |
def _ge_from_lt(self, other):
op_result = self.__lt__(other)
if op_result is NotImplemented:
return NotImplemented
return not op_result
|
Return a >= b. Computed by @total_ordering from (not a < b).
|
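This is the derivation pattern `functools.total_ordering` uses: a class supplies `__eq__` and `__lt__`, and the remaining comparisons are derived. A minimal sketch:

```python
from functools import total_ordering

@total_ordering
class Version:
    def __init__(self, n):
        self.n = n
    def __eq__(self, other):
        return self.n == other.n
    def __lt__(self, other):
        return self.n < other.n

assert Version(2) >= Version(1)  # derived as "not (a < b)", exactly as above
```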
25,580 |
def find_egg_entry_point(self, object_type, name=None):
if name is None:
name = 'main'
possible = []
for protocol_options in object_type.egg_protocols:
for protocol in protocol_options:
pkg_resources.require(self.spec)
entry = pkg_resources.get_entry_info(
self.spec,
protocol,
name)
if entry is not None:
possible.append((entry.load(), protocol, entry.name))
break
if not possible:
dist = pkg_resources.get_distribution(self.spec)
raise LookupError(
"Entry point %r not found in egg %r (dir: %s; protocols: %s; "
"entry_points: %s)"
% (name, self.spec,
dist.location,
', '.join(_flatten(object_type.egg_protocols)),
', '.join(_flatten([
dictkeys(pkg_resources.get_entry_info(self.spec, prot, name) or {})
for prot in protocol_options] or []))))  # 'or' fallback literal lost in extraction; [] assumed
if len(possible) > 1:
raise LookupError(
"Ambiguous entry points for %r in egg %r (protocols: %s)"
% (name, self.spec, ', '.join(_flatten(protocol_options))))
return possible[0]
|
Returns the (entry_point, protocol) for the with the given
``name``.
|
25,581 |
def trim_args(kwds):
reject_key = ("type", "types", "configure")
reject_val = (None, ())
kwargs = {
k: v for k, v in kwds.items() if k not in reject_key and v not in reject_val
}
for k, v in kwargs.items():
if k in ("to", "cc", "bcc", "attachments"):
kwargs[k] = list(kwargs[k])
return kwargs
|
Gets rid of args with value of None, as well as select keys.
|
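A usage sketch: None-valued entries and the rejected keys are dropped, and recipient fields are normalized to lists:

```python
kwds = {"to": ("[email protected]",), "subject": "hi", "cc": None, "type": "text"}
print(trim_args(kwds))  # -> {'to': ['[email protected]'], 'subject': 'hi'}
```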
25,582 |
def find(self, group=None, element=None, name=None, VR=None):
results = self.read()
if name is not None:
def find_name(data_element):
return data_element.name.lower() == name.lower()
return filter(find_name, results)
if group is not None:
def find_group(data_element):
return (data_element.tag['group'] == group  # key literals lost; 'group'/'element' inferred from context
or int(data_element.tag['group'], 16) == group)
results = filter(find_group, results)
if element is not None:
def find_element(data_element):
return (data_element.tag['element'] == element
or int(data_element.tag['element'], 16) == element)
results = filter(find_element, results)
if VR is not None:
def find_VR(data_element):
return data_element.VR.lower() == VR.lower()
results = filter(find_VR, results)
return results
|
Searches for data elements in the DICOM file given the filters
supplied to this method.
:param group: Hex decimal for the group of a DICOM element e.g. 0x002
:param element: Hex decimal for the element value of a DICOM element e.g. 0x0010
:param name: Name of the DICOM element, e.g. "Modality"
:param VR: Value Representation of the DICOM element, e.g. "PN"
|
25,583 |
def get_snippet_by_name(cls, name):
name_with_dir_separators = name.replace('.', os.path.sep)
loaded = yaml_loader.YamlLoader.load_yaml_by_relpath(cls.snippets_dirs,
name_with_dir_separators + '.yaml')
if loaded:
return cls._create_snippet(name, *loaded)
raise exceptions.SnippetNotFoundException('no such snippet: {name}'.
format(name=name_with_dir_separators))
|
name is in dotted format, e.g. topsnippet.something.wantedsnippet
|
25,584 |
def _check_inputs(z, m):
try:
nz = len(z)
z = np.array(z)
except TypeError:
z = np.array([z])
nz = len(z)
try:
nm = len(m)
m = np.array(m)
except TypeError:
m = np.array([m])
nm = len(m)
if (z < 0).any() or (m < 0).any():
raise ValueError('z and m must be non-negative')  # message literal lost; reconstructed
if nz != nm and nz > 1 and nm > 1:
raise ValueError('z and m must be the same length, or one must be scalar')  # message literal lost; reconstructed
else:
if type(z) != np.ndarray:
z = np.array(z)
if type(m) != np.ndarray:
m = np.array(m)
return z, m
|
Check inputs are arrays of same length or array and a scalar.
|
25,585 |
def parse(self, data):
graph = self._init_graph()
# NOTE: key literals lost in extraction; the names below follow the
# NetJSON 'NetworkGraph' specification.
if 'type' not in data or data['type'] != 'NetworkGraph':
raise ParserError('Parse error, not a NetworkGraph object')
required_keys = ['protocol', 'version', 'metric', 'nodes', 'links']
for key in required_keys:
if key not in data:
raise ParserError('Parse error, "{0}" key not found'.format(key))
self.protocol = data['protocol']
self.version = data['version']
self.revision = data.get('revision')
self.metric = data['metric']
for node in data['nodes']:
graph.add_node(node['id'],
label=node['label'] if 'label' in node else None,
local_addresses=node.get('local_addresses', []),
**node.get('properties', {}))
for link in data['links']:
try:
source = link["source"]
dest = link["target"]
cost = link["cost"]
except KeyError as e:
raise ParserError('Parse error, "%s" key not found in link' % e)
properties = link.get('properties', {})
graph.add_edge(source, dest, weight=cost, **properties)
return graph
|
Converts a NetJSON 'NetworkGraph' object
to a NetworkX Graph object,which is then returned.
Additionally checks for protocol version, revision and metric.
|
25,586 |
def get_smart_contract_event_by_height(self, height: int, is_full: bool = False) -> List[dict]:
payload = self.generate_json_rpc_payload(RpcMethod.GET_SMART_CONTRACT_EVENT, [height, 1])
response = self.__post(self.__url, payload)
if is_full:
return response
event_list = response['result']
if event_list is None:
event_list = list()
return event_list
|
This interface is used to get the corresponding smart contract event based on the height of block.
:param height: a decimal height value.
:param is_full:
:return: the information of smart contract event in dictionary form.
|
25,587 |
def _kp2(A, B):
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)  # einsum spec lost in extraction; this index pattern realizes the per-slice Kronecker product
|
Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
|
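Since the einsum spec was reconstructed above, here is a sanity check against `np.kron` applied slice by slice (the index order `(j, l, k, m)` reshaped to `(j*l, k*m)` is exactly the Kronecker layout):

```python
import numpy as np

A = np.random.rand(5, 2, 3)
B = np.random.rand(5, 4, 2)
out = _kp2(A, B)                              # shape (5, 8, 6)
assert np.allclose(out[0], np.kron(A[0], B[0]))
```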
25,588 |
def create_storage_account(access_token, subscription_id, rgname, account_name, location,
storage_type='Standard_LRS'):
# NOTE: path literals lost in extraction; reconstructed from the Azure ARM
# storage REST API.
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.Storage/storageAccounts/', account_name,
'?api-version=', STORAGE_API])
storage_body = {'location': location}
storage_body['sku'] = {'name': storage_type}
storage_body['kind'] = 'Storage'
body = json.dumps(storage_body)
return do_put(endpoint, body, access_token)
|
Create a new storage account in the named resource group, with the named location.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
account_name (str): Name of the new storage account.
location (str): Azure data center location. E.g. westus.
storage_type (str): Premium or Standard, local or globally redundant.
Defaults to Standard_LRS.
Returns:
HTTP response. JSON body of storage account properties.
|
25,589 |
def add_event(
request,
template='swingtime/add_event.html',  # template literal lost in extraction; a swingtime-style default assumed
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
dtstart = None
if request.method == 'POST':
event_form = event_form_class(request.POST)
recurrence_form = recurrence_form_class(request.POST)
if event_form.is_valid() and recurrence_form.is_valid():
event = event_form.save()
recurrence_form.save(event)
return http.HttpResponseRedirect(event.get_absolute_url())
else:
if 'dtstart' in request.GET:
try:
dtstart = parser.parse(request.GET['dtstart'])
except(TypeError, ValueError) as exc:
logging.warning(exc)
dtstart = dtstart or datetime.now()
event_form = event_form_class()
recurrence_form = recurrence_form_class(initial={'dtstart': dtstart})
return render(
request,
template,
{'dtstart': dtstart, 'event_form': event_form, 'recurrence_form': recurrence_form}
)
|
Add a new ``Event`` instance and 1 or more associated ``Occurrence``s.
Context parameters:
``dtstart``
a datetime.datetime object representing the GET request value if present,
otherwise None
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
|
25,590 |
def tai_jd(self, jd):
tai = _to_array(jd)
t = Time(self, tai + tt_minus_tai)
t.tai = tai
return t
|
Build a `Time` from a TAI Julian date.
Supply the International Atomic Time (TAI) as a Julian date:
>>> t = ts.tai_jd(2456675.56640625)
>>> t.tai
2456675.56640625
>>> t.tai_calendar()
(2014, 1, 18, 1, 35, 37.5)
|
25,591 |
def reset_calibrators(self, parameter):
req = mdb_pb2.ChangeParameterRequest()
req.action = mdb_pb2.ChangeParameterRequest.RESET_CALIBRATORS
calib_info = req.defaultCalibrator
url = '/processors/{}/{}/parameters/{}'.format(  # URL template lost in extraction; placeholder
self._instance, self._processor, parameter)
response = self._client.post_proto(url, data=req.SerializeToString())
|
Reset all calibrators for the specified parameter to their original MDB value.
|
25,592 |
def download_image(self, device_label, image_id, file_name):
response = None
try:
response = requests.get(
urls.download_image(self._giid, device_label, image_id),
headers={
'Cookie': 'vid={}'.format(self._vid)},  # header literal lost in extraction; cookie format assumed
stream=True)
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
with open(file_name, 'wb') as image_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
image_file.write(chunk)
|
Download image taken by a smartcam
Args:
device_label (str): device label of camera
image_id (str): image id from image series
file_name (str): path to file
|
25,593 |
def _create(self, title, heads, refresh=None, path_start=None):
    # NOTE: the HTML tag-name, CSS-class and label string literals in this
    # method were lost in extraction; the tag names, classes, and labels
    # below are a plausible reconstruction, not the original values.
    doc, tag, text = Doc().tagtext()
    doc.asis('<!DOCTYPE html>')
    heads["Date"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    heads["Pass rate"] = self.results.pass_rate()
    heads["Pass rate excluding retries"] = self.results.pass_rate(include_retries=False)
    with tag('html'):
        with tag('head'):
            doc.asis(self.head)
            if refresh:
                doc.asis('<meta http-equiv="refresh" content="' + str(refresh) + '">')
        with tag('body', id='body'):
            with tag('h1'):
                text(title)
            with tag('table'):
                for head in heads:
                    with tag('tr'):
                        with tag('th', width="100px"):
                            text(head)
                        with tag('td'):
                            text(heads[head])
                with tag('tr'):
                    with tag('th'):
                        text('Count')
                    with tag('td'):
                        text(str(self.summary["count"]))
                with tag('tr'):
                    with tag('th'):
                        text('Pass')
                    with tag('td'):
                        text(str(self.summary["pass"]))
                with tag('tr'):
                    with tag('th'):
                        text('Fail')
                    with tag('td'):
                        text(str(self.summary["fail"]))
                with tag('tr'):
                    with tag('th'):
                        text('Inconclusive')
                    with tag('td'):
                        text(str(self.summary["inconclusive"]))
                with tag('tr'):
                    with tag('th'):
                        text('Skip')
                    with tag('td'):
                        text(str(self.summary["skip"]))
                with tag('tr'):
                    with tag('th'):
                        text('Duration')
                    with tag('td'):
                        text(self.duration_to_string(self.summary["duration"]))
                with tag('tr'):
                    with tag('th'):
                        text('{} version'.format(get_fw_name()))
                    with tag('td'):
                        text(get_fw_version())
            with tag('table', style='border-collapse: collapse;'):
                with tag('tr'):
                    with tag('th'):
                        text("Test Case")
                    with tag('th'):
                        text("Verdict")
                    with tag('th'):
                        text("Fail Reason")
                    with tag('th'):
                        text("Skip Reason")
                    with tag('th'):
                        text("Retried")
                    with tag('th'):
                        text("Duration")
                for result in self.results:
                    if result.success:
                        klass = 'pass'
                    elif result.inconclusive:
                        klass = 'inconclusive'
                    else:
                        klass = 'fail'
                    with tag('tr', klass='item_%s' % klass, onclick='toggleRow(this)'):
                        with tag('td', width="200px"):
                            text(result.get_tc_name())
                        with tag('td', width="100px"):
                            if result.success:
                                color = 'green'
                            elif result.failure:
                                color = 'red'
                            else:
                                color = 'black'
                            with tag('font', color=color):
                                text(result.get_verdict())
                        with tag('td', width="350px"):
                            text(hex_escape_str(result.fail_reason))
                        with tag('td', width="300px"):
                            text(result.skip_reason if result.skipped() else "")
                        with tag('td', width="50px"):
                            text("Yes" if result.retries_left != 0 else "No")
                        with tag('td', width="100px"):
                            text(str(result.duration))
                    with tag('tr', klass='hidden'):
                        with tag('td', colspan="5"):
                            if hasattr(result, 'tc_git_info') and \
                                    result.tc_git_info and \
                                    "scm_link" in result.tc_git_info:
                                link = result.tc_git_info['scm_link']
                                with tag('a', href=link):
                                    text(link)
                                doc.stag('br')
                            for fil in result.logfiles:
                                filepath = os.path.relpath(fil, path_start)
                                with tag('a', href=filepath):
                                    text(filepath)
                                doc.stag('br')
    return doc.getvalue()
|
Internal create method, uses yattag to generate a html document with result data.
:param title: Title of report
:param heads: Headers for report
:param refresh: If set to True, adds a HTTP-EQUIV="refresh" to the report
:param path_start: path to file where this is report is to be stored.
:return: yattag document.
|
25,594 |
def upload_file_and_send_file_offer(self, file_name, user_id, data=None, input_file_path=None,
                                    content_type='application/octet-stream', auto_open=False,
                                    prevent_share=False, scope='default'):
    # NOTE: several string literals (defaults, parameter names, URI) were
    # lost in extraction; the values below are placeholders.
    if input_file_path:
        with open(input_file_path, 'rb') as f:
            data = f.read()
    if not data:
        raise ValueError('Either data or input_file_path must be provided')
    params = {
        'file_name': file_name,
        'user_id': user_id,
        'auto_open': 'true' if auto_open else 'false',
        'prevent_share': 'true' if prevent_share else 'false',
    }
    return _post(
        token=self.oauth.get_app_token(scope),
        uri='/files?' + urllib.urlencode(params),
        data=data,
        content_type=content_type
    )
|
Upload a file of any type to store and return a FileId once file offer has been sent.
No user authentication required
|
25,595 |
def run(self, host, port, **options):
self.registry.debug = True
debugged = DebuggedJsonRpcApplication(self, evalex=True)
run_simple(host, port, debugged, use_reloader=True, **options)
|
For debugging purposes, you can run this as a standalone server.
.. WARNING:: **Security vulnerability**
This uses :class:`DebuggedJsonRpcApplication` to assist debugging. If you want to use
this in production, you should run :class:`Server` as a standard WSGI app with
`uWSGI <https://uwsgi-docs.readthedocs.org/en/latest/>`_ or another similar WSGI server.
.. versionadded:: 0.1.0
|
25,596 |
def get_aws_s3_handle(config_map):
# NOTE: the URL fragments and config-key literals were lost in extraction;
# the key names below are placeholders.
url = 'https://s3.amazonaws.com/' + config_map['s3_bucket'] + '/'
if not AWS_CLIENT.is_aws_s3_client_set():
client = boto3.client(
's3',
aws_access_key_id=config_map['aws_access_key_id'],
aws_secret_access_key=config_map['aws_secret_access_key']
)
AWS_CLIENT.set_aws_s3_client(client)
else:
client = AWS_CLIENT.s3
return client, url
|
Convenience function for getting AWS S3 objects
Added by [email protected], Jan 9, 2015
Added to aws_adapter build by [email protected], Jan 25, 2015, and
added support for Configuration
May 25, 2017: Switch to boto3
|
25,597 |
def object_formatter(v, c, m, p):
endpoint = current_app.config['PIDSTORE_OBJECT_ENDPOINTS'].get(  # config key lost in extraction; invenio-pidstore name assumed
m.object_type)
if endpoint and m.object_uuid:
return Markup('<a href="{0}">{1}</a>'.format(  # markup literal lost; reconstructed
url_for(endpoint, id=m.object_uuid),
_('View')))
return ''
|
Format object view link.
|
25,598 |
def _assert_is_color(value):
if not isinstance(value, tuple) or len(value) != 3:
raise ValueError("Color must be a RGB tuple.")
if not all(0 <= x <= 255 for x in value):
raise ValueError("RGB values of color must be between 0 and 255.")
|
Assert that the given value is a valid RGB color.
:param value: The value to check.
|
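Usage sketch, assuming the function above:

```python
_assert_is_color((255, 128, 0))  # OK
try:
    _assert_is_color((256, 0, 0))
except ValueError as e:
    print(e)  # RGB values of color must be between 0 and 255.
```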
25,599 |
def plot_cost(scores=np.random.rand(100), thresh=0.5, noise=0):
c = pd.DataFrame(index=np.arange(0, 1, 0.01))
if isinstance(thresh, (int, float)):
thresh = [thresh]
elif not isinstance(thresh, (pd.Series, np.ndarray, list, tuple)):
thresh = np.arange(0, 1, .2)
cost_fun.fun = spec_from_thresh
for t in thresh:
labels = (scores / t / scores.max() / 1.00001).astype(int)
cost_fun.target = t
c['thresh_{}'.format(int(t * 10))] = np.array([cost_fun(x, labels, scores, verbose=True) for x in c.index])  # column-label format lost in extraction; placeholder
c.plot()
plt.show(block=False)
|
Plot the cost function topology (contours for each of several targets)
|