Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
3,800 | def GetChildrenByPriority(self, allow_external=True):
for child in sorted(self.OpenChildren(), key=lambda x: x.PRIORITY):
if not allow_external and child.EXTERNAL:
continue
if child.Get(child.Schema.ACTIVE):
yield child | Generator that yields active filestore children in priority order. |
3,801 | def freeze_parameter(self, name):
i = self.get_parameter_names(include_frozen=True).index(name)
self.unfrozen_mask[i] = False | Freeze a parameter by name
Args:
name: The name of the parameter |
3,802 | def add_row(self, id_):
row = self._parser.new_row(id_)
self._rows.append(row)
return row | Add a new row to the pattern.
:param id_: the id of the row |
3,803 | def boundary_maximum_linear(graph, xxx_todo_changeme1):
(gradient_image, spacing) = xxx_todo_changeme1
gradient_image = scipy.asarray(gradient_image)
max_intensity = float(numpy.abs(gradient_image).max())
def boundary_term_linear(intensities):
intensities /= max_intensity
intensities = (1. - intensities)
intensities[intensities == 0.] = sys.float_info.min
return intensities
__skeleton_maximum(graph, gradient_image, boundary_term_linear, spacing) | r"""
Boundary term processing adjacent voxels maximum value using a linear relationship.
An implementation of a boundary term, suitable to be used with the
`~medpy.graphcut.generate.graph_from_voxels` function.
The same as `boundary_difference_linear`, but working on the gradient image instead
of the original. See there for details.
Parameters
----------
graph : GCGraph
The graph to add the weights to.
gradient_image : ndarray
The gradient image.
spacing : sequence of float or False
A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
`False`, no distance based weighting of the graph edges is performed.
Notes
-----
This function requires the gradient image to be passed along. That means that
`~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the
gradient image. |
3,804 | def namedb_get_name_from_name_hash128( cur, name_hash128, block_number ):
unexpired_query, unexpired_args = namedb_select_where_unexpired_names( block_number )
select_query = "SELECT name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \
"WHERE name_hash128 = ? AND revoked = 0 AND " + unexpired_query + ";"
args = (name_hash128,) + unexpired_args
name_rows = namedb_query_execute( cur, select_query, args )
name_row = name_rows.fetchone()
if name_row is None:
return None
return name_row[0] | Given the hexlified 128-bit hash of a name, get the name. |
3,805 | def append_value_continuation(self, linenum, indent, continuation):
frame = self.current_frame()
assert isinstance(frame,FieldFrame) or isinstance(frame,ValueContinuationFrame)
if isinstance(frame, FieldFrame):
assert frame.indent < indent and frame.container.contains(ROOT_PATH, frame.field_name)
if isinstance(frame, ValueContinuationFrame):
assert frame.indent == indent and frame.container.contains(ROOT_PATH, frame.field_name)
self.pop_frame()
field_value = frame.field_value + + continuation
frame.container.put_field(ROOT_PATH, frame.field_name, field_value)
frame = ValueContinuationFrame(linenum, indent, frame.path, frame.container, frame.field_name, field_value)
self.push_frame(frame) | :param linenum: The line number of the frame.
:type linenum: int
:param indent: The indentation level of the frame.
:type indent: int
:param continuation:
:type continuation: str |
3,806 | def _distance(self):
return np.average(self.min_kl, weights=self.f.weights) | Compute the distance function d(f,g,\pi), Eq. (3) |
3,807 | def match(self, node, results=None):
if not isinstance(node, Leaf):
return False
return BasePattern.match(self, node, results) | Override match() to insist on a leaf node. |
3,808 | def get_download_url(self, instance, default=None):
download = default
download = "{url}/@@download/{fieldname}/{filename}".format(
url=api.get_url(instance),
fieldname=self.get_field_name(),
filename=self.get_filename(instance),
)
return download | Calculate the download url |
3,809 | def fromMimeData(self, data):
if data.hasText():
self.insert(data.text())
return (QtCore.QByteArray(), False) | Paste the clipboard data at the current cursor position.
This method also adds another undo-object to the undo-stack.
.. note:: This method forcefully interrupts the ``QsciInternal``
pasting mechanism by returning an empty MIME data element.
This is not an elegant implementation, but the best I
could come up with at the moment. |
3,810 | def heartbeat(self):
unique_id = self.new_unique_id()
message = {
: ,
: unique_id,
}
self._send(message)
return unique_id | Heartbeat request to keep session alive. |
3,811 | def space_acl(args):
r = fapi.get_workspace_acl(args.project, args.workspace)
fapi._check_response_code(r, 200)
result = dict()
for user, info in sorted(r.json()[].items()):
result[user] = info[]
return result | Retrieve access control list for a workspace |
3,812 | def ruamelindex(self, strictindex):
return (
self.key_association.get(strictindex, strictindex)
if self.is_mapping()
else strictindex
) | Get the ruamel equivalent of a strict parsed index.
E.g. 0 -> 0, 1 -> 2, parsed-via-slugify -> Parsed via slugify |
3,813 | def send_packet(self, pk, expected_reply=(), resend=False, timeout=0.2):
self._send_lock.acquire()
if self.link is not None:
if len(expected_reply) > 0 and not resend and \
self.link.needs_resending:
pattern = (pk.header,) + expected_reply
logger.debug(
,
pattern)
new_timer = Timer(timeout,
lambda: self._no_answer_do_retry(pk,
pattern))
self._answer_patterns[pattern] = new_timer
new_timer.start()
elif resend:
pattern = expected_reply
if pattern in self._answer_patterns:
logger.debug()
if self._answer_patterns[pattern]:
new_timer = Timer(timeout,
lambda:
self._no_answer_do_retry(
pk, pattern))
self._answer_patterns[pattern] = new_timer
new_timer.start()
else:
logger.debug(,
self._answer_patterns)
self.link.send_packet(pk)
self.packet_sent.call(pk)
self._send_lock.release() | Send a packet through the link interface.
pk -- Packet to send
expected_reply -- Tuple of values identifying the expected reply; if
non-empty, a retry timer is armed until a matching reply is received |
3,814 | def _maybeCleanSessions(self):
sinceLast = self._clock.seconds() - self._lastClean
if sinceLast > self.sessionCleanFrequency:
self._cleanSessions() | Clean expired sessions if it's been long enough since the last clean. |
3,815 | def _get_best_effort_ndims(x,
expect_ndims=None,
expect_ndims_at_least=None,
expect_ndims_no_more_than=None):
ndims_static = _get_static_ndims(
x,
expect_ndims=expect_ndims,
expect_ndims_at_least=expect_ndims_at_least,
expect_ndims_no_more_than=expect_ndims_no_more_than)
if ndims_static is not None:
return ndims_static
return tf.rank(x) | Get static ndims if possible. Fallback on `tf.rank(x)`. |
3,816 | def total_members_in_score_range_in(
self, leaderboard_name, min_score, max_score):
return self.redis_connection.zcount(
leaderboard_name, min_score, max_score) | Retrieve the total members in a given score range from the named leaderboard.
@param leaderboard_name Name of the leaderboard.
@param min_score [float] Minimum score.
@param max_score [float] Maximum score.
@return the total members in a given score range from the named leaderboard. |
3,817 | def delayed_redraw(self):
with self._defer_lock:
whence = self._defer_whence
self._defer_whence = self._defer_whence_reset
flag = self._defer_flag
self._defer_flag = False
if flag:
self.redraw_now(whence=whence) | Handle delayed redrawing of the canvas. |
3,818 | def apply_transform(self, matrix):
matrix = np.asanyarray(matrix,
order=,
dtype=np.float64)
if matrix.shape != (4, 4):
raise ValueError()
self.face_normals = new_face_normals
self.vertex_normals = new_vertex_normals
self._cache.clear(exclude=[
,
,
,
,
,
,
,
,
,
,
,
])
self._cache.id_set()
log.debug()
return self | Transform mesh by a homogeneous transformation matrix.
Does the bookkeeping to avoid recomputing things so this function
should be used rather than directly modifying self.vertices
if possible.
Parameters
----------
matrix : (4, 4) float
Homogeneous transformation matrix |
3,819 | def update_browsers(self, *args, **kwargs):
sel = self.prjbrws.selected_indexes(0)
if not sel:
return
prjindex = sel[0]
if not prjindex.isValid():
prj = None
else:
prjitem = prjindex.internalPointer()
prj = prjitem.internal_data()
self.set_project_banner(prj)
releasetype = self.get_releasetype()
self.update_shot_browser(prj, releasetype)
self.update_asset_browser(prj, releasetype) | Update the shot and the assetbrowsers
:returns: None
:rtype: None
:raises: None |
3,820 | def train(self):
start = time.time()
result = self._train()
assert isinstance(result, dict), "_train() needs to return a dict."
if RESULT_DUPLICATE in result:
return result
result = result.copy()
self._iteration += 1
self._iterations_since_restore += 1
if result.get(TIME_THIS_ITER_S) is not None:
time_this_iter = result[TIME_THIS_ITER_S]
else:
time_this_iter = time.time() - start
self._time_total += time_this_iter
self._time_since_restore += time_this_iter
result.setdefault(DONE, False)
if result.get(TIMESTEPS_THIS_ITER) is not None:
if self._timesteps_total is None:
self._timesteps_total = 0
self._timesteps_total += result[TIMESTEPS_THIS_ITER]
self._timesteps_since_restore += result[TIMESTEPS_THIS_ITER]
if result.get(EPISODES_THIS_ITER) is not None:
if self._episodes_total is None:
self._episodes_total = 0
self._episodes_total += result[EPISODES_THIS_ITER]
result.setdefault(TIMESTEPS_TOTAL, self._timesteps_total)
result.setdefault(EPISODES_TOTAL, self._episodes_total)
result.setdefault(TRAINING_ITERATION, self._iteration)
if result.get("mean_loss"):
result.setdefault("neg_mean_loss", -result["mean_loss"])
now = datetime.today()
result.update(
experiment_id=self._experiment_id,
date=now.strftime("%Y-%m-%d_%H-%M-%S"),
timestamp=int(time.mktime(now.timetuple())),
time_this_iter_s=time_this_iter,
time_total_s=self._time_total,
pid=os.getpid(),
hostname=os.uname()[1],
node_ip=self._local_ip,
config=self.config,
time_since_restore=self._time_since_restore,
timesteps_since_restore=self._timesteps_since_restore,
iterations_since_restore=self._iterations_since_restore)
self._log_result(result)
return result | Runs one logical iteration of training.
Subclasses should override ``_train()`` instead to return results.
This class automatically fills the following fields in the result:
`done` (bool): training is terminated. Filled only if not provided.
`time_this_iter_s` (float): Time in seconds this iteration
took to run. This may be overridden in order to override the
system-computed time difference.
`time_total_s` (float): Accumulated time in seconds for this
entire experiment.
`experiment_id` (str): Unique string identifier
for this experiment. This id is preserved
across checkpoint / restore calls.
`training_iteration` (int): The index of this
training iteration, e.g. call to train().
`pid` (str): The pid of the training process.
`date` (str): A formatted date of when the result was processed.
`timestamp` (str): A UNIX timestamp of when the result
was processed.
`hostname` (str): Hostname of the machine hosting the training
process.
`node_ip` (str): Node ip of the machine hosting the training
process.
Returns:
A dict that describes training progress. |
3,821 | def get_language(query: str) -> str:
query = query.lower()
for language in LANGUAGES:
if query.endswith(language):
return language
return | Tries to work out the highlight.js language of a given file name or
shebang. Returns an empty string if none match. |
3,822 | def setCheckedRecords(self, records, column=0, parent=None):
if parent is None:
for i in range(self.topLevelItemCount()):
item = self.topLevelItem(i)
try:
has_record = item.record() in records
except AttributeError:
has_record = False
if has_record:
item.setCheckState(column, Qt.Checked)
self.setCheckedRecords(records, column, item)
else:
for c in range(parent.childCount()):
item = parent.child(c)
try:
has_record = item.record() in records
except AttributeError:
has_record = False
if has_record:
item.setCheckState(column, Qt.Checked)
self.setCheckedRecords(records, column, item) | Sets the checked items based on the given list of records.
:param records | [<orb.Table>, ..]
parent | <QTreeWidgetItem> || None |
3,823 | def save(self, request):
comment = self.get_comment_object()
obj = comment.content_object
if request.user.is_authenticated():
comment.user = request.user
comment.by_author = request.user == getattr(obj, "user", None)
comment.ip_address = ip_for_request(request)
comment.replied_to_id = self.data.get("replied_to")
lookup = {
"content_type": comment.content_type,
"object_pk": comment.object_pk,
"user_name": comment.user_name,
"user_email": comment.user_email,
"user_url": comment.user_url,
"replied_to_id": comment.replied_to_id,
}
for duplicate in self.get_comment_model().objects.filter(**lookup):
if (duplicate.submit_date.date() == comment.submit_date.date() and
duplicate.comment == comment.comment):
return duplicate
comment.save()
comment_was_posted.send(sender=comment.__class__, comment=comment,
request=request)
notify_emails = split_addresses(settings.COMMENTS_NOTIFICATION_EMAILS)
if notify_emails:
subject = ugettext("New comment for: ") + str(obj)
context = {
"comment": comment,
"comment_url": add_cache_bypass(comment.get_absolute_url()),
"request": request,
"obj": obj,
}
send_mail_template(subject, "email/comment_notification",
settings.DEFAULT_FROM_EMAIL, notify_emails,
context)
return comment | Saves a new comment and sends any notification emails. |
3,824 | def publish(self, message):
message_data = self._to_data(message)
self._encode_invoke(topic_publish_codec, message=message_data) | Publishes the message to all subscribers of this topic
:param message: (object), the message to be published. |
3,825 | def _set_alert(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=alert.alert, is_container=, presence=False, yang_name="alert", rest_name="alert", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__alert = t
if hasattr(self, ):
self._set() | Setter method for alert, mapped from YANG variable /rbridge_id/threshold_monitor/interface/policy/area/alert (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_alert is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_alert() directly. |
3,826 | def is_pure_name(path):
return (
not os.path.isabs(path)
and len(os.path.dirname(path)) == 0
and not os.path.splitext(path)[1]
and path !=
and path !=
) | Return True if path is a name and not a file path.
Parameters
----------
path : str
Path (can be absolute, relative, etc.)
Returns
-------
bool
True if path is a name of config in config dir, not file path. |
3,827 | def _flatten_lane_details(runinfo):
out = []
for ldetail in runinfo["details"]:
if "project_name" not in ldetail and ldetail["description"] == "control":
ldetail["project_name"] = "control"
for i, barcode in enumerate(ldetail.get("multiplex", [{}])):
cur = copy.deepcopy(ldetail)
cur["name"] = "%s-%s" % (ldetail["name"], i + 1)
cur["description"] = barcode.get("name", ldetail["description"])
cur["bc_index"] = barcode.get("sequence", "")
cur["project_name"] = clean_name(ldetail["project_name"])
out.append(cur)
return out | Provide flattened lane information with multiplexed barcodes separated. |
3,828 | def dbg(*objects, file=sys.stderr, flush=True, **kwargs):
"Helper function to print to stderr and flush"
print(*objects, file=file, flush=flush, **kwargs) | Helper function to print to stderr and flush |
3,829 | def GetOrderKey(self):
context_attributes = []
context_attributes.extend(ExceptionWithContext.CONTEXT_PARTS)
context_attributes.extend(self._GetExtraOrderAttributes())
tokens = []
for context_attribute in context_attributes:
tokens.append(getattr(self, context_attribute, None))
return tokens | Return a list of values that can be used to sort problems into a consistent order.
Returns:
A list of values. |
3,830 | def get_option(self, option):
value = getattr(self, option, None)
if value is not None:
return value
return getattr(settings, "COUNTRIES_{0}".format(option.upper())) | Get a configuration option, trying the options attribute first and
falling back to a Django project setting. |
3,831 | def swd_write(self, output, value, nbits):
pDir = binpacker.pack(output, nbits)
pIn = binpacker.pack(value, nbits)
bitpos = self._dll.JLINK_SWD_StoreRaw(pDir, pIn, nbits)
if bitpos < 0:
raise errors.JLinkException(bitpos)
return bitpos | Writes bytes over SWD (Serial Wire Debug).
Args:
self (JLink): the ``JLink`` instance
output (int): the output buffer offset to write to
value (int): the value to write to the output buffer
nbits (int): the number of bits needed to represent the ``output`` and
``value``
Returns:
The bit position of the response in the input buffer. |
3,832 | def operations(self, op_types=None):
if not op_types:
op_types = [, , , , ]
while self._handle.tell() < self._eof:
current_time = mgz.util.convert_to_timestamp(self._time / 1000)
try:
operation = mgz.body.operation.parse_stream(self._handle)
except (ConstructError, ValueError):
raise MgzError()
if operation.type == :
if operation.action.type in ACTIONS_WITH_PLAYER_ID:
counter = self._actions_by_player[operation.action.player_id]
counter.update([operation.action.type])
else:
self._actions_without_player.update([operation.action.type])
if operation.type == and isinstance(operation.action.type, int):
print(operation.action)
if operation.type == :
self._time += operation.time_increment
if operation.type == and operation.action.type == :
self._postgame = operation
if operation.type == :
action = Action(operation, current_time)
self._parse_action(action, current_time)
if operation.type == :
self._parse_chat(chat)
if operation.type in op_types:
yield chat | Process operation stream. |
3,833 | def top(num_processes=5, interval=3):
result = []
start_usage = {}
for pid in psutil.pids():
try:
process = psutil.Process(pid)
user, system = process.cpu_times()
except ValueError:
user, system, _, _ = process.cpu_times()
except psutil.NoSuchProcess:
continue
start_usage[process] = user + system
time.sleep(interval)
usage = set()
for process, start in six.iteritems(start_usage):
try:
user, system = process.cpu_times()
except ValueError:
user, system, _, _ = process.cpu_times()
except psutil.NoSuchProcess:
continue
now = user + system
diff = now - start
usage.add((diff, process))
for idx, (diff, process) in enumerate(reversed(sorted(usage))):
if num_processes and idx >= num_processes:
break
if not _get_proc_cmdline(process):
cmdline = _get_proc_name(process)
else:
cmdline = _get_proc_cmdline(process)
info = {: cmdline,
: _get_proc_username(process),
: _get_proc_status(process),
: _get_proc_pid(process),
: _get_proc_create_time(process),
: {},
: {},
}
for key, value in six.iteritems(process.cpu_times()._asdict()):
info[][key] = value
for key, value in six.iteritems(process.memory_info()._asdict()):
info[][key] = value
result.append(info)
return result | Return a list of top CPU consuming processes during the interval.
num_processes = return the top N CPU consuming processes
interval = the number of seconds to sample CPU usage over
CLI Examples:
.. code-block:: bash
salt '*' ps.top
salt '*' ps.top 5 10 |
3,834 | def check_columns(self, check_views=True):
if check_views:
query = .format(self.exclude_schema)
else:
query = .format(self.exclude_schema)
return self.__check_equals(query) | Check if the columns in all tables are equals.
Parameters
----------
check_views: bool
if True, check the columns of all the tables and views, if
False check only the columns of the tables
Returns
-------
bool
True if the columns are the same
False otherwise
list
A list with the differences |
3,835 | def _parse_bands(self, band_input):
all_bands = AwsConstants.S2_L1C_BANDS if self.data_source is DataSource.SENTINEL2_L1C else \
AwsConstants.S2_L2A_BANDS
if band_input is None:
return all_bands
if isinstance(band_input, str):
band_list = band_input.split()
elif isinstance(band_input, list):
band_list = band_input.copy()
else:
raise ValueError()
band_list = [band.strip().split()[0] for band in band_list]
band_list = [band for band in band_list if band != ]
if not set(band_list) <= set(all_bands):
raise ValueError(.format(band_list, all_bands))
return band_list | Parses class input and verifies band names.
:param band_input: input parameter `bands`
:type band_input: str or list(str) or None
:return: verified list of bands
:rtype: list(str) |
3,836 | def tril(array, k=0):
try:
tril_array = np.tril(array, k=k)
except:
tril_array = np.zeros_like(array)
shape = array.shape
otherdims = shape[:-2]
for index in np.ndindex(otherdims):
tril_array[index] = np.tril(array[index], k=k)
return tril_array | Lower triangle of an array.
Return a copy of an array with elements above the k-th diagonal zeroed.
Need a multi-dimensional version here because numpy.tril does not
broadcast for numpy version < 1.9. |
3,837 | def encrypt(self, wif):
if not self.unlocked():
raise WalletLocked
return format(bip38.encrypt(str(wif), self.masterkey), "encwif") | Encrypt the content according to BIP38
:param str wif: Unencrypted key |
3,838 | def find_requirement(self, req, upgrade, ignore_compatibility=False):
all_candidates = self.find_all_candidates(req.name)
compatible_versions = set(
req.specifier.filter(
[str(c.version) for c in all_candidates],
prereleases=(
self.allow_all_prereleases
if self.allow_all_prereleases else None
),
)
)
applicable_candidates = [
c for c in all_candidates if str(c.version) in compatible_versions
]
if applicable_candidates:
best_candidate = max(applicable_candidates,
key=self._candidate_sort_key)
else:
best_candidate = None
if req.satisfied_by is not None:
installed_version = parse_version(req.satisfied_by.version)
else:
installed_version = None
if installed_version is None and best_candidate is None:
logger.critical(
,
req,
.join(
sorted(
{str(c.version) for c in all_candidates},
key=parse_version,
)
)
)
raise DistributionNotFound(
% req
)
best_installed = False
if installed_version and (
best_candidate is None or
best_candidate.version <= installed_version):
best_installed = True
if not upgrade and installed_version is not None:
if best_installed:
logger.debug(
,
installed_version,
)
else:
logger.debug(
,
installed_version,
best_candidate.version,
)
return None
if best_installed:
logger.debug(
,
installed_version,
.join(sorted(compatible_versions, key=parse_version)) or
"none",
)
raise BestVersionAlreadyInstalled
logger.debug(
,
best_candidate.version,
.join(sorted(compatible_versions, key=parse_version))
)
return best_candidate.location | Try to find a Link matching req
Expects req, an InstallRequirement and upgrade, a boolean
Returns a Link if found,
Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise |
3,839 | def select_edges_by(docgraph, layer=None, edge_type=None, data=False):
edge_type_eval = "edge_attribs[] == ".format(edge_type)
layer_eval = " in edge_attribs[]".format(layer)
if layer is not None:
if edge_type is not None:
return select_edges(docgraph, data=data,
conditions=[edge_type_eval, layer_eval])
else:
return select_edges(docgraph, conditions=[layer_eval], data=data)
else:
if edge_type is not None:
return select_edges(docgraph, data=data,
conditions=[edge_type_eval])
else:
return docgraph.edges_iter(data=data) | get all edges with the given edge type and layer.
Parameters
----------
docgraph : DiscourseDocumentGraph
document graph from which the nodes will be extracted
layer : str
name of the layer
edge_type : str
Type of the edges to be extracted (Edge types are defined in the
Enum ``EdgeTypes``).
data : bool
If True, results will include edge attributes.
Returns
-------
edges : generator of str
a container/list of edges (represented as (source node ID, target
node ID) tuples). If data is True, edges are represented as
(source node ID, target node ID, edge attribute dict) tuples. |
3,840 | def create_nginx_config(self):
cfg = .format(self._project_name)
if not self._shared_hosting:
if self._user:
cfg += .format(self._user)
cfg += .format(os.path.join(self._log_dir, \
self._project_name), os.path.join(self._var_dir, self._project_name))
cfg +=
cfg +=
if self._include_mimetypes:
cfg +=
cfg +=
cfg +=
cfg +=
cfg +=
cfg += .format(os.path.join \
(self._log_dir, self._project_name))
cfg +=
cfg +=
cfg +=
cfg += .format(self._port)
if self._server_name:
cfg += .format(self._server_name)
cfg +=
cfg += .format(\
os.path.join(self._var_dir, self._project_name))
cfg +=
cfg +=
cfg +=
cfg +=
cfg +=
cfg +=
cfg +=
if not self._shared_hosting:
cfg +=
f = open(self._nginx_config, )
f.write(cfg)
f.close() | Creates the Nginx configuration for the project |
3,841 | def __update_offset_table(self, fileobj, fmt, atom, delta, offset):
if atom.offset > offset:
atom.offset += delta
fileobj.seek(atom.offset + 12)
data = fileobj.read(atom.length - 12)
fmt = fmt % cdata.uint_be(data[:4])
offsets = struct.unpack(fmt, data[4:])
offsets = [o + (0, delta)[offset < o] for o in offsets]
fileobj.seek(atom.offset + 16)
fileobj.write(struct.pack(fmt, *offsets)) | Update offset table in the specified atom. |
3,842 | def on_left_click(self, event, grid, choices):
row, col = event.GetRow(), event.GetCol()
if col == 0 and self.grid.name != :
default_val = self.grid.GetCellValue(row, col)
msg = "Choose a new name for {}.\nThe new value will propagate throughout the contribution.".format(default_val)
dia = wx.TextEntryDialog(self.grid, msg,
"Rename {}".format(self.grid.name, default_val),
default_val)
res = dia.ShowModal()
if res == wx.ID_OK:
new_val = dia.GetValue()
self.contribution.rename_item(self.grid.name,
default_val, new_val)
self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), clear)
for choice in sorted(choices.items()):
submenu = wx.Menu()
for item in choice[1]:
menuitem = submenu.Append(-1, str(item))
self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), menuitem)
menu.Append(-1, choice[0], submenu)
self.show_menu(event, menu)
if selection:
for row, col in selection:
self.grid.SetCellBackgroundColour(row, col, self.col_color)
self.dispersed_selection = []
self.selection = []
self.grid.ForceRefresh() | creates popup menu when user clicks on the column
if that column is in the list of choices that get a drop-down menu.
allows user to edit the column, but only from available values |
3,843 | def set_insn(self, insn):
self.insn = insn
self.size = len(self.insn) | Set a new raw buffer to disassemble
:param insn: the buffer
:type insn: string |
3,844 | def get_referenced_object(self):
if self._BunqMeTab is not None:
return self._BunqMeTab
if self._BunqMeTabResultResponse is not None:
return self._BunqMeTabResultResponse
if self._BunqMeFundraiserResult is not None:
return self._BunqMeFundraiserResult
if self._Card is not None:
return self._Card
if self._CardDebit is not None:
return self._CardDebit
if self._DraftPayment is not None:
return self._DraftPayment
if self._FeatureAnnouncement is not None:
return self._FeatureAnnouncement
if self._IdealMerchantTransaction is not None:
return self._IdealMerchantTransaction
if self._Invoice is not None:
return self._Invoice
if self._ScheduledPayment is not None:
return self._ScheduledPayment
if self._ScheduledPaymentBatch is not None:
return self._ScheduledPaymentBatch
if self._ScheduledInstance is not None:
return self._ScheduledInstance
if self._MasterCardAction is not None:
return self._MasterCardAction
if self._BankSwitchServiceNetherlandsIncomingPayment is not None:
return self._BankSwitchServiceNetherlandsIncomingPayment
if self._Payment is not None:
return self._Payment
if self._PaymentBatch is not None:
return self._PaymentBatch
if self._RequestInquiryBatch is not None:
return self._RequestInquiryBatch
if self._RequestInquiry is not None:
return self._RequestInquiry
if self._RequestResponse is not None:
return self._RequestResponse
if self._RewardRecipient is not None:
return self._RewardRecipient
if self._RewardSender is not None:
return self._RewardSender
if self._ShareInviteBankInquiryBatch is not None:
return self._ShareInviteBankInquiryBatch
if self._ShareInviteBankInquiry is not None:
return self._ShareInviteBankInquiry
if self._ShareInviteBankResponse is not None:
return self._ShareInviteBankResponse
if self._SofortMerchantTransaction is not None:
return self._SofortMerchantTransaction
if self._TabResultInquiry is not None:
return self._TabResultInquiry
if self._TabResultResponse is not None:
return self._TabResultResponse
if self._TransferwiseTransfer is not None:
return self._TransferwiseTransfer
raise exception.BunqException(self._ERROR_NULL_FIELDS) | :rtype: core.BunqModel
:raise: BunqException |
3,845 | def _docstring_parse(self, blocks):
result = {}
for block, docline, doclength, key in blocks:
doctext = "<doc>{}</doc>".format(" ".join(block))
try:
docs = ET.XML(doctext)
docstart = self.parser.charindex(docline, 0, self.context)
if not key in result:
result[key] = [list(docs), docstart, docstart + doclength]
else:
result[key][0].extend(list(docs))
except ET.ParseError:
msg.warn(doctext)
return result | Parses the XML from the specified blocks of docstrings. |
3,846 | def calls(self):
return WebhookWebhooksCallProxy(self._client, self.sys[].id, self.sys[]) | Provides access to call overview for the given webhook.
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/webhook-calls
:return: :class:`WebhookWebhooksCallProxy <contentful_management.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy>` object.
:rtype: contentful.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy
Usage:
>>> webhook_webhooks_call_proxy = webhook.calls()
<WebhookWebhooksCallProxy space_id="cfexampleapi" webhook_id="my_webhook"> |
3,847 | def write(self, data):
report_size = self.packet_size
if self.ep_out:
report_size = self.ep_out.wMaxPacketSize
for _ in range(report_size - len(data)):
data.append(0)
self.read_sem.release()
if not self.ep_out:
bmRequestType = 0x21
bmRequest = 0x09
wValue = 0x200
wIndex = self.intf_number
self.dev.ctrl_transfer(bmRequestType, bmRequest, wValue, wIndex, data)
return
self.ep_out.write(data)
return | write data on the OUT endpoint associated to the HID interface |
3,848 | def parse(file_or_string):
from mysqlparse.grammar.sql_file import sql_file_syntax
if hasattr(file_or_string, ) and hasattr(file_or_string.read, ):
return sql_file_syntax.parseString(file_or_string.read())
elif isinstance(file_or_string, six.string_types):
return sql_file_syntax.parseString(file_or_string)
else:
raise TypeError("Expected file-like or string object, but got instead.".format(
type_name=type(file_or_string).__name__,
)) | Parse a file-like object or string.
Args:
file_or_string (file, str): File-like object or string.
Returns:
ParseResults: instance of pyparsing parse results. |
3,849 | def get_clean_url(self):
if self.needs_auth:
self.prompt_auth()
url = RepositoryURL(self.url.full_url)
url.username = self.username
url.password = self.password
return url | Retrieve the clean, full URL - including username/password. |
3,850 | def make_action_list(self, item_list, **kwargs):
action_list = []
es_index = get2(kwargs, "es_index", self.es_index)
action_type = kwargs.get("action_type","index")
action_settings = {: action_type,
: es_index}
doc_type = kwargs.get("doc_type", self.doc_type)
if not doc_type:
doc_type = "unk"
id_field = kwargs.get("id_field")
for item in item_list:
action = get_es_action_item(item,
action_settings,
doc_type,
id_field)
action_list.append(action)
return action_list | Generates a list of actions for sending to Elasticsearch |
3,851 | def predict_subsequences(self, sequence_dict, peptide_lengths=None):
sequence_dict = check_sequence_dictionary(sequence_dict)
peptide_lengths = self._check_peptide_lengths(peptide_lengths)
binding_predictions = []
expected_peptides = set([])
normalized_alleles = []
for key, amino_acid_sequence in sequence_dict.items():
for l in peptide_lengths:
for i in range(len(amino_acid_sequence) - l + 1):
expected_peptides.add(amino_acid_sequence[i:i + l])
self._check_peptide_inputs(expected_peptides)
for allele in self.alleles:
allele = normalize_allele_name(allele, omit_dra1=True)
normalized_alleles.append(allele)
request = self._get_iedb_request_params(
amino_acid_sequence, allele)
logger.info(
"Calling IEDB (%s) with request %s",
self.url,
request)
response_df = _query_iedb(request, self.url)
for _, row in response_df.iterrows():
binding_predictions.append(
BindingPrediction(
source_sequence_name=key,
offset=row[] - 1,
allele=row[],
peptide=row[],
affinity=row[],
percentile_rank=row[],
prediction_method_name="iedb-" + self.prediction_method))
self._check_results(
binding_predictions,
alleles=normalized_alleles,
peptides=expected_peptides)
return BindingPredictionCollection(binding_predictions) | Given a dictionary mapping unique keys to amino acid sequences,
run MHC binding predictions on all candidate epitopes extracted from
sequences and return an EpitopeCollection.
Parameters
----------
sequence_dict : dict or string
Mapping of protein identifiers to protein amino acid sequences.
If string then converted to dictionary. |
3,852 | def _glslify(r):
if isinstance(r, string_types):
return r
else:
assert 2 <= len(r) <= 4
return .format(len(r), .join(map(str, r))) | Transform a string or a n-tuple to a valid GLSL expression. |
3,853 | def getFilenameSet(self):
result = set(self.file_dict)
result.discard(inspect.currentframe().f_code.co_filename)
return result | Returns a set of profiled file names.
Note: "file name" is used loosely here. See python documentation for
co_filename, linecache module and PEP302. It may not be a valid
filesystem path. |
3,854 | def print_list(extracted_list, file=None):
if file is None:
file = sys.stderr
for filename, lineno, name, line in extracted_list:
_print(file,
% (filename,lineno,name))
if line:
_print(file, % line.strip()) | Print the list of tuples as returned by extract_tb() or
extract_stack() as a formatted stack trace to the given file. |
3,855 | def flows(args):
def flow_if_not(fun):
if isinstance(fun, iterator):
return fun
elif isinstance(fun, type) and in str(fun.__class__):
return fun
else:
try:
return flow(fun)
except AttributeError:
return fun
return FlowList(map(flow_if_not, args)) | todo : add some example
:param args:
:return: |
3,856 | def find_copies(input_dir, exclude_list):
copies = []
def copy_finder(copies, dirname):
for obj in os.listdir(dirname):
pathname = os.path.join(dirname, obj)
if os.path.isdir(pathname):
continue
if obj in exclude_list:
continue
if obj.endswith():
continue
copies.append(os.path.join(dirname, obj))
dir_visitor(
input_dir,
functools.partial(copy_finder, copies)
)
return copies | find files that are not templates and not
in the exclude_list for copying from template to image |
3,857 | def user_topic_ids(user):
if user.is_super_admin() or user.is_read_only_user():
query = sql.select([models.TOPICS])
else:
query = (sql.select([models.JOINS_TOPICS_TEAMS.c.topic_id])
.select_from(
models.JOINS_TOPICS_TEAMS.join(
models.TOPICS, sql.and_(models.JOINS_TOPICS_TEAMS.c.topic_id == models.TOPICS.c.id,
models.TOPICS.c.state == ))
).where(
sql.or_(models.JOINS_TOPICS_TEAMS.c.team_id.in_(user.teams_ids),
models.JOINS_TOPICS_TEAMS.c.team_id.in_(user.child_teams_ids))))
rows = flask.g.db_conn.execute(query).fetchall()
return [str(row[0]) for row in rows] | Retrieve the list of topics IDs a user has access to. |
3,858 | def solve_discrete_lyapunov(A, B, max_it=50, method="doubling"):
if method == "doubling":
A, B = list(map(np.atleast_2d, [A, B]))
alpha0 = A
gamma0 = B
diff = 5
n_its = 1
while diff > 1e-15:
alpha1 = alpha0.dot(alpha0)
gamma1 = gamma0 + np.dot(alpha0.dot(gamma0), alpha0.conjugate().T)
diff = np.max(np.abs(gamma1 - gamma0))
alpha0 = alpha1
gamma0 = gamma1
n_its += 1
if n_its > max_it:
msg = "Exceeded maximum iterations {}, check input matrics"
raise ValueError(msg.format(n_its))
elif method == "bartels-stewart":
gamma1 = sp_solve_discrete_lyapunov(A, B)
else:
msg = "Check your method input. Should be doubling or bartels-stewart"
raise ValueError(msg)
return gamma1 | r"""
Computes the solution to the discrete lyapunov equation
.. math::
AXA' - X + B = 0
:math:`X` is computed by using a doubling algorithm. In particular, we
iterate to convergence on :math:`X_j` with the following recursions for
:math:`j = 1, 2, \dots` starting from :math:`X_0 = B`, :math:`a_0 = A`:
.. math::
a_j = a_{j-1} a_{j-1}
.. math::
X_j = X_{j-1} + a_{j-1} X_{j-1} a_{j-1}'
Parameters
----------
A : array_like(float, ndim=2)
An n x n matrix as described above. We assume in order for
convergence that the eigenvalues of A have moduli bounded by
unity
B : array_like(float, ndim=2)
An n x n matrix as described above. We assume in order for
convergence that the eigenvalues of A have moduli bounded by
unity
max_it : scalar(int), optional(default=50)
The maximum number of iterations
method : string, optional(default="doubling")
Describes the solution method to use. If it is "doubling" then
uses the doubling algorithm to solve, if it is "bartels-stewart"
then it uses scipy's implementation of the Bartels-Stewart
approach.
Returns
-------
gamma1: array_like(float, ndim=2)
Represents the value :math:`X` |
3,859 | def get_widget(self, request):
return self._update_widget_choices(self.field.formfield(widget=RestrictedSelectWidget).widget) | Field widget is replaced with "RestrictedSelectWidget" because we do not want to use modified widgets for
filtering. |
3,860 | async def jsk_vc_join(self, ctx: commands.Context, *,
destination: typing.Union[discord.VoiceChannel, discord.Member] = None):
destination = destination or ctx.author
if isinstance(destination, discord.Member):
if destination.voice and destination.voice.channel:
destination = destination.voice.channel
else:
return await ctx.send("Member has no voice channel.")
voice = ctx.guild.voice_client
if voice:
await voice.move_to(destination)
else:
await destination.connect(reconnect=True)
await ctx.send(f"Connected to {destination.name}.") | Joins a voice channel, or moves to it if already connected.
Passing a voice channel uses that voice channel.
Passing a member will use that member's current voice channel.
Passing nothing will use the author's voice channel. |
3,861 | def add_edge(self, fr, to):
fr = self.add_vertex(fr)
to = self.add_vertex(to)
self.adjacency[fr].children.add(to)
self.adjacency[to].parents.add(fr) | Add an edge to the graph. Multiple edges between the same vertices will quietly be ignored. N-partite graphs
can be used to permit multiple edges by partitioning the graph into vertices and edges.
:param fr: The name of the origin vertex.
:param to: The name of the destination vertex.
:return: |
3,862 | def build_idx_set(branch_id, start_date):
code_set = branch_id.split("/")
code_set.insert(3, "Rates")
idx_set = {
"sec": "/".join([code_set[0], code_set[1], "Sections"]),
"mag": "/".join([code_set[0], code_set[1], code_set[2], "Magnitude"])}
idx_set["rate"] = "/".join(code_set)
idx_set["rake"] = "/".join([code_set[0], code_set[1], "Rake"])
idx_set["msr"] = "-".join(code_set[:3])
idx_set["geol"] = code_set[0]
if start_date:
idx_set["grid_key"] = "_".join(
branch_id.replace("/", "_").split("_")[:-1])
else:
idx_set["grid_key"] = branch_id.replace("/", "_")
idx_set["total_key"] = branch_id.replace("/", "|")
return idx_set | Builds a dictionary of keys based on the branch code |
3,863 | def parse(self, valstr):
if self._initialized:
raise pycdlibexception.PyCdlibInternalError()
(self.header_indicator, self.platform_id, self.num_section_entries,
self.id_string) = struct.unpack_from(self.FMT, valstr, 0)
self._initialized = True | Parse an El Torito section header from a string.
Parameters:
valstr - The string to parse.
Returns:
Nothing. |
3,864 | def close_files(self):
for name in self:
if getattr(self, % name):
file_ = getattr(self, % name)
file_.close() | Close all files with an activated disk flag. |
3,865 | def _update_counters(self, ti_status):
for key, ti in list(ti_status.running.items()):
ti.refresh_from_db()
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Dont rerun.", ti)
ti_status.running.pop(key)
continue
elif ti.state == State.FAILED:
self.log.error("Task instance %s failed", ti)
ti_status.failed.add(key)
ti_status.running.pop(key)
continue
elif ti.state == State.UP_FOR_RETRY:
self.log.warning("Task instance %s is up for retry", ti)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
elif ti.state == State.UP_FOR_RESCHEDULE:
self.log.warning("Task instance %s is up for reschedule", ti)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
elif ti.state == State.NONE:
self.log.warning(
"FIXME: task instance %s state was set to none externally or "
"reaching concurrency limits. Re-adding task to queue.",
ti
)
ti.set_state(State.SCHEDULED)
ti_status.running.pop(key)
ti_status.to_run[key] = ti | Updates the counters per state of the tasks that were running. Can re-add
tasks to the to-run list if required.
:param ti_status: the internal status of the backfill job tasks
:type ti_status: BackfillJob._DagRunTaskStatus |
3,866 | def to_bson_voronoi_list2(self):
bson_nb_voro_list2 = [None] * len(self.voronoi_list2)
for ivoro, voro in enumerate(self.voronoi_list2):
if voro is None or voro == :
continue
site_voro = []
for nb_dict in voro:
site = nb_dict[]
site_dict = {key: val for key, val in nb_dict.items() if key not in []}
diff = site.frac_coords - self.structure[nb_dict[]].frac_coords
site_voro.append([[nb_dict[], [float(c) for c in diff]],
site_dict])
bson_nb_voro_list2[ivoro] = site_voro
return bson_nb_voro_list2 | Transforms the voronoi_list into a vlist + bson_nb_voro_list, that are BSON-encodable.
:return: [vlist, bson_nb_voro_list], to be used in the as_dict method |
3,867 | def sync(self):
LOGGER.debug("Company.sync")
params = None
if self.id is not None:
params = {: self.id}
elif self.name is not None:
params = {: self.name}
if params is not None:
args = {: , : , : params}
response = CompanyService.requester.call(args)
if response.rc != 0:
LOGGER.warning(
+ self.name + + str(self.id) +
+ str(response.response_content) + + str(response.error_message) +
" (" + str(response.rc) + ")"
)
else:
json_obj = response.response_content
self.id = json_obj[]
self.name = json_obj[]
self.description = json_obj[]
self.applications_ids = json_obj[]
self.ost_ids = json_obj[] | Synchronize self from the Ariane server according to its id (which takes priority) or its name.
:return: |
3,868 | def to_language(locale):
p = locale.find('_')
if p >= 0:
return locale[:p].lower() + '-' + locale[p + 1:].lower()
else:
return locale.lower() | Turns a locale name (en_US) into a language name (en-us).
Taken `from Django <http://bit.ly/1vWACbE>`_. |
3,869 | async def issueClaim(self, schemaId: ID, claimRequest: ClaimRequest,
iA=None,
i=None) -> (Claims, Dict[str, ClaimAttributeValues]):
schemaKey = (await self.wallet.getSchema(schemaId)).getKey()
attributes = self._attrRepo.getAttributes(schemaKey,
claimRequest.userId)
await self._genContxt(schemaId, iA, claimRequest.userId)
(c1, claim) = await self._issuePrimaryClaim(schemaId, attributes,
claimRequest.U)
c2 = await self._issueNonRevocationClaim(schemaId, claimRequest.Ur,
iA,
i) if claimRequest.Ur else None
signature = Claims(primaryClaim=c1, nonRevocClaim=c2)
return (signature, claim) | Issue a claim for the given user and schema.
:param schemaId: The schema ID (reference to claim
definition schema)
:param claimRequest: A claim request containing prover ID and
prover-generated values
:param iA: accumulator ID
:param i: claim's sequence number within accumulator
:return: The claim (both primary and non-revocation) |
3,870 | def swallow_stdout(stream=None):
saved = sys.stdout
if stream is None:
stream = StringIO()
sys.stdout = stream
try:
yield
finally:
sys.stdout = saved | Divert stdout into the given stream
>>> string = StringIO()
>>> with swallow_stdout(string):
... print('hello')
>>> assert string.getvalue().rstrip() == 'hello' |
3,871 | def call_inputhook(self, input_is_ready_func):
self._input_is_ready = input_is_ready_func
def thread():
input_is_ready_func(wait=True)
os.write(self._w, b)
threading.Thread(target=thread).start()
self.inputhook(self)
try:
if not is_windows():
select_fds([self._r], timeout=None)
os.read(self._r, 1024)
except OSError:
pass
self._input_is_ready = None | Call the inputhook. (Called by a prompt-toolkit eventloop.) |
3,872 | def get_long_short_pos(positions):
pos_wo_cash = positions.drop(, axis=1)
longs = pos_wo_cash[pos_wo_cash > 0].sum(axis=1).fillna(0)
shorts = pos_wo_cash[pos_wo_cash < 0].sum(axis=1).fillna(0)
cash = positions.cash
net_liquidation = longs + shorts + cash
df_pos = pd.DataFrame({: longs.divide(net_liquidation, axis=),
: shorts.divide(net_liquidation,
axis=)})
df_pos[] = df_pos[] + df_pos[]
return df_pos | Determines the long and short allocations in a portfolio.
Parameters
----------
positions : pd.DataFrame
The positions that the strategy takes over time.
Returns
-------
df_long_short : pd.DataFrame
Long and short allocations as a decimal
percentage of the total net liquidation |
3,873 | def _decorate_urlconf(urlpatterns, decorator=require_auth, *args, **kwargs):
if isinstance(urlpatterns, (list, tuple)):
for pattern in urlpatterns:
if getattr(pattern, , None):
pattern._callback = decorator(
pattern.callback, *args, **kwargs)
if getattr(pattern, , []):
_decorate_urlconf(
pattern.url_patterns, decorator, *args, **kwargs)
else:
if getattr(urlpatterns, , None):
urlpatterns._callback = decorator(
urlpatterns.callback, *args, **kwargs) | Decorate all urlpatterns by specified decorator |
3,874 | def parse_allele_name(name, species_prefix=None):
original = name
name = name.strip()
if len(name) == 0:
raise ValueError("Cant have allele families
return AlleleName("H-2", gene, "", allele_code)
if len(name) == 0:
raise AlleleParseError("Incomplete MHC allele name: %s" % (original,))
elif not species:
species = "HLA"
if name[0].upper() == "D":
if len(name) == 7:
gene, name = parse_letters(name, 3)
else:
gene, name = parse_alphanum(name, 4)
if gene.isalpha():
gene = gene + "1"
elif len(name) == 5:
gene, name = name[0], name[1:]
elif name[0].isalpha():
gene, name = parse_letters(name)
elif name[0].isdigit():
gene, name = parse_numbers(name)
elif len(name) in (6, 7) and ("*" in name or "-" in name or ":" in name):
gene, name = parse_alphanum(name)
_, name = parse_separator(name)
else:
raise AlleleParseError(
"Can't parse gene name from allele: %s" % (original,))
# if what's left is e.g. "0201" then only parse the
# first two digits as the allele family
if len(name) == 4:
family, name = parse_numbers(name, max_len=2)
else:
family, name = parse_numbers(name, max_len=3)
sep, name = parse_separator(name)
allele_code, rest_of_text = parse_numbers(name)
rest_of_text = rest_of_text.strip()
if len(rest_of_text) > 0:
raise AlleleParseError("The suffix of was not parsed" % (
rest_of_text,
original))
if len(family) == 1:
family = "0" + family
elif len(family) == 3 and family[0] == "0":
family = family[1:]
if len(allele_code) == 0:
allele_code = "01"
elif len(allele_code) == 3 and allele_code[0] == "0":
allele_code = allele_code[1:]
return AlleleName(species, gene, family, allele_code) | Takes an allele name and splits it into four parts:
1) species prefix
2) gene name
3) allele family
4) allele code
If species_prefix is provided, that is used instead of getting the species prefix from the name.
(And in that case, a species prefix in the name will result in an error being raised.)
For example, in all of the following inputs:
"HLA-A*02:01"
"A0201"
"A00201"
The result is a AlleleName object. Example:
AlleleName(
species="HLA", # species prefix
gene="A", # gene name
allele_family="02", # allele family
allele_code="01", # allele code
)
The logic for other species mostly resembles the naming system for humans,
except for mice, rats, and swine, which have archaic nomenclature. |
3,875 | def create_replication_instance(ReplicationInstanceIdentifier=None, AllocatedStorage=None, ReplicationInstanceClass=None, VpcSecurityGroupIds=None, AvailabilityZone=None, ReplicationSubnetGroupIdentifier=None, PreferredMaintenanceWindow=None, MultiAZ=None, EngineVersion=None, AutoMinorVersionUpgrade=None, Tags=None, KmsKeyId=None, PubliclyAccessible=None):
pass | Creates the replication instance using the specified parameters.
See also: AWS API Documentation
:example: response = client.create_replication_instance(
ReplicationInstanceIdentifier='string',
AllocatedStorage=123,
ReplicationInstanceClass='string',
VpcSecurityGroupIds=[
'string',
],
AvailabilityZone='string',
ReplicationSubnetGroupIdentifier='string',
PreferredMaintenanceWindow='string',
MultiAZ=True|False,
EngineVersion='string',
AutoMinorVersionUpgrade=True|False,
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
KmsKeyId='string',
PubliclyAccessible=True|False
)
:type ReplicationInstanceIdentifier: string
:param ReplicationInstanceIdentifier: [REQUIRED]
The replication instance identifier. This parameter is stored as a lowercase string.
Constraints:
Must contain from 1 to 63 alphanumeric characters or hyphens.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
Example: myrepinstance
:type AllocatedStorage: integer
:param AllocatedStorage: The amount of storage (in gigabytes) to be initially allocated for the replication instance.
:type ReplicationInstanceClass: string
:param ReplicationInstanceClass: [REQUIRED]
The compute and memory capacity of the replication instance as specified by the replication instance class.
Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge
:type VpcSecurityGroupIds: list
:param VpcSecurityGroupIds: Specifies the VPC security group to be used with the replication instance. The VPC security group must work with the VPC containing the replication instance.
(string) --
:type AvailabilityZone: string
:param AvailabilityZone: The EC2 Availability Zone that the replication instance will be created in.
Default: A random, system-chosen Availability Zone in the endpoint's region.
Example: us-east-1d
:type ReplicationSubnetGroupIdentifier: string
:param ReplicationSubnetGroupIdentifier: A subnet group to associate with the replication instance.
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
Format: ddd:hh24:mi-ddd:hh24:mi
Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week.
Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun
Constraints: Minimum 30-minute window.
:type MultiAZ: boolean
:param MultiAZ: Specifies if the replication instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set to true .
:type EngineVersion: string
:param EngineVersion: The engine version number of the replication instance.
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: Indicates that minor engine upgrades will be applied automatically to the replication instance during the maintenance window.
Default: true
:type Tags: list
:param Tags: Tags to be associated with the replication instance.
(dict) --
Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with 'aws:' or 'dms:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$').
Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with 'aws:' or 'dms:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$').
:type KmsKeyId: string
:param KmsKeyId: The KMS key identifier that will be used to encrypt the content on the replication instance. If you do not specify a value for the KmsKeyId parameter, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
:type PubliclyAccessible: boolean
:param PubliclyAccessible: Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. A value of false represents an instance with a private IP address. The default value is true .
:rtype: dict
:return: {
'ReplicationInstance': {
'ReplicationInstanceIdentifier': 'string',
'ReplicationInstanceClass': 'string',
'ReplicationInstanceStatus': 'string',
'AllocatedStorage': 123,
'InstanceCreateTime': datetime(2015, 1, 1),
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'AvailabilityZone': 'string',
'ReplicationSubnetGroup': {
'ReplicationSubnetGroupIdentifier': 'string',
'ReplicationSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
]
},
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'ReplicationInstanceClass': 'string',
'AllocatedStorage': 123,
'MultiAZ': True|False,
'EngineVersion': 'string'
},
'MultiAZ': True|False,
'EngineVersion': 'string',
'AutoMinorVersionUpgrade': True|False,
'KmsKeyId': 'string',
'ReplicationInstanceArn': 'string',
'ReplicationInstancePublicIpAddress': 'string',
'ReplicationInstancePrivateIpAddress': 'string',
'ReplicationInstancePublicIpAddresses': [
'string',
],
'ReplicationInstancePrivateIpAddresses': [
'string',
],
'PubliclyAccessible': True|False,
'SecondaryAvailabilityZone': 'string'
}
}
:returns:
Must contain from 1 to 63 alphanumeric characters or hyphens.
First character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens. |
3,876 | def main_loop(self, steps_per_epoch, starting_epoch, max_epoch):
with self.sess.as_default():
self.loop.config(steps_per_epoch, starting_epoch, max_epoch)
self.loop.update_global_step()
try:
self._callbacks.before_train()
self.loop.update_global_step()
for self.loop._epoch_num in range(
self.loop.starting_epoch, self.loop.max_epoch + 1):
logger.info("Start Epoch {} ...".format(self.loop.epoch_num))
self._callbacks.before_epoch()
start_time = time.time()
for self.loop._local_step in range(self.loop.steps_per_epoch):
if self.hooked_sess.should_stop():
return
self.run_step()
self._callbacks.trigger_step()
self._callbacks.after_epoch()
logger.info("Epoch {} (global_step {}) finished, time:{}.".format(
self.loop.epoch_num, self.loop.global_step, humanize_time_delta(time.time() - start_time)))
self._callbacks.trigger_epoch()
logger.info("Training has finished!")
except (StopTraining, tf.errors.OutOfRangeError) as e:
logger.info("Training was stopped by exception {}.".format(str(e)))
except KeyboardInterrupt:
logger.info("Detected Ctrl-C and exiting main loop.")
raise
finally:
self._callbacks.after_train()
self.hooked_sess.close() | Run the main training loop.
Args:
steps_per_epoch, starting_epoch, max_epoch (int): |
3,877 | def list_nodes(conn=None, call=None):
if call == :
raise SaltCloudSystemExit(
)
if not conn:
conn = get_conn()
nodes = conn.list_nodes()
ret = {}
for node in nodes:
ret[node.name] = {
: node.id,
: node.image,
: node.name,
: node.private_ips,
: node.public_ips,
: node.size,
: node_state(node.state)
}
return ret | Return a list of the VMs that are on the provider |
3,878 | def solution_violations(solution, events, slots):
array = converter.solution_to_array(solution, events, slots)
return array_violations(array, events, slots) | Take a solution and return a list of violated constraints
Parameters
----------
solution: list or tuple
a schedule in solution form
events : list or tuple
of resources.Event instances
slots : list or tuple
of resources.Slot instances
Returns
-------
Generator
of a list of strings indicating the nature of the violated
constraints |
3,879 | def get_args():
parser = argparse.ArgumentParser(description=)
parser.add_argument(
, ,
required=True,
action=,
help=
)
parser.add_argument(
, ,
type=int,
default=443,
action=,
help=
)
parser.add_argument(
, ,
required=True,
action=,
help=
)
parser.add_argument(
, ,
required=False,
action=,
help=
)
parser.add_argument(
, ,
required=False,
action=,
help=
)
parser.add_argument(
, ,
required=False,
default=False,
action=,
help=
)
parser.add_argument(
, ,
required=False,
action=,
default=False,
help=t annotate any VM-v--verbosestore_trueVerbose output'
)
return parser.parse_args() | Parse CLI args |
3,880 | def _mirror_groups(self):
target_group_names = frozenset(self._get_groups().get_group_names())
current_group_names = frozenset(
self._user.groups.values_list("name", flat=True).iterator()
)
MIRROR_GROUPS_EXCEPT = self.settings.MIRROR_GROUPS_EXCEPT
MIRROR_GROUPS = self.settings.MIRROR_GROUPS
if isinstance(MIRROR_GROUPS_EXCEPT, (set, frozenset)):
target_group_names = (target_group_names - MIRROR_GROUPS_EXCEPT) | (
current_group_names & MIRROR_GROUPS_EXCEPT
)
elif isinstance(MIRROR_GROUPS, (set, frozenset)):
target_group_names = (target_group_names & MIRROR_GROUPS) | (
current_group_names - MIRROR_GROUPS
)
if target_group_names != current_group_names:
existing_groups = list(
Group.objects.filter(name__in=target_group_names).iterator()
)
existing_group_names = frozenset(group.name for group in existing_groups)
new_groups = [
Group.objects.get_or_create(name=name)[0]
for name in target_group_names
if name not in existing_group_names
]
self._user.groups.set(existing_groups + new_groups) | Mirrors the user's LDAP groups in the Django database and updates the
user's membership. |
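The mirroring above is driven by django-auth-ldap settings; a hedged configuration sketch with placeholder group names (only one of the two settings is consulted per branch):

# settings.py -- group names are illustrative placeholders
AUTH_LDAP_MIRROR_GROUPS = {"staff", "admins"}            # mirror only these groups
# or, alternatively:
AUTH_LDAP_MIRROR_GROUPS_EXCEPT = {"legacy-imported"}     # mirror everything except these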
3,881 | def query_item(self, key, abis):
try:
key = int(key)
        field = 'number'   # column name assumed; original literal stripped in source
    except ValueError:
        try:
            key = int(key, 16)
            field = 'number'
        except ValueError:
            field = 'name'
    arg = and_(getattr(Item, field) == key,
               or_(*(Item.abi == abi for abi in abis)))
return self.session.query(Item).filter(arg).all() | Query items based on system call number or name. |
3,882 | def _set_extensions(self):
self._critical_extensions = set()
    for extension in self['extensions']:  # container key assumed; literal stripped in source
        name = extension['extn_id'].native
        attribute_name = '_%s_value' % name
        if hasattr(self, attribute_name):
            setattr(self, attribute_name, extension['extn_value'].parsed)
        if extension['critical'].native:
self._critical_extensions.add(name)
self._processed_extensions = True | Sets common named extensions to private attributes and creates a list
of critical extensions |
3,883 | def touch_member(config, dcs):
    p = Postgresql(config['postgresql'])
    p.set_state('running')
    p.set_role('master')

    def restapi_connection_string(config):
        protocol = 'https' if config.get('certfile') else 'http'
        connect_address = config.get('connect_address')
        listen = config['listen']
        return '{0}://{1}/patroni'.format(protocol, connect_address or listen)

    data = {
        'conn_url': p.connection_string,
        'api_url': restapi_connection_string(config['restapi']),
        'state': p.state,
        'role': p.role
}
return dcs.touch_member(data, permanent=True) | Rip-off of the ha.touch_member without inter-class dependencies |
3,884 | def _get_event_source_obj(awsclient, evt_source):
event_source_map = {
        # Map keys assumed; the original string literals were stripped in the source.
        'dynamodb': event_source.dynamodb_stream.DynamoDBStreamEventSource,
        'kinesis': event_source.kinesis.KinesisEventSource,
        's3': event_source.s3.S3EventSource,
        'sns': event_source.sns.SNSEventSource,
        'events': event_source.cloudwatch.CloudWatchEventSource,
        'cloudfront': event_source.cloudfront.CloudFrontEventSource,
        'cloudwatch_logs': event_source.cloudwatch_logs.CloudWatchLogsEventSource,
    }
    evt_type = _get_event_type(evt_source)
    event_source_func = event_source_map.get(evt_type, None)
    if not event_source_func:
        raise ValueError('Unknown event source: {0}'.format(
            evt_source['arn']))
return event_source_func(awsclient, evt_source) | Given awsclient, event_source dictionary item
create an event_source object of the appropriate event type
to schedule this event, and return the object. |
3,885 | def run_analysis(self, argv):
args = self._parser.parse_args(argv)
obs = BinnedAnalysis.BinnedObs(irfs=args.irfs,
expCube=args.expcube,
srcMaps=args.srcmaps,
binnedExpMap=args.bexpmap)
like = BinnedAnalysis.BinnedAnalysis(obs,
                                         optimizer='MINUIT',
srcModel=GtMergeSrcmaps.NULL_MODEL,
wmap=None)
like.logLike.set_use_single_fixed_map(False)
print("Reading xml model from %s" % args.srcmdl)
source_factory = pyLike.SourceFactory(obs.observation)
source_factory.readXml(args.srcmdl, BinnedAnalysis._funcFactory, False, True, True)
strv = pyLike.StringVector()
source_factory.fetchSrcNames(strv)
source_names = [strv[i] for i in range(strv.size())]
missing_sources = []
srcs_to_merge = []
for source_name in source_names:
try:
source = source_factory.releaseSource(source_name)
like.logLike.addSource(source)
srcs_to_merge.append(source_name)
except KeyError:
missing_sources.append(source_name)
    comp = like.mergeSources(args.merged, source_names, 'ConstantValue')
like.logLike.getSourceMap(comp.getName())
print("Merged %i sources into %s" % (len(srcs_to_merge), comp.getName()))
if missing_sources:
print("Missed sources: ", missing_sources)
print("Writing output source map file %s" % args.outfile)
like.logLike.saveSourceMaps(args.outfile, False, False)
if args.gzip:
os.system("gzip -9 %s" % args.outfile)
print("Writing output xml file %s" % args.outxml)
like.writeXml(args.outxml) | Run this analysis |
3,886 | def naturalsortkey(s):
return [int(part) if part.isdigit() else part
            for part in re.split(r'(\d+)', s)] | Natural sort order |
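A quick usage sketch; splitting out digit runs makes numeric parts compare as integers rather than strings:

names = ["file10", "file2", "file1"]
print(sorted(names, key=naturalsortkey))  # ['file1', 'file2', 'file10']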
3,887 | def view_surface_app_activity(self) -> list:
output, error = self._execute(
        '-s', self.device_sn, 'shell', 'dumpsys', 'window', 'windows')  # command tokens assumed; literals stripped in source
return re.findall(r"name=([a-zA-Z0-9\.]+/.[a-zA-Z0-9\.]+)", output) | Get package with activity of applications that are running in the foreground. |
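To show what the regular expression above extracts, a standalone sketch run against a fabricated dumpsys-style line (the adb call itself is not reproduced):

import re

sample = "mSurface=Surface(name=com.android.settings/com.android.settings.Settings)"
print(re.findall(r"name=([a-zA-Z0-9\.]+/.[a-zA-Z0-9\.]+)", sample))
# ['com.android.settings/com.android.settings.Settings']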
3,888 | def get(self, path):
    path = path or ''
    path = path.lstrip('/')
    parts = path.split('/')
if not parts[0]:
parts = parts[1:]
statDict = util.lookup(scales.getStats(), parts)
if statDict is None:
self.set_status(404)
self.finish()
return
    outputFormat = self.get_argument('format', default='html')
    query = self.get_argument('query', default=None)
    if outputFormat == 'json':
        formats.jsonFormat(self, statDict, query)
    elif outputFormat == 'prettyjson':
formats.jsonFormat(self, statDict, query, pretty=True)
else:
        formats.htmlHeader(self, '/' + path, self.serverName, query)
formats.htmlFormat(self, tuple(parts), statDict, query)
return None | Renders a GET request, by showing this nodes stats and children. |
3,889 | def ip_allocate(self, public=False):
result = self._client.post(
"{}/ips".format(Instance.api_endpoint),
model=self,
data={
"type": "ipv4",
"public": public,
})
    if 'address' not in result:
        raise UnexpectedResponseError('Unexpected response allocating IP!',
                                      json=result)
    i = IPAddress(self._client, result['address'], result)
return i | Allocates a new :any:`IPAddress` for this Instance. Additional public
IPs require justification, and you may need to open a :any:`SupportTicket`
before you can add one. You may only have, at most, one private IP per
Instance.
:param public: If the new IP should be public or private. Defaults to
private.
:type public: bool
:returns: The new IPAddress
:rtype: IPAddress |
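A hedged usage sketch with the linode_api4 client; the token and Linode ID are placeholders:

from linode_api4 import LinodeClient, Instance

client = LinodeClient("my-api-token")       # placeholder token
instance = client.load(Instance, 12345)     # placeholder Linode ID
new_ip = instance.ip_allocate(public=True)  # public IPs may require a support ticket
print(new_ip.address)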
3,890 | def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
logger.debug("Yielding from append iterator")
if not isinstance(variables, list):
raise ValueError(
f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
)
yield list(
chain.from_iterable(
variable_matrix(item, parent, "product") for item in variables
)
) | This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused |
3,891 | def get_exe_path(cls):
return os.path.abspath(os.path.join(ROOT, cls.bmds_version_dir, cls.exe + ".exe")) | Return the full path to the executable. |
3,892 | def update_vm(vm_ref, vm_config_spec):
vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
try:
task = vm_ref.ReconfigVM_Task(vm_config_spec)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
return vm_ref | Updates the virtual machine configuration with the given object
vm_ref
Virtual machine managed object reference
vm_config_spec
Virtual machine config spec object to update |
3,893 | def get_subhash(hash):
idx = [0xe, 0x3, 0x6, 0x8, 0x2]
mul = [2, 2, 5, 4, 3]
add = [0, 0xd, 0x10, 0xb, 0x5]
b = []
for i in range(len(idx)):
a = add[i]
m = mul[i]
i = idx[i]
t = a + int(hash[i], 16)
v = int(hash[t:t + 2], 16)
        b.append(('%x' % (v * m))[-1])
    return ''.join(b) | Get a second hash based on napiprojekt's hash.
:param str hash: napiprojekt's hash.
:return: the subhash.
:rtype: str |
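A small usage sketch; the 32-character input is a placeholder napiprojekt hash, and the derived 5-character subhash is printed rather than asserted:

video_hash = "0123456789abcdef0123456789abcdef"  # placeholder napiprojekt hash
print(get_subhash(video_hash))  # five hex characters derived from the hash above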
3,894 | def _apply_policy_config(policy_spec, policy_dict):
    log.trace('policy_dict = %s', policy_dict)
    if policy_dict.get('name'):
        policy_spec.name = policy_dict['name']
    if policy_dict.get('description'):
        policy_spec.description = policy_dict['description']
    if policy_dict.get('subprofiles'):
        policy_spec.constraints = pbm.profile.SubProfileCapabilityConstraints()
        subprofiles = []
        for subprofile_dict in policy_dict['subprofiles']:
            subprofile_spec = \
                pbm.profile.SubProfileCapabilityConstraints.SubProfile(
                    name=subprofile_dict['name'])
            cap_specs = []
            if subprofile_dict.get('force_provision'):
                subprofile_spec.forceProvision = \
                    subprofile_dict['force_provision']
            for cap_dict in subprofile_dict['capabilities']:
                prop_inst_spec = pbm.capability.PropertyInstance(
                    id=cap_dict['id']
                )
                setting_type = cap_dict['setting']['type']
                if setting_type == 'set':
                    prop_inst_spec.value = pbm.capability.types.DiscreteSet()
                    prop_inst_spec.value.values = cap_dict['setting']['values']
                elif setting_type == 'range':
                    prop_inst_spec.value = pbm.capability.types.Range()
                    prop_inst_spec.value.max = cap_dict['setting']['max']
                    prop_inst_spec.value.min = cap_dict['setting']['min']
                elif setting_type == 'scalar':
                    prop_inst_spec.value = cap_dict['setting']['value']
                cap_spec = pbm.capability.CapabilityInstance(
                    id=pbm.capability.CapabilityMetadata.UniqueId(
                        id=cap_dict['id'],
                        namespace=cap_dict['namespace']),
                    constraint=[pbm.capability.ConstraintInstance(
                        propertyInstance=[prop_inst_spec])])
                cap_specs.append(cap_spec)
            subprofile_spec.capability = cap_specs
            subprofiles.append(subprofile_spec)
        policy_spec.constraints.subProfiles = subprofiles
    log.trace('policy_spec = %s', policy_spec)
return policy_spec | Applies a policy dictionary to a policy spec |
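For orientation, a sketch of the policy_dict shape the function walks; the key names follow the reconstructed lookups above and should be read as assumptions:

policy_dict = {
    'name': 'gold-storage-policy',
    'description': 'example policy',
    'subprofiles': [{
        'name': 'rule-set-1',
        'force_provision': False,
        'capabilities': [{
            'id': 'hostFailuresToTolerate',   # capability id/namespace are placeholders
            'namespace': 'VSAN',
            'setting': {'type': 'scalar', 'value': 1},
        }],
    }],
}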
3,895 | def best_model(self):
    if hasattr(self, 'halloffame'):
        model = self._params['specification'](
            *self.parse_individual(self.halloffame[0]))
        model.pack_new_sequences(self._params['sequence'])
return model
else:
        raise NameError('The optimiser has not been run yet.') | Rebuilds the top scoring model from an optimisation.
Returns
-------
model: AMPAL
Returns an AMPAL model of the top scoring parameters.
Raises
------
NameError:
Raises a name error if the optimiser has not been run. |
3,896 | def _calculate_values(self, tree, bar_d):
if all([
isinstance(tree, dict),
type(tree) != BarDescriptor
]):
max_val = 0
value = 0
for k in tree:
bar_desc = self._calculate_values(tree[k], bar_d)
tree[k] = (bar_desc, tree[k])
value += bar_desc["value"].value
max_val += bar_desc.get("kwargs", {}).get("max_value", 100)
kwargs = merge_dicts(
[bar_d.get("kwargs", {}),
dict(max_value=max_val)],
deepcopy=True
)
ret_d = merge_dicts(
[bar_d,
dict(value=Value(floor(value)), kwargs=kwargs)],
deepcopy=True
)
return BarDescriptor(ret_d)
elif isinstance(tree, BarDescriptor):
return tree
else:
raise TypeError("Unexpected type {}".format(type(tree))) | Calculate values for drawing bars of non-leafs in ``tree``
Recurses through ``tree``, replaces ``dict``s with
``(BarDescriptor, dict)`` so ``ProgressTree._draw`` can use
the ``BarDescriptor``s to draw the tree |
3,897 | def can_handle(self, data):
try:
requestline, _ = decode_from_bytes(data).split(CRLF, 1)
method, path, version = self._parse_requestline(requestline)
except ValueError:
try:
return self == Mocket._last_entry
except AttributeError:
return False
uri = urlsplit(path)
can_handle = uri.path == self.path and method == self.method
if self._match_querystring:
kw = dict(keep_blank_values=True)
can_handle = can_handle and parse_qs(uri.query, **kw) == parse_qs(self.query, **kw)
if can_handle:
Mocket._last_entry = self
return can_handle | r"""
>>> e = Entry('http://www.github.com/?bar=foo&foobar', Entry.GET, (Response(b'<html/>'),))
>>> e.can_handle(b'GET /?bar=foo HTTP/1.1\r\nHost: github.com\r\nAccept-Encoding: gzip, deflate\r\nConnection: keep-alive\r\nUser-Agent: python-requests/2.7.0 CPython/3.4.3 Linux/3.19.0-16-generic\r\nAccept: */*\r\n\r\n')
False
>>> e = Entry('http://www.github.com/?bar=foo&foobar', Entry.GET, (Response(b'<html/>'),))
>>> e.can_handle(b'GET /?bar=foo&foobar HTTP/1.1\r\nHost: github.com\r\nAccept-Encoding: gzip, deflate\r\nConnection: keep-alive\r\nUser-Agent: python-requests/2.7.0 CPython/3.4.3 Linux/3.19.0-16-generic\r\nAccept: */*\r\n\r\n')
True |
3,898 | def fade(self, fade_in_len=0.0, fade_out_len=0.0, fade_shape='q'):
    fade_shapes = ['q', 'h', 't', 'l', 'p']
if fade_shape not in fade_shapes:
raise ValueError(
"Fade shape must be one of {}".format(" ".join(fade_shapes))
)
if not is_number(fade_in_len) or fade_in_len < 0:
raise ValueError("fade_in_len must be a nonnegative number.")
if not is_number(fade_out_len) or fade_out_len < 0:
raise ValueError("fade_out_len must be a nonnegative number.")
effect_args = []
if fade_in_len > 0:
effect_args.extend([
            'fade', '{}'.format(fade_shape), '{}'.format(fade_in_len)
])
if fade_out_len > 0:
effect_args.extend([
            'reverse', 'fade', '{}'.format(fade_shape),
            '{}'.format(fade_out_len), 'reverse'
])
if len(effect_args) > 0:
self.effects.extend(effect_args)
        self.effects_log.append('fade')
return self | Add a fade in and/or fade out to an audio file.
Default fade shape is 1/4 sine wave.
Parameters
----------
fade_in_len : float, default=0.0
Length of fade-in (seconds). If fade_in_len = 0,
no fade in is applied.
fade_out_len : float, defaut=0.0
Length of fade-out (seconds). If fade_out_len = 0,
no fade in is applied.
fade_shape : str, default='q'
Shape of fade. Must be one of
* 'q' for quarter sine (default),
* 'h' for half sine,
* 't' for linear,
* 'l' for logarithmic
* 'p' for inverted parabola.
See Also
--------
splice |
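A hedged usage sketch with pysox's Transformer; the file paths are placeholders:

import sox

tfm = sox.Transformer()
tfm.fade(fade_in_len=1.0, fade_out_len=2.0, fade_shape='l')  # logarithmic fade in/out
tfm.build('input.wav', 'faded.wav')                          # placeholder paths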
3,899 | def get_align(text):
"Return (halign, valign, angle) of the <text>."
(x1, x2, h, v, a) = unaligned_get_dimension(text)
return (h, v, a) | Return (halign, valign, angle) of the <text>. |