Unnamed: 0 (int64, 0 to 389k) | code (string, lengths 26 to 79.6k) | docstring (string, lengths 1 to 46.9k)
---|---|---|
375,500 | def _check_for_eltorito_boot_info_table(self, ino):
orig = self._cdfp.tell()
with inode.InodeOpenData(ino, self.pvd.logical_block_size()) as (data_fp, data_len):
data_fp.seek(8, os.SEEK_CUR)
bi_table = eltorito.EltoritoBootInfoTable()
if bi_table.parse(self.pvd, data_fp.read(eltorito.EltoritoBootInfoTable.header_length()), ino):
data_fp.seek(-24, os.SEEK_CUR)
csum = self._calculate_eltorito_boot_info_table_csum(data_fp, data_len)
if csum == bi_table.csum:
ino.add_boot_info_table(bi_table)
self._cdfp.seek(orig) | An internal method to check a boot directory record to see if it has
an El Torito Boot Info Table embedded inside of it.
Parameters:
ino - The Inode to check for a Boot Info Table.
Returns:
Nothing. |
375,501 | def launch_slurm(jobname: str,
cmd: str,
memory_mb: int,
project: str,
qos: str,
email: str,
duration: timedelta,
tasks_per_node: int,
cpus_per_task: int,
partition: str = "",
modules: List[str] = None,
directory: str = os.getcwd(),
encoding: str = "ascii") -> None:
if partition:
 partition_cmd = "#SBATCH -p {}".format(partition)
else:
partition_cmd = ""
if modules is None:
modules = ["default-wbic"]
log.info("Launching SLURM job: {}", jobname)
script = .format(
cmd=cmd,
cpus_per_task=cpus_per_task,
duration=strfdelta(duration, SLURM_TIMEDELTA_FMT),
email=email,
jobname=jobname,
memory_mb=memory_mb,
modules=" ".join(modules),
partition_cmd=partition_cmd,
project=project,
qos=qos,
tasks_per_node=tasks_per_node,
)
cmdargs = ["sbatch"]
with pushd(directory):
p = Popen(cmdargs, stdin=PIPE)
p.communicate(input=script.encode(encoding)) | Launch a job into the SLURM environment.
Args:
jobname: name of the job
cmd: command to be executed
memory_mb: maximum memory requirement per process (Mb)
project: project name
qos: quality-of-service name
email: user's e-mail address
duration: maximum duration per job
tasks_per_node: tasks per (cluster) node
cpus_per_task: CPUs per task
partition: cluster partition name
modules: SLURM modules to load
directory: directory to change to
encoding: encoding to apply to launch script as sent to ``sbatch`` |
375,502 | def safe_sparse_dot(a, b, dense_output=False):
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
 return np.dot(a, b) | Dot product that handles the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
Parameters
----------
a : array or sparse matrix
b : array or sparse matrix
dense_output : boolean, default False
When False, either ``a`` or ``b`` being sparse will yield sparse
output. When True, output will always be an array.
Returns
-------
dot_product : array or sparse matrix
sparse if ``a`` or ``b`` is sparse and ``dense_output=False``. |
375,503 | def lookup_values_from_error_table(scores, err_df):
ix = find_nearest_matches(np.float32(err_df.cutoff.values), np.float32(scores))
 return err_df.pvalue.iloc[ix].values, err_df.svalue.iloc[ix].values, err_df.pep.iloc[ix].values, err_df.qvalue.iloc[ix].values | Find the matching p-value, s-value, PEP and q-value from the error table for each score in 'scores' |
375,504 | def addGaussNoise(self, sigma):
sz = self.diagonalSize()
pts = self.coordinates()
n = len(pts)
ns = np.random.randn(n, 3) * sigma * sz / 100
vpts = vtk.vtkPoints()
vpts.SetNumberOfPoints(n)
vpts.SetData(numpy_to_vtk(pts + ns, deep=True))
self.poly.SetPoints(vpts)
self.poly.GetPoints().Modified()
 return self | Add Gaussian noise.
 :param float sigma: sigma is expressed in percent of the diagonal size of the actor.
:Example:
.. code-block:: python
from vtkplotter import Sphere
Sphere().addGaussNoise(1.0).show() |
375,505 | def get_dump(self, fmap='', with_stats=False):
length = ctypes.c_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
 if self.feature_names is not None and fmap == '':
flen = int(len(self.feature_names))
fname = from_pystr_to_cstr(self.feature_names)
if self.feature_types is None:
 ftype = from_pystr_to_cstr(['q'] * flen)
else:
ftype = from_pystr_to_cstr(self.feature_types)
_check_call(_LIB.XGBoosterDumpModelWithFeatures(self.handle,
flen,
fname,
ftype,
int(with_stats),
ctypes.byref(length),
ctypes.byref(sarr)))
else:
 if fmap != '' and not os.path.exists(fmap):
raise ValueError("No such file: {0}".format(fmap))
_check_call(_LIB.XGBoosterDumpModel(self.handle,
c_str(fmap),
int(with_stats),
ctypes.byref(length),
ctypes.byref(sarr)))
res = from_cstr_to_pystr(sarr, length)
 return res | Returns the model dump as a list of strings. |
375,506 | def to_grey(self, on: bool=False):
self._on = False
self._load_new(led_grey) | Change the LED to grey.
:param on: Unused, here for API consistency with the other states
:return: None |
375,507 | def fit(self, blocks, y=None):
self.kmeans.fit(make_weninger_features(blocks))
self.kmeans.cluster_centers_.sort(axis=0)
self.kmeans.cluster_centers_[0, :] = np.zeros(2)
return self | Fit a k-means clustering model using an ordered sequence of blocks. |
375,508 | def ParseMultiple(self, stats, file_objs, kb):
fileset = {stat.pathspec.path: obj for stat, obj in zip(stats, file_objs)}
return self.ParseFileset(fileset) | Process files together. |
375,509 | def zero_datetime(dt, tz=None):
if tz is None:
tz = get_current_timezone()
return coerce_naive(dt).replace(hour=0, minute=0, second=0, microsecond=0) | Return the given datetime with hour/minutes/seconds/ms zeroed and the
timezone coerced to the given ``tz`` (or UTC if none is given). |
375,510 | def pandas(self):
if self._pandas is None:
self._pandas = pd.DataFrame().from_records(self.list_of_dicts)
return self._pandas | Return a Pandas dataframe. |
375,511 | def hs_join(ls_hsi, hso):
N = len(ls_hsi)
ls_hsi_rdy, ls_hsi_vld = zip(*ls_hsi)
ls_hsi_rdy, ls_hsi_vld = list(ls_hsi_rdy), list(ls_hsi_vld)
hso_rdy, hso_vld = hso
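 # Join semantics: the output is valid only when every input is valid, and each input is acknowledged (ready) only when the output side is also ready.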
@always_comb
def _hsjoin():
all_vld = True
for i in range(N):
all_vld = all_vld and ls_hsi_vld[i]
hso_vld.next = all_vld
for i in range(N):
ls_hsi_rdy[i].next = all_vld and hso_rdy
return _hsjoin | [Many-to-one] Synchronizes (joins) a list of input handshake interfaces: output is ready when ALL inputs are ready
ls_hsi - (i) list of input handshake tuples (ready, valid)
hso - (o) an output handshake tuple (ready, valid) |
375,512 | def _add_styles(self, add_paragraph=True, add_text=True):
p_styles = self.get_para_styles()
t_styles = self.get_span_styles()
for s in self.slide.pending_styles:
if isinstance(s, ParagraphStyle):
p_styles.update(s.styles)
elif isinstance(s, TextStyle):
t_styles.update(s.styles)
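 # Fold any styles queued on the slide into the paragraph and span style dictionaries before building the wrappers.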
para = ParagraphStyle(**p_styles)
if add_paragraph or self.slide.paragraph_attribs:
p_attrib = {ns("text", "style-name"): para.name}
p_attrib.update(self.slide.paragraph_attribs)
if not self._in_tag(ns("text", "p"), p_attrib):
self.parent_of(ns("text", "p"))
self.slide._preso.add_style(para)
self.add_node("text:p", attrib=p_attrib)
if add_text and t_styles:
text = TextStyle(**t_styles)
children = self.cur_node.getchildren()
if children:
last = children[-1]
if (
last.tag == ns("text", "span")
and last.attrib[ns("text", "style-name")] == text.name
and last.tail is None
):
self.cur_node = children[-1]
return
if not self._is_node(
ns("text", "span"), {ns("text", "style-name"): text.name}
):
self.slide._preso.add_style(text)
self.add_node("text:span", attrib={"text:style-name": text.name}) | Adds paragraph and span wrappers if necessary based on style |
375,513 | def make_data(n,m):
p = {}
for i in range(1,m+1):
for j in range(1,n+1):
p[i,j] = random.randint(1,10)
return p | make_data: prepare matrix of m times n random processing times |
375,514 | def basic_stats(G, area=None, clean_intersects=False, tolerance=15,
 circuity_dist='gc'):
sq_m_in_sq_km = 1e6
G_undirected = None
n = len(list(G.nodes()))
m = len(list(G.edges()))
k_avg = 2 * m / n
 if 'streets_per_node' in G.graph:
 streets_per_node = G.graph['streets_per_node']
else:
streets_per_node = count_streets_per_node(G)
node_ids = set(G.nodes())
intersection_count = len([True for node, count in streets_per_node.items() if (count > 1) and (node in node_ids)])
streets_per_node_avg = sum(streets_per_node.values()) / n
streets_per_node_counts = {num:list(streets_per_node.values()).count(num) for num in range(max(streets_per_node.values()) + 1)}
streets_per_node_proportion = {num:count/n for num, count in streets_per_node_counts.items()}
 edge_length_total = sum([d['length'] for u, v, d in G.edges(data=True)])
edge_length_avg = edge_length_total / m
if G_undirected is None:
G_undirected = G.to_undirected(reciprocal=False)
 street_length_total = sum([d['length'] for u, v, d in G_undirected.edges(data=True)])
street_segments_count = len(list(G_undirected.edges(keys=True)))
street_length_avg = street_length_total / street_segments_count
if clean_intersects:
clean_intersection_points = clean_intersections(G, tolerance=tolerance, dead_ends=False )
clean_intersection_count = len(clean_intersection_points)
else:
clean_intersection_count = None
if area is not None:
area_km = area / sq_m_in_sq_km
node_density_km = n / area_km
intersection_density_km = intersection_count / area_km
edge_density_km = edge_length_total / area_km
street_density_km = street_length_total / area_km
if clean_intersects:
clean_intersection_density_km = clean_intersection_count / area_km
else:
clean_intersection_density_km = None
else:
node_density_km = None
intersection_density_km = None
edge_density_km = None
street_density_km = None
clean_intersection_density_km = None
 coords = np.array([[G.nodes[u]['y'], G.nodes[u]['x'], G.nodes[v]['y'], G.nodes[v]['x']] for u, v, k in G.edges(keys=True)])
 df_coords = pd.DataFrame(coords, columns=['u_y', 'u_x', 'v_y', 'v_x'])
 if circuity_dist == 'gc':
 gc_distances = great_circle_vec(lat1=df_coords['u_y'],
 lng1=df_coords['u_x'],
 lat2=df_coords['v_y'],
 lng2=df_coords['v_x'])
 elif circuity_dist == 'euclidean':
 gc_distances = euclidean_dist_vec(y1=df_coords['u_y'],
 x1=df_coords['u_x'],
 y2=df_coords['v_y'],
 x2=df_coords['v_x'])
 else:
 raise ValueError('circuity_dist must be "gc" or "euclidean"')
gc_distances = gc_distances.fillna(value=0)
try:
circuity_avg = edge_length_total / gc_distances.sum()
except ZeroDivisionError:
circuity_avg = np.nan
self_loops = [True for u, v, k in G.edges(keys=True) if u == v]
self_loops_count = len(self_loops)
self_loop_proportion = self_loops_count / m
 stats = {'n': n,
 'm': m,
 'k_avg': k_avg,
 'intersection_count': intersection_count,
 'streets_per_node_avg': streets_per_node_avg,
 'streets_per_node_counts': streets_per_node_counts,
 'streets_per_node_proportion': streets_per_node_proportion,
 'edge_length_total': edge_length_total,
 'edge_length_avg': edge_length_avg,
 'street_length_total': street_length_total,
 'street_length_avg': street_length_avg,
 'street_segments_count': street_segments_count,
 'node_density_km': node_density_km,
 'intersection_density_km': intersection_density_km,
 'edge_density_km': edge_density_km,
 'street_density_km': street_density_km,
 'circuity_avg': circuity_avg,
 'self_loop_proportion': self_loop_proportion,
 'clean_intersection_count': clean_intersection_count,
 'clean_intersection_density_km': clean_intersection_density_km}
 return stats | Calculate basic descriptive metrics and topological stats for a graph.
For an unprojected lat-lng graph, tolerance and graph units should be in
degrees, and circuity_dist should be 'gc'. For a projected graph, tolerance
and graph units should be in meters (or similar) and circuity_dist should be
'euclidean'.
Parameters
----------
G : networkx multidigraph
area : numeric
the area covered by the street network, in square meters (typically land
area); if none, will skip all density-based metrics
clean_intersects : bool
if True, calculate clean intersections count (and density, if area is
provided)
tolerance : numeric
tolerance value passed along if clean_intersects=True, see
clean_intersections() function documentation for details and usage
circuity_dist : str
'gc' or 'euclidean', how to calculate straight-line distances for
circuity measurement; use former for lat-lng networks and latter for
projected networks
Returns
-------
stats : dict
dictionary of network measures containing the following elements (some
keys may not be present, based on the arguments passed into the function):
- n = number of nodes in the graph
- m = number of edges in the graph
- k_avg = average node degree of the graph
- intersection_count = number of intersections in graph, that is,
nodes with >1 street emanating from them
- streets_per_node_avg = how many streets (edges in the undirected
representation of the graph) emanate from each node (ie,
intersection or dead-end) on average (mean)
- streets_per_node_counts = dict, with keys of number of streets
emanating from the node, and values of number of nodes with this
count
- streets_per_node_proportion = dict, same as previous, but as a
proportion of the total, rather than counts
- edge_length_total = sum of all edge lengths in the graph, in meters
- edge_length_avg = mean edge length in the graph, in meters
- street_length_total = sum of all edges in the undirected
representation of the graph
- street_length_avg = mean edge length in the undirected
representation of the graph, in meters
- street_segments_count = number of edges in the undirected
representation of the graph
- node_density_km = n divided by area in square kilometers
- intersection_density_km = intersection_count divided by area in
square kilometers
- edge_density_km = edge_length_total divided by area in square
kilometers
- street_density_km = street_length_total divided by area in square
kilometers
- circuity_avg = edge_length_total divided by the sum of the great
circle distances between the nodes of each edge
- self_loop_proportion = proportion of edges that have a single node
as its two endpoints (ie, the edge links nodes u and v, and u==v)
- clean_intersection_count = number of intersections in street
network, merging complex ones into single points
- clean_intersection_density_km = clean_intersection_count divided by
area in square kilometers |
375,515 | def create_connection(port=_PORT_, timeout=_TIMEOUT_, restart=False):
if _CON_SYM_ in globals():
if not isinstance(globals()[_CON_SYM_], pdblp.BCon):
del globals()[_CON_SYM_]
if (_CON_SYM_ in globals()) and (not restart):
con = globals()[_CON_SYM_]
if getattr(con, ).start(): con.start()
return con, False
else:
con = pdblp.BCon(port=port, timeout=timeout)
globals()[_CON_SYM_] = con
con.start()
return con, True | Create Bloomberg connection
Returns:
(Bloomberg connection, if connection is new) |
375,516 | def stop(self, timeout=None):
logger.debug("docker plugin - Close thread for container {}".format(self._container.name))
self._stopper.set() | Stop the thread. |
375,517 | def on_message(self, con, event):
msg_type = event.getType()
nick = event.getFrom().getResource()
from_jid = event.getFrom().getStripped()
body = event.getBody()
if msg_type == and body is None:
return
logger.debug( % (msg_type, from_jid, nick, body,))
sender = filter(lambda m: m[] == from_jid, self.params[])
should_process = msg_type in [, , None] and body is not None and len(sender) == 1
if not should_process: return
sender = sender[0]
try:
for p in self.command_patterns:
reg, cmd = p
m = reg.match(body)
if m:
logger.info(%s\ % (cmd,))
function = getattr(self, str(cmd), None)
if function:
return function(sender, body, m)
words = body.split()
cmd, args = words[0], words[1:]
if cmd and cmd[0] == :
cmd = cmd[1:]
command_handler = getattr(self, +cmd, None)
if command_handler:
return command_handler(sender, body, args)
broadcast_body = % (sender[], body,)
return self.broadcast(broadcast_body, exclude=(sender,))
except:
 logger.exception( % (body, sender[])) | Handles message stanzas |
375,518 | def GetStructFormatString(self):
if not self._element_data_type_map:
return None
number_of_elements = None
if self._data_type_definition.elements_data_size:
element_byte_size = self._element_data_type_definition.GetByteSize()
if element_byte_size is None:
return None
number_of_elements, _ = divmod(
self._data_type_definition.elements_data_size, element_byte_size)
elif self._data_type_definition.number_of_elements:
number_of_elements = self._data_type_definition.number_of_elements
format_string = self._element_data_type_map.GetStructFormatString()
if not number_of_elements or not format_string:
return None
 return '{0:d}{1:s}'.format(number_of_elements, format_string) | Retrieves the Python struct format string.
Returns:
str: format string as used by Python struct or None if format string
cannot be determined. |
375,519 | def create_schema(self, model, waiting_models):
bucket_name = model._get_bucket_name()
index_name = "%s_%s" % (settings.DEFAULT_BUCKET_TYPE, bucket_name)
ins = model(fake_context)
fields = self.get_schema_fields(ins._collect_index_fields())
new_schema = self.compile_schema(fields)
schema = get_schema_from_solr(index_name)
if not (schema == new_schema):
try:
client.create_search_schema(index_name, new_schema)
print("+ %s (%s) search schema is created." % (model.__name__, index_name))
except:
print("+ %s (%s) search schema checking operation is taken to queue." % (
model.__name__, index_name))
waiting_models.append(model) | Creates search schemas.
Args:
model: model to execute
waiting_models: if riak can't return response immediately, model is taken to queue.
After first execution session, method is executed with waiting models and controlled.
And be ensured that all given models are executed properly.
Returns: |
375,520 | def generate_local_url(self, js_name):
host = self._settings[].format(**self._host_context).rstrip()
return .format(host, js_name) | Generate the local url for a js file.
:param js_name:
:return: |
375,521 | def toxml(self):
return .format(self.name) + + \
(.format(self.description) if self.description else ) +\
| Exports this object into a LEMS XML object |
375,522 | def run(self):
with self.input().open() as handle:
body = json.loads(handle.read())
es = elasticsearch.Elasticsearch()
id = body.get()
es.index(index=, doc_type=, id=id, body=body) | Index the document. Since ids are predictable,
we won't index anything twice. |
375,523 | def _add_relations(self, relations):
for k, v in six.iteritems(relations):
self.d.relate(k, v) | Add all of the relations for the services. |
375,524 | def process_streamers(self):
in_progress = self._stream_manager.in_progress()
triggered = self.graph.check_streamers(blacklist=in_progress)
for streamer in triggered:
self._stream_manager.process_streamer(streamer, callback=self._handle_streamer_finished) | Check if any streamers should be handed to the stream manager. |
375,525 | def setbridgeprio(self, prio):
 _runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
"Could not set bridge priority in %s." % self.name) | Set bridge priority value. |
375,526 | def generate_tensor_filename(self, field_name, file_num, compressed=True):
file_ext = TENSOR_EXT
if compressed:
file_ext = COMPRESSED_TENSOR_EXT
filename = os.path.join(self.filename, , %(field_name, file_num, file_ext))
return filename | Generate a filename for a tensor. |
375,527 | def start(self, use_atexit=True):
assert not self._process
_logger.debug(, self._proc_args)
process_future = asyncio.create_subprocess_exec(
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
*self._proc_args
)
self._process = yield from process_future
self._stderr_reader = asyncio.async(self._read_stderr())
self._stdout_reader = asyncio.async(self._read_stdout())
if use_atexit:
atexit.register(self.close) | Start the executable.
Args:
use_atexit (bool): If True, the process will automatically be
terminated at exit. |
375,528 | def _item_keys_match(crypto_config, item1, item2):
partition_key_name = crypto_config.encryption_context.partition_key_name
sort_key_name = crypto_config.encryption_context.sort_key_name
partition_keys_match = item1[partition_key_name] == item2[partition_key_name]
if sort_key_name is None:
return partition_keys_match
return partition_keys_match and item1[sort_key_name] == item2[sort_key_name] | Determines whether the values in the primary and sort keys (if they exist) are the same
:param CryptoConfig crypto_config: CryptoConfig used in encrypting the given items
:param dict item1: The first item to compare
:param dict item2: The second item to compare
:return: Bool response, True if the key attributes match
:rtype: bool |
375,529 | def refresh_address_presence(self, address):
composite_presence = {
self._fetch_user_presence(uid)
for uid
in self._address_to_userids[address]
}
new_presence = UserPresence.UNKNOWN
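 # Walk the UserPresence members in declaration order and keep the first state that any of this address's user IDs is currently in.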
for presence in UserPresence.__members__.values():
if presence in composite_presence:
new_presence = presence
break
new_address_reachability = USER_PRESENCE_TO_ADDRESS_REACHABILITY[new_presence]
if new_address_reachability == self._address_to_reachability.get(address):
return
log.debug(
,
current_user=self._user_id,
address=to_normalized_address(address),
prev_state=self._address_to_reachability.get(address),
state=new_address_reachability,
)
self._address_to_reachability[address] = new_address_reachability
self._address_reachability_changed_callback(address, new_address_reachability) | Update synthesized address presence state from cached user presence states.
Triggers callback (if any) in case the state has changed.
This method is only provided to cover an edge case in our use of the Matrix protocol and
should **not** generally be used. |
375,530 | def close(self):
if self._access is not None:
_logger.debug("Cleaning up")
pci_cleanup(self._access)
self._access = None | Release libpci resources. |
375,531 | def create_address(cls, address, **kwargs):
 kwargs['_return_http_data_only'] = True
 if kwargs.get('async'):
return cls._create_address_with_http_info(address, **kwargs)
else:
(data) = cls._create_address_with_http_info(address, **kwargs)
return data | Create Address
Create a new Address
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_address(address, async=True)
>>> result = thread.get()
:param async bool
:param Address address: Attributes of address to create (required)
:return: Address
If the method is called asynchronously,
returns the request thread. |
375,532 | def _lowfreq_linear_filter(tumor_index, is_paired):
if is_paired:
sbf = "FORMAT/SBF[%s]" % tumor_index
nm = "FORMAT/NM[%s]" % tumor_index
else:
sbf = "INFO/SBF"
nm = "INFO/NM"
cmd = (
)
return cmd.format(**locals()) | Linear classifier for removing low frequency false positives.
Uses a logistic classifier based on 0.5% tumor only variants from the smcounter2 paper:
https://github.com/bcbio/bcbio_validations/tree/master/somatic-lowfreq
The classifier uses strand bias (SBF) and read mismatches (NM) and
applies only for low frequency (<2%) and low depth (<30) variants. |
375,533 | def selected(self, new):
def preprocess(item):
if isinstance(item, str):
return self.options[item]
return item
items = coerce_to_list(new, preprocess)
self.widget.value = items | Set selected from list or instance of object or name.
Over-writes existing selection |
375,534 | def get_first_recipient_with_address(self):
recipients_with_address = [recipient for recipient in self._recipients
if recipient.address]
if recipients_with_address:
return recipients_with_address[0]
else:
return None | Returns the first recipient found with a non blank address
:return: First Recipient
:rtype: Recipient |
375,535 | def safe_listdir(path):
try:
return os.listdir(path)
except (PermissionError, NotADirectoryError):
pass
except OSError as e:
ignorable = (
e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT)
or getattr(e, "winerror", None) == 267
)
if not ignorable:
raise
return () | Attempt to list contents of path, but suppress some exceptions. |
375,536 | def on_canvas_slave__electrode_pair_selected(self, slave, data):
import networkx as nx
 source_id = data['source_id']
 target_id = data['target_id']
if self.canvas_slave.device is None or self.plugin is None:
return
slave.df_routes = slave.df_routes.loc[slave.df_routes.route_i >=
0].copy()
try:
shortest_path = self.canvas_slave.device.find_path(source_id,
target_id)
self.plugin.execute_async(,
, drop_route=shortest_path)
except nx.NetworkXNoPath:
logger.error(, source_id,
target_id) | Process pair of selected electrodes.
For now, this consists of finding the shortest path between the two
electrodes and appending it to the list of droplet routes for the
current step.
Note that the droplet routes for a step are stored in a frame/table in
the `DmfDeviceController` step options.
.. versionchanged:: 0.11
Clear any temporary routes (drawn while mouse is down) from routes
list.
.. versionchanged:: 0.11.3
Clear temporary routes by setting ``df_routes`` property of
:attr:`canvas_slave`. |
375,537 | def det_4x3(a,b,c,d):
return (a[1]*b[2]*c[0] + a[2]*b[0]*c[1] - a[2]*b[1]*c[0] - a[0]*b[2]*c[1] -
a[1]*b[0]*c[2] + a[0]*b[1]*c[2] + a[2]*b[1]*d[0] - a[1]*b[2]*d[0] -
a[2]*c[1]*d[0] + b[2]*c[1]*d[0] + a[1]*c[2]*d[0] - b[1]*c[2]*d[0] -
a[2]*b[0]*d[1] + a[0]*b[2]*d[1] + a[2]*c[0]*d[1] - b[2]*c[0]*d[1] -
a[0]*c[2]*d[1] + b[0]*c[2]*d[1] + a[1]*b[0]*d[2] - a[0]*b[1]*d[2] -
 a[1]*c[0]*d[2] + b[1]*c[0]*d[2] + a[0]*c[1]*d[2] - b[0]*c[1]*d[2]) | det_4x3(a,b,c,d) yields the determinant of the matrix formed by the given rows, which may have
 more than 1 dimension, in which case the later dimensions are multiplied and added point-wise.
 The points must be 3D points; the matrix is given a fourth column of 1s and the resulting
 determinant is of this matrix. |
375,538 | def update(self, read, write, manage):
data = values.of({: read, : write, : manage, })
payload = self._version.update(
,
self._uri,
data=data,
)
return SyncListPermissionInstance(
self._version,
payload,
service_sid=self._solution[],
list_sid=self._solution[],
identity=self._solution[],
) | Update the SyncListPermissionInstance
:param bool read: Read access.
:param bool write: Write access.
:param bool manage: Manage access.
:returns: Updated SyncListPermissionInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionInstance |
375,539 | def get_vlan_brief_input_request_type_get_next_request_last_rcvd_vlan_id(self, **kwargs):
config = ET.Element("config")
get_vlan_brief = ET.Element("get_vlan_brief")
config = get_vlan_brief
input = ET.SubElement(get_vlan_brief, "input")
request_type = ET.SubElement(input, "request-type")
get_next_request = ET.SubElement(request_type, "get-next-request")
last_rcvd_vlan_id = ET.SubElement(get_next_request, "last-rcvd-vlan-id")
 last_rcvd_vlan_id.text = kwargs.pop('last_rcvd_vlan_id')
 callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
375,540 | def get_sparse_matrix_keys(session, key_table):
return session.query(key_table).order_by(key_table.name).all() | Return a list of keys for the sparse matrix. |
375,541 | def get_most_severe_consequence(transcripts):
most_severe_consequence = None
most_severe_score = None
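 # Lower severity scores mean more severe consequences, so track the minimum score seen.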
for transcript in transcripts:
 for consequence in transcript['consequence'].split('&'):
logger.debug("Checking severity score for consequence: {0}".format(
consequence
))
severity_score = SEVERITY_DICT.get(
consequence
)
logger.debug("Severity score found: {0}".format(
severity_score
))
if severity_score != None:
if most_severe_score:
if severity_score < most_severe_score:
most_severe_consequence = consequence
most_severe_score = severity_score
else:
most_severe_consequence = consequence
most_severe_score = severity_score
return most_severe_consequence | Get the most severe consequence
Go through all transcripts and get the most severe consequence
Args:
transcripts (list): A list of transcripts to evaluate
Returns:
most_severe_consequence (str): The most severe consequence |
375,542 | def magic_timeit(setup, stmt, ncalls=None, repeat=3, force_ms=False):
import timeit
import math
 units = ["s", "ms", "us", "ns"]
scaling = [1, 1e3, 1e6, 1e9]
timer = timeit.Timer(stmt, setup)
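 # Without an explicit call count, grow the loop count by powers of ten until a single timing run takes at least 0.1 seconds.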
if ncalls is None:
number = 1
for _ in range(1, 10):
if timer.timeit(number) >= 0.1:
break
number *= 10
else:
number = ncalls
best = min(timer.repeat(repeat, number)) / number
if force_ms:
order = 1
else:
if best > 0.0 and best < 1000.0:
order = min(-int(math.floor(math.log10(best)) // 3), 3)
elif best >= 1000.0:
order = 0
else:
order = 3
 return {'loops': number,
 'repeat': repeat,
 'timing': best * scaling[order],
 'units': units[order]} | Time execution of a Python statement or expression
Usage:\\
%timeit [-n<N> -r<R> [-t|-c]] statement
Time execution of a Python statement or expression using the timeit
module.
Options:
-n<N>: execute the given statement <N> times in a loop. If this value
is not given, a fitting value is chosen.
-r<R>: repeat the loop iteration <R> times and take the best result.
Default: 3
-t: use time.time to measure the time, which is the default on Unix.
This function measures wall time.
-c: use time.clock to measure the time, which is the default on
Windows and measures wall time. On Unix, resource.getrusage is used
instead and returns the CPU user time.
-p<P>: use a precision of <P> digits to display the timing result.
Default: 3
Examples:
In [1]: %timeit pass
10000000 loops, best of 3: 53.3 ns per loop
In [2]: u = None
In [3]: %timeit u is None
10000000 loops, best of 3: 184 ns per loop
In [4]: %timeit -r 4 u == None
1000000 loops, best of 4: 242 ns per loop
In [5]: import time
In [6]: %timeit -n1 time.sleep(2)
1 loops, best of 3: 2 s per loop
The times reported by %timeit will be slightly higher than those
reported by the timeit.py script when variables are accessed. This is
due to the fact that %timeit executes the statement in the namespace
of the shell, compared with timeit.py, which uses a single setup
statement to import function or create variables. Generally, the bias
does not matter as long as results from timeit.py are not mixed with
those from %timeit. |
375,543 | def deploy_config(model, initial_instance_count, instance_type, endpoint_name=None, tags=None):
model_base_config = model_config(instance_type, model)
production_variant = sagemaker.production_variant(model.name, instance_type, initial_instance_count)
name = model.name
 config_options = {'EndpointConfigName': name, 'ProductionVariants': [production_variant]}
 if tags is not None:
 config_options['Tags'] = tags
 endpoint_name = endpoint_name or name
 endpoint_base_config = {
 'EndpointName': endpoint_name,
 'EndpointConfigName': name
 }
 config = {
 'Model': model_base_config,
 'EndpointConfig': config_options,
 'Endpoint': endpoint_base_config
 }
 s3_operations = model_base_config.pop('S3Operations', None)
 if s3_operations is not None:
 config['S3Operations'] = s3_operations
return config | Export Airflow deploy config from a SageMaker model
Args:
model (sagemaker.model.Model): The SageMaker model to export the Airflow config from.
instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'.
initial_instance_count (int): The initial number of instances to run in the
``Endpoint`` created from this ``Model``.
endpoint_name (str): The name of the endpoint to create (default: None).
If not specified, a unique endpoint name will be created.
tags (list[dict]): List of tags for labeling a training job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
Returns:
dict: Deploy config that can be directly used by SageMakerEndpointOperator in Airflow. |
375,544 | def consolidate(self, args):
result = dict(args)
for opt in self:
if opt.name in result:
result[opt.name] = opt.convert(result[opt.name])
else:
if opt.default is not None:
result[opt.name] = opt.convert(opt.default)
return result | Consolidate the provided arguments.
If the provided arguments have matching options, this performs a type conversion.
For any option that has a default value and is not present in the provided
arguments, the default value is added.
Args:
args (dict): A dictionary of the provided arguments.
Returns:
dict: A dictionary with the type converted and with default options enriched
arguments. |
375,545 | async def close(self) -> None:
LOGGER.debug()
if not self.handle:
LOGGER.warning(, self.name)
else:
await pool.close_pool_ledger(self.handle)
self._handle = None
LOGGER.debug() | Explicit exit. Closes pool. For use when keeping pool open across multiple calls. |
375,546 | def entry_point() -> None:
sys.path.insert(0, os.getcwd())
parser = get_cxflow_arg_parser(True)
known_args, unknown_args = parser.parse_known_args()
 if not hasattr(known_args, 'subcommand'):
parser.print_help()
quit(1)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if known_args.verbose else logging.INFO)
logger.handlers = []
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setFormatter(logging.Formatter(CXF_LOG_FORMAT, datefmt=CXF_LOG_DATE_FORMAT))
logger.addHandler(stderr_handler)
 if known_args.subcommand == 'train':
 train(config_path=known_args.config_file, cl_arguments=unknown_args, output_root=known_args.output_root)
 elif known_args.subcommand == 'resume':
 resume(config_path=known_args.config_path, restore_from=known_args.restore_from, cl_arguments=unknown_args,
 output_root=known_args.output_root)
 elif known_args.subcommand == 'predict':
 logging.warning()
 predict(config_path=known_args.config_path, restore_from=known_args.restore_from, cl_arguments=unknown_args,
 output_root=known_args.output_root)
 elif known_args.subcommand == 'eval':
 evaluate(model_path=known_args.model_path, stream_name=known_args.stream_name,
 config_path=known_args.config, cl_arguments=unknown_args, output_root=known_args.output_root)
 elif known_args.subcommand == 'dataset':
 invoke_dataset_method(config_path=known_args.config_file, method_name=known_args.method,
 cl_arguments=unknown_args, output_root=known_args.output_root)
 elif known_args.subcommand == 'gridsearch':
 grid_search(script=known_args.script, params=known_args.params, dry_run=known_args.dry_run)
 elif known_args.subcommand == 'ls':
 list_train_dirs(known_args.dir, known_args.recursive, known_args.all, known_args.long, known_args.verbose)
 elif known_args.subcommand == 'prune':
prune_train_dirs(known_args.dir, known_args.epochs, known_args.subdirs) | **cxflow** entry point. |
375,547 | def parse_reports(self):
self.picard_HsMetrics_data = dict()
for f in self.find_log_files(, filehandles=True):
parsed_data = dict()
s_name = None
keys = None
commadecimal = None
for l in f[]:
if in l or in l and in l:
s_name = None
keys = None
fn_search = re.search(r"INPUT(?:=|\s+)(\[?[^\s]+\]?)", l, flags=re.IGNORECASE)
if fn_search:
s_name = os.path.basename(fn_search.group(1).strip())
s_name = self.clean_s_name(s_name, f[])
parsed_data[s_name] = dict()
if s_name is not None:
if in l and in l:
keys = f[].readline().strip("\n").split("\t")
elif keys:
vals = l.strip("\n").split("\t")
if len(vals) == len(keys):
j =
if keys[0] == :
j = vals[0]
parsed_data[s_name][j] = dict()
)
return len(self.picard_HsMetrics_data) | Find Picard HsMetrics reports and parse their data |
375,548 | def action(self):
self.return_value = self.function(*self.args, **self.kwargs) | This class overrides this method |
375,549 | def add_unique(self, attr, item):
 return self.set(attr, operation.AddUnique([item])) | Append the given item to the array stored in this field of the object, but only if the item is not already contained in the field.
 :param attr: field name
 :param item: the item to add
 :return: the current object |
375,550 | def Call(method,url,payload,silent=False,hide_errors=[],session=None,recursion_cnt=0,debug=False):
if not clc._LOGIN_COOKIE_V1: API._Login()
if session is None:
session = clc._REQUESTS_SESSION
session.headers.update({: })
r = session.request(method,"%s%s/JSON" % (clc.defaults.ENDPOINT_URL_V1,url),
params=payload,
cookies=clc._LOGIN_COOKIE_V1,
verify=API._ResourcePath())
if debug:
API._DebugRequest(request=requests.Request(method,"%s%s/JSON" % (clc.defaults.ENDPOINT_URL_V1,url),
data=payload,headers=session.headers).prepare(),response=r)
try:
if int(r.json()[]) == 0:
if clc.args and not silent: clc.v1.output.Status(,2, % (r.json()[]))
return(r.json())
elif int(r.json()[]) in hide_errors:
return(r.json())
elif int(r.json()[]) == 2:
if clc.args and not silent: clc.v1.output.Status(,3, % (r.json()[]))
raise Exception(r.json()[])
elif int(r.json()[]) == 5:
raise clc.v1.AccountDoesNotExistException(r.json()[])
elif int(r.json()[]) == 100 and recursion_cnt<2:
clc._LOGIN_COOKIE_V1 = False
return(clc.v1.API.Call(method,url,payload,silent,hide_errors,recursion_cnt+1))
elif int(r.json()[]) == 100:
raise clc.v1.AccountLoginException(r.json()[])
else:
if clc.args and (not hide_errors or not silent): clc.v1.output.Status(,3, % (url,r.json()[],r.json()[]))
raise Exception( % (url,r.json()[],r.json()[]))
except clc.CLCException:
raise
except:
if clc.args and (not hide_errors or not silent): clc.v1.output.Status(,3, % (url,r.status_code))
raise Exception( % (url,r.status_code)) | Execute v1 API call.
:param url: URL paths associated with the API call
:param payload: dict containing all parameters to submit with POST call
:param hide_errors: list of API error codes to ignore. These are not http error codes but returned from the API itself
:param recursion_cnt: recursion counter. This call is recursed if we experience a transient error
:returns: decoded API json result |
375,551 | def post(arguments):
twitter = api.API(arguments)
params = {}
if arguments.update == :
params[] = sys.stdin.read()
else:
params[] = arguments.update
if arguments.media_file:
medias = [twitter.media_upload(m) for m in arguments.media_file]
params[] = [m.media_id for m in medias]
try:
logging.getLogger(arguments.screen_name).info(, params[])
if not arguments.dry_run:
twitter.update_status(**params)
except tweepy.TweepError as e:
logging.getLogger(arguments.screen_name).error(e.message) | Post text to a given twitter account. |
375,552 | def close(self, code=3000, message='Go away!'):
if self.state != SESSION_STATE.CLOSED:
try:
self.conn.connectionLost()
except Exception as e:
log.msg("Failed to call connectionLost(): %r." % e)
finally:
self.state = SESSION_STATE.CLOSED
self.close_reason = (code, message)
self.stats.sessionClosed(self.transport_name)
if self.handler is not None:
self.handler.session_closed() | Close session or endpoint connection.
@param code: Closing code
@param message: Close message |
375,553 | def tweets_for(query_type, args, per_user=None):
lookup = {"query_type": query_type, "value": args[0]}
try:
tweets = Tweet.objects.get_for(**lookup)
except TwitterQueryException:
return []
if per_user is not None:
_tweets = defaultdict(list)
for tweet in tweets:
if len(_tweets[tweet.user_name]) < per_user:
_tweets[tweet.user_name].append(tweet)
tweets = sum(_tweets.values(), [])
tweets.sort(key=lambda t: t.created_at, reverse=True)
if len(args) > 1 and str(args[-1]).isdigit():
tweets = tweets[:int(args[-1])]
return tweets | Retrieve tweets for a user, list or search term. The optional
``per_user`` arg limits the number of tweets per user, for
example to allow a fair spread of tweets per user for a list. |
375,554 | def widen(self, other):
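 # If this interval extends below or above `other` on either side, that bound is widened to infinity; otherwise the original bound is kept.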
if self.low < other.low:
low = -float("inf")
else:
low = self.low
if self.high > other.high:
high = float("inf")
else:
high = self.high
return Interval(low, high) | Widen current range. |
375,555 | def convert_data_iterable(data_iterable, filter_func=None, converter_func=None):
data_list = []
for item in data_iterable:
data_list.append((convert_data_array(item[0], filter_func=filter_func, converter_func=converter_func), item[1], item[2], item[3]))
return data_list | Convert raw data in data iterable.
Parameters
----------
data_iterable : iterable
Iterable where each element is a tuple with following content: (raw data, timestamp_start, timestamp_stop, status).
filter_func : function
Function that takes array and returns true or false for each item in array.
converter_func : function
Function that takes array and returns an array or tuple of arrays.
Returns
-------
data_list : list
Data list of the form [(converted data, timestamp_start, timestamp_stop, status), (...), ...] |
375,556 | def inside_try(func, options={}):
if six.PY2:
name = func.func_name
else:
name = func.__name__
@wraps(func)
def silenceit(*args, **kwargs):
 excpt = None
 try:
 return func(*args, **kwargs)
 except Exception as excpt:
 # Swallow the exception so the wrapped call fails "safely"; log it for debugging.
 logging.exception(excpt)
 return None
 return silenceit | Decorator to silence exceptions; for logging purposes we want the wrapped functions to fail "safely". |
375,557 | def process_records(records):
changes = defaultdict(int)
cascaded_update_records = set()
cascaded_delete_records = set()
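 # Creations and data changes are recorded directly; moves/protection changes cascade as updates through category contents, and deletions cascade through event contents.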
for record in records:
if record.change != ChangeType.deleted and record.object is None:
continue
if record.change == ChangeType.created:
assert record.type != EntryType.category
changes[record.object] |= SimpleChange.created
elif record.change == ChangeType.deleted:
assert record.type != EntryType.category
cascaded_delete_records.add(record)
elif record.change in {ChangeType.moved, ChangeType.protection_changed}:
cascaded_update_records.add(record)
elif record.change == ChangeType.data_changed:
assert record.type != EntryType.category
changes[record.object] |= SimpleChange.updated
for obj in _process_cascaded_category_contents(cascaded_update_records):
changes[obj] |= SimpleChange.updated
for obj in _process_cascaded_event_contents(cascaded_delete_records):
changes[obj] |= SimpleChange.deleted
return changes | Converts queue entries into object changes.
:param records: an iterable containing `LiveSyncQueueEntry` objects
:return: a dict mapping object references to `SimpleChange` bitsets |
375,558 | def delete_files():
session_token = request.headers[]
repository = request.headers[]
current_user = have_authenticated_user(request.environ[], repository, session_token)
if current_user is False: return fail(user_auth_fail_msg)
repository_path = config[][repository][]
body_data = request.get_json()
def with_exclusive_lock():
if not varify_user_lock(repository_path, session_token): return fail(lock_fail_msg)
try:
data_store = versioned_storage(repository_path)
if not data_store.have_active_commit(): return fail(no_active_commit_msg)
for fle in json.loads(body_data[]):
data_store.fs_delete(fle)
update_user_lock(repository_path, session_token)
return success()
except Exception: return fail()
return lock_access(repository_path, with_exclusive_lock) | Delete one or more files from the server |
375,559 | def makeOuputDir(outputDir, force):
if outputDir:
if exists(outputDir):
if not force:
print(
, file=sys.stderr)
sys.exit(1)
else:
mkdir(outputDir)
else:
outputDir = mkdtemp()
print( % outputDir)
return outputDir | Create or check for an output directory.
@param outputDir: A C{str} output directory name, or C{None}.
@param force: If C{True}, allow overwriting of pre-existing files.
@return: The C{str} output directory name. |
375,560 | def __deserialize_primitive(self, data, klass):
try:
return klass(data)
except UnicodeEncodeError:
return six.text_type(data)
except TypeError:
return data | Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool. |
375,561 | def saltenviron(environ):
 __opts__ = salt.config.client_config(os.environ.get('SALT_MASTER_CONFIG', '/etc/salt/master'))
 environ['SALT_OPTS'] = __opts__
 environ['SALT_APIClient'] = salt.netapi.NetapiClient(__opts__) | Make Salt's opts dict and the APIClient available in the WSGI environ |
375,562 | def userToId(url):
match = re.search(r"users(/ME/contacts)?/[0-9]+:([^/]+)", url)
return match.group(2) if match else None | Extract the username from a contact URL.
Matches addresses containing ``users/<user>`` or ``users/ME/contacts/<user>``.
Args:
url (str): Skype API URL
Returns:
str: extracted identifier |
375,563 | def create_html(self, fname, title="ClassTracker Statistics"):
self.basedir = os.path.dirname(os.path.abspath(fname))
self.filesdir = os.path.splitext(fname)[0] +
if not os.path.isdir(self.filesdir):
os.mkdir(self.filesdir)
self.filesdir = os.path.abspath(self.filesdir)
self.links = {}
self.annotate()
self.charts = {}
fn = os.path.join(self.filesdir, )
self.charts[] = self.create_snapshot_chart(fn)
for fp, idx in zip(self.snapshots, list(range(len(self.snapshots)))):
fn = os.path.join(self.filesdir, % (idx))
self.charts[fp] = self.create_pie_chart(fp, fn)
for cn in list(self.index.keys()):
fn = os.path.join(self.filesdir, cn.replace(, )+)
self.charts[cn] = self.create_lifetime_chart(cn, fn)
for cn in list(self.index.keys()):
fn = os.path.join(self.filesdir, cn.replace(, )+)
self.links[cn] = fn
self.print_class_details(fn, cn)
self.create_title_page(fname, title=title) | Create HTML page `fname` and additional files in a directory derived
from `fname`. |
375,564 | def select_balanced_subset(items, select_count, categories, select_count_values=None, seed=None):
rand = random.Random()
rand.seed(seed)
if select_count_values is None:
select_count_values = {item_id: 1 for item_id in items.keys()}
if sum(select_count_values.values()) < select_count:
return list(items.keys())
available_item_ids = sorted(list(items.keys()))
weight_per_category = np.zeros(len(categories))
selected_item_ids = []
available_item_weights = []
current_select_count = 0
rand.shuffle(available_item_ids)
for item_id in available_item_ids:
weights = items[item_id]
all_weights = np.zeros(len(categories))
for category, weight in weights.items():
all_weights[categories.index(category)] = float(weight)
available_item_weights.append(all_weights)
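 # Greedy selection: repeatedly pick the item whose weights keep the accumulated per-category totals most balanced (lowest variance).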
while current_select_count < select_count:
best_item_index = 0
best_item_id = None
 best_item_dist = float('inf')
current_item_index = 0
while current_item_index < len(available_item_ids) and best_item_dist > 0:
item_id = available_item_ids[current_item_index]
item_weights = available_item_weights[current_item_index]
temp_total_weights = weight_per_category + item_weights
dist = temp_total_weights.var()
if dist < best_item_dist:
best_item_index = current_item_index
best_item_dist = dist
best_item_id = item_id
current_item_index += 1
weight_per_category += available_item_weights[best_item_index]
selected_item_ids.append(best_item_id)
del available_item_ids[best_item_index]
del available_item_weights[best_item_index]
current_select_count += select_count_values[best_item_id]
return selected_item_ids | Select items so the summed category weights are balanced.
Each item has a dictionary containing the category weights.
Items are selected until ``select_count`` is reached.
The value that is added to ``select_count`` for an item can be defined in the dictionary ``select_count_values``.
If this is not defined it is assumed to be 1, which means `select_count` items are selected.
Args:
items (dict): Dictionary containing items with category weights.
select_count (float): Value to reach for selected items.
categories (list): List of all categories.
select_count_values (dict): The select_count values to be used.
For example an utterance with multiple labels:
The category weights (label-lengths) are used for balance,
but the utterance-duration is used for reaching the select_count.
Returns:
list: List of item ids, containing ``number_of_items`` (or ``len(items)`` if smaller).
Example:
>>> items = {
>>> 'utt-1' : {'m': 1, 's': 0, 'n': 0},
>>> 'utt-2' : {'m': 0, 's': 2, 'n': 1},
>>> ...
>>> }
>>> select_balanced_subset(items, 5)
>>> ['utt-1', 'utt-3', 'utt-9', 'utt-33', 'utt-34'] |
375,565 | def _init_from_csc(self, csc):
if len(csc.indices) != len(csc.data):
 raise ValueError('length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data)))
handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromCSCEx(c_array(ctypes.c_size_t, csc.indptr),
c_array(ctypes.c_uint, csc.indices),
c_array(ctypes.c_float, csc.data),
ctypes.c_size_t(len(csc.indptr)),
ctypes.c_size_t(len(csc.data)),
ctypes.c_size_t(csc.shape[0]),
ctypes.byref(handle)))
self.handle = handle | Initialize data from a CSC matrix. |
375,566 | def voxelwise_diff(img_spec1=None,
img_spec2=None,
abs_value=True,
cmap=,
overlay_image=False,
overlay_alpha=0.8,
num_rows=2,
num_cols=6,
 rescale_method='global',
background_threshold=0.05,
annot=None,
padding=5,
output_path=None,
figsize=None):
if not isinstance(abs_value, bool):
abs_value = bool(abs_value)
mixer_params = dict(abs_value=abs_value,
cmap=cmap,
overlay_image=overlay_image,
overlay_alpha=overlay_alpha)
fig = _compare(img_spec1,
img_spec2,
num_rows=num_rows,
num_cols=num_cols,
mixer=,
annot=annot,
padding=padding,
rescale_method=rescale_method,
bkground_thresh=background_threshold,
output_path=output_path,
figsize=figsize,
**mixer_params)
return fig | Voxel-wise difference map.
Parameters
----------
img_spec1 : str or nibabel image-like object
MR image (or path to one) to be visualized
img_spec2 : str or nibabel image-like object
MR image (or path to one) to be visualized
abs_value : bool
Flag indicating whether to take the absolute value of the diffenence or not.
Default: True, display absolute differences only (so order of images does not matter)
 cmap : str
 Colormap to show the difference values.
overlay_image : bool
Flag to specify whether to overlay the difference values on the original image.
.. note: This feature is not reliable and supported well yet.
num_rows : int
number of rows (top to bottom) per each of 3 dimensions
num_cols : int
number of panels (left to right) per row of each dimension.
rescale_method : bool or str or list or None
Range to rescale the intensity values to
Default: 'global', min and max values computed based on ranges from both images.
If false or None, no rescaling is done (does not work yet).
background_threshold : float or str
A threshold value below which all the background voxels will be set to zero.
Default : 0.05. Other option is a string specifying a percentile: '5%', '10%'.
Specify None if you don't want any thresholding.
annot : str
Text to display to annotate the visualization
padding : int
number of voxels to pad around each panel.
output_path : str
path to save the generate collage to.
figsize : list
Size of figure in inches to be passed on to plt.figure() e.g. [12, 12] or [20, 20]
Returns
-------
fig : figure handle
handle to the collage figure generated. |
375,567 | def options(argv=[]):
parser = HendrixOptionParser
parsed_args = parser.parse_args(argv)
return vars(parsed_args[0]) | A helper function that returns a dictionary of the default key-values pairs |
375,568 | def _set_vrrpv3e(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("vrid",vrrpv3e.vrrpv3e, yang_name="vrrpv3e", rest_name="vrrp-extended-group", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: None, u: u, u: u, u: None, u: None, u: None, u: u}}), is_container=, yang_name="vrrpv3e", rest_name="vrrp-extended-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: u, u: u, u: None, u: None, u: None, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "list",
: ,
})
self.__vrrpv3e = t
if hasattr(self, ):
self._set() | Setter method for vrrpv3e, mapped from YANG variable /routing_system/interface/ve/ipv6/vrrpv3e (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_vrrpv3e is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vrrpv3e() directly. |
375,569 | def create_option_vip(self):
return OptionVIP(
self.networkapi_url,
self.user,
self.password,
self.user_ldap) | Get an instance of option_vip services facade. |
375,570 | def _results_accumulator(self, filename):
file_results = {}
for plugin in self.plugins:
yield file_results, plugin
if not file_results:
return
if filename not in self.data:
self.data[filename] = file_results
else:
self.data[filename].update(file_results) | :type filename: str
:param filename: name of file, used as a key to store in self.data
:yields: (dict, detect_secrets.plugins.base.BasePlugin)
Caller is responsible for updating the dictionary with
results of plugin analysis. |
375,571 | def color_args(args, *indexes):
for i,arg in enumerate(args):
if i in indexes:
yield lookup_color(arg)
else:
yield arg | Color a list of arguments on particular indexes
>>> c = color_args([None,'blue'], 1)
>>> c.next()
None
>>> c.next()
'0000FF' |
375,572 | def get_discrete_task_agent(generators, market, nStates, nOffer, markups,
withholds, maxSteps, learner, Pd0=None, Pd_min=0.0):
env = pyreto.discrete.MarketEnvironment(generators, market,
numStates=nStates,
numOffbids=nOffer,
markups=markups,
withholds=withholds,
Pd0=Pd0,
Pd_min=Pd_min)
task = pyreto.discrete.ProfitTask(env, maxSteps=maxSteps)
nActions = len(env._allActions)
module = ActionValueTable(numStates=nStates, numActions=nActions)
agent = LearningAgent(module, learner)
return task, agent | Returns a tuple of task and agent for the given learner. |
375,573 | def from_code(cls, code: int) -> 'ColorCode':
c = cls()
c._init_code(code)
return c | Return a ColorCode from a terminal code. |
375,574 | def get_datetext(year, month, day):
input_format = "%Y-%m-%d"
try:
datestruct = time.strptime("%i-%i-%i" % (year, month, day),
input_format)
return strftime(datetext_format, datestruct)
except:
return datetext_default | year=2005, month=11, day=16 => '2005-11-16 00:00:00 |
375,575 | def add(self, value):
ind = int(self._ind % self.shape)
self._pos = self._ind % self.shape
self._values[ind] = value
if self._ind < self.shape:
self._ind += 1
else:
self._ind += self._splitValue
self._splitPos += self._splitValue
self._cached = False | Add a value to the buffer. |
375,576 | def _worker_thread_disk(self):
while not self.termination_check:
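 # Keep pulling (descriptor, offsets, data) work items off the disk queue until the transfer is flagged for termination.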
try:
dd, offsets, data = self._disk_queue.get(
block=False, timeout=0.1)
except queue.Empty:
continue
try:
self._process_data(dd, offsets, data)
except Exception as e:
with self._transfer_lock:
self._exceptions.append(e) | Worker thread for disk
:param Downloader self: this |
375,577 | def update(self, resource, rid, updates):
 if resource[-1] != '/':
 resource += '/'
resource += str(rid)
return self.put(resource, data=updates) | Updates the resource with id 'rid' with the given updates dictionary. |
375,578 | def dls(self)->List[DeviceDataLoader]:
"Returns a list of all DeviceDataLoaders. If you need a specific DeviceDataLoader, access via the relevant property (`train_dl`, `valid_dl`, etc) as the index of DLs in this list is not guaranteed to remain constant."
res = [self.train_dl, self.fix_dl, self.single_dl]
if self.valid_dl: res.insert(1, self.valid_dl)
return res if not self.test_dl else res + [self.test_dl] | Returns a list of all DeviceDataLoaders. If you need a specific DeviceDataLoader, access via the relevant property (`train_dl`, `valid_dl`, etc) as the index of DLs in this list is not guaranteed to remain constant. |
375,579 | def format(self, vertices):
buf = io.StringIO()
buf.write(self.name + )
buf.write()
buf.write(.format(self.type_))
buf.write()
buf.write()
for f in self.faces:
s = f.format(vertices)
buf.write(.format(s))
buf.write()
buf.write()
return buf.getvalue() | Format instance to dump
vertices is dict of name to Vertex |
375,580 | def dump(
self, stream, progress=None, lower=None, upper=None,
incremental=False, deltas=False
):
 cmd = [SVNADMIN, 'dump', '.']
 if progress is None:
 cmd.append('-q')
 if lower is not None:
 cmd.append('-r')
 if upper is None:
 cmd.append(str(int(lower)))
 else:
 cmd.append('%s:%s' % (int(lower), int(upper)))
 if incremental:
 cmd.append('--incremental')
 if deltas:
 cmd.append('--deltas')
p = subprocess.Popen(cmd, cwd=self.path, stdout=stream, stderr=progress)
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd) | Dump the repository to a dumpfile stream.
:param stream: A file stream to which the dumpfile is written
:param progress: A file stream to which progress is written
:param lower: Must be a numeric version number
:param upper: Must be a numeric version number
See ``svnadmin help dump`` for details on the other arguments. |
375,581 | def decimate(self, fraction=0.5, N=None, boundaries=False, verbose=True):
poly = self.polydata(True)
if N:
Np = poly.GetNumberOfPoints()
fraction = float(N) / Np
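 # When a target point count N is given, derive the reduction fraction from the current number of points.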
if fraction >= 1:
return self
decimate = vtk.vtkDecimatePro()
decimate.SetInputData(poly)
decimate.SetTargetReduction(1 - fraction)
decimate.PreserveTopologyOff()
if boundaries:
decimate.BoundaryVertexDeletionOff()
else:
decimate.BoundaryVertexDeletionOn()
decimate.Update()
if verbose:
print("Nr. of pts, input:", poly.GetNumberOfPoints(), end="")
print(" output:", decimate.GetOutput().GetNumberOfPoints())
return self.updateMesh(decimate.GetOutput()) | Downsample the number of vertices in a mesh.
:param float fraction: the desired target of reduction.
:param int N: the desired number of final points (**fraction** is recalculated based on it).
:param bool boundaries: (True), decide whether to leave boundaries untouched or not.
.. note:: Setting ``fraction=0.1`` leaves 10% of the original nr of vertices.
.. hint:: |skeletonize| |skeletonize.py|_ |
375,582 | def do_forget(self, repo):
 self.abort_on_nonexisting_repo(repo, 'forget')
self.network.forget(repo) | Drop definition of a repo.
forget REPO |
375,583 | def ParseNumericOption(self, options, name, base=10, default_value=None):
numeric_value = getattr(options, name, None)
if not numeric_value:
return default_value
try:
return int(numeric_value, base)
except (TypeError, ValueError):
 name = name.replace('_', ' ')
 raise errors.BadConfigOption(
 'Unsupported numeric value {0:s}: {1!s}.'.format(
 name, numeric_value)) | Parses a numeric option.
If the option is not set the default value is returned.
Args:
options (argparse.Namespace): command line arguments.
name (str): name of the numeric option.
base (Optional[int]): base of the numeric value.
default_value (Optional[object]): default value.
Returns:
int: numeric value.
Raises:
BadConfigOption: if the options are invalid. |
375,584 | def per_from_id(flavors=chat_flavors+inline_flavors):
return _wrap_none(lambda msg:
                      msg['from']['id']
                      if flavors == 'all' or flavor(msg) in flavors
                      else None) | :param flavors:
``all`` or a list of flavors
:return:
a seeder function that returns the from id only if the message flavor is
in ``flavors``. |
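A hedged sketch in the style of telepot's delegation setup (``TOKEN`` and ``MyHandler`` are placeholders, not from the source):

import telepot
from telepot.delegate import pave_event_space, per_from_id, create_open

bot = telepot.DelegatorBot(TOKEN, [
    pave_event_space()(
        per_from_id(), create_open(MyHandler, timeout=10)),   # one handler per sender id
])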
375,585 | def collapse_nodes(graph, survivor_mapping: Mapping[BaseEntity, Set[BaseEntity]]) -> None:
inconsistencies = surviors_are_inconsistent(survivor_mapping)
if inconsistencies:
        raise ValueError('survivor mapping is not consistent: {}'.format(inconsistencies))  # message wording reconstructed
for survivor, victims in survivor_mapping.items():
for victim in victims:
collapse_pair(graph, survivor=survivor, victim=victim)
_remove_self_edges(graph) | Collapse all nodes in values to the key nodes, in place.
:param pybel.BELGraph graph: A BEL graph
:param survivor_mapping: A dictionary with survivors as their keys, and iterables of the corresponding victims as
values. |
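A hedged usage sketch; the nodes follow pybel.dsl conventions, are illustrative, and ``graph`` is assumed to already contain them:

from pybel.dsl import Protein

survivor = Protein(namespace='HGNC', name='MAPK1')
victim = Protein(namespace='ENTREZ', name='5594')
collapse_nodes(graph, {survivor: {victim}})   # edges of the victim are folded into the survivor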
375,586 | def get_body_text(self):
        if self.body_type != 'HTML':  # body_type literal reconstructed; non-HTML bodies are returned as-is
            return self.body
        try:
            soup = bs(self.body, 'html.parser')
except RuntimeError:
return self.body
else:
return soup.body.text | Parse the body html and returns the body text using bs4
:return: body text
:rtype: str |
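A hedged usage sketch assuming an O365-style mailbox object obtained elsewhere (the limit is illustrative):

for message in mailbox.get_messages(limit=5):
    print(message.get_body_text())   # plain text even when the body is HTML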
375,587 | def get_branches(self, local=True, remote_branches=True):
if not self.repo.remotes:
remote_branches = False
branches = []
if remote_branches:
try:
for b in self.remote.refs:
                name = '/'.join(b.name.split('/')[1:])
if name not in legit_settings.forbidden_branches:
branches.append(Branch(name, is_published=True))
except (IndexError, AssertionError):
pass
if local:
for b in [h.name for h in self.repo.heads]:
if (not remote_branches) or (b not in [br.name for br in branches]):
if b not in legit_settings.forbidden_branches:
branches.append(Branch(b, is_published=False))
        return sorted(branches, key=attrgetter('name')) | Returns a list of local and remote branches. |
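A short sketch, assuming ``SCMRepo`` is the wrapper class that defines this method (an assumption here):

scm = SCMRepo()
for branch in scm.get_branches(local=True, remote_branches=False):
    print(branch.name, 'published' if branch.is_published else 'local only')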
375,588 | def decorate(self, function_or_name):
if callable(function_or_name):
return self._decorate(function_or_name.__name__, function_or_name)
else:
return partial(self._decorate, function_or_name) | Decorate a function to time the execution
The method can be called with or without a name. If no name is given
the function defaults to the name of the function.
:keyword function_or_name: The name to post to or the function to wrap
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> @timer.decorate
... def some_function():
... # resulting timer name: application_name.some_function
... pass
>>>
>>> @timer.decorate('my_timer')
... def some_other_function():
... # resulting timer name: application_name.my_timer
... pass |
375,589 | def connect(self):
if not HAVE_ZMQ:
raise errors.AnsibleError("zmq is not installed")
self.context = zmq.Context()
socket = self.context.socket(zmq.REQ)
addr = "tcp://%s:%s" % (self.host, self.port)
socket.connect(addr)
self.socket = socket
return self | activates the connection object |
375,590 | def normalize_weight(self, samples):
for i in range(NUM_OF_INSTANCE):
total = 0
for j in range(self.effective_model_num):
total += samples[i][j]
for j in range(self.effective_model_num):
samples[i][j] /= total
return samples | normalize weight
Parameters
----------
samples: list
    a collection of samples; a (NUM_OF_INSTANCE * NUM_OF_FUNCTIONS) matrix
    representing {{w11, w12, ..., w1k}, {w21, w22, ..., w2k}, ..., {wk1, wk2, ..., wkk}}
Returns
-------
list
    samples after weight normalization |
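The normalization is a per-row rescale so each instance's weights sum to 1; a standalone illustration with made-up numbers:

row = [2.0, 2.0, 4.0]                    # weights of one instance over three models
total = sum(row)
normalized = [w / total for w in row]    # -> [0.25, 0.25, 0.5]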
375,591 | async def api_call(self, verb, action, params=None, add_authorization_token=True, retry=False):
if add_authorization_token and not self.token:
await self.refresh_token()
try:
return await self._api_call_impl(verb, action, params, add_authorization_token)
except InvalidToken:
if not retry and add_authorization_token:
await self.refresh_token()
return await self.api_call(verb, action, params, add_authorization_token, True)
raise | Send api call. |
375,592 | def body(self) -> Union[bytes, str, List[Any], Dict[Any, Any], RawIOBase, None]:
        return self._body | Get the body.
375,593 | def get_eci_assignment_number(encoding):
try:
return consts.ECI_ASSIGNMENT_NUM[codecs.lookup(encoding).name]
except KeyError:
        # error message wording reconstructed (original literal lost in extraction)
        raise QRCodeError(
            'Unsupported encoding "{0}"'.format(encoding)) | \
Returns the ECI number for the provided encoding.
:param str encoding: An encoding name
:return str: The ECI number. |
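A one-line usage sketch; encoding aliases are normalized through ``codecs.lookup``, and UTF-8's ECI designator is 26 per ISO/IEC 18004:

get_eci_assignment_number('utf-8')   # UTF-8's ECI designator (26)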
375,594 | def MSTORE8(self, address, value):
if istainted(self.pc):
for taint in get_taints(self.pc):
value = taint_with(value, taint)
self._allocate(address, 1)
self._store(address, Operators.EXTRACT(value, 0, 8), 1) | Save byte to memory |
375,595 | def fix_nls(self, in_, out_):
if 0 == len(in_) or 0 == len(out_):
return out_
if "\r" in out_ and "\r" not in in_:
out_ = out_.replace("\r", )
if "\n" == in_[0] and "\n" != out_[0]:
out_ = "\n" + out_
elif "\n" != in_[0] and "\n" == out_[0]:
out_ = out_.lstrip()
if 0 == len(out_):
pass
elif "\n" == in_[-1] and "\n" != out_[-1]:
out_ = out_ + "\n"
elif "\n" != in_[-1] and "\n" == out_[-1]:
out_ = out_.rstrip()
return out_ | Fixes submitted translations by filtering carriage returns and pairing
newlines at the beginning and end of the translated string with the original |
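A short illustration of the newline pairing (``obj`` stands in for an instance of the owning class; the strings are made up):

obj.fix_nls("\nHello\n", "Bonjour")     # -> "\nBonjour\n": edge newlines copied from the source string
obj.fix_nls("Hello", "\nBonjour\n")     # -> "Bonjour": stray edge newlines stripped to match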
375,596 | def parse(self):
opt, arg = self.parser.parse_known_args(self.arguments)
self.opt = opt
self.arg = arg
self.check()
opt.all = not any([
getattr(opt, stat.dest) or getattr(opt, group.dest)
for group in self.sample_stats.stats
for stat in group.stats])
if opt.since is None and opt.until is None:
opt.since, opt.until, period = did.base.Date.period(arg)
else:
opt.since = did.base.Date(opt.since or "1993-01-01")
opt.until = did.base.Date(opt.until or "today")
opt.until.date += delta(days=1)
period = "given date range"
if not opt.since.date < opt.until.date:
raise RuntimeError(
"Invalid date range ({0} to {1})".format(
opt.since, opt.until.date - delta(days=1)))
header = "Status report for {0} ({1} to {2}).".format(
period, opt.since, opt.until.date - delta(days=1))
log.debug("Gathered options:")
        log.debug("{0}".format(opt))  # literal reconstructed; logs the gathered options
return opt, header | Parse the options. |
375,597 | def _write_branch_and_tag_to_meta_yaml(self):
        with open(self.meta_yaml.replace("meta", "template"), 'r') as infile:
            dat = infile.read()
        # template keys assumed to be 'tag' and 'branch', matching the values substituted
        newdat = dat.format(**{'tag': self.tag, 'branch': self.branch})
        with open(self.meta_yaml, 'w') as outfile:
outfile.write(newdat) | Write branch and tag to meta.yaml by editing in place |
375,598 | def initialize(self, training_info, model, environment, device):
self.target_model = self.model_factory.instantiate(action_space=environment.action_space).to(device)
self.target_model.load_state_dict(model.state_dict())
self.target_model.eval()
histogram_info = model.histogram_info()
        # dictionary keys reconstructed to match the attribute names
        self.vmin = histogram_info['vmin']
        self.vmax = histogram_info['vmax']
        self.num_atoms = histogram_info['num_atoms']
        self.support_atoms = histogram_info['support_atoms']
        self.atom_delta = histogram_info['atom_delta'] | Initialize policy gradient from reinforcer settings |
375,599 | def to_rest_models(models, includes=None):
props = {}
    # JSON API style keys assumed from the docstring and helper names
    props['data'] = []
    for model in models:
        props['data'].append(_to_rest(model, includes=includes))
    props['included'] = _to_rest_includes(models, includes=includes)
return props | Convert the models into a dict for serialization
models should be an array of single model objects that
will each be serialized.
:return: dict |
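A hedged sketch of the resulting shape (the JSON API style 'data'/'included' keys mirror the reconstructed literals above and should be treated as an assumption):

payload = to_rest_models([article_one, article_two], includes=['author'])
# roughly: {'data': [<article_one>, <article_two>], 'included': [<author>, ...]}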