Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k)
---|---|---|
383,000 | def forward(self,
inputs: torch.Tensor,
word_inputs: torch.Tensor = None) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]:
if self._word_embedding is not None and word_inputs is not None:
try:
mask_without_bos_eos = (word_inputs > 0).long()
embedded_inputs = self._word_embedding(word_inputs)
type_representation, mask = add_sentence_boundary_token_ids(
embedded_inputs,
mask_without_bos_eos,
self._bos_embedding,
self._eos_embedding
)
except RuntimeError:
token_embedding = self._token_embedder(inputs)
mask = token_embedding['mask']
type_representation = token_embedding['token_embedding']
else:
token_embedding = self._token_embedder(inputs)
mask = token_embedding['mask']
type_representation = token_embedding['token_embedding']
lstm_outputs = self._elmo_lstm(type_representation, mask)
output_tensors = [
torch.cat([type_representation, type_representation], dim=-1) * mask.float().unsqueeze(-1)
]
for layer_activations in torch.chunk(lstm_outputs, lstm_outputs.size(0), dim=0):
output_tensors.append(layer_activations.squeeze(0))
return {
'activations': output_tensors,
'mask': mask,
} | Parameters
----------
inputs: ``torch.Tensor``, required.
Shape ``(batch_size, timesteps, 50)`` of character ids representing the current batch.
word_inputs : ``torch.Tensor``, required.
If you passed a cached vocab, you can in addition pass a tensor of shape ``(batch_size, timesteps)``,
which represent word ids which have been pre-cached.
Returns
-------
Dict with keys:
``'activations'``: ``List[torch.Tensor]``
A list of activations at each layer of the network, each of shape
``(batch_size, timesteps + 2, embedding_dim)``
``'mask'``: ``torch.Tensor``
Shape ``(batch_size, timesteps + 2)`` long tensor with sequence mask.
Note that the output tensors all include additional special begin and end of sequence
markers. |
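A minimal, self-contained sketch of the boundary-token step used above (an illustration, not AllenNLP's implementation): build a mask from word ids, then prepend/append learned BOS/EOS vectors and widen the mask by two positions, which is why the outputs have shape ``(batch_size, timesteps + 2, ...)``.

    import torch

    batch, timesteps, dim = 2, 5, 4
    word_inputs = torch.tensor([[3, 7, 2, 0, 0], [5, 1, 4, 9, 0]])
    mask = (word_inputs > 0).long()                # (batch, timesteps)
    embedded = torch.randn(batch, timesteps, dim)  # stand-in word embeddings
    bos, eos = torch.randn(dim), torch.randn(dim)  # stand-in boundary vectors

    lengths = mask.sum(dim=1)
    out = embedded.new_zeros(batch, timesteps + 2, dim)
    new_mask = mask.new_zeros(batch, timesteps + 2)
    for i in range(batch):
        n = int(lengths[i])
        out[i, 0] = bos                  # begin-of-sentence marker
        out[i, 1:n + 1] = embedded[i, :n]
        out[i, n + 1] = eos              # end-of-sentence marker
        new_mask[i, :n + 2] = 1
    print(out.shape, new_mask.shape)  # torch.Size([2, 7, 4]) torch.Size([2, 7])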
383,001 | def create_role(name, policy_document=None, path=None, region=None, key=None,
keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if role_exists(name, region, key, keyid, profile):
return True
if not policy_document:
policy_document = None
try:
conn.create_role(name, assume_role_policy_document=policy_document,
path=path)
log.info('Created IAM role %s.', name)
return True
except boto.exception.BotoServerError as e:
log.error(e)
log.error('Failed to create IAM role %s.', name)
return False | Create an instance role.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.create_role myrole |
383,002 | def on_builder_inited(app):
app.cache_db_path = ":memory:"
if app.config["uqbar_book_use_cache"]:
logger.info(bold("[uqbar-book]"), nonl=True)
logger.info(" initializing cache db")
app.connection = uqbar.book.sphinx.create_cache_db(app.cache_db_path) | Hooks into Sphinx's ``builder-inited`` event. |
383,003 | def from_urdf_file(cls, urdf_file, base_elements=None, last_link_vector=None, base_element_type="link", active_links_mask=None, name="chain"):
if base_elements is None:
base_elements = ["base_link"]
links = URDF_utils.get_urdf_parameters(urdf_file, base_elements=base_elements, last_link_vector=last_link_vector, base_element_type=base_element_type)
return cls([link_lib.OriginLink()] + links, active_links_mask=active_links_mask, name=name) | Creates a chain from a URDF file
Parameters
----------
urdf_file: str
The path of the URDF file
base_elements: list of strings
List of the links beginning the chain
last_link_vector: numpy.array
Optional : The translation vector of the tip.
name: str
The name of the Chain
base_element_type: str
Optional : The type of the base elements ('link' or 'joint')
active_links_mask: list[bool]
Optional : A list of booleans indicating whether each link is active |
383,004 | def start_server(socket, projectname, xmlfilename: str) -> None:
state.initialise(projectname, xmlfilename)
server = http.server.HTTPServer(('', int(socket)), HydPyServer)
server.serve_forever() | Start the *HydPy* server using the given socket.
The folder with the given `projectname` must be available within the
current working directory. The XML configuration file must be placed
within the project folder unless `xmlfilename` is an absolute file path.
The XML configuration file must be valid concerning the schema file
`HydPyConfigMultipleRuns.xsd` (see method |ServerState.initialise|
for further information). |
383,005 | def phistogram(view, a, bins=10, rng=None, normed=False):
nengines = len(view.targets)
with view.sync_imports():
import numpy
rets = view.apply_sync(lambda a, b, rng: numpy.histogram(a,b,rng), Reference(a), bins, rng)
hists = [ r[0] for r in rets ]
lower_edges = [ r[1] for r in rets ]
lower_edges = view.pull('lower_edges', targets=0)
hist_array = numpy.array(hists).reshape(nengines, -1)
total_hist = numpy.sum(hist_array, 0)
if normed:
total_hist = total_hist/numpy.sum(total_hist,dtype=float)
return total_hist, lower_edges | Compute the histogram of a remote array a.
Parameters
----------
view
IPython DirectView instance
a : str
String name of the remote array
bins : int
Number of histogram bins
rng : (float, float)
Tuple of min, max of the range to histogram
normed : boolean
Should the histogram counts be normalized to 1 |
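The reduction above works because per-engine histograms computed with identical bins sum to the global histogram; that is why the same ``bins``/``rng`` are shipped to every engine. A local sketch of that property (no cluster required; names here are illustrative):

    import numpy as np

    data = np.random.RandomState(0).randn(1000)
    chunks = np.array_split(data, 4)      # stand-ins for 4 engines
    edges = np.linspace(-4, 4, 11)        # shared bin edges
    partial = [np.histogram(c, bins=edges)[0] for c in chunks]
    total = np.sum(partial, axis=0)
    assert (total == np.histogram(data, bins=edges)[0]).all()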
383,006 | def remove_unicode_dict(input_dict):
if isinstance(input_dict, collections.Mapping):
return dict(map(remove_unicode_dict, input_dict.iteritems()))
elif isinstance(input_dict, collections.Iterable):
return type(input_dict)(map(remove_unicode_dict, input_dict))
else:
return input_dict | remove unicode keys and values from dict, encoding in utf8 |
383,007 | def parse_encoding(fp):
pos = fp.tell()
fp.seek(0)
try:
line1 = fp.readline()
has_bom = line1.startswith(codecs.BOM_UTF8)
if has_bom:
line1 = line1[len(codecs.BOM_UTF8):]
m = _PYTHON_MAGIC_COMMENT_re.match(line1.decode('ascii', 'ignore'))
if not m:
try:
import parser
parser.suite(line1.decode('ascii', 'ignore'))
except (ImportError, SyntaxError):
pass
else:
line2 = fp.readline()
m = _PYTHON_MAGIC_COMMENT_re.match(
line2.decode('ascii', 'ignore'))
if has_bom:
if m:
raise SyntaxError("python refuses to compile code with both a UTF8" \
" byte-order-mark and a magic encoding comment")
return 'utf-8'
elif m:
return m.group(1)
else:
return None
finally:
fp.seek(pos) | Deduce the encoding of a Python source file (binary mode) from magic
comment.
It does this in the same way as the `Python interpreter`__
.. __: http://docs.python.org/ref/encodings.html
The ``fp`` argument should be a seekable file object in binary mode. |
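The row does not show ``_PYTHON_MAGIC_COMMENT_re`` itself; a plausible stand-in following PEP 263 (an assumption, not the library's literal pattern) behaves like this:

    import re

    magic_re = re.compile(r'[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)')
    print(magic_re.match('# -*- coding: latin-1 -*-').group(1))  # latin-1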
383,008 | def max_entropy_distribution(node_indices, number_of_nodes):
distribution = np.ones(repertoire_shape(node_indices, number_of_nodes))
return distribution / distribution.size | Return the maximum entropy distribution over a set of nodes.
This is different from the network's uniform distribution because nodes
outside ``node_indices`` are fixed and treated as if they have only 1
state.
Args:
node_indices (tuple[int]): The set of node indices over which to take
the distribution.
number_of_nodes (int): The total number of nodes in the network.
Returns:
np.ndarray: The maximum entropy distribution over the set of nodes. |
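``repertoire_shape`` is not shown here; assuming the usual binary-node semantics (a 2-state axis for nodes in ``node_indices``, a singleton axis for every other node), the normalization spreads probability uniformly over the included nodes only:

    import numpy as np

    def repertoire_shape(node_indices, number_of_nodes):
        # assumed semantics: binary nodes; excluded nodes get a singleton axis
        return [2 if i in node_indices else 1 for i in range(number_of_nodes)]

    d = np.ones(repertoire_shape((0, 2), 3))
    d = d / d.size
    print(d.shape, d.sum())  # (2, 1, 2) 1.0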
383,009 | def partial_dependence(self, term, X=None, width=None, quantiles=None,
meshgrid=False):
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
if not isinstance(term, int):
raise ValueError('term must be an integer, but found term: {}'.format(term))
if (term >= len(self.terms)) or (term < -1):
raise ValueError('term {} out of range for model with {} terms'.format(term, len(self.terms)))
if self.terms[term].isintercept:
raise ValueError('cannot compute partial dependence for the intercept term')
if X is None:
X = self.generate_X_grid(term=term, meshgrid=meshgrid)
if meshgrid:
if not isinstance(X, tuple):
raise ValueError('X must be a tuple of grids if meshgrid=True, but found X: {}'.format(X))
shape = X[0].shape
X = self._flatten_mesh(X, term=term)
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
modelmat = self._modelmat(X, term=term)
pdep = self._linear_predictor(modelmat=modelmat, term=term)
out = [pdep]
compute_quantiles = (width is not None) or (quantiles is not None)
if compute_quantiles:
conf_intervals = self._get_quantiles(X, width=width,
quantiles=quantiles,
modelmat=modelmat,
lp=pdep,
term=term,
xform=False)
out += [conf_intervals]
if meshgrid:
for i, array in enumerate(out):
if array.ndim > 1:
depth = array.shape[-1]
shape += (depth,)
out[i] = np.reshape(array, shape)
if compute_quantiles:
return out
return out[0] | Computes the term functions for the GAM
and possibly their confidence intervals.
if both width=None and quantiles=None,
then no confidence intervals are computed
Parameters
----------
term : int, optional
Term for which to compute the partial dependence functions.
X : array-like with input data, optional
if `meshgrid=False`, then `X` should be an array-like
of shape (n_samples, m_features).
if `meshgrid=True`, then `X` should be a tuple containing
an array for each feature in the term.
if None, an equally spaced grid of points is generated.
width : float on (0, 1), optional
Width of the confidence interval.
quantiles : array-like of floats on (0, 1), optional
instead of specifying the prediction width, one can specify the
quantiles. so width=.95 is equivalent to quantiles=[.025, .975].
if None, defaults to width.
meshgrid : bool, whether to return and accept meshgrids.
Useful for creating outputs that are suitable for
3D plotting.
Note, for simple terms with no interactions, the output
of this function will be the same for ``meshgrid=True`` and
``meshgrid=False``, but the inputs will need to be different.
Returns
-------
pdeps : np.array of shape (n_samples,)
conf_intervals : list of length len(term)
containing np.arrays of shape (n_samples, 2 or len(quantiles))
Raises
------
ValueError :
If the term requested is an intercept
since it does not make sense to process the intercept term.
See Also
--------
generate_X_grid : for help creating meshgrids. |
383,010 | def match_comment(self):
match = self.match(r"<%doc>(.*?)</%doc>", re.S)
if match:
self.append_node(parsetree.Comment, match.group(1))
return True
else:
return False | matches the multiline version of a comment |
383,011 | def _find_symbol(self, module, name, fallback=None):
if not hasattr(module, name) and fallback:
return self._find_symbol(module, fallback, None)
return getattr(module, name) | Find the symbol of the specified name inside the module or raise an
exception. |
383,012 | def __parse_dois(self, x):
if isinstance(x, str):
m = clean_doi(x)
if m:
self.doi = m
elif isinstance(x, list):
for entry in x:
m = clean_doi(entry)
if m:
self.doi += m
return | Parse the Dataset_DOI field. Could be one DOI string, or a list of DOIs
:param any x: Str or List of DOI ids
:return none: the parsed DOI(s) are set on self |
383,013 | def create_request(query):
yarr_url = app.config.get('YARR_URL', False)  # config keys reconstructed
if not yarr_url:
raise ValueError('YARR_URL is not configured')  # original exception lost; plausible stand-in
api_token = app.config.get('YARR_API_TOKEN', False)
headers = {'X-API-TOKEN': api_token} if api_token else {}
payload = {'q': query}
url = '%s/api/search' % yarr_url  # endpoint path reconstructed
return requests.get(url, params=payload, headers=headers) | Creates a GET request to Yarr! server
:param query: Free-text search query
:returns: Requests object |
383,014 | def insert_lemmatisation_data(germanet_db):
germanet_db.lemmatiser.drop()
num_lemmas = 0
input_file = gzip.open(os.path.join(os.path.dirname(__file__),
LEMMATISATION_FILE))
for line in input_file:
line = line.decode().strip().split()
assert len(line) == 2
germanet_db.lemmatiser.insert(dict(list(zip(('word', 'lemma'), line))))  # field names reconstructed
num_lemmas += 1
input_file.close()
germanet_db.lemmatiser.create_index('word')  # index key reconstructed
print('Inserted {0} lemmatiser entries.'.format(num_lemmas)) | Creates the lemmatiser collection in the given MongoDB instance
using the data derived from the Projekt deutscher Wortschatz.
Arguments:
- `germanet_db`: a pymongo.database.Database object |
383,015 | def vtas2cas(tas, h):
p, rho, T = vatmos(h)
qdyn = p*((1.+rho*tas*tas/(7.*p))**3.5-1.)
cas = np.sqrt(7.*p0/rho0*((qdyn/p0+1.)**(2./7.)-1.))
cas = np.where(tas<0, -1*cas, cas)
return cas | True airspeed (TAS) to calibrated airspeed (CAS) conversion, both in m/s |
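A self-contained sanity check of the conversion above, with ``vatmos()`` replaced by assumed ISA sea-level constants (p0 = 101325 Pa, rho0 = 1.225 kg/m^3): at sea level the dynamic-pressure terms cancel and CAS equals TAS.

    import numpy as np

    p0, rho0 = 101325.0, 1.225  # assumed sea-level constants

    def tas_to_cas(tas, p, rho):
        qdyn = p * ((1. + rho * tas * tas / (7. * p))**3.5 - 1.)
        cas = np.sqrt(7. * p0 / rho0 * ((qdyn / p0 + 1.)**(2. / 7.) - 1.))
        return np.where(tas < 0, -cas, cas)

    print(tas_to_cas(100.0, p0, rho0))  # 100.0 (CAS == TAS at sea level)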
383,016 | def paginate(self, url, key, params=None):
if params is None:
params = {}
if self.per_page is not None and "per_page" not in params:
params = dict(params, per_page=self.per_page)
page = self.request(url, params=params)
while True:
try:
objects = page[key]
except (KeyError, TypeError):
raise ValueError('page does not contain a {0!r} field'.format(key))
for obj in objects:
yield obj
try:
url = page["links"]["pages"]["next"]
except KeyError:
break
page = self.request(url) | Fetch a sequence of paginated resources from the API endpoint. The
initial request to ``url`` and all subsequent requests must respond
with a JSON object; the field specified by ``key`` must be a list,
whose elements will be yielded, and the next request will be made to
the URL in the ``.links.pages.next`` field until the responses no
longer contain that field.
:param str url: the URL to make the initial request of. If ``url``
begins with a forward slash, :attr:`endpoint` is prepended to it;
otherwise, ``url`` is treated as an absolute URL.
:param str key: the field on each page containing a list of values to
yield
:param dict params: parameters to add to the initial URL's query
string. A ``"per_page"`` parameter may be included to override
the default :attr:`per_page` setting.
:rtype: generator of decoded JSON values
:raises ValueError: if a response body is not an object or ``key`` is
not one of its keys
:raises DOAPIError: if the API endpoint replies with an error |
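A stand-alone model of the loop above, with the HTTP layer replaced by a dict of canned pages (URLs and the ``widgets`` key are made up); it shows how the generator keeps following ``.links.pages.next`` until the field disappears:

    pages = {
        '/v2/widgets?page=1': {'widgets': [1, 2],
                               'links': {'pages': {'next': '/v2/widgets?page=2'}}},
        '/v2/widgets?page=2': {'widgets': [3], 'links': {}},
    }

    def paginate(url, key):
        while True:
            page = pages[url]        # stands in for self.request(url)
            for obj in page[key]:
                yield obj
            try:
                url = page['links']['pages']['next']
            except KeyError:
                break

    print(list(paginate('/v2/widgets?page=1', 'widgets')))  # [1, 2, 3]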
383,017 | def get_ip(data):
try:
ip = data.public_ips[0]
except Exception:
ip = data.private_ips[0]
return ip | Return the IP address of the VM
If the VM has public IP as defined by libcloud module then use it
Otherwise try to extract the private IP and use that one. |
def rescan_file(self, resource, date='', period='', repeat='', notify_url='', notify_changes_only='', timeout=None):
params = {'apikey': self.api_key, 'resource': resource}
try:
response = requests.post(self.base + 'file/rescan', params=params, proxies=self.proxies, timeout=timeout)
except requests.RequestException as e:
return dict(error=str(e))
return _return_response_and_status_code(response) | Rescan a previously submitted file or schedule a scan to be performed in the future.
This API allows you to rescan files present in VirusTotal's file store without having to
resubmit them, thus saving bandwidth. You only need to know one of the hashes of the file
to rescan.
:param resource: An md5/sha1/sha256 hash. You can also specify a CSV list made up of a
combination of any of the three allowed hashes (up to 25 items), this allows you to perform
a batch request with just one single call. Note that the file must already be present in our
file store.
:param date: (optional) Date in %Y%m%d%H%M%S format (example: 20120725170000) in which the rescan should
be performed. If not specified the rescan will be performed immediately.
:param period: (optional) Periodicity (in days) with which the file should be rescanned. If this argument
is provided the file will be rescanned periodically every period days, if not, the rescan is
performed once and not repeated again.
:param repeat: (optional) Used in conjunction with period to specify the number of times the file should be
rescanned. If this argument is provided the file will be rescanned the given amount of times in coherence
with the chosen periodicity, if not, the file will be rescanned indefinitely.
:param notify_url: (optional) A URL to which a POST notification should be sent when the rescan finishes.
:param notify_changes_only: (optional) Used in conjunction with notify_url. Indicates if POST notifications
should only be sent if the scan results differ from the previous one.
:param timeout: The amount of time in seconds the request should wait before timing out.
:return: JSON response that contains scan_id and permalink. |
383,019 | def _mouseMoveDrag(moveOrDrag, x, y, xOffset, yOffset, duration, tween=linear, button=None):
assert moveOrDrag in ('move', 'drag'), "moveOrDrag must be in ('move', 'drag'), not %s" % (moveOrDrag)
if sys.platform != 'darwin':
moveOrDrag = 'move'
xOffset = int(xOffset) if xOffset is not None else 0
yOffset = int(yOffset) if yOffset is not None else 0
if x is None and y is None and xOffset == 0 and yOffset == 0:
return
startx, starty = position()
x = int(x) if x is not None else startx
y = int(y) if y is not None else starty
x += xOffset
y += yOffset
width, height = size()
x = max(0, min(x, width - 1))
y = max(0, min(y, height - 1))
steps = [(x, y)]
if duration > MINIMUM_DURATION:
num_steps = max(width, height)
sleep_amount = duration / num_steps
if sleep_amount < MINIMUM_SLEEP:
num_steps = int(duration / MINIMUM_SLEEP)
sleep_amount = duration / num_steps
steps = [
getPointOnLine(startx, starty, x, y, tween(n / num_steps))
for n in range(num_steps)
]
steps.append((x, y))
for tweenX, tweenY in steps:
if len(steps) > 1:
time.sleep(sleep_amount)
_failSafeCheck()
tweenX = int(round(tweenX))
tweenY = int(round(tweenY))
if moveOrDrag == 'move':
platformModule._moveTo(tweenX, tweenY)
elif moveOrDrag == 'drag':
platformModule._dragTo(tweenX, tweenY, button)
else:
raise NotImplementedError('Unknown value of moveOrDrag: {0}'.format(moveOrDrag))
_failSafeCheck() | Handles the actual move or drag event, since different platforms
implement them differently.
On Windows & Linux, a drag is a normal mouse move while a mouse button is
held down. On OS X, a distinct "drag" event must be used instead.
The code for moving and dragging the mouse is similar, so this function
handles both. Users should call the moveTo() or dragTo() functions instead
of calling _mouseMoveDrag().
Args:
moveOrDrag (str): Either 'move' or 'drag', for the type of action this is.
x (int, float, None, optional): How far left (for negative values) or
right (for positive values) to move the cursor. 0 by default.
y (int, float, None, optional): How far up (for negative values) or
down (for positive values) to move the cursor. 0 by default.
xOffset (int, float, None, optional): How far left (for negative values) or
right (for positive values) to move the cursor. 0 by default.
yOffset (int, float, None, optional): How far up (for negative values) or
down (for positive values) to move the cursor. 0 by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the new xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None |
383,020 | def configfile_from_path(path, strict=True):
extension = path.split('.')[-1]
conf_type = FILE_TYPES.get(extension)
if not conf_type:
raise exc.UnrecognizedFileExtension(
"Cannot parse file of type {0}. Choices are {1}.".format(
extension,
FILE_TYPES.keys(),
)
)
return conf_type(path=path, strict=strict) | Get a ConfigFile object based on a file path.
This method will inspect the file extension and return the appropriate
ConfigFile subclass initialized with the given path.
Args:
path (str): The file path which represents the configuration file.
strict (bool): Whether or not to parse the file in strict mode.
Returns:
confpy.loaders.base.ConfigurationFile: The subclass which is
specialized for the given file path.
Raises:
UnrecognizedFileExtension: If there is no loader for the path. |
383,021 | def set_option(self, option, value):
CONF.set(self.CONF_SECTION, str(option), value) | Set a plugin option in configuration file.
Note: Use sig_option_changed to call it from widgets of the
same or another plugin. |
383,022 | def setColumnMapper(self, columnName, callable):
columnName = nativestring(columnName)
if ( callable is None and columnName in self._columnMappers ):
self._columnMappers.pop(columnName)
return
self._columnMappers[nativestring(columnName)] = callable | Sets the mapper for the given column name to the callable. The given
callable should accept a single argument for a record from the tree and
return the text that should be displayed in the column.
:param columnName | <str>
callable | <function> || <method> || <lambda> |
383,023 | async def play_tone(self, pin, tone_command, frequency, duration):
if tone_command == Constants.TONE_TONE:
if duration:
data = [tone_command, pin, frequency & 0x7f, (frequency >> 7) & 0x7f,
duration & 0x7f, (duration >> 7) & 0x7f]
else:
data = [tone_command, pin,
frequency & 0x7f, (frequency >> 7) & 0x7f, 0, 0]
else:
data = [tone_command, pin]
await self._send_sysex(PrivateConstants.TONE_DATA, data) | This method will call the Tone library for the selected pin.
It requires FirmataPlus to be loaded onto the arduino
If the tone command is set to TONE_TONE, then the specified
tone will be played.
Else, if the tone command is TONE_NO_TONE, then any currently
playing tone will be disabled.
:param pin: Pin number
:param tone_command: Either TONE_TONE, or TONE_NO_TONE
:param frequency: Frequency of tone
:param duration: Duration of tone in milliseconds
:returns: No return value |
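The ``& 0x7f`` / ``>> 7`` pairs above are Firmata's 7-bit payload encoding: sysex data bytes must keep the high bit clear, so each value is sent as a low-7-bits byte followed by a high-7-bits byte. A quick round-trip check:

    frequency = 440
    lsb, msb = frequency & 0x7F, (frequency >> 7) & 0x7F
    assert lsb < 0x80 and msb < 0x80      # both valid sysex data bytes
    assert (msb << 7) | lsb == frequency  # lossless for values < 16384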
383,024 | def starts(self, layer):
starts = []
for data in self[layer]:
starts.append(data[START])
return starts | Retrieve start positions of elements in the given layer. |
383,025 | def crypto_core_ed25519_is_valid_point(p):
ensure(isinstance(p, bytes) and len(p) == crypto_core_ed25519_BYTES,
'Point must be a crypto_core_ed25519_BYTES long bytes sequence',
raising=exc.TypeError)
rc = lib.crypto_core_ed25519_is_valid_point(p)
return rc == 1 | Check if ``p`` represents a point on the edwards25519 curve, in canonical
form, on the main subgroup, and that the point doesn't have a small order.
:param p: a :py:data:`.crypto_core_ed25519_BYTES` long bytes sequence
representing a point on the edwards25519 curve
:type p: bytes
:return: point validity
:rtype: bool |
383,026 | def githubtunnel(user1, server1, user2, server2, port, verbose, stanford=False):
if stanford:
port_shift = 1
else:
port_shift = 0
command1 = 'ssh -nNf -L {}:{}:22 {}@{}'.format(port-1-port_shift, server2, user1, server1)  # format strings reconstructed
command2 = 'ssh -nNf -L {}:localhost:{} {}@localhost'.format(port-port_shift, port-port_shift-1, user2)
command3 = 'ssh -nNf -L {}:localhost:{} {}@localhost'.format(port, port-1, user2)
if verbose:
print(command1)
if stanford:
print(command2)
print(command3)
try:
call(shlex.split(command1))
if stanford:
call(shlex.split(command2))
call(shlex.split(command3))
except:
print('Failed to open tunnel!')
pass
If *verbose* is true, prints various ssh commands.
If *stanford* is true, shifts ports up by 1.
Attempts to get *user1*, *user2* from environment variable ``USER_NAME`` if called from the command line. |
383,027 | def perimeter(self):
return sum([a.distance(b) for a, b in self.pairs()]) | Sum of the length of all sides, float. |
383,028 | def transfer(self, name, local, remote, **kwargs):
try:
remote.save(name, local.open(name))
return True
except Exception as e:
logger.error("Unable to save to remote storage. "
"About to retry." % name)
logger.exception(e)
return False | Transfers the file with the given name from the local to the remote
storage backend.
:param name: The name of the file to transfer
:param local: The local storage backend instance
:param remote: The remote storage backend instance
:returns: `True` when the transfer succeeded, `False` if not. Retries
the task when returning `False`
:rtype: bool |
383,029 | def resize_max(img, max_side):
h, w = img.shape[:2]
if h > w:
nh = max_side
nw = int(w * (nh / h))
else:
nw = max_side
nh = int(h * (nw / w))
return cv.resize(img, (nw, nh)) | Resize the image to threshold the maximum dimension within max_side
:param img:
:param max_side: Length of the maximum height or width
:return: |
383,030 | def filter(self, s, method=, order=30):
r
s = self.G._check_signal(s)
if s.ndim == 1 or s.shape[-1] not in [1, self.Nf]:
if s.ndim == 3:
raise ValueError('Third dimension (features) should be either 1 or the number of filters Nf = {}, got {}.'.format(self.Nf, s.shape))
s = np.expand_dims(s, -1)
n_features_in = s.shape[-1]
if s.ndim < 3:
s = np.expand_dims(s, 1)
n_signals = s.shape[1]
if s.ndim > 3:
raise ValueError('At most 3 dimensions: #nodes x #signals x #features.')
assert s.ndim == 3
n_features_out = self.Nf if n_features_in == 1 else 1
if method == 'exact':
axis = 1 if n_features_in == 1 else 2
f = self.evaluate(self.G.e)
f = np.expand_dims(f.T, axis)
assert f.shape == (self.G.N, n_features_in, n_features_out)
s = self.G.gft(s)
s = np.matmul(s, f)
s = self.G.igft(s)
elif method == 'chebyshev':
c = approximations.compute_cheby_coeff(self, m=order)
if n_features_in == 1:
s = s.squeeze(axis=2)
s = approximations.cheby_op(self.G, c, s)
s = s.reshape((self.G.N, n_features_out, n_signals), order='F')
s = s.swapaxes(1, 2)
elif n_features_in == self.Nf:
s = s.swapaxes(1, 2)
s_in = s.reshape(
(self.G.N * n_features_in, n_signals), order='F')
s = np.zeros((self.G.N, n_signals))
tmpN = np.arange(self.G.N, dtype=int)
for i in range(n_features_in):
s += approximations.cheby_op(self.G,
c[i],
s_in[i * self.G.N + tmpN])
s = np.expand_dims(s, 2)
else:
raise ValueError('Unknown method {}.'.format(method))
return s.squeeze() | r"""Filter signals (analysis or synthesis).
A signal is defined as a rank-3 tensor of shape ``(N_NODES, N_SIGNALS,
N_FEATURES)``, where ``N_NODES`` is the number of nodes in the graph,
``N_SIGNALS`` is the number of independent signals, and ``N_FEATURES``
is the number of features which compose a graph signal, or the
dimensionality of a graph signal. For example if you filter a signal
with a filter bank of 8 filters, you're extracting 8 features and
decomposing your signal into 8 parts. That is called analysis. Your are
thus transforming your signal tensor from ``(G.N, 1, 1)`` to ``(G.N, 1,
8)``. Now you may want to combine back the features to form an unique
signal. For this you apply again 8 filters, one filter per feature, and
sum the result up. As such you're transforming your ``(G.N, 1, 8)``
tensor signal back to ``(G.N, 1, 1)``. That is known as synthesis. More
generally, you may want to map a set of features to another, though
that is not implemented yet.
The method computes the transform coefficients of a signal :math:`s`,
where the atoms of the transform dictionary are generalized
translations of each graph spectral filter to each vertex on the graph:
.. math:: c = D^* s,
where the columns of :math:`D` are :math:`g_{i,m} = T_i g_m` and
:math:`T_i` is a generalized translation operator applied to each
filter :math:`\hat{g}_m(\cdot)`. Each column of :math:`c` is the
response of the signal to one filter.
In other words, this function is applying the analysis operator
:math:`D^*`, respectively the synthesis operator :math:`D`, associated
with the frame defined by the filter bank to the signals.
Parameters
----------
s : array_like
Graph signals, a tensor of shape ``(N_NODES, N_SIGNALS,
N_FEATURES)``, where ``N_NODES`` is the number of nodes in the
graph, ``N_SIGNALS`` the number of independent signals you want to
filter, and ``N_FEATURES`` is either 1 (analysis) or the number of
filters in the filter bank (synthesis).
method : {'exact', 'chebyshev'}
Whether to use the exact method (via the graph Fourier transform)
or the Chebyshev polynomial approximation. A Lanczos
approximation is coming.
order : int
Degree of the Chebyshev polynomials.
Returns
-------
s : ndarray
Graph signals, a tensor of shape ``(N_NODES, N_SIGNALS,
N_FEATURES)``, where ``N_NODES`` and ``N_SIGNALS`` are the number
of nodes and signals of the signal tensor that pas passed in, and
``N_FEATURES`` is either 1 (synthesis) or the number of filters in
the filter bank (analysis).
References
----------
See :cite:`hammond2011wavelets` for details on filtering graph signals.
Examples
--------
Create a bunch of smooth signals by low-pass filtering white noise:
>>> import matplotlib.pyplot as plt
>>> G = graphs.Ring(N=60)
>>> G.estimate_lmax()
>>> s = np.random.RandomState(42).uniform(size=(G.N, 10))
>>> taus = [1, 10, 100]
>>> s = filters.Heat(G, taus).filter(s)
>>> s.shape
(60, 10, 3)
Plot the 3 smoothed versions of the 10th signal:
>>> fig, ax = plt.subplots()
>>> G.set_coordinates('line1D') # To visualize multiple signals in 1D.
>>> _ = G.plot(s[:, 9, :], ax=ax)
>>> legend = [r'$\tau={}$'.format(t) for t in taus]
>>> ax.legend(legend) # doctest: +ELLIPSIS
<matplotlib.legend.Legend object at ...>
Low-pass filter a delta to create a localized smooth signal:
>>> G = graphs.Sensor(30, seed=42)
>>> G.compute_fourier_basis() # Reproducible computation of lmax.
>>> s1 = np.zeros(G.N)
>>> s1[13] = 1
>>> s1 = filters.Heat(G, 3).filter(s1)
>>> s1.shape
(30,)
Filter and reconstruct our signal:
>>> g = filters.MexicanHat(G, Nf=4)
>>> s2 = g.analyze(s1)
>>> s2.shape
(30, 4)
>>> s2 = g.synthesize(s2)
>>> s2.shape
(30,)
Look how well we were able to reconstruct:
>>> fig, axes = plt.subplots(1, 2)
>>> _ = G.plot(s1, ax=axes[0])
>>> _ = G.plot(s2, ax=axes[1])
>>> print('{:.5f}'.format(np.linalg.norm(s1 - s2)))
0.26808
Perfect reconstruction with Itersine, a tight frame:
>>> g = filters.Itersine(G)
>>> s2 = g.analyze(s1, method='exact')
>>> s2 = g.synthesize(s2, method='exact')
>>> np.linalg.norm(s1 - s2) < 1e-10
True |
383,031 | def lorenz_animation(N_trajectories=20, rseed=1, frames=200, interval=30):
from scipy import integrate
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import cnames
def lorentz_deriv(coords, t0, sigma=10., beta=8./3, rho=28.0):
x, y, z = coords
return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]
np.random.seed(rseed)
x0 = -15 + 30 * np.random.random((N_trajectories, 3))
t = np.linspace(0, 2, 500)
x_t = np.asarray([integrate.odeint(lorentz_deriv, x0i, t)
for x0i in x0])
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1], projection='3d')
ax.axis('off')
colors = plt.cm.jet(np.linspace(0, 1, N_trajectories))
lines = sum([ax.plot([], [], [], '-', c=c)
for c in colors], [])
pts = sum([ax.plot([], [], [], 'o', c=c, ms=4)
for c in colors], [])
ax.set_xlim((-25, 25))
ax.set_ylim((-35, 35))
ax.set_zlim((5, 55))
ax.view_init(30, 0)
def init():
for line, pt in zip(lines, pts):
line.set_data([], [])
line.set_3d_properties([])
pt.set_data([], [])
pt.set_3d_properties([])
return lines + pts
def animate(i):
i = (2 * i) % x_t.shape[1]
for line, pt, xi in zip(lines, pts, x_t):
x, y, z = xi[:i + 1].T
line.set_data(x, y)
line.set_3d_properties(z)
pt.set_data(x[-1:], y[-1:])
pt.set_3d_properties(z[-1:])
ax.view_init(30, 0.3 * i)
fig.canvas.draw()
return lines + pts
return animation.FuncAnimation(fig, animate, init_func=init,
frames=frames, interval=interval) | Plot a 3D visualization of the dynamics of the Lorenz system |
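Typical usage of the function above (illustrative; saving to mp4 additionally requires ffmpeg):

    # anim = lorenz_animation(N_trajectories=20, frames=200, interval=30)
    # anim.save('lorenz.mp4', fps=30)  # or plt.show() in an interactive session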
383,032 | def analyze_internal_angles(self, return_plot=False):
angles = self.get_internal_angles().flatten()
print('Minimal angle: {0} degrees'.format(np.min(angles)))
print('Maximal angle: {0} degrees'.format(np.max(angles)))
for i in range(10, 100, 10):
print('Angle percentile {0}%: {1:0.2f} degrees'.format(
i,
np.percentile(angles, i),
))
if return_plot:
print('generating plot...')
fig, ax = plt.subplots(1, 1, figsize=(12 / 2.54, 8 / 2.54))
ax.hist(angles, int(angles.size / 10))
ax.set_xlabel('angle [deg]')  # axis labels reconstructed
ax.set_ylabel('count')
fig.tight_layout()
return fig, ax | Analyze the internal angles of the grid. Angles shouldn't be too
small because this can cause problems/uncertainties in the
Finite-Element solution of the forward problem. This function prints
the min/max values, as well as quantiles, to the command line, and can
also produce a histogram plot of the angles.
Parameters
----------
return_plot: bool
if true, return (fig, ax) objects of the histogram plot
Returns
-------
fig: matplotlib.figure
figure object
ax: matplotlib.axes
axes object
Examples
--------
>>> import crtomo.grid as CRGrid
grid = CRGrid.crt_grid()
grid.load_elem_file('elem.dat')
fig, ax = grid.analyze_internal_angles(return_plot=True)
This grid was sorted using CutMcK. The nodes were resorted!
Triangular grid found
Minimal angle: 22.156368696965796 degrees
Maximal angle: 134.99337326279496 degrees
Angle percentile 10%: 51.22 degrees
Angle percentile 20%: 55.59 degrees
Angle percentile 30%: 58.26 degrees
Angle percentile 40%: 59.49 degrees
Angle percentile 50%: 59.95 degrees
Angle percentile 60%: 60.25 degrees
Angle percentile 70%: 61.16 degrees
Angle percentile 80%: 63.44 degrees
Angle percentile 90%: 68.72 degrees
generating plot...
>>> # save to file with
fig.savefig('element_angles.png', dpi=300) |
383,033 | def write(self, data):
if self.closed:
raise ConnectionResetError(
'%s - cannot write to a closed connection' % self
)
else:
t = self.transport
if self._paused or self._buffer:
self._buffer.appendleft(data)
self._buffer_size += len(data)
self._write_from_buffer()
if self._buffer_size > 2 * self._b_limit:
if self._waiter and not self._waiter.cancelled():
self.logger.warning(
'Buffer size %d above high-water mark %d',
self._buffer_size, self._b_limit
)
else:
t.pause_reading()
self._waiter = self._loop.create_future()
else:
t.write(data)
self.changed()
return self._waiter | Write ``data`` into the wire.
Returns an empty tuple or a :class:`~asyncio.Future` if this
protocol has paused writing. |
383,034 | def authenticate(session, username, password):
if not username or not password:
raise AuthenticationError()
user = session.query(PasswordUser).filter(
PasswordUser.username == username).first()
if not user:
raise AuthenticationError()
if not user.authenticate(password):
raise AuthenticationError()
log.info("User %s successfully authenticated", username)
return user | Authenticate a PasswordUser with the specified
username/password.
:param session: An active SQLAlchemy session
:param username: The username
:param password: The password
:raise AuthenticationError: if an error occurred
:return: a PasswordUser |
383,035 | def handle_request(self):
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
self.workerpool.run(self.process_request_thread,
**{'request': request,
'client_address': client_address}) | simply collect requests and put them on the queue for the workers. |
383,036 | def removeAssociation(self, server_url, handle):
assoc = self.getAssociation(server_url, handle)
if assoc is None:
return 0
else:
filename = self.getAssociationFilename(server_url, handle)
return _removeIfPresent(filename) | Remove an association if it exists. Do nothing if it does not.
(str, str) -> bool |
383,037 | def get(self, model_class, strict=True, returnDict=False, fetchOne=False, **where):
self.typeassert(model_class, strict, returnDict, where)
table = model_class.__name__.lower()
with Session(self.settings) as conn:
if not where:
query = f"SELECT * FROM {table}"
else:
query = f"SELECT * FROM {table} WHERE"
index = 1
operator = '=' if strict else 'RLIKE'
for key, value in where.items():
if index == 1:
query += " %s %s '%s' " % (key, operator, value)
else:
query += " AND %s %s '%s' " % (key, operator, value)
index += 1
try:
cursor=conn.cursor()
cursor.execute(query)
except mysql.Error as e:
if e.errno == 1146:
print(f"The table {table} does not exist")
return []
else:
raise e
else:
if fetchOne:
colnames = [d[0] for d in cursor.description]
results = cursor.fetchone()
if returnDict:
return {col: val for col, val in zip(colnames, results)}\
if results else {}
return results
return self.handleResult(cursor, returnDict) | params:
model_class: The queried model class
strict: bool -> If True, queries are run with EQUAL(=) operator.
If False: Queries are run with RLIKE keyword
returnDict: bool -> Return a list of dictionaries (field_names: values)
fetchOne: bool -> cursor.fetchone() else: cursor.fetchall()
where: **kwargs for the query's WHERE condition.
if where in {}: Returns all results in the table
Usage:
print(Session().get(Employee, id=1, returnDict=True)) |
383,038 | def add_text(self, tag, text, global_step=None):
self._file_writer.add_summary(text_summary(tag, text), global_step)
if tag not in self._text_tags:
self._text_tags.append(tag)
extension_dir = self.get_logdir() + '/plugins/tensorboard_text/'
if not os.path.exists(extension_dir):
os.makedirs(extension_dir)
with open(extension_dir + 'tensors.json', 'w') as fp:
json.dump(self._text_tags, fp) | Add text data to the event file.
Parameters
----------
tag : str
Name for the `text`.
text : str
Text to be saved to the event file.
global_step : int
Global step value to record. |
383,039 | def extract_secs(self, tx, tx_in_idx):
sc = tx.SolutionChecker(tx)
tx_context = sc.tx_context_for_idx(tx_in_idx)
solution_stack = []
for puzzle_script, solution_stack, flags, sighash_f in sc.puzzle_and_solution_iterator(tx_context):
for opcode, data, pc, new_pc in self._script_tools.get_opcodes(puzzle_script):
if data and is_sec(data):
yield data
for data in solution_stack:
if is_sec(data):
yield data | For a given script solution, iterate over and yield its sec blobs |
383,040 | def find_config_section(self, object_type, name=None):
possible = []
for name_options in object_type.config_prefixes:
for name_prefix in name_options:
found = self._find_sections(
self.parser.sections(), name_prefix, name)
if found:
possible.extend(found)
break
if not possible:
raise LookupError(
"No section %r (prefixed by %s) found in config %s"
% (name,
' or '.join(map(repr, _flatten(object_type.config_prefixes))),
self.filename))
if len(possible) > 1:
raise LookupError(
"Ambiguous section names %r for section %r (prefixed by %s) "
"found in config %s"
% (possible, name,
' or '.join(map(repr, _flatten(object_type.config_prefixes))),
self.filename))
return possible[0] | Return the section name with the given name prefix (following the
same pattern as ``protocol_desc`` in ``config``. It must have the
given name, or for ``'main'`` an empty name is allowed. The
prefix must be followed by a ``:``.
Case is *not* ignored. |
383,041 | def flow_meter_discharge(D, Do, P1, P2, rho, C, expansibility=1.0):
beta = Do/D
beta2 = beta*beta
return (0.25*pi*Do*Do)*C*expansibility*(
(2.0*rho*(P1 - P2))/(1.0 - beta2*beta2))**0.5 | r'''Calculates the flow rate of an orifice plate based on the geometry
of the plate, measured pressures of the orifice, and the density of the
fluid.
.. math::
m = \left(\frac{\pi D_o^2}{4}\right) C \frac{\sqrt{2\Delta P \rho_1}}
{\sqrt{1 - \beta^4}}\cdot \epsilon
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
Do : float
Diameter of orifice at flow conditions, [m]
P1 : float
Static pressure of fluid upstream of orifice at the cross-section of
the pressure tap, [Pa]
P2 : float
Static pressure of fluid downstream of orifice at the cross-section of
the pressure tap, [Pa]
rho : float
Density of fluid at `P1`, [kg/m^3]
C : float
Coefficient of discharge of the orifice, [-]
expansibility : float, optional
Expansibility factor (1 for incompressible fluids, less than 1 for
real fluids), [-]
Returns
-------
m : float
Mass flow rate of fluid, [kg/s]
Notes
-----
This is formula 1-12 in [1]_ and also [2]_.
Examples
--------
>>> flow_meter_discharge(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, rho=1.1646,
... C=0.5988, expansibility=0.9975)
0.01120390943807026
References
----------
.. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
.. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
Differential Devices Inserted in Circular Cross-Section Conduits Running
Full -- Part 2: Orifice Plates. |
383,042 | def temporal_from_resource(resource):
if isinstance(resource.identifier, URIRef):
g = Graph().parse(str(resource.identifier))
resource = g.resource(resource.identifier)
if resource.value(SCHEMA.startDate):
return db.DateRange(
start=resource.value(SCHEMA.startDate).toPython(),
end=resource.value(SCHEMA.endDate).toPython()
)
elif resource.value(SCV.min):
return db.DateRange(
start=resource.value(SCV.min).toPython(),
end=resource.value(SCV.max).toPython()
) | Parse a temporal coverage from a RDF class/resource ie. either:
- a `dct:PeriodOfTime` with schema.org `startDate` and `endDate` properties
- an inline gov.uk Time Interval value
- an URI reference to a gov.uk Time Interval ontology
http://reference.data.gov.uk/ |
383,043 | def get_parent_vault_ids(self, vault_id):
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalog_ids(catalog_id=vault_id)
return self._hierarchy_session.get_parents(id_=vault_id) | Gets the parent ``Ids`` of the given vault.
arg: vault_id (osid.id.Id): a vault ``Id``
return: (osid.id.IdList) - the parent ``Ids`` of the vault
raise: NotFound - ``vault_id`` is not found
raise: NullArgument - ``vault_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
383,044 | def forward(self, input_ids, target=None, mems=None):
bsz = input_ids.size(0)
tgt_len = input_ids.size(1)
last_hidden, new_mems = self.transformer(input_ids, mems)
pred_hid = last_hidden[:, -tgt_len:]
if self.sample_softmax > 0 and self.training:
assert self.config.tie_weight
logit = sample_logits(self.transformer.word_emb, self.out_layer.bias, target, pred_hid, self.sampler)
softmax_output = -F.log_softmax(logit, -1)[:, :, 0]
else:
softmax_output = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target)
if target is None:
softmax_output = softmax_output.view(bsz, tgt_len, -1)
else:
softmax_output = softmax_output.view(bsz, tgt_len)
return (softmax_output, new_mems) | Params:
input_ids :: [bsz, len]
target :: [bsz, len]
Returns:
tuple(softmax_output, new_mems) where:
new_mems: list (num layers) of hidden states at the entry of each layer
shape :: [mem_len, bsz, self.config.d_model] :: Warning: shapes are transposed here w. regards to input_ids
softmax_output: output of the (adaptive) softmax:
if target is None:
Negative log likelihood of shape :: [bsz, len]
else:
log probabilities of tokens, shape :: [bsz, len, n_tokens] |
383,045 | def sh_report(self, full=True, latest=False):
def pathvar_repr(var):
_var = var.replace('"', '\\"')
return '"%s"' % _var
output = []
if not self.remote_url:
output.append('#')
output = output + (
[self.label]
+ self.clone_cmd
+ [pathvar_repr(self.remote_url)]
+ [pathvar_repr(self.relpath)]
)
yield ''
yield '#'
yield ' '.join(output)
if full:
checkout_rev = self.current_id
relpath = pathvar_repr(self.relpath) if self.relpath else None
relpath = relpath if relpath else ''
checkout_branch_cmd = (
[self.label]
+ self.checkout_branch_cmd + [self.branch]
+ self.repo_abspath_cmd
+ [relpath])
checkout_rev_cmd = (
[self.label]
+ self.checkout_rev_cmd + [checkout_rev]
+ self.repo_abspath_cmd
+ [relpath])
if latest:
checkout_cmd = checkout_branch_cmd
comment = checkout_rev_cmd
else:
checkout_cmd = checkout_rev_cmd
comment = checkout_branch_cmd
yield ' '.join(c for c in checkout_cmd if c is not None)
yield '# %s' % ' '.join(c for c in comment if c is not None)
for x in self.recreate_remotes_shellcmd():
yield x | Show shell command necessary to clone this repository
If there is no primary remote url, prefix-comment the command
Keyword Arguments:
full (bool): also include commands to recreate branches and remotes
latest (bool): checkout repo.branch instead of repo.current_id
Yields:
str: shell command necessary to clone this repository |
383,046 | def AddXrefTo(self, ref_kind, classobj, methodobj, offset):
self.xrefto[classobj].add((ref_kind, methodobj, offset)) | Creates a crossreference to another class.
XrefTo means that the current class calls another class.
The current class should also be contained in the another class' XrefFrom list.
:param REF_TYPE ref_kind: type of call
:param classobj: :class:`ClassAnalysis` object to link
:param methodobj:
:param offset: Offset in the Methods Bytecode, where the call happens
:return: |
383,047 | def clean_linebreaks(self, tag):
stripped = tag.decode(formatter=None)
stripped = re.sub(r'\s*\n\s*', ' ', stripped)  # patterns reconstructed; original literals lost
stripped = re.sub(r'[ ]{2,}', ' ', stripped)
return stripped | get unicode string without any other content transformation.
and clean extra spaces |
383,048 | def columns_used(self):
return list(tz.unique(tz.concatv(
self.choosers_columns_used(),
self.alts_columns_used(),
self.interaction_columns_used()))) | Columns from any table used in the model. May come from either
the choosers or alternatives tables. |
def iterbyscore(self, min='-inf', max='+inf', start=None, num=None,
withscores=False, reverse=None):
reverse = reverse if reverse is not None else self.reversed
zfunc = self._client.zrangebyscore if not reverse \
else self._client.zrevrangebyscore
_loads = self._loads
for member in zfunc(
self.key_prefix, min=min, max=max, start=start, num=num,
withscores=withscores, score_cast_func=self.cast):
if withscores:
yield (_loads(member[0]), self.cast(member[1]))
else:
yield _loads(member) | Return a range of values from the sorted set name with scores
between @min and @max.
If @start and @num are specified, then return a slice
of the range.
@min: #int minimum score, or #str '-inf'
@max: #int minimum score, or #str '+inf'
@start: #int starting range position
@num: #int number of members to fetch
@withscores: #bool indicates to return the scores along with the
members, as a list of |(member, score)| pairs
@reverse: #bool indicating whether to sort the results descendingly
-> yields members or |(member, score)| #tuple pairs |
383,050 | def lookup_by_partial_name(self, partial_name):
for k, v in self._name_database.items():
if _uax44lm2transform(partial_name) in k:
yield v | Similar to lookup_by_name(name), this method uses loose matching rule UAX44-LM2 to attempt to find the
UnicodeCharacter associated with a name. However, it attempts to permit even looser matching by doing a
substring search instead of a simple match. This method will return a generator that yields instances of
UnicodeCharacter where the partial_name passed in is a substring of the full name.
For example:
>>> ucd = UnicodeData()
>>> for data in ucd.lookup_by_partial_name("SHARP S"):
>>> print(data.code + " " + data.name)
>>>
>>> U+00DF LATIN SMALL LETTER SHARP S
>>> U+1E9E LATIN CAPITAL LETTER SHARP S
>>> U+266F MUSIC SHARP SIGN
:param partial_name: Partial name of the character to look up.
:return: Generator that yields instances of UnicodeCharacter. |
383,051 | def national_significant_number(numobj):
national_number = U_EMPTY_STRING
if numobj.italian_leading_zero:
num_zeros = numobj.number_of_leading_zeros
if num_zeros is None:
num_zeros = 1
if num_zeros > 0:
national_number = U_ZERO * num_zeros
national_number += str(numobj.national_number)
return national_number | Gets the national significant number of a phone number.
Note that a national significant number doesn't contain a national prefix
or any formatting.
Arguments:
numobj -- The PhoneNumber object for which the national significant number
is needed.
Returns the national significant number of the PhoneNumber object passed
in. |
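A self-contained restatement of the leading-zero logic above, with the PhoneNumber object replaced by a plain namespace (the example number is illustrative):

    from types import SimpleNamespace

    def nsn(numobj):
        national_number = ''
        if numobj.italian_leading_zero:
            num_zeros = numobj.number_of_leading_zeros
            if num_zeros is None:
                num_zeros = 1   # the flag alone implies one leading zero
            if num_zeros > 0:
                national_number = '0' * num_zeros
        return national_number + str(numobj.national_number)

    n = SimpleNamespace(italian_leading_zero=True,
                        number_of_leading_zeros=None,
                        national_number=236618300)
    print(nsn(n))  # 0236618300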
383,052 | def translate_js_with_compilation_plan(js, HEADER=DEFAULT_HEADER):
match_increaser_str, match_increaser_num, compilation_plan = get_compilation_plan(
js)
cp_hash = hashlib.md5(compilation_plan.encode()).digest()
try:
python_code = cache[cp_hash]['proto_python_code']
except:
parser = pyjsparser.PyJsParser()
parsed = parser.parse(compilation_plan)
translating_nodes.clean_stacks()
python_code = translating_nodes.trans(
parsed)
cache[cp_hash] = {
'compilation_plan': compilation_plan,
'proto_python_code': python_code,
}
python_code = match_increaser_str.wrap_up(python_code)
python_code = match_increaser_num.wrap_up(python_code)
return HEADER + python_code | js has to be a javascript source code.
returns equivalent python code.
compile plans only work with the following restrictions:
- only enabled for oneliner expressions
- when there are comments in the js code string substitution is disabled
- when there nested escaped quotes string substitution is disabled, so
cacheable:
Q1 == 1 && name == 'harry'
not cacheable:
Q1 == 1 && name == 'harry' // some comment
not cacheable:
Q1 == 1 && name == 'o\'Reilly'
not cacheable:
Q1 == 1 && name /* some comment */ == 'o\'Reilly' |
383,053 | def from_las3(cls, string, lexicon=None,
source="LAS",
dlm=',',
abbreviations=False):
f = re.DOTALL | re.IGNORECASE
regex = r'\~\w+?_Parameter.+?\n(.+?)(?:\n\n+|\n*\~|\n*$)'  # pattern reconstructed; original literal lost
pattern = re.compile(regex, flags=f)
text = pattern.search(string).group(1)
s = re.search(r'SOURCE\s*[:.]\s*(.+)', string)  # pattern reconstructed; original literal lost
if s:
source = s.group(1).strip()
return cls.from_descriptions(text, lexicon,
source=source,
dlm=dlm,
abbreviations=abbreviations) | Turn LAS3 'lithology' section into a Striplog.
Args:
string (str): A section from an LAS3 file.
lexicon (Lexicon): The language for conversion to components.
source (str): A source for the data.
dlm (str): The delimiter.
abbreviations (bool): Whether to expand abbreviations.
Returns:
Striplog: The ``striplog`` object.
Note:
Handles multiple 'Data' sections. It would be smarter for it
to handle one at a time, and to deal with parsing the multiple
sections in the Well object.
Does not read an actual LAS file. Use the Well object for that. |
383,054 | def _insert_stack(stack, sample_count, call_tree):
curr_level = call_tree
for func in stack:
next_level_index = {
node['stack']: node for node in curr_level['children']}
if func not in next_level_index:
new_node = {'stack': func, 'children': [], 'sampleCount': 0}
curr_level['children'].append(new_node)
curr_level = new_node
else:
curr_level = next_level_index[func]
curr_level['sampleCount'] = sample_count
Args:
stack: Call stack.
sample_count: Sample count of call stack.
call_tree: Call tree. |
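Example of the tree the function builds (the dict keys follow the reconstruction above, which is an assumption):

    call_tree = {'stack': 'root', 'children': [], 'sampleCount': 0}
    _insert_stack(['main', 'f'], 3, call_tree)
    _insert_stack(['main', 'g'], 1, call_tree)
    # call_tree now has one 'main' child with two leaves:
    #   'f' -> sampleCount 3, 'g' -> sampleCount 1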
383,055 | def cache_page(**kwargs):
cache_timeout = kwargs.pop('cache_timeout', None)
key_prefix = kwargs.pop('key_prefix', None)
cache_min_age = kwargs.pop('cache_min_age', None)
decorator = decorators.decorator_from_middleware_with_args(CacheMiddleware)(
cache_timeout=cache_timeout,
key_prefix=key_prefix,
cache_min_age=cache_min_age,
**kwargs
)
return decorator | This decorator is similar to `django.views.decorators.cache.cache_page` |
383,056 | def on_exception(func):
class OnExceptionDecorator(LambdaDecorator):
def on_exception(self, exception):
return func(exception)
return OnExceptionDecorator | Run a function when a handler thows an exception. It's return value is
returned to AWS.
Usage::
>>> # to create a reusable decorator
>>> @on_exception
... def handle_errors(exception):
... print(exception)
... return {'statusCode': 500, 'body': 'uh oh'}
>>> @handle_errors
... def handler(event, context):
... raise Exception('it broke!')
>>> handler({}, object())
it broke!
{'statusCode': 500, 'body': 'uh oh'}
>>> # or a one off
>>> @on_exception(lambda e: {'statusCode': 500})
... def handler(body, context):
... raise Exception
>>> handler({}, object())
{'statusCode': 500} |
383,057 | def switch_focus(self, layout, column, widget):
for i, l in enumerate(self._layouts):
if l is layout:
break
else:
return
self._layouts[self._focus].blur()
self._focus = i
self._layouts[self._focus].focus(force_column=column,
force_widget=widget) | Switch focus to the specified widget.
:param layout: The layout that owns the widget.
:param column: The column the widget is in.
:param widget: The index of the widget to take the focus. |
383,058 | def fetch_github_activity(gen, metadata):
if 'GITHUB_ACTIVITY_FEED' in gen.settings.keys():
gen.context['github_activity'] = gen.plugin_instance.fetch() | registered handler for the github activity plugin
it puts in generator.context the html needed to be displayed on a
template |
383,059 | def load_all_distributions(self):
distributions = []
for this in dir(scipy.stats):
if "fit" in eval("dir(scipy.stats." + this +")"):
distributions.append(this)
self.distributions = distributions[:] | Replace the :attr:`distributions` attribute with all scipy distributions |
383,060 | def format_text_as_docstr(text):
r
import utool as ut
import re
min_indent = ut.get_minimum_indentation(text)
indent_ = ' ' * min_indent
formated_text = re.sub('^' + indent_, '' + indent_ + '>>> ', text,
flags=re.MULTILINE)  # patterns reconstructed; original literals lost
formated_text = re.sub('^$', '' + indent_ + '>>> #', formated_text,
flags=re.MULTILINE)
return formated_text | r"""
CommandLine:
python ~/local/vim/rc/pyvim_funcs.py --test-format_text_as_docstr
Example:
>>> # DISABLE_DOCTEST
>>> from pyvim_funcs import * # NOQA
>>> text = testdata_text()
>>> formated_text = format_text_as_docstr(text)
>>> result = ('formated_text = \n%s' % (str(formated_text),))
>>> print(result) |
383,061 | def _get_serialization_name(element_name):
known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)
if known is not None:
return known
if element_name.startswith('x_ms_'):
return element_name.replace('_', '-')
if element_name.endswith('_id'):
element_name = element_name.replace('_id', 'ID')
for name in ['content_', 'last_modified', 'if_', 'cache_control']:
if element_name.startswith(name):
element_name = element_name.replace('_', '-')
return ''.join(name.capitalize() for name in element_name.split('_')) | converts a Python name into a serializable name |
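Worked examples for the function above, assuming ``_KNOWN_SERIALIZATION_XFORMS`` holds no entry for these names:

    _KNOWN_SERIALIZATION_XFORMS = {}  # assumption for these examples
    assert _get_serialization_name('x_ms_blob_type') == 'x-ms-blob-type'
    assert _get_serialization_name('os_type') == 'OsType'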
383,062 | def parser(self):
if self._parser is None:
apkw = {
: self.description,
: self.epilog,
}
self._parser = argparse.ArgumentParser(**apkw)
if self.version:
self._parser.add_argument(
'-v', '--version', action='version',
version="%(prog)s {}".format(self.version),
)
return self._parser | Instantiates the argparse parser |
383,063 | def matches(self, filter_props):
if filter_props is None:
return False
found_one = False
for key, value in filter_props.items():
if key in self.properties and value != self.properties[key]:
return False
elif key in self.properties and value == self.properties[key]:
found_one = True
return found_one | Check if the filter matches the supplied properties. |
383,064 | def parse(template, delimiters=None):
if type(template) is not unicode:
raise Exception("Template is not unicode: %s" % type(template))
parser = _Parser(delimiters)
return parser.parse(template) | Parse a unicode template string and return a ParsedTemplate instance.
Arguments:
template: a unicode template string.
delimiters: a 2-tuple of delimiters. Defaults to the package default.
Examples:
>>> parsed = parse(u"Hey {{#who}}{{name}}!{{/who}}")
>>> print str(parsed).replace('u', '') # This is a hack to get the test to pass both in Python 2 and 3.
['Hey ', _SectionNode(key='who', index_begin=12, index_end=21, parsed=[_EscapeNode(key='name'), '!'])] |
383,065 | def get_template_loader(app, subdir=):
dmp = apps.get_app_config('django_mako_plus')
return dmp.engine.get_template_loader(app, subdir, create=True) | Convenience method that calls get_template_loader() on the DMP
template engine instance. |
383,066 | def _make_actor_method_executor(self, method_name, method, actor_imported):
def actor_method_executor(dummy_return_id, actor, *args):
self._worker.actor_task_counter += 1
try:
if is_class_method(method):
method_returns = method(*args)
else:
method_returns = method(actor, *args)
except Exception as e:
if isinstance(actor, ray.actor.Checkpointable):
if self._worker.actor_task_counter == 1:
if actor_imported:
self._restore_and_log_checkpoint(actor)
else:
self._save_and_log_checkpoint(actor)
return method_returns
return actor_method_executor | Make an executor that wraps a user-defined actor method.
The wrapped method updates the worker's internal state and performs any
necessary checkpointing operations.
Args:
method_name (str): The name of the actor method.
method (instancemethod): The actor method to wrap. This should be a
method defined on the actor class and should therefore take an
instance of the actor as the first argument.
actor_imported (bool): Whether the actor has been imported.
Checkpointing operations will not be run if this is set to
False.
Returns:
A function that executes the given actor method on the worker's
stored instance of the actor. The function also updates the
worker's internal state to record the executed method. |
383,067 | def spacing(text):
if len(text) <= 1 or not ANY_CJK.search(text):
return text
new_text = text
matched = CONVERT_TO_FULLWIDTH_CJK_SYMBOLS_CJK.search(new_text)
while matched:
start, end = matched.span()
new_text = ''.join((new_text[:start + 1], convert_to_fullwidth(new_text[start + 1:end - 1]), new_text[end - 1:]))
matched = CONVERT_TO_FULLWIDTH_CJK_SYMBOLS_CJK.search(new_text)
matched = CONVERT_TO_FULLWIDTH_CJK_SYMBOLS.search(new_text)
while matched:
start, end = matched.span()
new_text = ''.join((new_text[:start + 1].strip(), convert_to_fullwidth(new_text[start + 1:end]), new_text[end:].strip()))
matched = CONVERT_TO_FULLWIDTH_CJK_SYMBOLS.search(new_text)
new_text = DOTS_CJK.sub(r'\1 \2', new_text)  # replacement strings reconstructed; original literals lost
new_text = FIX_CJK_COLON_ANS.sub(r'\1:\2', new_text)
new_text = CJK_QUOTE.sub(r'\1 \2', new_text)
new_text = QUOTE_CJK.sub(r'\1 \2', new_text)
new_text = FIX_QUOTE_ANY_QUOTE.sub(r'\1\2\3', new_text)
new_text = CJK_SINGLE_QUOTE_BUT_POSSESSIVE.sub(r'\1 \2', new_text)
new_text = SINGLE_QUOTE_CJK.sub(r'\1 \2', new_text)
new_text = FIX_POSSESSIVE_SINGLE_QUOTE.sub(r"\1's", new_text)  # remaining substitutions garbled in extraction
return new_text.strip() | Perform paranoid text spacing on text. |
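The project's canonical example (illustrative here, since several substitution patterns above are reconstructions): spacing() inserts a space wherever CJK text meets half-width characters.

    print(spacing(u'當你凝視著bug,bug也凝視著你'))
    # 當你凝視著 bug,bug 也凝視著你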
383,068 | def save_new_channel(self):
form_info = self.input['form']
channel = Channel(typ=15, name=form_info['name'],
description=form_info['description'],
owner_id=form_info['owner_id'])
channel.blocking_save()
self.current.task_data['channel_key'] = channel.key | Saves a new channel with the specified channel features. |
383,069 | def eof_received(self) -> bool:
logger.debug("%s - event = eof_received()", self.side)
super().eof_received()
return False | Close the transport after receiving EOF.
Since Python 3.5, `:meth:~StreamReaderProtocol.eof_received` returns
``True`` on non-TLS connections.
See http://bugs.python.org/issue24539 for more information.
This is inappropriate for websockets for at least three reasons:
1. The use case is to read data until EOF with self.reader.read(-1).
Since websockets is a TLV protocol, this never happens.
2. It doesn't work on TLS connections. A falsy value must be
returned to have the same behavior on TLS and plain connections.
3. The websockets protocol has its own closing handshake. Endpoints
close the TCP connection after sending a close frame.
As a consequence we revert to the previous, more useful behavior. |
383,070 | def saltbridge(poscenter, negcenter, protispos):
data = namedtuple(
'saltbridge', 'positive negative distance protispos resnr restype reschain resnr_l restype_l reschain_l')
pairings = []
for pc, nc in itertools.product(poscenter, negcenter):
if not config.MIN_DIST < euclidean3d(pc.center, nc.center) < config.SALTBRIDGE_DIST_MAX:
continue
resnr = pc.resnr if protispos else nc.resnr
resnr_l = whichresnumber(nc.orig_atoms[0]) if protispos else whichresnumber(pc.orig_atoms[0])
restype = pc.restype if protispos else nc.restype
restype_l = whichrestype(nc.orig_atoms[0]) if protispos else whichrestype(pc.orig_atoms[0])
reschain = pc.reschain if protispos else nc.reschain
reschain_l = whichchain(nc.orig_atoms[0]) if protispos else whichchain(pc.orig_atoms[0])
contact = data(positive=pc, negative=nc, distance=euclidean3d(pc.center, nc.center), protispos=protispos,
resnr=resnr, restype=restype, reschain=reschain, resnr_l=resnr_l, restype_l=restype_l,
reschain_l=reschain_l)
pairings.append(contact)
return filter_contacts(pairings) | Detect all salt bridges (contacts between centers of positive and negative charge) |
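The detection pattern generalizes: cross-product the two candidate sets, gate each pair on a distance window, and collect namedtuples. A generic sketch with plain coordinate tuples (names and thresholds illustrative, not PLIP's):

import itertools
import math
from collections import namedtuple

Contact = namedtuple('Contact', 'a b distance')
MIN_DIST, MAX_DIST = 0.5, 5.5  # Angstroms, illustrative thresholds

def euclidean3d(p, q):
    return math.dist(p, q)

def pair_by_distance(group_a, group_b):
    pairs = []
    for a, b in itertools.product(group_a, group_b):
        d = euclidean3d(a, b)
        if MIN_DIST < d < MAX_DIST:
            pairs.append(Contact(a=a, b=b, distance=d))
    return pairs

print(pair_by_distance([(0, 0, 0)], [(3, 0, 0), (9, 0, 0)]))
# only the 3-Angstrom pair survives the distance gate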
383,071 | def setup_button_connectors(self):
self.help_button.clicked.connect(self.show_help)
self.run_button.clicked.connect(self.accept)
self.about_button.clicked.connect(self.about)
self.print_button.clicked.connect(self.show_print_dialog)
self.hazard_layer_combo.currentIndexChanged.connect(
self.index_changed_hazard_layer_combo)
self.exposure_layer_combo.currentIndexChanged.connect(
self.index_changed_exposure_layer_combo)
self.aggregation_layer_combo.currentIndexChanged.connect(
self.index_changed_aggregation_layer_combo) | Setup signal/slot mechanisms for dock buttons. |
383,072 | def photo_url(self):
if self.url is not None:
if self.soup is not None:
        # 'img'/'src' are restored from context; the avatar CSS class
        # and the size-suffix replacements are assumptions.
        img = self.soup.find('img', class_='Avatar')['src']
        return img.replace('_l', '_r')
    else:
        assert (self.card is not None)
        return PROTOCOL + self.card.img['src'].replace('_m', '_r')
else:
    # The default avatar URL literal was elided in the source dump.
    return '' | Get the URL of the user's avatar image.
:return: user avatar URL
:rtype: str |
383,073 | def Popen(self, cmd, **kwargs):
prefixed_cmd = self._prepare_cmd(cmd)
return subprocess.Popen(prefixed_cmd, **kwargs) | Remote Popen. |
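A minimal sketch of this delegation pattern: a wrapper rewrites the command (here by adding a prefix, e.g. for remote or sandboxed execution) before handing it to subprocess.Popen. The prefix used below is illustrative; `_prepare_cmd` in the record presumably does something similar.

import subprocess

class RemoteRunner:
    def __init__(self, prefix):
        self.prefix = prefix  # e.g. ['ssh', 'host'] -- illustrative

    def _prepare_cmd(self, cmd):
        # Prepend the configured prefix to the argument list.
        return self.prefix + list(cmd)

    def Popen(self, cmd, **kwargs):
        return subprocess.Popen(self._prepare_cmd(cmd), **kwargs)

runner = RemoteRunner(['env', 'LC_ALL=C'])
proc = runner.Popen(['echo', 'hello'], stdout=subprocess.PIPE)
print(proc.communicate()[0])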
383,074 | def build_matlab(static=False):
cfg = get_config()
if 'matlab_bin' in cfg and cfg['matlab_bin'] != '':
    matlab_bin = cfg['matlab_bin'].strip()
else:
    matlab_bin = which_matlab()
if matlab_bin is None:
    raise ValueError("specify 'matlab_bin' in cfg file")
# Ask MATLAB for the platform-specific mex extension.
extcmd = esc(os.path.join(matlab_bin, "mexext"))
extension = subprocess.check_output(extcmd, shell=use_shell)
extension = extension.decode().rstrip()
# Build the messenger mex against the configured zmq paths; the source
# file name 'messenger.c' is implied by the docstring but was lost.
mex = esc(os.path.join(matlab_bin, "mex"))
paths = "-L%(zmq_lib)s -I%(zmq_inc)s" % cfg
make_cmd = '%s -O %s messenger.c' % (mex, paths)
if static:
    make_cmd += ' -DZMQ_STATIC'
do_build(make_cmd, 'messenger.%s' % extension) | build the messenger mex for MATLAB
static : bool
Determines if the zmq library has been statically linked.
If so, it will append the command line option -DZMQ_STATIC
when compiling the mex so it matches libzmq. |
383,075 | def load(self, path=None):
paths = self._paths.copy()
if path:
    if path.is_dir():
        # File-name pattern is an assumption; the literal was lost in extraction.
        path /= '{}.conf'.format(self._configuration_name)
    paths.append(path)
paths = [(path_._root / str(x)[1:] if x.is_absolute() else x) for x in paths]
if path:
    path = paths[-1]
    if not path.exists():
        raise ValueError('Configuration file does not exist: {}'.format(path))
config_parser = ConfigParser(
    inline_comment_prefixes=('#', ';'),
    empty_lines_in_values=False,
    default_section='default',
    interpolation=ExtendedInterpolation()
)
def option_transform(name):
    # Normalize option names; the separator literals are assumptions.
    return name.replace('-', '_').replace(' ', '_').lower()
config_parser.optionxform = option_transform
with suppress(FileNotFoundError):
    # Resource-name pattern is an assumption; the literal was lost in extraction.
    defaults_contents = resource_string(self._package_name, '{}.defaults.conf'.format(self._configuration_name))
    config_parser.read_string(defaults_contents.decode())
config_parser.read([str(x) for x in paths])
config = {k: dict(v) for k, v in config_parser.items()}
del config['default']
return config | Load configuration (from configuration files).
Parameters
----------
path : ~pathlib.Path or None
Path to configuration file, which must exist; or path to directory
containing a configuration file; or None.
Returns
-------
~typing.Dict[str, ~typing.Dict[str, str]]
The configuration as a dict of sections mapping section name to
options. Each options dict maps from option name to option value. The
``default`` section is not included. However, all options from the
``default`` section are included in each returned section.
Raises
------
ValueError
If ``path`` is a missing file; or if it is a directory which does not
contain the configuration file.
Examples
--------
>>> loader.load()
{
'section1': {
'option1': 'value',
'option2': 'value2',
}
} |
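The stdlib pieces this loader relies on can be exercised standalone. A sketch showing ExtendedInterpolation, a default section, and a custom optionxform (the name-normalization rule mirrors the assumption made above):

from configparser import ConfigParser, ExtendedInterpolation

parser = ConfigParser(
    default_section='default',
    interpolation=ExtendedInterpolation(),
)
# Normalize option names the same way the loader does.
parser.optionxform = lambda name: name.replace('-', '_').lower()

parser.read_string("""
[default]
root = /data
[section1]
Option-1 = ${root}/value
""")
config = {k: dict(v) for k, v in parser.items()}
del config['default']
print(config)  # {'section1': {'root': '/data', 'option_1': '/data/value'}}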
383,076 | def from_pb(cls, operation_pb, client, **caller_metadata):
result = cls(operation_pb.name, client, **caller_metadata)
result._update_state(operation_pb)
result._from_grpc = True
return result | Factory: construct an instance from a protobuf.
:type operation_pb:
:class:`~google.longrunning.operations_pb2.Operation`
:param operation_pb: Protobuf to be parsed.
:type client: object: must provide ``_operations_stub`` accessor.
:param client: The client used to poll for the status of the operation.
:type caller_metadata: dict
:param caller_metadata: caller-assigned metadata about the operation
:rtype: :class:`Operation`
:returns: new instance, with attributes based on the protobuf. |
383,077 | def get(self):
custom_arg = {}
if self.key is not None and self.value is not None:
custom_arg[self.key] = self.value
return custom_arg | Get a JSON-ready representation of this CustomArg.
:returns: This CustomArg, ready for use in a request body.
:rtype: dict |
383,078 | def infer_child_relations(graph, node: BaseEntity) -> List[str]:
return list(_infer_child_relations_iter(graph, node)) | Propagate causal relations to children. |
383,079 | def get_meta_attributes(self, **kwargs):
superuser = kwargs.get('superuser', False)
# The qualifier and view-type literals below were lost in extraction;
# the quoted values are placeholders, not the original UNTL strings.
if (self.untl_object.qualifier == 'system'
        or self.untl_object.qualifier == 'hidden'):
    if superuser:
        self.editable = True
        self.repeatable = True
    else:
        self.editable = False
        self.view_type = 'none'
elif self.untl_object.qualifier == 'metadataCreationDate':
    self.label = 'Metadata Creation Date'
    self.view_type = 'readonly'
else:
    self.editable = False
    self.view_type = 'none' | Determine the form attributes for the meta field. |
383,080 | def parseSOAPMessage(data, ipAddr):
"parse raw XML data string, return a (minidom) xml document"
try:
dom = minidom.parseString(data)
except Exception:
return None
if dom.getElementsByTagNameNS(NS_S, "Fault"):
return None
soapAction = dom.getElementsByTagNameNS(NS_A, "Action")[0].firstChild.data.strip()
if soapAction == ACTION_PROBE:
return parseProbeMessage(dom)
elif soapAction == ACTION_PROBE_MATCH:
return parseProbeMatchMessage(dom)
elif soapAction == ACTION_RESOLVE:
return parseResolveMessage(dom)
elif soapAction == ACTION_RESOLVE_MATCH:
return parseResolveMatchMessage(dom)
elif soapAction == ACTION_BYE:
return parseByeMessage(dom)
elif soapAction == ACTION_HELLO:
return parseHelloMessage(dom) | parse raw XML data string, return a (minidom) xml document |
383,081 | def get_acf(x, axis=0, fast=False):
x = np.atleast_1d(x)
m = [slice(None), ] * len(x.shape)
if fast:
n = int(2 ** np.floor(np.log2(x.shape[axis])))
m[axis] = slice(0, n)
x = x[tuple(m)]  # crop to the largest 2**n entries along the time axis
else:
n = x.shape[axis]
f = np.fft.fft(x - np.mean(x, axis=axis), n=2 * n, axis=axis)
m[axis] = slice(0, n)
acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
m[axis] = 0
return acf / acf[tuple(m)] | Estimate the autocorrelation function of a time series using the FFT.
:param x:
The time series. If multidimensional, set the time axis using the
``axis`` keyword argument and the function will be computed for every
other axis.
:param axis: (optional)
The time axis of ``x``. Assumed to be the first axis if not specified.
:param fast: (optional)
If ``True``, only use the largest ``2^n`` entries for efficiency.
(default: False) |
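Hedged usage sketch: the normalized ACF of an AR(1) process should decay roughly geometrically, so the function is easy to sanity-check (assumes get_acf above is in scope):

import numpy as np

# Generate an AR(1) series: x_t = 0.9 * x_{t-1} + noise.
rng = np.random.default_rng(0)
x = np.zeros(10000)
for t in range(1, len(x)):
    x[t] = 0.9 * x[t - 1] + rng.normal()

acf = get_acf(x, fast=True)  # assumes the function above is in scope
print(acf[0], acf[1])        # ~1.0 and ~0.9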
383,082 | def load_credential_file(self, path):
c_data = StringIO.StringIO()
c_data.write("[Credentials]\n")
for line in open(path, "r").readlines():
c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id").replace("AWSSecretKey", "aws_secret_access_key"))
c_data.seek(0)
self.readfp(c_data) | Load a credential file as is setup like the Java utilities |
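The same rewrite is easy to see in isolation: the Java-style keys are renamed so ConfigParser can read them under an added [Credentials] header. A Python 3 rendition of the record's Python 2 logic:

from io import StringIO
from configparser import ConfigParser

java_style = "AWSAccessKeyId=AKID\nAWSSecretKey=SECRET\n"

buf = StringIO()
buf.write("[Credentials]\n")
for line in java_style.splitlines(keepends=True):
    # Rename the Java-utility keys to boto's expected option names.
    buf.write(line.replace("AWSAccessKeyId", "aws_access_key_id")
                  .replace("AWSSecretKey", "aws_secret_access_key"))
buf.seek(0)

parser = ConfigParser()
parser.read_file(buf)
print(parser["Credentials"]["aws_access_key_id"])  # AKID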
383,083 | def _get_clause_words( sentence_text, clause_id ):
clause = []
isEmbedded = False
indices = sentence_text.clause_indices
clause_anno = sentence_text.clause_annotations
for wid, token in enumerate(sentence_text[WORDS]):
if indices[wid] == clause_id:
if not clause and clause_anno[wid] == EMBEDDED_CLAUSE_START:
isEmbedded = True
clause.append((wid, token))
return clause, isEmbedded | Collects clause with index *clause_id* from given *sentence_text*.
Returns a pair (clause, isEmbedded), where:
*clause* is a list of word tokens in the clause;
*isEmbedded* is a bool indicating whether the clause is embedded; |
383,084 | def make_heading_authors(self, authors):
# Tag/attribute literals below were lost in extraction; 'collab',
# 'anonymous', 'sup', 'a', 'href', and 'rid' follow JATS/lxml usage,
# while the 'h3'/'authors' heading markup is an assumption.
author_element = etree.Element('h3', {'class': 'authors'})
first = True
for author in authors:
    if first:
        first = False
    else:
        append_new_text(author_element, ',', join_str='')
    collab = author.find('collab')
    anon = author.find('anonymous')
    if collab is not None:
        append_all_below(author_element, collab)
    elif anon is not None:
        append_new_text(author_element, 'Anonymous')
    else:
        author_name, _ = self.get_contrib_names(author)
        append_new_text(author_element, author_name)
    first = True
    for xref in author.xpath("./xref[@ref-type='aff' or @ref-type='corresp']"):
        _sup = xref.find('sup')
        sup_text = all_text(_sup) if _sup is not None else ''
        auth_sup = etree.SubElement(author_element, 'sup')
        sup_link = etree.SubElement(auth_sup,
                                    'a',
                                    {'href': self.main_fragment.format(xref.attrib['rid'])})
        sup_link.text = sup_text
        if first:
            first = False
        else:
            append_new_text(auth_sup, ',', join_str='')
return author_element | Constructs the Authors content for the Heading. This should display
directly after the Article Title.
Metadata element, content derived from FrontMatter |
383,085 | def add_aacgm_coordinates(inst, glat_label=, glong_label=,
alt_label=):
import aacgmv2
aalat = []; aalon = []; mlt = []
for lat, lon, alt, time in zip(inst[glat_label], inst[glong_label], inst[alt_label],
inst.data.index):
tlat, tlon, tmlt = aacgmv2.get_aacgm_coord(lat, lon, alt, time)
aalat.append(tlat)
aalon.append(tlon)
mlt.append(tmlt)
inst['aacgm_lat'] = aalat
inst['aacgm_long'] = aalon
inst['aacgm_mlt'] = mlt
# Metadata literals were lost in extraction; the units/long_name values
# below are assumptions.
inst.meta['aacgm_lat'] = {'units': 'degrees', 'long_name': 'AACGM latitude'}
inst.meta['aacgm_long'] = {'units': 'degrees', 'long_name': 'AACGM longitude'}
inst.meta['aacgm_mlt'] = {'units': 'hrs', 'long_name': 'AACGM magnetic local time'}
return | Uses AACGMV2 package to add AACGM coordinates to instrument object.
The Altitude Adjusted Corrected Geomagnetic Coordinates library is used
to calculate the latitude, longitude, and local time
of the spacecraft with respect to the geomagnetic field.
Example
-------
# function added below modifies the inst object upon every inst.load call
inst.custom.add(add_aacgm_coordinates, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees N)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees E)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include AACGM coordinates: 'aacgm_lat'
for magnetic latitude, 'aacgm_long' for longitude, and 'aacgm_mlt' for magnetic local time. |
383,086 | def target_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/targets
api_path = "/api/v2/targets/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/targets#show-target |
383,087 | def avail_locations(call=None):
if call == 'action':
    raise SaltCloudSystemExit(
        'The avail_locations function must be called with '
        '-f or --function, or with the --list-locations option.'
    )
ret = {}
conn = get_conn()
# Key literals and the split separator were lost in extraction;
# 'locations', 'name', 'id', and '/' are assumptions.
for item in conn.list_locations()['locations']:
    reg, loc = item['name'].split('/')
    location = {'id': item['id']}
if reg not in ret:
ret[reg] = {}
ret[reg][loc] = location
return ret | Return a dict of all available VM locations on the cloud provider with
relevant data |
383,088 | def _select_position(self, w, h):
fitn = ((m.y+h, m.x, w, h, m) for m in self._max_rects
if self._rect_fitness(m, w, h) is not None)
fitr = ((m.y+w, m.x, h, w, m) for m in self._max_rects
if self._rect_fitness(m, h, w) is not None)
if not self.rot:
fitr = []
fit = itertools.chain(fitn, fitr)
try:
_, _, w, h, m = min(fit, key=first_item)
except ValueError:
return None, None
return Rectangle(m.x, m.y, w, h), m | Select the position where the y coordinate of the top of the rectangle
is lowest; if there are several, pick the one with the smallest x
coordinate |
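The selection rule is easy to see in isolation: score each candidate placement by the resulting top edge (y + h), break ties by x, and take the minimum, optionally also trying the rotated orientation. A generic sketch with plain (x, y, width, height) tuples, independent of the rectpack classes above:

import itertools

def select_position(free_rects, w, h, allow_rotation=True):
    # Each candidate scores as (top_edge, x); min() picks the lowest
    # top edge, then the smallest x coordinate.
    fits = (((r[1] + h, r[0]), (r[0], r[1], w, h))
            for r in free_rects if r[2] >= w and r[3] >= h)
    rotated = (((r[1] + w, r[0]), (r[0], r[1], h, w))
               for r in free_rects if r[2] >= h and r[3] >= w)
    candidates = itertools.chain(fits, rotated if allow_rotation else [])
    try:
        _, placement = min(candidates, key=lambda c: c[0])
    except ValueError:
        return None  # nothing fits
    return placement

free = [(0, 0, 10, 10), (10, 0, 5, 20)]  # (x, y, width, height)
print(select_position(free, 4, 3))       # (0, 0, 4, 3)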
383,089 | def run(self, lines):
if (not self.adjust_path) and (not self.image_ext):
return lines
ret = []
for line in lines:
processed = {}
while True:
alt = ''
img_name = ''
# Regex literals in this block were lost in extraction; the patterns
# below are assumptions consistent with how the groups are used.
match = re.search(r'!\[(.*?)\]\((.*?)\)', line)
if match:
    if match.group(0) in processed:
        break
    if re.match(r'^https?://', match.group(2)):
        break
    alt = match.group(1)
    img_name = match.group(2)
else:
    break
if self.image_ext:
    img_name = re.sub(r'\.[^.]*$', '.' + self.image_ext, img_name)
if self.adjust_path and (self.image_path or self.filename):
    if self.image_path and self.filename:
        img_name = os.path.join(
            os.path.abspath(self.image_path),
            os.path.dirname(self.filename),
            img_name)
    if self.filename and (not self.image_path):
        img_name = os.path.join(
            os.path.abspath(
                os.path.dirname(self.filename)),
            img_name)
    img_name = img_name.replace(os.path.sep, '/')
line = re.sub(r'!\[.*?\]\(.*?\)',
              '![%s](%s)' % (alt, img_name), line, count=1)
processed[match.group(0)] = True
ret.append(line)
return ret | Filter method |
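The core rewrite step can be demonstrated on one line; the Markdown image pattern below is the same assumption made in the restored code above:

import re

line = 'See ![diagram](images/arch.png) for details.'
match = re.search(r'!\[(.*?)\]\((.*?)\)', line)  # assumed image pattern
alt, img_name = match.group(1), match.group(2)

# Swap the extension and re-emit the link, as the filter does.
img_name = re.sub(r'\.[^.]*$', '.svg', img_name)
line = line.replace(match.group(0), '![%s](%s)' % (alt, img_name))
print(line)  # See ![diagram](images/arch.svg) for details.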
383,090 | def asFloat(self, maxval=1.0):
x,y,pixels,info = self.asDirect()
sourcemaxval = 2**info['bitdepth']-1
del info['bitdepth']
info['maxval'] = float(maxval)
factor = float(maxval)/float(sourcemaxval)
def iterfloat():
for row in pixels:
yield map(factor.__mul__, row)
return x,y,iterfloat(),info | Return image pixels as per :meth:`asDirect` method, but scale
all pixel values to be floating point values between 0.0 and
*maxval*. |
383,091 | def list_attributes(self):
return [attribute for attribute, value in self.iteritems() if issubclass(value.__class__, Attribute)] | Returns the Node attributes names.
Usage::
>>> node_a = AbstractNode("MyNodeA", attributeA=Attribute(), attributeB=Attribute())
>>> node_a.list_attributes()
['attributeB', 'attributeA']
:return: Attributes names.
:rtype: list |
383,092 | def _construct_from_permutation(self, significant_pathways):
for side, pathway_feature_tuples in significant_pathways.items():
feature_pathway_dict = self._collect_pathways_by_feature(
pathway_feature_tuples)
self._edges_from_permutation(feature_pathway_dict) | Build the network from a dictionary of (side -> tuple lists),
where the side is specified as "pos" and/or "neg" (from the feature
gene signature(s)) and mapped to a tuple list of [(pathway, feature)].
Used during the PathCORE-T permutation test by applying the method
`permute_pathways_across_features` to an existing CoNetwork. |
383,093 | def __alterDocstring(self, tail='', writer=None):
assert isinstance(tail, str) and isinstance(writer, GeneratorType)
lines = []
timeToSend = False
inCodeBlock = False
inCodeBlockObj = [False]
inSection = False
prefix = ''
firstLineNum = -1
sectionHeadingIndent = 0
codeChecker = self._checkIfCode(inCodeBlockObj)
while True:
lineNum, line = (yield)
if firstLineNum < 0:
firstLineNum = lineNum
if line is not None:
    # Reconstructed: the scan that binds `match` and `doxyTag` (checking
    # the line against doxypypy's single-line Doxygen command patterns)
    # was lost in extraction; the names below are assumptions.
    for doxyTag, tagRE in AstWalker.__singleLineREs.items():
        match = tagRE.search(line)
        if match:
            lines[-1], inCodeBlock = self._endCodeIfNeeded(
                lines[-1], inCodeBlock)
            inCodeBlockObj[0] = inCodeBlock
            writer.send((firstLineNum, lineNum - 1, lines))
            lines = []
            firstLineNum = lineNum
            line = line.replace(match.group(1), doxyTag)
            timeToSend = True
            break
if inSection:
match = AstWalker.__blanklineRE.match(line)
if not match:
indent = len(line.expandtabs(self.options.tablength)) - \
len(line.expandtabs(self.options.tablength).lstrip())
if indent <= sectionHeadingIndent:
inSection = False
else:
if lines[-1] == '#':
    lines[-1], inCodeBlock = self._endCodeIfNeeded(
        lines[-1], inCodeBlock)
    inCodeBlockObj[0] = inCodeBlock
# Doxygen tag literals below are reconstructions (assumption):
line = line.replace(match.group(0), ' @return\t').rstrip()
prefix = '@return\t'
else:
match = AstWalker.__argsStartRE.match(line)
if match:
if 'property' in prefix:
    line = '# {0}\t{1[name]}{2}# {1[desc]}'.format(
        prefix, match.groupdict(), linesep)
else:
    line = ' {0}\t{1[name]}\t{1[desc]}'.format(
        prefix, match.groupdict())
else:
match = AstWalker.__raisesStartRE.match(line)
if match:
line = line.replace(match.group(0), '').rstrip()
if 'see' in match.group(1).lower():
    # "see also" section (the Doxygen tag is an assumption)
    prefix = '@sa\t'
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
else:
match = AstWalker.__listRE.match(line)
if match and not inCodeBlock:
inCodeBlock = True
inCodeBlockObj[0] = True
# replacement literal reconstructed (assumption)
line = line.replace(match.group(0),
                    '@code{0}'.format(linesep))
else:
match = AstWalker.__sectionStartRE.match(line)
if match:
sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \
- len(line.expandtabs(self.options.tablength).lstrip())
# '@par' section tag reconstructed (assumption)
line = line.replace(
    match.group(0),
    ' @par {0}'.format(match.group(1))
)
# literals reconstructed (assumption): reset a blank comment line
if lines[-1] == '#':
    lines[-1] = ''
lines[-1], inCodeBlock = self._endCodeIfNeeded(
lines[-1], inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
lines.append('#' + line)
continue
elif prefix:
match = AstWalker.__singleListItemRE.match(line)
if match and not inCodeBlock:
line = ' {0}\t{1}'.format(
    prefix, match.group(0))
elif self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
else:
if self.options.autocode:
codeChecker.send(
(
line, lines,
lineNum - firstLineNum
)
)
inCodeBlock = inCodeBlockObj[0]
if tail and lineNum == len(self.docLines) - 1:
    line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail)
# Add a comment marker to every line; double it on the first line.
line = '#{0}'.format(line.rstrip())
if lineNum == 0:
    line = '#' + line
lines.append(line.replace(' ' + linesep, linesep))
else:
timeToSend = True
if timeToSend:
lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1],
inCodeBlock)
inCodeBlockObj[0] = inCodeBlock
writer.send((firstLineNum, lineNum, lines))
lines = []
firstLineNum = -1
timeToSend = False | Runs eternally, processing docstring lines.
Parses docstring lines as they get fed in via send, applies appropriate
Doxygen tags, and passes them along in batches for writing. |
383,094 | def update_long(self, **kwargs):
for key, val in six.iteritems(kwargs):
self.update_arg(key, long=val) | Update the long optional arguments (those with two leading '-')
This method updates the short argument name for the specified function
arguments as stored in :attr:`unfinished_arguments`
Parameters
----------
``**kwargs``
Keywords must be keys in the :attr:`unfinished_arguments`
dictionary (i.e. keywords of the root functions), values the long
argument names
Examples
--------
Setting::
>>> parser.update_long(something='s', something_else='se')
is basically the same as::
>>> parser.update_arg('something', long='s')
>>> parser.update_arg('something_else', long='se')
which in turn is basically comparable to::
>>> parser.add_argument('--s', dest='something', ...)
>>> parser.add_argument('--se', dest='something_else', ...)
See Also
--------
update_short, update_longf |
383,095 | def e(message, exit_code=None):
print_log(message, YELLOW, BOLD)
if exit_code is not None:
sys.exit(exit_code) | Print an error log message. |
383,096 | def AAAA(host, nameserver=None):
dig = ['dig', '+short', six.text_type(host), 'AAAA']
if nameserver is not None:
    dig.append('@{0}'.format(nameserver))
cmd = __salt__['cmd.run_all'](dig, python_shell=False)
if cmd['retcode'] != 0:
    log.warning(
        'dig returned exit code \'%s\'. Returning empty list as fallback.',
        cmd['retcode']
    )
    return []
return [x for x in cmd['stdout'].split('\n') if check_ip(x)]
Always returns a list.
CLI Example:
.. code-block:: bash
salt ns1 dig.AAAA www.google.com |
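Under the hood the module just shells out to dig. A standalone sketch with subprocess (requires dig installed; the crude ':' check stands in for the module's check_ip helper):

import subprocess

def aaaa_records(host, nameserver=None):
    cmd = ['dig', '+short', host, 'AAAA']
    if nameserver is not None:
        cmd.append('@{0}'.format(nameserver))
    out = subprocess.run(cmd, capture_output=True, text=True)
    if out.returncode != 0:
        return []
    # Keep only lines that look like IPv6 addresses.
    return [line for line in out.stdout.split('\n') if ':' in line]

print(aaaa_records('www.google.com'))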
383,097 | def disconnect(receiver, signal=Any, sender=Any, weak=True):
if signal is None:
    raise errors.DispatcherTypeError(
        'Signal cannot be None (receiver=%r sender=%r)' % (receiver, sender)
    )
if weak: receiver = saferef.safeRef(receiver)
senderkey = id(sender)
try:
signals = connections[senderkey]
receivers = signals[signal]
except KeyError:
    raise errors.DispatcherKeyError(
        'No receivers found for signal %r from sender %r' % (
            signal,
            sender
        )
    )
try:
_removeOldBackRefs(senderkey, signal, receiver, receivers)
except ValueError:
    raise errors.DispatcherKeyError(
        'No connection to receiver %s for signal %s from sender %s' % (
            receiver,
            signal,
            sender
        )
    )
_cleanupConnections(senderkey, signal) | Disconnect receiver from sender for signal
receiver -- the registered receiver to disconnect
signal -- the registered signal to disconnect
sender -- the registered sender to disconnect
weak -- the weakref state to disconnect
disconnect reverses the process of connect,
the semantics for the individual elements are
logically equivalent to a tuple of
(receiver, signal, sender, weak) used as a key
to be deleted from the internal routing tables.
(The actual process is slightly more complex
but the semantics are basically the same).
Note:
Using disconnect is not required to cleanup
routing when an object is deleted, the framework
will remove routes for deleted objects
automatically. It's only necessary to disconnect
if you want to stop routing to a live object.
returns None, may raise DispatcherTypeError or
DispatcherKeyError |
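Hedged usage sketch of the connect/send/disconnect life cycle, assuming the PyDispatcher package layout (pydispatch.dispatcher):

from pydispatch import dispatcher  # assumption: pip install PyDispatcher

def handler(sender):
    print('signal from', sender)

SIGNAL = 'my-first-signal'
dispatcher.connect(handler, signal=SIGNAL, sender=dispatcher.Any)
dispatcher.send(signal=SIGNAL, sender='demo')   # handler fires
dispatcher.disconnect(handler, signal=SIGNAL, sender=dispatcher.Any)
dispatcher.send(signal=SIGNAL, sender='demo')   # no receivers now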
383,098 | def filter(self, **kwargs):
self._filters.append(ReadsAlignmentsFilter(**kwargs).filter)
return self | Add a filter to this C{readsAlignments}.
@param kwargs: Keyword arguments, as accepted by
C{ReadsAlignmentsFilter}.
@return: C{self} |
383,099 | def sendReset(self, sequenceId=0):
for col in xrange(self.numColumns):
self.sensorInputs[col].addResetToQueue(sequenceId)
self.network.run(1) | Sends a reset signal to the network. |