Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k) |
---|---|---|
380,200 | def pull(self):
if not in session or session[].dead:
client = Client(str(request.environ[][]))
print , client
session[] = client
session.save()
yield request.environ[].events.AddCoro(client.watch, prio=priority.CORO)
return
else:
client = session[]
yield request.environ[](client.messages.get)(timeout=10)
if isinstance(request.environ[].result, events.OperationTimeout):
pass
elif isinstance(request.environ[].result, Exception):
import traceback
traceback.print_exception(*request.environ[].exception)
else:
yield "%s\r\n"% .join(request.environ[].result) | This action does some state checking (adds an object in the session
that will identify this chat participant and adds a coroutine to manage
its state) and gets new messages, or bails out after 10 seconds if there are
no messages. |
380,201 | def GetKeyByPath(self, key_path):
key_path_upper = key_path.upper()
if key_path_upper.startswith(self._key_path_prefix_upper):
relative_key_path = key_path[self._key_path_prefix_length:]
elif key_path.startswith(definitions.KEY_PATH_SEPARATOR):
relative_key_path = key_path
key_path = ''.join([self._key_path_prefix, key_path])
else:
return None
try:
regf_key = self._regf_file.get_key_by_path(relative_key_path)
except IOError:
regf_key = None
if not regf_key:
return None
return REGFWinRegistryKey(regf_key, key_path=key_path) | Retrieves the key for a specific path.
Args:
key_path (str): Windows Registry key path.
Returns:
WinRegistryKey: Registry key or None if not available. |
380,202 | def add_class(self, cssclass):
if self.has_class(cssclass):
return self
return self.toggle_class(cssclass) | Adds a css class to this element. |
380,203 | def fetchmany(self, size=None):
self._check_executed()
fut = self._loop.create_future()
if self._rows is None:
fut.set_result([])
return fut
end = self._rownumber + (size or self._arraysize)
result = self._rows[self._rownumber:end]
self._rownumber = min(end, len(self._rows))
fut.set_result(result)
return fut | Returns the next set of rows of a query result, returning a
list of tuples. When no more rows are available, it returns an
empty list.
The number of rows returned can be specified using the size argument,
which defaults to the cursor's arraysize.
:param size: ``int`` number of rows to return
:returns: ``list`` of fetched rows |
380,204 | def _immediate_dominators(self, node, target_graph=None, reverse_graph=False):
if target_graph is None:
target_graph = self.graph
if node not in target_graph:
raise AngrCFGError( % node)
graph = networkx.DiGraph(target_graph)
if reverse_graph:
for n in target_graph.nodes():
graph.add_node(n)
for src, dst in target_graph.edges():
graph.add_edge(dst, src)
idom = {node: node}
order = list(networkx.dfs_postorder_nodes(graph, node))
dfn = {u: i for i, u in enumerate(order)}
order.pop()
order.reverse()
def intersect(u_, v_):
while u_ != v_:
while dfn[u_] < dfn[v_]:
u_ = idom[u_]
while dfn[u_] > dfn[v_]:
v_ = idom[v_]
return u_
changed = True
while changed:
changed = False
for u in order:
new_idom = reduce(intersect, (v for v in graph.pred[u] if v in idom))
if u not in idom or idom[u] != new_idom:
idom[u] = new_idom
changed = True
return idom | Get all immediate dominators of sub graph from given node upwards.
:param str node: id of the node to navigate forwards from.
:param networkx.classes.digraph.DiGraph target_graph: graph to analyse, default is self.graph.
:param bool reverse_graph: Whether the target graph should be reversed before analysis.
:return: each node of graph as index values, with element as respective node's immediate dominator.
:rtype: dict |
380,205 | def _nested_unary_mul(nested_a, p):
def mul_with_broadcast(tensor):
ndims = tensor.shape.ndims
if ndims != 2:
p_reshaped = tf.reshape(p, [-1] + [1] * (ndims - 1))
return p_reshaped * tensor
else:
return p * tensor
return nest.map(mul_with_broadcast, nested_a) | Multiply `Tensors` in arbitrarily nested `Tensor` `nested_a` with `p`. |
380,206 | def borrow_readwrite_instance(cls, working_dir, block_number, expected_snapshots={}):
global blockstack_db, blockstack_db_lastblock, blockstack_db_lock
import virtualchain_hooks
db_path = virtualchain.get_db_filename(virtualchain_hooks, working_dir)
blockstack_db_lock.acquire()
try:
assert blockstack_db is None, "Borrowing violation"
except Exception, e:
log.exception(e)
log.error("FATAL: Borrowing violation")
os.abort()
db = BlockstackDB(db_path, DISPOSITION_RW, working_dir, get_genesis_block(), expected_snapshots=expected_snapshots)
rc = db.db_setup()
if not rc:
db.close()
blockstack_db_lock.release()
log.error("Failed to set up virtualchain state engine")
return None
blockstack_db = db
blockstack_db_lastblock = block_number
blockstack_db_lock.release()
return blockstack_db | Get a read/write database handle to the blockstack db.
At most one such handle can exist within the program.
When the caller is done with the handle, it should call release_readwrite_instance()
Returns the handle on success
Returns None if we can't set up the db.
Aborts if there is another read/write handle out there somewhere. |
380,207 | def update(self, app_id, data):
        assert ('name' in data
                and data['name']
                and 'summary' in data
                and 'categories' in data
                and data['categories']
                and 'support_email' in data
                and data['support_email']
                and 'device_types' in data
                and data['device_types']
                and 'payment_type' in data
                and data['payment_type']
                and 'privacy_policy' in data
                and data['privacy_policy'])
        return self.conn.fetch('PUT', self.url('app') % app_id, data) | Update app identified by app_id with data
:params:
* app_id (int) id in the marketplace received with :method:`create`
* data (dict) some keys are required:
* *name*: the title of the app. Maximum length 127
characters.
* *summary*: the summary of the app. Maximum length
255 characters.
* *categories*: a list of the categories, at least
two of the category ids provided from the category api
(see below).
* *support_email*: the email address for support.
* *device_types*: a list of the device types at least
one of: 'desktop', 'phone', 'tablet'.
* *payment_type*: only choice at this time is 'free'.
:returns: HttpResponse:
* status_code (int) 202 if successful
* content (dict) or empty if successful |
380,208 | def where(cls, **kwargs):
scope = kwargs.pop('scope', None)
if not scope:
return super(Classification, cls).where(**kwargs)
return cls.paginated_results(*cls.http_get(scope, params=kwargs)) | where(scope=None, **kwargs)
Like :py:meth:`.PanoptesObject.where`, but also allows setting the
query scope.
- **scope** can be any of the values given in the `Classification
Collection API documentation <http://docs.panoptes.apiary.io/#reference/classification/classification/list-all-classifications>`_
without the leading slash.
Examples::
my_classifications = Classification.where()
my_proj_123_classifications = Classification.where(project_id=123)
all_proj_123_classifications = Classification.where(
scope='project',
project_id=123,
) |
380,209 | def login(self, username=None, password=None, token=None):
if token:
self.headers[] = .format(token)
self.auth = None
elif username and password:
self.headers.pop(, None)
self.auth = (username, password) | Login into KE-chain with either username/password or token.
:param basestring username: username for your user from KE-chain
:param basestring password: password for your user from KE-chain
:param basestring token: user authentication token retrieved from KE-chain
Examples
--------
Using Token Authentication (retrieve user Token from the KE-chain instance)
>>> client = Client()
>>> client.login(token='<some-super-long-secret-token>')
Using Basic authentication (username/password)
>>> client = Client()
>>> client.login(username='user', password='pw')
>>> client = Client()
>>> client.login('username','password') |
380,210 | def p_statement_randomize_expr(p):
p[0] = make_sentence('RANDOMIZE', make_typecast(TYPE.ulong, p[2], p.lineno(1))) | statement : RANDOMIZE expr |
380,211 | def _make_tagdict(self, sentences):
counts = defaultdict(lambda: defaultdict(int))
for words, tags in sentences:
for word, tag in zip(words, tags):
counts[word][tag] += 1
self.classes.add(tag)
freq_thresh = 20
ambiguity_thresh = 0.97
for word, tag_freqs in counts.items():
tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
n = sum(tag_freqs.values())
if n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh:
self.tagdict[word] = tag | Make a tag dictionary for single-tag words. |
380,212 | def create_feature_vectorizer(input_features, output_feature_name,
known_size_map = {}):
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
input_features = process_or_validate_features(input_features)
feature_vectorizer = spec.featureVectorizer
num_output_dimensions = 0
for n, ft in input_features:
if n in known_size_map:
dim = known_size_map[n]
if ft.num_elements is not None:
if dim != ft.num_elements:
raise ValueError(("In feature %s, override size (%d) not "
"compatible with inherent value size (%d).")
% (n, dim, ft.num_elements))
else:
if ft.num_elements is None:
raise ValueError("In feature %s, inherent size unknown so must be manually supplied." % n)
dim = ft.num_elements
num_output_dimensions += dim
new_feature = feature_vectorizer.inputList.add()
new_feature.inputColumn = n
new_feature.inputDimensions = dim
if not isinstance(output_feature_name, _string_types):
if (is_valid_feature_list(output_feature_name)
and len(output_feature_name) == 1
and output_feature_name[0][1] == datatypes.Array(num_output_dimensions)):
output_feature_name = output_feature_name[0][0]
else:
raise TypeError("Output feature must be specified as a "
"feature name or correct output feature list.")
output_features = [(output_feature_name, datatypes.Array(num_output_dimensions))]
set_transform_interface_params(spec, input_features, output_features)
return spec, num_output_dimensions | Creates a feature vectorizer from input features, return the spec for
a feature vectorizer that puts everything into a single array of length
equal to the total size of all the input features. Returns a 2-tuple
`(spec, num_dimension)`
Parameters
----------
input_features: [list of 2-tuples]
Name(s) of the input features, given as a list of `('name', datatype)`
tuples. The datatypes entry is one of the data types defined in the
:ref:`datatypes` module. Allowed datatypes are :ref:`datatype.Int64`,
:ref:`datatype.Double`, :ref:`datatypes.Dictionary`,
or :ref:`datatype.Array`.
If the feature is a dictionary type, then the dictionary must have integer
keys, and the number of dimensions to expand it into must be given by
`known_size_map`.
Feature indices in the final array are counted sequentially from 0
through the total number of features.
output_feature_name: str
The name of the output feature. The type is an Array
List of output feature of the network.
known_size_map:
A dictionary mapping the feature name to the expanded size in the final
array. This is most useful for specifying the size of sparse vectors
given as dictionaries of index to value. |
380,213 | def get(self, name):
for c in self.comps:
if c.category == name:
return c
return None | Return component by category name |
380,214 | def eclean_pkg(destructive=False, package_names=False, time_limit=0,
               exclude_file='/etc/eclean/packages.exclude'):
    ret = {'cleaned': cleaned, 'total_cleaned': _pretty_size(clean_size)}
return ret | Clean obsolete binary packages
destructive
Only keep minimum for reinstallation
package_names
Protect all versions of installed packages. Only meaningful if used
with destructive=True
time_limit <time>
Don't delete distfiles files modified since <time>
<time> is an amount of time: "1y" is "one year", "2w" is
"two weeks", etc. Units are: y (years), m (months), w (weeks),
d (days) and h (hours).
exclude_file
Path to exclusion file. Default is /etc/eclean/packages.exclude
This is the same default eclean-pkg uses. Use None if this file
exists and you want to ignore.
Returns a dict containing the cleaned binary packages:
.. code-block:: python
{'cleaned': {<dist file>: <size>},
'total_cleaned': <size>}
CLI Example:
.. code-block:: bash
salt '*' gentoolkit.eclean_pkg destructive=True |
380,215 | def __find_star_in_col(self, col):
row = -1
for i in range(self.n):
if self.marked[i][col] == 1:
row = i
break
return row | Find the first starred element in the specified column. Returns
the row index, or -1 if no starred element was found. |
380,216 | def __parse_fc_data(fc_data):
from buienradar.buienradar import condition_from_code
fc = []
for daycnt in range(1, 6):
daysection = __BRDAYFC % daycnt
if daysection in fc_data:
tmpsect = fc_data[daysection]
fcdatetime = datetime.now(pytz.timezone(__TIMEZONE))
fcdatetime = fcdatetime.replace(hour=12,
minute=0,
second=0,
microsecond=0)
fcdatetime = fcdatetime + timedelta(days=daycnt)
code = tmpsect.get(__BRICOON, []).get(__BRID)
fcdata = {
CONDITION: condition_from_code(code),
TEMPERATURE: __get_float(tmpsect, __BRMAXTEMP),
MIN_TEMP: __get_float(tmpsect, __BRMINTEMP),
MAX_TEMP: __get_float(tmpsect, __BRMAXTEMP),
SUN_CHANCE: __get_int(tmpsect, __BRKANSZON),
RAIN_CHANCE: __get_int(tmpsect, __BRKANSREGEN),
RAIN: __get_float(tmpsect, __BRMAXMMREGEN),
SNOW: __get_float(tmpsect, __BRSNEEUWCMS),
WINDFORCE: __get_int(tmpsect, __BRWINDKRACHT),
DATETIME: fcdatetime,
}
fcdata[CONDITION][IMAGE] = tmpsect.get(__BRICOON, []).get(__BRTEXT)
fc.append(fcdata)
return fc | Parse the forecast data from the xml section. |
380,217 | def start_aikif():
if sys.platform[0:3] == 'win':
os.system("start go_web_aikif.bat")
else:
os.system("../aikif/web_app/web_aikif.py")
import webbrowser
import time
time.sleep(1)
webbrowser.open() | starts the web interface and possibly other processes |
380,218 | def filefind(self, names):
if type(names) is str:
names = [names]
lower_names = []
for name in names:
lower_names.append(name.lower())
names = lower_names
files = self.list_files()
found = []
for fullpath in files:
filename = os.path.basename(fullpath)
if filename.lower() in names:
logger.debug("Found %s", fullpath)
if not os.path.exists(fullpath):
logger.warn("Found file %s in version control but not on "
"file system.", fullpath)
continue
found.append(fullpath)
if not found:
return
if len(found) > 1:
found.sort(key=len)
            logger.warn("Found more than one file, picked the shortest one to "
                        "change: %s", ', '.join(found))
return found[0] | Return first found file matching name (case-insensitive).
Some packages have docs/HISTORY.txt and
package/name/HISTORY.txt. We make sure we only return the one
in the docs directory if no other can be found.
'names' can be a string or a list of strings; if you have both
a CHANGES.txt and a docs/HISTORY.txt, you want the top level
CHANGES.txt to be found first. |
380,219 | def similarity(ctx, app_id, json_flag, query_pair, request_id):
app_id = clean_app_id(app_id)
api = GoolabsAPI(app_id)
ret = api.similarity(
query_pair=query_pair,
request_id=request_id
)
if json_flag:
click.echo(format_json(api.response.json()))
return
click.echo(.format(ret[])) | Scoring the similarity of two words. |
380,220 | def split_path(path_):
    path = path_.lstrip('/')
    first, _, rest = path.partition('/')
lang = first.lower()
if lang in settings.LANGUAGE_URL_MAP:
return settings.LANGUAGE_URL_MAP[lang], rest
else:
supported = find_supported(first)
if len(supported):
return supported[0], rest
else:
return '', path | Split the requested path into (locale, path).
locale will be empty if it isn't found. |
380,221 | def reshape_range(tensor, i, j, shape):
t_shape = common_layers.shape_list(tensor)
target_shape = t_shape[:i] + shape + t_shape[j:]
return tf.reshape(tensor, target_shape) | Reshapes a tensor between dimensions i and j. |
380,222 | def cmd_fence_move(self, args):
if len(args) < 1:
print("Usage: fence move FENCEPOINTNUM")
return
if not self.have_list:
print("Please list fence points first")
return
idx = int(args[0])
if idx <= 0 or idx > self.fenceloader.count():
print("Invalid fence point number %u" % idx)
return
try:
latlon = self.module('map').click_position
except Exception:
print("No map available")
return
if latlon is None:
print("No map click position available")
return
self.fenceloader.move(idx, latlon[0], latlon[1])
if self.send_fence():
print("Moved fence point %u" % idx) | handle fencepoint move |
380,223 | def parses(self, words, S='S'):
if isinstance(words, str):
words = words.split()
self.parse(words, S)
return [[i, j, S, found, []]
for (i, j, lhs, found, expects) in self.chart[len(words)]
if i == 0 and lhs == S and expects == []] | Return a list of parses; words can be a list or string.
>>> chart = Chart(E_NP_)
>>> chart.parses('happy man', 'NP')
[[0, 2, 'NP', [('Adj', 'happy'), [1, 2, 'NP', [('N', 'man')], []]], []]] |
380,224 | def run_step(self):
formatted_replacements = self.context.get_formatted_iterable(
self.replace_pairs)
iter = StreamReplacePairsRewriterStep.iter_replace_strings(
formatted_replacements)
rewriter = StreamRewriter(iter)
super().run_step(rewriter) | Write in to out, replacing strings per the replace_pairs. |
380,225 | def publocus(args):
p = OptionParser(publocus.__doc__)
p.add_option("--locus_tag", default="MTR_",
help="GenBank locus tag [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
locus_tag = opts.locus_tag
index = AutoVivification()
idsfile, = args
fp = must_open(idsfile)
for row in fp:
locus, chrom, sep, rank, iso = atg_name(row, retval="locus,chr,sep,rank,iso")
if None in (locus, chrom, sep, rank, iso):
logging.warning("{0} is not a valid gene model identifier".format(row))
continue
if locus not in index.keys():
pub_locus = gene_name(chrom, rank, prefix=locus_tag, sep=sep)
index[locus][] = pub_locus
index[locus][] = set()
index[locus][].add(int(iso))
for locus in index:
pub_locus = index[locus][]
index[locus][] = sorted(index[locus][])
if len(index[locus][]) > 1:
new = [chr(n+64) for n in index[locus][] if n < 27]
for i, ni in zip(index[locus][], new):
print("\t".join(x for x in ("{0}.{1}".format(locus, i), \
"{0}{1}".format(pub_locus, ni))))
else:
print("\t".join(x for x in ("{0}.{1}".format(locus, index[locus][][0]), \
pub_locus))) | %prog publocus idsfile > idsfiles.publocus
Given a list of model identifiers, convert each into a GenBank approved
pub_locus.
Example output:
Medtr1g007020.1 MTR_1g007020
Medtr1g007030.1 MTR_1g007030
Medtr1g007060.1 MTR_1g007060A
Medtr1g007060.2 MTR_1g007060B |
380,226 | def make_empty(self, axes=None):
if axes is None:
axes = [ensure_index([])] + [ensure_index(a)
for a in self.axes[1:]]
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes) | return an empty BlockManager with the items axis of len 0 |
380,227 | def ionic_radius(self):
if self._oxi_state in self.ionic_radii:
return self.ionic_radii[self._oxi_state]
d = self._el.data
oxstr = str(int(self._oxi_state))
if oxstr in d.get("Ionic radii hs", {}):
warnings.warn("No default ionic radius for %s. Using hs data." %
self)
return d["Ionic radii hs"][oxstr]
elif oxstr in d.get("Ionic radii ls", {}):
warnings.warn("No default ionic radius for %s. Using ls data." %
self)
return d["Ionic radii ls"][oxstr]
warnings.warn("No ionic radius for {}!".format(self))
return None | Ionic radius of specie. Returns None if data is not present. |
380,228 | def get_language():
from parler import appsettings
language = dj_get_language()
if language is None and appsettings.PARLER_DEFAULT_ACTIVATE:
return appsettings.PARLER_DEFAULT_LANGUAGE_CODE
else:
return language | Wrapper around Django's `get_language` utility.
For Django >= 1.8, `get_language` returns None in case no translation is activate.
Here we patch this behavior e.g. for back-end functionality requiring access to translated fields |
380,229 | def _determine_profiles(self):
mp_insts = self._conn.EnumerateInstances("CIM_RegisteredProfile",
namespace=self.interop_ns)
self._profiles = mp_insts | Determine the WBEM management profiles advertised by the WBEM server,
by communicating with it and enumerating the instances of
`CIM_RegisteredProfile`.
If the profiles could be determined, this method sets the
:attr:`profiles` property of this object to the list of
`CIM_RegisteredProfile` instances (as :class:`~pywbem.CIMInstance`
objects), and returns.
Otherwise, it raises an exception.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
CIMError: CIM_ERR_NOT_FOUND, Interop namespace could not be
determined. |
380,230 | def dependencies_satisfied(self, plugin):
for depends in plugin.dependencies:
if depends not in self.config[]:
log.error("{0} depends on {1}, but {1} wasn't in the "
"config file. To use {0}, install {1} and add "
"it to the config.".format(plugin.name, depends))
return False
return True | Checks whether a plugin's dependencies are satisfied.
Logs an error if there are unsatisfied dependencies.
Returns: Bool |
380,231 | def setRGB(self, pixel, r, g, b):
self.set(pixel, (r, g, b)) | Set single pixel using individual RGB values instead of tuple |
380,232 | def _run_aws(cmd, region, opts, user, **kwargs):
| Runs the given command against AWS.
cmd
Command to run
region
Region to execute cmd in
opts
Pass in from salt
user
Pass in from salt
kwargs
Key-value arguments to pass to the command |
380,233 | def israw(self):
if self.raw is None:
info = self.container_info()
self.raw = self.stdout.isatty() and info['Config']['Tty']
return self.raw | Returns True if the PTY should operate in raw mode.
If the container was not started with tty=True, this will return False. |
380,234 | def create_search_url(self):
url =
for key, value in self.arguments.items():
url += % (quote_plus(key), quote_plus(value))
self.url = url[:-1]
return self.url | Generates (urlencoded) query string from stored key-values tuples
:returns: A string containing all arguments in a url-encoded format |
380,235 | def e(self, eid):
ta = datetime.datetime.now()
rs = self.rest(, self.uri_db + , data={:int(eid)}, parse=True)
tb = datetime.datetime.now() - ta
print cl( % (eid, tb.microseconds/1000.0), )
return rs | Get an Entity |
380,236 | def summarize_notices(self, notices_json):
ret = []
for notices_dict in notices_json:
            tmp = {
                'title': None,
                'description': None,
                'links': None
            }
            try:
                tmp['title'] = notices_dict['title']
            except (KeyError, ValueError, TypeError):
                pass
            try:
                tmp['description'] = '\n'.join(notices_dict['description'])
            except (KeyError, ValueError, TypeError):
                pass
            try:
                tmp['links'] = self.summarize_links(notices_dict['links'])
            except (KeyError, ValueError, TypeError):
                pass
if any(tmp.values()):
ret.append(tmp)
return ret | The function for summarizing RDAP notices into a unique list.
https://tools.ietf.org/html/rfc7483#section-4.3
Args:
notices_json (:obj:`dict`): A json mapping of notices from RDAP
results.
Returns:
list of dict: Unique RDAP notices information:
::
[{
'title' (str) - The title/header of the notice.
'description' (str) - The description/body of the notice.
'links' (list) - Unique links returned by
:obj:`ipwhois.rdap._RDAPCommon.summarize_links()`.
}] |
380,237 | def __update_state(self):
if self._state.active:
self._state = self.__get_state_by_id(self.job_config.job_id) | Fetches most up to date state from db. |
380,238 | def cardinal_groupby(self):
g, t = self._cardinal
self[g] = self[g].astype(t)
grpby = self.groupby(g)
self[g] = self[g].astype()
return grpby | Group this object on it cardinal dimension (_cardinal).
Returns:
grpby: Pandas groupby object (grouped on _cardinal) |
380,239 | def serialisable(cls, key, obj):
t
_Serialisable_<cls.__name__>__variables remove these too
if in key:
return False
if key in obj.__blacklist:
return False
if callable(getattr(obj, key)):
return False
if hasattr(obj.__class__, key):
if isinstance(getattr(obj.__class__, key), property):
return False
return True | Determines what can be serialised and what shouldn't |
380,240 | def send(self, **kwargs):
assert len(kwargs) == 1, "Must make a single request."
res = self.send_req(sc_pb.Request(**kwargs))
return getattr(res, list(kwargs.keys())[0]) | Create and send a specific request, and return the response.
For example: send(ping=sc_pb.RequestPing()) => sc_pb.ResponsePing
Args:
**kwargs: A single kwarg with the name and value to fill in to Request.
Returns:
The Response corresponding to your request. |
380,241 | def save_dash(self, dashboard_id):
session = db.session()
dash = (session
.query(models.Dashboard)
.filter_by(id=dashboard_id).first())
check_ownership(dash, raise_if_false=True)
data = json.loads(request.form.get('data'))
self._set_dash_metadata(dash, data)
session.merge(dash)
session.commit()
session.close()
return json_success(json.dumps({'status': 'SUCCESS'})) | Save a dashboard's metadata
380,242 | def exists(self):
session = client.get_client().create_session()
ret = self._base_query(session).count() > 0
session.close()
return ret | Check if a target exists
This function is called by :mod:`luigi` to check if a task output exists. By default,
:mod:`luigi` considers a task as complete if all it targets (outputs) exist.
Returns:
bool: ``True`` if target exists, ``False`` otherwise |
380,243 | def threshold_monitor_hidden_threshold_monitor_security_policy_area_timebase(self, **kwargs):
config = ET.Element("config")
threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor")
threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor")
security = ET.SubElement(threshold_monitor, "security")
policy = ET.SubElement(security, "policy")
sec_policy_name_key = ET.SubElement(policy, "sec_policy_name")
        sec_policy_name_key.text = kwargs.pop('sec_policy_name')
        area = ET.SubElement(policy, "area")
        sec_area_value_key = ET.SubElement(area, "sec_area_value")
        sec_area_value_key.text = kwargs.pop('sec_area_value')
        timebase = ET.SubElement(area, "timebase")
        timebase.text = kwargs.pop('timebase')
        callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
380,244 | async def createWorkerType(self, *args, **kwargs):
return await self._makeApiCall(self.funcinfo["createWorkerType"], *args, **kwargs) | Create new Worker Type
Create a worker type. A worker type contains all the configuration
needed for the provisioner to manage the instances. Each worker type
knows which regions and which instance types are allowed for that
worker type. Remember that Capacity is the number of concurrent tasks
that can be run on a given EC2 resource and that Utility is the relative
performance rate between different instance types. There is no way to
configure different regions to have different sets of instance types
so ensure that all instance types are available in all regions.
This function is idempotent.
Once a worker type is in the provisioner, a background process will
begin creating instances for it based on its capacity bounds and its
pending task count from the Queue. It is the worker's responsibility
to shut itself down. The provisioner has a limit (currently 96hours)
for all instances to prevent zombie instances from running indefinitely.
The provisioner will ensure that all instances created are tagged with
aws resource tags containing the provisioner id and the worker type.
If provided, the secrets in the global, region and instance type sections
are available using the secrets api. If specified, the scopes provided
will be used to generate a set of temporary credentials available with
the other secrets.
This method takes input: ``http://schemas.taskcluster.net/aws-provisioner/v1/create-worker-type-request.json#``
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#``
This method is ``stable`` |
380,245 | def update(self, batch_webhook_id, data):
self.batch_webhook_id = batch_webhook_id
        if 'url' not in data:
            raise KeyError('The batch webhook must have a url')
return self._mc_client._patch(url=self._build_path(batch_webhook_id), data=data) | Update a webhook that will fire whenever any batch request completes
processing.
:param batch_webhook_id: The unique id for the batch webhook.
:type batch_webhook_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"url": string*
} |
380,246 | def fresh_working_set():
class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):
def __init__(self, *args, **kwargs):
self._normalized_name_mapping = {}
super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)
def add_entry(self, entry):
logger.debug(, entry)
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in pkg_resources.find_distributions(entry, False):
replace = (dist.precedence == pkg_resources.EGG_DIST)
self._normalized_name_mapping[normalize_name(dist.key)] = dist.key
self.add(dist, entry, False, replace=replace)
def find_normalized(self, req):
req = _package_req_to_pkg_resources_req(str(req))
req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)
return self.find(req)
return WorkingSetPlusEditableInstalls() | return a pkg_resources "working set", representing the *currently* installed packages |
380,247 | def display_matrix(self, matrix, interval=2.0, brightness=1.0, fading=False, ignore_duplicates=False):
self._matrix_writer.write(
matrix=matrix,
interval=interval,
brightness=brightness,
fading=fading,
ignore_duplicates=ignore_duplicates
) | Displays an LED matrix on Nuimo's LED matrix display.
:param matrix: the matrix to display
:param interval: interval in seconds until the matrix disappears again
:param brightness: led brightness between 0..1
:param fading: if True, the previous matrix fades into the new matrix
:param ignore_duplicates: if True, the matrix is not sent again if already being displayed |
380,248 | def GetTZInfo(tzname=, utcOffset=None, dst=None):
key = (tzname, utcOffset, dst)
tzInfo = TZManager._tzInfos.get(key)
if not tzInfo:
tzInfo = TZInfo(tzname, utcOffset, dst)
TZManager._tzInfos[key] = tzInfo
return tzInfo | Get / Add timezone info |
380,249 | async def psetex(self, name, time_ms, value):
if isinstance(time_ms, datetime.timedelta):
ms = int(time_ms.microseconds / 1000)
time_ms = (time_ms.seconds + time_ms.days * 24 * 3600) * 1000 + ms
return await self.execute_command('PSETEX', name, time_ms, value) | Set the value of key ``name`` to ``value`` that expires in ``time_ms``
milliseconds. ``time_ms`` can be represented by an integer or a Python
timedelta object |
380,250 | def create(self, size):
thumbnail = images.create(self.source_image.name, size,
self.metadata_backend, self.storage)
return thumbnail | Creates and return a thumbnail of a given size. |
380,251 | def remove_entry_listener(self, registration_id):
return self._stop_listening(registration_id,
lambda i: multi_map_remove_entry_listener_codec.encode_request(self.name, i)) | Removes the specified entry listener. Returns silently if there is no such listener added before.
:param registration_id: (str), id of registered listener.
:return: (bool), ``true`` if registration is removed, ``false`` otherwise. |
380,252 | def createZone(self, zone, zoneFile=None, callback=None, errback=None,
**kwargs):
import ns1.zones
zone = ns1.zones.Zone(self.config, zone)
return zone.create(zoneFile=zoneFile, callback=callback,
errback=errback, **kwargs) | Create a new zone, and return an associated high level Zone object.
Several optional keyword arguments are available to configure the SOA
record.
If zoneFile is specified, upload the specific zone definition file
to populate the zone with.
:param str zone: zone name, like 'example.com'
:param str zoneFile: absolute path of a zone file
:keyword int retry: retry time
:keyword int refresh: refresh ttl
:keyword int expiry: expiry ttl
:keyword int nx_ttl: nxdomain TTL
:rtype: :py:class:`ns1.zones.Zone` |
380,253 | def dimensionNames(self):
nSubDims = len(self._subArrayShape)
subArrayDims = [.format(dimNr) for dimNr in range(nSubDims)]
return list(self._ncVar.dimensions + tuple(subArrayDims)) | Returns a list with the dimension names of the underlying NCDF variable |
380,254 | def random_str(Nchars=6, randstrbase=):
return ''.join([randstrbase[random.randint(0, len(randstrbase) - 1)] for i in range(Nchars)]) | Return a random string of <Nchars> characters. Characters are sampled
uniformly from <randstrbase>. |
380,255 | def create_lazy_user(self):
user_class = self.model.get_user_class()
username = self.generate_username(user_class)
user = user_class.objects.create_user(username, '')
self.create(user=user)
return user, username | Create a lazy user. Returns a 2-tuple of the underlying User
object (which may be of a custom class), and the username. |
380,256 | def listify(*args):
if (len(args) == 1) and callable(args[0]):
func = args[0]
@wraps(func)
def _inner(*args, **kwargs):
return list(func(*args, **kwargs))
return _inner
else:
return list(args) | Convert args to a list, unless there's one arg and it's a
function, in which case it acts as a decorator. |
380,257 | def _create_window_function(name, doc=''):
def _():
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)()
return Column(jc)
_.__name__ = name
_.__doc__ = 'Window function: ' + doc
return _ | Create a window function by name |
380,258 | def timeseries(self):
        if 'q' in self._timeseries.columns:
            return self._timeseries
        else:
            self._timeseries['q'] = abs(self._timeseries.p) * self.q_sign * \
tan(acos(self.power_factor))
return self._timeseries.loc[
self.grid.network.timeseries.timeindex, :] | Time series of storage operation
Parameters
----------
ts : :pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power the storage is charged (negative)
and discharged (positive) with (on the grid side) in kW in column
'p' and reactive power in kvar in column 'q'. When 'q' is positive,
reactive power is supplied (behaving as a capacitor) and when 'q'
is negative reactive power is consumed (behaving as an inductor).
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
See parameter `timeseries`. |
380,259 | def publish_scene_name(self, scene_id, name):
self.sequence_number += 1
self.publisher.send_multipart(msgs.MessageBuilder.scene_name(self.sequence_number, scene_id, name))
return self.sequence_number | publish a changed scene name |
380,260 | def camelify(self):
outstring = self.titleify(allwords=True)
outstring = re.sub(r"&[^;]+;", " ", outstring)
outstring = re.sub(r"\W+", "", outstring)
return String(outstring) | turn a string to CamelCase, omitting non-word characters |
380,261 | def open(filename, frame=):
data = BagOfPoints.load_data(filename)
return Direction(data, frame) | Create a Direction from data saved in a file.
Parameters
----------
filename : :obj:`str`
The file to load data from.
frame : :obj:`str`
The frame to apply to the created Direction.
Returns
-------
:obj:`Direction`
A Direction created from the data in the file. |
380,262 | def update_process_behavior(self, behavior_data, process_id, behavior_ref_name):
route_values = {}
if process_id is not None:
route_values[] = self._serialize.url(, process_id, )
if behavior_ref_name is not None:
route_values[] = self._serialize.url(, behavior_ref_name, )
content = self._serialize.body(behavior_data, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
content=content)
return self._deserialize(, response) | UpdateProcessBehavior.
[Preview API] Replaces a behavior in the process.
:param :class:`<ProcessBehaviorUpdateRequest> <azure.devops.v5_0.work_item_tracking_process.models.ProcessBehaviorUpdateRequest>` behavior_data:
:param str process_id: The ID of the process
:param str behavior_ref_name: The reference name of the behavior
:rtype: :class:`<ProcessBehavior> <azure.devops.v5_0.work_item_tracking_process.models.ProcessBehavior>` |
380,263 | def batch_delete_intents(self,
parent,
intents,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
        if 'batch_delete_intents' not in self._inner_api_calls:
            self._inner_api_calls[
                'batch_delete_intents'] = google.api_core.gapic_v1.method.wrap_method(
                    self.transport.batch_delete_intents,
                    default_retry=self._method_configs['BatchDeleteIntents'].retry,
                    default_timeout=self._method_configs['BatchDeleteIntents'].timeout,
                    client_info=self._client_info,
                )
        request = intent_pb2.BatchDeleteIntentsRequest(
            parent=parent,
            intents=intents,
        )
        operation = self._inner_api_calls['batch_delete_intents'](
request, retry=retry, timeout=timeout, metadata=metadata)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=struct_pb2.Struct,
) | Deletes intents in the specified agent.
Operation <response: ``google.protobuf.Empty``>
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.IntentsClient()
>>>
>>> parent = client.project_agent_path('[PROJECT]')
>>>
>>> # TODO: Initialize ``intents``:
>>> intents = []
>>>
>>> response = client.batch_delete_intents(parent, intents)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The name of the agent to delete all entities types for. Format:
``projects/<Project ID>/agent``.
intents (list[Union[dict, ~google.cloud.dialogflow_v2.types.Intent]]): Required. The collection of intents to delete. Only intent ``name`` must be
filled in.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.Intent`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
380,264 | def list_local():
cmd =
out = __salt__['cmd.run'](cmd, python_shell=False).split('\n')
ret = [line.split()[1] for line in out if len(line.split()) > 2]
return ret | List the locally installed overlays.
Return a list of installed overlays:
CLI Example:
.. code-block:: bash
salt '*' layman.list_local |
380,265 | def volumes(self):
return sys_volumes.VolumeCollection(
self._conn, utils.get_subresource_path_by(self, 'Volumes'),
redfish_version=self.redfish_version) | This property prepares the list of volumes
:return a list of volumes. |
380,266 | def where(self, **kwargs):
clauses = copy(self.clauses)
for dimension, condition in kwargs.items():
if dimension in self.clauses:
raise Exception(.format(dimension))
if dimension not in self.schema:
raise Exception("Dimension {} doesn't exist".format(dimension))
if isfunction(condition) or isinstance(condition, functools.partial):
clauses[dimension] = condition
else:
clauses[dimension] = functools.partial((lambda x, y: x == y), self._sanitize_dimension(str(condition)))
return self._copy(clauses=clauses) | Return a new Dataset refined using the given condition
:param kwargs: a map of `dimension` => `condition` to filter the elements
of the dataset. `condition` can either be an exact value or a
callable returning a boolean value. If `condition` is a value, it is
converted to a string, then sanitized. If `condition` is a callable, note that it will
be passed sanitized values -- i.e., characters outside [a-zA-Z0-9_.] are converted
to `_`. |
380,267 | def createEditor(self, parent, option, index):
model = index.model()
value = model.get_value(index)
if model._data.dtype.name == "bool":
value = not value
model.setData(index, to_qvariant(value))
return
elif value is not np.ma.masked:
editor = QLineEdit(parent)
editor.setFont(get_font(font_size_delta=DEFAULT_SMALL_DELTA))
editor.setAlignment(Qt.AlignCenter)
if is_number(self.dtype):
validator = QDoubleValidator(editor)
validator.setLocale(QLocale())
editor.setValidator(validator)
editor.returnPressed.connect(self.commitAndCloseEditor)
return editor | Create editor widget |
380,268 | def from_pycbc(cls, pycbcseries, copy=True):
return cls(pycbcseries.data, t0=pycbcseries.start_time,
dt=pycbcseries.delta_t, copy=copy) | Convert a `pycbc.types.timeseries.TimeSeries` into a `TimeSeries`
Parameters
----------
pycbcseries : `pycbc.types.timeseries.TimeSeries`
the input PyCBC `~pycbc.types.timeseries.TimeSeries` array
copy : `bool`, optional, default: `True`
if `True`, copy these data to a new array
Returns
-------
timeseries : `TimeSeries`
a GWpy version of the input timeseries |
380,269 | def proc_check_guard(self, instance, sql):
self.open_db_connections(instance, self.PROC_GUARD_DB_KEY)
cursor = self.get_cursor(instance, self.PROC_GUARD_DB_KEY)
should_run = False
try:
cursor.execute(sql, ())
result = cursor.fetchone()
should_run = result[0] == 1
except Exception as e:
self.log.error("Failed to run proc_only_if sql {} : {}".format(sql, e))
self.close_cursor(cursor)
self.close_db_connections(instance, self.PROC_GUARD_DB_KEY)
return should_run | check to see if the guard SQL returns a single column containing 0 or 1
We return true if 1, else False |
380,270 | def fire_event(self, event_name, service_name, default=None):
service = self.get_service(service_name)
callbacks = service.get(event_name, default)
if not callbacks:
return
if not isinstance(callbacks, Iterable):
callbacks = [callbacks]
for callback in callbacks:
if isinstance(callback, ManagerCallback):
callback(self, service_name, event_name)
else:
callback(service_name) | Fire a data_ready, data_lost, start, or stop event on a given service. |
380,271 | def parent(self):
parent = list(self.graph.objects(self.asNode(), RDF_NAMESPACES.DTS.parent))
if parent:
return self.parent_class(parent[0])
return None | Parent of current object
:rtype: Collection |
380,272 | def update_usage_plan(plan_id, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None):
    try:
        _validate_throttle(throttle)
        _validate_quota(quota)
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        patchOperations = []
        if throttle is None:
            patchOperations.append({'op': 'remove', 'path': '/throttle'})
        else:
            if 'rateLimit' in throttle:
                patchOperations.append({'op': 'replace', 'path': '/throttle/rateLimit', 'value': str(throttle['rateLimit'])})
            if 'burstLimit' in throttle:
                patchOperations.append({'op': 'replace', 'path': '/throttle/burstLimit', 'value': str(throttle['burstLimit'])})
        if quota is None:
            patchOperations.append({'op': 'remove', 'path': '/quota'})
        else:
            patchOperations.append({'op': 'replace', 'path': '/quota/limit', 'value': str(quota['limit'])})
            patchOperations.append({'op': 'replace', 'path': '/quota/period', 'value': str(quota['period'])})
            if 'offset' in quota:
                patchOperations.append({'op': 'replace', 'path': '/quota/offset', 'value': str(quota['offset'])})
        if patchOperations:
            res = conn.update_usage_plan(usagePlanId=plan_id,
                                         patchOperations=patchOperations)
            return {'updated': True, 'result': res}
        return {'updated': False}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
    except (TypeError, ValueError) as e:
        return {'error': six.text_type(e)}
.. versionadded:: 2017.7.0
plan_id
Id of the created usage plan
throttle
A dictionary consisting of the following keys:
rateLimit
requests per second at steady rate, float
burstLimit
maximum number of requests per second, integer
quota
A dictionary consisting of the following keys:
limit
number of allowed requests per specified quota period [required if quota parameter is present]
offset
number of requests to be subtracted from limit at the beginning of the period [optional]
period
quota period, must be one of DAY, WEEK, or MONTH. [required if quota parameter is present
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.update_usage_plan plan_id='usage plan id' throttle='{"rateLimit": 10.0, "burstLimit": 10}' |
380,273 | def update(self, obj, size):
self.number += 1
self.total += size
if self.high < size:
self.high = size
try:
self.objref, self.weak = Weakref.ref(obj), True
except TypeError:
self.objref, self.weak = obj, False | Update this profile. |
380,274 | def interfaces(root):
root = target(root)
if root is False or not os.path.isdir(root):
log.error(, root)
return False
readwrites = []
reads = []
writes = []
for path, _, files in salt.utils.path.os_walk(root, followlinks=False):
for afile in files:
canpath = os.path.join(path, afile)
if not os.path.isfile(canpath):
continue
stat_mode = os.stat(canpath).st_mode
is_r = bool(stat.S_IRUSR & stat_mode)
is_w = bool(stat.S_IWUSR & stat_mode)
relpath = os.path.relpath(canpath, root)
if is_w:
if is_r:
readwrites.append(relpath)
else:
writes.append(relpath)
elif is_r:
reads.append(relpath)
else:
log.warning(, canpath)
return {
        'r': reads,
        'w': writes,
        'rw': readwrites
} | Generate a dictionary with all available interfaces relative to root.
Symlinks are not followed.
CLI example:
.. code-block:: bash
salt '*' sysfs.interfaces block/bcache0/bcache
Output example:
.. code-block:: json
{
"r": [
"state",
"partial_stripes_expensive",
"writeback_rate_debug",
"stripe_size",
"dirty_data",
"stats_total/cache_hits",
"stats_total/cache_bypass_misses",
"stats_total/bypassed",
"stats_total/cache_readaheads",
"stats_total/cache_hit_ratio",
"stats_total/cache_miss_collisions",
"stats_total/cache_misses",
"stats_total/cache_bypass_hits",
],
"rw": [
"writeback_rate",
"writeback_rate_update_seconds",
"cache_mode",
"writeback_delay",
"label",
"writeback_running",
"writeback_metadata",
"running",
"writeback_rate_p_term_inverse",
"sequential_cutoff",
"writeback_percent",
"writeback_rate_d_term",
"readahead"
],
"w": [
"stop",
"clear_stats",
"attach",
"detach"
]
}
.. note::
* 'r' interfaces are read-only
* 'w' interfaces are write-only (e.g. actions)
* 'rw' are interfaces that can both be read or written |
380,275 | def smoothMLS1D(actor, f=0.2, showNLines=0):
coords = actor.coordinates()
ncoords = len(coords)
Ncp = int(ncoords * f / 10)
nshow = int(ncoords)
if showNLines:
ndiv = int(nshow / showNLines)
if Ncp < 3:
vc.printc("~target Please choose a fraction higher than " + str(f), c=1)
Ncp = 3
poly = actor.GetMapper().GetInput()
vpts = poly.GetPoints()
locator = vtk.vtkPointLocator()
locator.SetDataSet(poly)
locator.BuildLocator()
vtklist = vtk.vtkIdList()
variances, newline, acts = [], [], []
for i, p in enumerate(coords):
locator.FindClosestNPoints(Ncp, p, vtklist)
points = []
for j in range(vtklist.GetNumberOfIds()):
trgp = [0, 0, 0]
vpts.GetPoint(vtklist.GetId(j), trgp)
points.append(trgp)
if len(points) < 2:
continue
points = np.array(points)
pointsmean = points.mean(axis=0)
uu, dd, vv = np.linalg.svd(points - pointsmean)
newp = np.dot(p - pointsmean, vv[0]) * vv[0] + pointsmean
variances.append(dd[1] + dd[2])
newline.append(newp)
if showNLines and not i % ndiv:
fline = fitLine(points, lw=4)
iapts = vs.Points(points)
acts += [fline, iapts]
for i in range(ncoords):
vpts.SetPoint(i, newline[i])
if showNLines:
apts = vs.Points(newline, c="r 0.6", r=2)
ass = Assembly([apts] + acts)
return ass
actor.info["variances"] = np.array(variances)
return actor | Smooth actor or points with a `Moving Least Squares` variant.
The list ``actor.info['variances']`` contain the residue calculated for each point.
Input actor's polydata is modified.
:param float f: smoothing factor - typical range is [0,2].
:param int showNLines: build an actor showing the fitting line for N random points.
.. hint:: |moving_least_squares1D| |moving_least_squares1D.py|_
|skeletonize| |skeletonize.py|_ |
380,276 | def create_database():
db = get_db()
    response = db.query('SHOW DATABASES')
    items = list(response.get_points())
    databases = [database['name'] for database in items]
if settings.INFLUXDB_DATABASE not in databases:
db.create_database(settings.INFLUXDB_DATABASE)
print(.format(settings.INFLUXDB_DATABASE)) | creates database if necessary |
380,277 | def evaluate_model(filepath,
train_start=0, train_end=60000, test_start=0,
test_end=10000, batch_size=128,
testing=False, num_threads=None):
tf.set_random_seed(1234)
set_log_level(logging.INFO)
if num_threads:
config_args = dict(intra_op_parallelism_threads=1)
else:
config_args = {}
sess = tf.Session(config=tf.ConfigProto(**config_args))
mnist = MNIST(train_start=train_start, train_end=train_end,
test_start=test_start, test_end=test_end)
    x_train, y_train = mnist.get_set('train')
    x_test, y_test = mnist.get_set('test')
img_rows, img_cols, nchannels = x_train.shape[1:4]
nb_classes = y_train.shape[1]
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
nchannels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
    eval_params = {'batch_size': batch_size}
    fgsm_params = {
        'eps': 0.3,
        'clip_min': 0.,
        'clip_max': 1.
    }
def do_eval(preds, x_set, y_set, report_key, is_adv=None):
acc = model_eval(sess, x, y, preds, x_set, y_set, args=eval_params)
if is_adv is None:
report_text = None
        elif is_adv:
            report_text = 'adversarial'
        else:
            report_text = 'legitimate'
        if report_text:
            print('Test accuracy on %s examples: %0.4f' % (report_text, acc))
with sess.as_default():
model = load(filepath)
assert len(model.get_params()) > 0
fgsm = FastGradientMethod(model, sess=sess)
adv_x = fgsm.generate(x, **fgsm_params)
preds_adv = model.get_logits(adv_x)
preds = model.get_logits(x)
        do_eval(preds, x_test, y_test, 'clean_train_clean_eval', False)
        do_eval(preds_adv, x_test, y_test, 'clean_train_adv_eval', True)
:param filepath: path to model to evaluate
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param batch_size: size of evaluation batches |
380,278 | def epubcheck_help():
with open(os.devnull, "w") as devnull:
p = subprocess.Popen(
[c.JAVA, , , c.EPUBCHECK, ],
stdout=subprocess.PIPE,
stderr=devnull,
)
result = p.communicate()[0]
return result.decode() | Return epubcheck.jar commandline help text.
:return unicode: helptext from epubcheck.jar |
380,279 | def restore(name, run_path=None, replace=False, root="."):
if run_path is None and run is None:
raise ValueError(
"You must call `wandb.init` before calling restore or specify a run_path")
api = Api()
api_run = api.run(run_path or run.path)
root = run.dir if run else root
    path = os.path.join(root, name)
    if os.path.exists(path) and replace == False:
        return open(path, "r")
files = api_run.files([name])
if len(files) == 0:
return None
return files[0].download(root=root, replace=True) | Downloads the specified file from cloud storage into the current run directory
if it doesn't exist.
name: the name of the file
run_path: optional path to a different run to pull files from
replace: whether to download the file even if it already exists locally
root: the directory to download the file to. Defaults to the current
directory or the run directory if wandb.init was called.
returns None if it can't find the file, otherwise a file object open for reading
raises wandb.CommError if it can't find the run |
380,280 | def random_split(self, weights):
jvalues = self.image_frame.random_split(weights)
return [ImageFrame(jvalue) for jvalue in jvalues] | Random split imageframes according to weights
:param weights: weights for each ImageFrame
:return: |
380,281 | def _match_errors_queues(self, query):
if query in self._error_queues:
queue = self._error_queues[query]
response = queue.value
logger.debug(,
repr(response))
return response | Tries to match in error queues
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None |
380,282 | def env(*_vars, **kwargs):
for v in _vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '') | Search for the first defined of possibly many env vars.
Returns the first environment variable defined in vars, or
returns the default defined in kwargs. |
380,283 | def set_blocks(self, list=None, dict=None, fill_air=False):
if list:
self.blocksList = list
elif dict:
list = []
for x in range(16):
for z in range(16):
for y in range(128):
coord = x,y,z
offset = y + z*128 + x*128*16
if (coord in dict):
list.append(dict[coord])
else:
if (self.blocksList[offset] and not fill_air):
list.append(self.blocksList[offset])
else:
list.append(0)
self.blocksList = list
else:
return False
return True | Sets all blocks in this chunk, using either a list or dictionary.
Blocks not explicitly set can be filled to air by setting fill_air to True. |
380,284 | def visitPrefixDecl(self, ctx: ShExDocParser.PrefixDeclContext):
iri = self.context.iriref_to_shexj_iriref(ctx.IRIREF())
prefix = ctx.PNAME_NS().getText()
if iri not in self.context.ld_prefixes:
self.context.prefixes.setdefault(prefix, iri.val) | prefixDecl: KW_PREFIX PNAME_NS IRIREF |
380,285 | def get_methods(self):
for c in self.classes.values():
for m in c.get_methods():
yield m | Returns a list of `MethodClassAnalysis` objects |
380,286 | def convert(self, input_path=None, output_path=None, markup=None,
break_lines=False, divide_works=False, latin=False,
extra_args=None):
input_path = os.path.expanduser(input_path)
output_path = os.path.expanduser(output_path)
assert os.path.isfile(input_path), .format(input_path)
tlgu_options = []
if markup == 'full':
full_args = [, , , , ]
[tlgu_options.append(x) for x in full_args]
if break_lines:
tlgu_options.append()
if divide_works:
tlgu_options.append()
if latin:
tlgu_options.append()
if extra_args is None:
extra_args = []
else:
try:
extra_args = list(extra_args)
except Exception as exc:
logger.error("Argument must be a list: %s.", exc)
raise
tlgu_options = tlgu_options + extra_args
tlgu_options = list(set(tlgu_options))
        if tlgu_options:
            tlgu_flags = '-' + ' -'.join(tlgu_options)
        else:
            tlgu_flags = ''
        tlgu_call = 'tlgu {0} {1} {2}'.format(tlgu_flags,
input_path,
output_path)
logger.info(tlgu_call)
try:
p_out = subprocess.call(tlgu_call, shell=True)
if p_out == 1:
logger.error(,
input_path,
output_path)
except Exception as exc:
logger.error(,
input_path,
output_path,
exc)
raise | :param input_path: TLG filepath to convert.
:param output_path: filepath of new converted text.
:param markup: Specificity of inline markup. Default None removes all
numerical markup; 'full' gives most detailed, with reference numbers
included before each text line.
:param break_lines: No spaces; removes line ends and hyphens before an
ID code; hyphens and spaces before page and column ends are retained.
:param divide_works: Each work (book) is output as a separate file in
the form output_file-xxx.txt; if an output file is not specified, this
option has no effect.
:param latin: Primarily Latin text (PHI). Some TLG texts, notably
doccan1.txt and doccan2.txt are mostly roman texts lacking explicit
language change codes. Setting this option will force a change to
Latin text after each citation block is encountered.
:param extra_args: Any other tlgu args to be passed, in list form and
without dashes, e.g.: ['p', 'b', 'B']. |
380,287 | def _validate_rule(self, rule):
if not inspect.isclass(rule) or not issubclass(rule, Rule):
raise NotRuleException(rule)
rule.validate(self._grammar) | Validate rule. Valid rule must inherit from Rule and have valid syntax.
:param rule: Rule to validate.
:raise NotRuleException: If the parameter doesn't inherit from Rule. |
380,288 | def read_stream(stream, output, prebuffer, chunk_size=8192):
is_player = isinstance(output, PlayerOutput)
is_http = isinstance(output, HTTPServer)
is_fifo = is_player and output.namedpipe
show_progress = isinstance(output, FileOutput) and output.fd is not stdout and sys.stdout.isatty()
show_record_progress = hasattr(output, "record") and isinstance(output.record, FileOutput) and output.record.fd is not stdout and sys.stdout.isatty()
stream_iterator = chain(
[prebuffer],
iter(partial(stream.read, chunk_size), b"")
)
if show_progress:
stream_iterator = progress(stream_iterator,
prefix=os.path.basename(args.output))
elif show_record_progress:
stream_iterator = progress(stream_iterator,
prefix=os.path.basename(args.record))
try:
for data in stream_iterator:
if is_win32 and is_fifo:
output.player.poll()
if output.player.returncode is not None:
log.info("Player closed")
break
try:
output.write(data)
except IOError as err:
if is_player and err.errno in ACCEPTABLE_ERRNO:
log.info("Player closed")
elif is_http and err.errno in ACCEPTABLE_ERRNO:
log.info("HTTP connection closed")
else:
console.exit("Error when writing to output: {0}, exiting", err)
break
except IOError as err:
console.exit("Error when reading from stream: {0}, exiting", err)
finally:
stream.close()
log.info("Stream ended") | Reads data from stream and then writes it to the output. |
380,289 | def finalise_same_chip_constraints(substitutions, placements):
for merged_vertex in reversed(substitutions):
placement = placements.pop(merged_vertex)
for v in merged_vertex.vertices:
placements[v] = placement | Given a set of placements containing the supplied
:py:class:`MergedVertex`, remove the merged vertices replacing them with
their constituent vertices (changing the placements inplace). |
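A toy worked example of the unpacking step, with a namedtuple standing in for MergedVertex (illustrative only):

from collections import namedtuple

MergedVertex = namedtuple("MergedVertex", "vertices")

def finalise(substitutions, placements):
    for merged in reversed(substitutions):
        placement = placements.pop(merged)   # drop the merged placeholder...
        for v in merged.vertices:
            placements[v] = placement        # ...and give each constituent the same chip

m = MergedVertex(vertices=("a", "b"))
placements = {m: (0, 0), "c": (1, 0)}
finalise([m], placements)
print(placements)   # {'c': (1, 0), 'a': (0, 0), 'b': (0, 0)}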
380,290 | def load_secrets(self, secret_path):
self._config = p_config.render_secrets(self.config_path, secret_path) | render secrets into config object |
380,291 | def clear(self, *resource_types):
resource_types = resource_types or tuple(self.__caches.keys())
for cls in resource_types:
self.__caches[cls].clear()
del self.__caches[cls] | Clear cache for each provided APIResource class, or all resources if no classes are provided |
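A rough sketch of the per-resource-class cache this method assumes; the class and attribute names here are illustrative, not the library's own.

class ResourceCache:
    def __init__(self):
        self._caches = {}                     # maps resource class -> {key: resource}

    def put(self, cls, key, value):
        self._caches.setdefault(cls, {})[key] = value

    def clear(self, *resource_types):
        # default to every cached class; snapshot keys before deleting
        resource_types = resource_types or tuple(self._caches.keys())
        for cls in resource_types:
            self._caches[cls].clear()
            del self._caches[cls]

cache = ResourceCache()
cache.put(str, "name", "value")
cache.clear()                                 # no classes given: clears everything
print(cache._caches)                          # {}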
380,292 | def _get_fault_type_dummy_variables(self, rup):
U, SS, NS, RS = 0, 0, 0, 0
if np.abs(rup.rake) <= 30.0 or (180.0 - np.abs(rup.rake)) <= 30.0:
SS = 1
elif rup.rake > 30.0 and rup.rake < 150.0:
RS = 1
else:
NS = 1
return U, SS, NS, RS | Fault type (Strike-slip, Normal, Thrust/reverse) is
derived from rake angle.
Rake angles within 30 degrees of horizontal are strike-slip,
angles from 30 to 150 are reverse, and angles from
-30 to -150 are normal.
Note that the 'Unspecified' case is not considered,
because rake is always given. |
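A worked example of the rake-angle classification described above, written as a standalone function:

import numpy as np

def fault_type(rake):
    if np.abs(rake) <= 30.0 or (180.0 - np.abs(rake)) <= 30.0:
        return "strike-slip"   # within 30 degrees of horizontal
    elif 30.0 < rake < 150.0:
        return "reverse"       # thrust/reverse
    else:
        return "normal"

for rake in (0.0, 20.0, 90.0, -90.0, 170.0):
    print(rake, fault_type(rake))
# 0.0, 20.0 and 170.0 -> strike-slip; 90.0 -> reverse; -90.0 -> normal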
380,293 | def parse_cidr (addr, infer=True, allow_host=False):
def check (r0, r1):
a = int(r0)
b = r1
if (not allow_host) and (a & ((1<<b)-1)):
raise RuntimeError("Host part of CIDR address is not zero (%s)"
% (addr,))
return (r0,32-r1)
addr = addr.split('/', 2)
if len(addr) == 1:
if infer is False:
return check(IPAddr(addr[0]), 0)
addr = IPAddr(addr[0])
b = 32-infer_netmask(addr)
m = (1<<b)-1
if (int(addr) & m) == 0:
return check(addr, 0)
try:
wild = 32-int(addr[1])
except:
m = int(IPAddr(addr[1]))
b = 0
while m & (1<<31):
b += 1
m <<= 1
if m & 0x7fffffff != 0:
raise RuntimeError("Netmask " + str(addr[1]) + " is not CIDR-compatible")
wild = 32-b
if not (wild >= 0 and wild <= 32):
raise RuntimeError("Invalid mask length")
return check(IPAddr(addr[0]), wild)
if not (wild >= 0 and wild <= 32):
raise RuntimeError("Invalid mask length")
return check(IPAddr(addr[0]), wild) | Takes a CIDR address or plain dotted-quad, and returns a tuple of address
and count-of-network-bits.
Can infer the network bits based on network classes if infer=True.
Can also take a string in the form 'address/netmask', as long as the
netmask is representable in CIDR.
FIXME: This function is badly named. |
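A standalone illustration of the netmask branch above: count the leading 1-bits, and reject any mask that has a set bit after the first 0-bit (not CIDR-compatible).

def netmask_to_prefix(mask_int):
    bits, m = 0, mask_int
    while m & (1 << 31):                 # count leading 1-bits
        bits += 1
        m = (m << 1) & 0xffffffff
    if m & 0x7fffffff != 0:              # a 1-bit after the first 0-bit -> not CIDR
        raise RuntimeError("Netmask is not CIDR-compatible")
    return bits

print(netmask_to_prefix(0xffffff00))     # 255.255.255.0 -> 24
print(netmask_to_prefix(0xffff0000))     # 255.255.0.0   -> 16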
380,294 | def compute_loss_curves_maps(filename, builder, rlzi, monitor):
with datastore.read(filename) as dstore:
rlzs = dstore['losses_by_event']['rlzi']  # dataset/field names are assumptions
losses = dstore['losses_by_event'][rlzs == rlzi]['loss']
return rlzi, builder.build_curves_maps(losses, rlzi) | :param filename: path to the datastore
:param builder: LossCurvesMapsBuilder instance
:param rlzi: realization index
:param monitor: Monitor instance
:returns: rlzi, (curves, maps) |
380,295 | def Focus(self):
self._Skype._Api.allow_focus(self._Skype.Timeout)
self._Skype._DoCommand('FOCUS') | Brings the client window into focus. |
380,296 | def get_client(self, request=None):
if not isinstance(request, oauth.Request):
request = self.get_oauth_request()
client_key = request.get_parameter('oauth_consumer_key')
if not client_key:
raise Exception(
'Missing oauth_consumer_key parameter in request')  # message wording is an assumption
client = models.Client.get_by_key_name(client_key)
if not client:
raise Exception('Client "%s" not found.' % client_key)  # message wording is an assumption
return client | Return the client from the OAuth parameters. |
380,297 | def compat_kwargs(kwargs):
warn_deprecations(kwargs)
for old, new in RENAMED_VARS.items():
if old in kwargs:
kwargs[new] = kwargs[old]
for c_old, c_new in RENAMED_VARS.items():
if c_new == new:
kwargs[c_old] = kwargs[new] | To keep backwards compat change the kwargs to new names |
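A toy illustration of the renaming behaviour; the RENAMED_VARS content here is made up, and the nesting of the inner loop follows the most natural reading of the code above.

RENAMED_VARS = {"old_name": "new_name"}

def compat_kwargs_demo(kwargs):
    for old, new in RENAMED_VARS.items():
        if old in kwargs:
            kwargs[new] = kwargs[old]
            # keep any other legacy aliases that map to the same new name in sync
            for c_old, c_new in RENAMED_VARS.items():
                if c_new == new:
                    kwargs[c_old] = kwargs[new]

opts = {"old_name": 42}
compat_kwargs_demo(opts)
print(opts)   # {'old_name': 42, 'new_name': 42}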
380,298 | def plot_predict(self, h=5, past_values=20, intervals=True,**kwargs):
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
predictions, variance, lower, upper = self._construct_predict(self.latent_variables.get_z_values(),h)
full_predictions = np.append(self.data,predictions)
full_lower = np.append(self.data,lower)
full_upper = np.append(self.data,upper)
date_index = self.shift_dates(h)
plot_values = full_predictions[-h-past_values:]*self._norm_std + self._norm_mean
plot_index = date_index[-h-past_values:]
lower = np.append(full_predictions[-h-1],lower)
upper = np.append(full_predictions[-h-1],upper)
plt.figure(figsize=figsize)
if intervals == True:
plt.fill_between(date_index[-h-1:],
lower*self._norm_std + self._norm_mean,
upper*self._norm_std + self._norm_mean,
alpha=0.2)
plt.plot(plot_index,plot_values)
plt.title("Forecast for " + self.data_name)
plt.xlabel("Time")
plt.ylabel(self.data_name)
plt.show() | Plots forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show 95% prediction intervals for the forecast?
Returns
----------
- Plot of the forecast
- Error bars, forecasted_values, plot_values, plot_index |
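A self-contained sketch of the forecast-with-interval plot assembled above, using synthetic data in place of a fitted model (all values below are made up for illustration):

import numpy as np
import matplotlib.pyplot as plt

history = np.cumsum(np.random.randn(20))                  # past observations
forecast = history[-1] + np.cumsum(np.random.randn(5) * 0.5)
lower, upper = forecast - 1.0, forecast + 1.0             # stand-in 95% band

x_hist = np.arange(20)
x_fcst = np.arange(19, 25)                                 # band anchored at the last observation
band_lower = np.append(history[-1], lower)
band_upper = np.append(history[-1], upper)

plt.figure(figsize=(10, 7))
plt.fill_between(x_fcst, band_lower, band_upper, alpha=0.2)
plt.plot(np.append(x_hist, x_fcst[1:]), np.append(history, forecast))
plt.title("Forecast (synthetic example)")
plt.xlabel("Time")
plt.show()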
380,299 | def docx_process_table(table: DOCX_TABLE_TYPE,
config: TextProcessingConfig) -> str:
def get_cell_text(cell_) -> str:
cellparagraphs = [paragraph.text.strip()
for paragraph in cell_.paragraphs]
cellparagraphs = [x for x in cellparagraphs if x]
return '\n'.join(cellparagraphs)
ncols = 1
for row in table.rows:
ncols = max(ncols, len(row.cells))
pt = prettytable.PrettyTable(
field_names=list(range(ncols)),
encoding=ENCODING,
header=False,
border=True,
hrules=prettytable.ALL,
vrules=prettytable.NONE if config.plain else prettytable.ALL,
)
pt.align = 'l'   # left-align cell contents
pt.valign = 't'  # top-align cell contents
pt.max_width = max(config.width // ncols, config.min_col_width)
if config.plain:
for row in table.rows:
for i, cell in enumerate(row.cells):
n_before = i
n_after = ncols - i - 1
ptrow = (
[''] * n_before +
[get_cell_text(cell)] +
[''] * n_after
)
assert(len(ptrow) == ncols)
pt.add_row(ptrow)
else:
for row in table.rows:
ptrow = []
for cell in row.cells:
ptrow.append(get_cell_text(cell))
ptrow += [''] * (ncols - len(ptrow))
assert (len(ptrow) == ncols)
pt.add_row(ptrow)
return pt.get_string() | Converts a DOCX table to text.
Structure representing a DOCX table:
.. code-block:: none
table
.rows[]
.cells[]
.paragraphs[]
.text
That's the structure of a :class:`docx.table.Table` object, but also of our
homebrew creation, :class:`CustomDocxTable`.
The ``plain`` option optimizes for natural language processing, by:
- removing vertical lines:
.. code-block:: none
+-------------+-------------+
| AAA AAA | BBB BBB |
| AAA AAA | BBB BBB |
+-------------+-------------+
becomes
.. code-block:: none
-----------------------------
AAA AAA BBB BBB
AAA AAA BBB BBB
-----------------------------
- and offsetting cells:
.. code-block:: none
AAA AAA BBB BBB CCC CCC
AAA AAA BBB BBB CCC CCC
becomes
.. code-block:: none
AAA AAA
AAA AAA
BBB BBB
BBB BBB
CCC CCC
CCC CCC
- Note also that the grids in DOCX files can have varying number of cells
per row, e.g.
.. code-block:: none
+---+---+---+
| 1 | 2 | 3 |
+---+---+---+
| 1 | 2 |
+---+---+ |
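A small prettytable demo of the two layouts described above (gridded versus no vertical rules with offset cells); no DOCX file is needed, and the column names are placeholders.

import prettytable

rows = [["AAA AAA", "BBB BBB"], ["AAA AAA", "BBB BBB"]]

gridded = prettytable.PrettyTable(field_names=["c0", "c1"], header=False, border=True,
                                  hrules=prettytable.ALL, vrules=prettytable.ALL)
plain = prettytable.PrettyTable(field_names=["c0", "c1"], header=False, border=True,
                                hrules=prettytable.ALL, vrules=prettytable.NONE)
for r in rows:
    gridded.add_row(r)
    plain.add_row([r[0], ""])   # offset each cell into its own row/column slot
    plain.add_row(["", r[1]])

print(gridded.get_string())
print(plain.get_string())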