Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k)
---|---|---|
14,900 | def from_ZNM(cls, Z, N, M, name=''):
df = pd.DataFrame.from_dict({'Z': Z, 'N': N, 'M': M}).set_index(['Z', 'N'])['M']
df.name = name
return cls(df=df, name=name) | Creates a table from arrays Z, N and M
Example:
________
>>> Z = [82, 82, 83]
>>> N = [126, 127, 130]
>>> M = [-21.34, -18.0, -14.45]
>>> Table.from_ZNM(Z, N, M, name='Custom Table')
Z N
82 126 -21.34
127 -18.00
83 130 -14.45
Name: Custom Table, dtype: float64 |
14,901 | def run_synthetic_SGLD():
theta1 = 0
theta2 = 1
sigma1 = numpy.sqrt(10)
sigma2 = 1
sigmax = numpy.sqrt(2)
X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100)
minibatch_size = 1
total_iter_num = 1000000
lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001, total_iter_num=total_iter_num,
factor=0.55)
optimizer = mx.optimizer.create('sgld',
learning_rate=None,
rescale_grad=1.0,
lr_scheduler=lr_scheduler,
wd=0)
updater = mx.optimizer.get_updater(optimizer)
theta = mx.random.normal(0, 1, (2,), mx.cpu())
grad = nd.empty((2,), mx.cpu())
samples = numpy.zeros((2, total_iter_num))
start = time.time()
for i in range(total_iter_num):
if (i + 1) % 100000 == 0:
end = time.time()
print("Iter:%d, Time spent: %f" % (i + 1, end - start))
start = time.time()
ind = numpy.random.randint(0, X.shape[0])
synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax,
rescale_grad=X.shape[0] / float(minibatch_size), grad=grad)
updater('theta', grad, theta)
samples[:, i] = theta.asnumpy()
plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet)
plt.colorbar()
plt.show() | Run synthetic SGLD |
14,902 | def remove_notification_listener(self, notification_id):
for v in self.notifications.values():
toRemove = list(filter(lambda tup: tup[0] == notification_id, v))
if len(toRemove) > 0:
v.remove(toRemove[0])
return True
return False | Remove a previously added notification callback.
Args:
notification_id: The numeric id passed back from add_notification_listener
Returns:
True if the callback was found and removed, False otherwise. |
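A minimal standalone sketch of the structure this method walks (the event-type keys and the (notification_id, callback) tuples are assumptions, not taken from the source):
notifications = {
    "ACTIVATE": [(1, print), (2, len)],   # hypothetical (notification_id, callback) pairs
    "TRACK": [(3, print)],
}

def remove_by_id(notification_id):
    # Same scan-and-remove logic as remove_notification_listener above.
    for listeners in notifications.values():
        to_remove = [tup for tup in listeners if tup[0] == notification_id]
        if to_remove:
            listeners.remove(to_remove[0])
            return True
    return False

print(remove_by_id(2))  # True
print(remove_by_id(9))  # False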
14,903 | def space(self,bins=None,units=None,conversion_function=convert_time,resolution=None,end_at_end=True,scale=None):
if scale in ['log'] or (scale is None and self.scale in ['log']):
return self.logspace(bins=bins,units=units,conversion_function=conversion_function,resolution=resolution,end_at_end=end_at_end)
return self.linspace(bins=bins,units=units,conversion_function=conversion_function,resolution=resolution,end_at_end=end_at_end) | Computes adequate binning for the dimension (on the values).
bins: number of bins or None
units: str or None
conversion_function: function to convert units to other units
resolution: step size or None
end_at_end: Boolean
only if `unit == 1`
whether or not the last point should be the last data point (True) or one after the last valid point (False)
scale: 'lin','log' or None
a spike container can also use 'unique', but not the LabelDimension itself!
if the LabelDimension.scale is 'unique', .bins() will return a linear spacing |
14,904 | def parse(self, stream, mimetype, content_length, options=None):
if (
self.max_content_length is not None
and content_length is not None
and content_length > self.max_content_length
):
raise exceptions.RequestEntityTooLarge()
if options is None:
options = {}
parse_func = self.get_parse_func(mimetype, options)
if parse_func is not None:
try:
return parse_func(self, stream, mimetype, content_length, options)
except ValueError:
if not self.silent:
raise
return stream, self.cls(), self.cls() | Parses the information from the given stream, mimetype,
content length and mimetype parameters.
:param stream: an input stream
:param mimetype: the mimetype of the data
:param content_length: the content length of the incoming data
:param options: optional mimetype parameters (used for
the multipart boundary for instance)
:return: A tuple in the form ``(stream, form, files)``. |
14,905 | def grid_expansion_costs(network, without_generator_import=False):
def _get_transformer_costs(transformer):
if isinstance(transformer.grid, LVGrid):
return float(network.config[][])
elif isinstance(transformer.grid, MVGrid):
return float(network.config[][])
def _get_line_costs(line, quantity):
if isinstance(line.grid, LVGrid):
voltage_level = 'lv'
elif isinstance(line.grid, MVGrid):
voltage_level = 'mv'
else:
raise KeyError("Grid must be LVGrid or MVGrid.")
projection = partial(
pyproj.transform,
pyproj.Proj(init=.format(
int(network.config[][]))),
pyproj.Proj(init=))
sqm2sqkm = 1e6
population_density = (line.grid.grid_district[] /
(transform(projection,
line.grid.grid_district[]).area /
sqm2sqkm))
if population_density <= 500:
population_density =
else:
population_density =
costs_cable = float(network.config[][.format(
voltage_level)])
costs_cable_earthwork = float(network.config[][
.format(
voltage_level,
population_density)])
return (costs_cable_earthwork * l.length +
costs_cable * l.length * (quantity - 1))
costs = pd.DataFrame()
if without_generator_import:
equipment_changes = network.results.equipment_changes.loc[
network.results.equipment_changes.iteration_step > 0]
else:
equipment_changes = network.results.equipment_changes
if not equipment_changes.empty:
transformers = equipment_changes[equipment_changes[].apply(
isinstance, args=(Transformer,))]
added_transformers = transformers[transformers[] == ]
removed_transformers = transformers[
transformers[] == ]
added_removed_transformers = added_transformers.loc[
added_transformers[].isin(
removed_transformers[])]
added_transformers = added_transformers[
~added_transformers[].isin(
added_removed_transformers.equipment)]
for t in added_transformers[]:
costs = costs.append(pd.DataFrame(
{: t.type.name,
: _get_transformer_costs(t),
: 1,
: ,
: t.grid.station.mv_feeder if isinstance(
t.grid, LVGrid) else None},
index=[t]))
lines = equipment_changes.loc[equipment_changes.index[
equipment_changes.reset_index()[].apply(
isinstance, args=(Line,))]]
for l in list(lines.index.unique()):
aggr_lines = []
aggr_lines_generator = l.grid.graph.lines_by_attribute()
for aggr_line in aggr_lines_generator:
aggr_lines.append(repr(aggr_line[]))
if not repr(l) in aggr_lines:
number_lines_added = equipment_changes[
(equipment_changes.index == l) &
(equipment_changes.equipment ==
l.type.name)][].sum()
costs = costs.append(pd.DataFrame(
{: l.type.name,
: _get_line_costs(l, number_lines_added),
: l.length * number_lines_added,
: number_lines_added,
: ( if isinstance(l.grid, LVGrid)
else ),
: get_mv_feeder_from_line(l)},
index=[l]))
if costs.empty:
costs = costs.append(pd.DataFrame(
{: [],
: [0],
: [0],
: [0],
: ,
:
},
index=[]))
return costs | Calculates grid expansion costs for each reinforced transformer and line
in kEUR.
Attributes
----------
network : :class:`~.grid.network.Network`
without_generator_import : Boolean
If True excludes lines that were added in the generator import to
connect new generators to the grid from calculation of grid expansion
costs. Default: False.
Returns
-------
`pandas.DataFrame<dataframe>`
DataFrame containing type and costs plus in the case of lines the
line length and number of parallel lines of each reinforced
transformer and line. Index of the DataFrame is the respective object
that can either be a :class:`~.grid.components.Line` or a
:class:`~.grid.components.Transformer`. Columns are the following:
type: String
Transformer size or cable name
total_costs: float
Costs of equipment in kEUR. For lines the line length and number of
parallel lines is already included in the total costs.
quantity: int
For transformers quantity is always one, for lines it specifies the
number of parallel lines.
line_length: float
Length of line or in case of parallel lines all lines in km.
voltage_level : :obj:`str` {'lv' | 'mv' | 'mv/lv'}
Specifies voltage level the equipment is in.
mv_feeder : :class:`~.grid.components.Line`
First line segment of half-ring used to identify in which
feeder the grid expansion was conducted in.
Notes
-------
Total grid expansion costs can be obtained through
self.grid_expansion_costs.total_costs.sum(). |
14,906 | def mangle_signature(sig, max_chars=30):
s = re.sub(r"^\((.*)\)$", r"\1", sig).strip()
s = re.sub(r"\\\\", "", s)
s = re.sub(r"\\[^", "", s)
args = []
opts = []
opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=")
while s:
m = opt_re.search(s)
if not m:
args = s.split()
break
opts.insert(0, m.group(2))
s = m.group(1)[:-2]
sig = limited_join(", ", args, max_chars=max_chars-2)
if opts:
if not sig:
sig = "[%s]" % limited_join(", ", opts, max_chars=max_chars-4)
elif len(sig) < max_chars - 4 - 2 - 3:
sig += "[, %s]" % limited_join(", ", opts,
max_chars=max_chars-len(sig)-4-2)
return u"(%s)" % sig | Reformat a function signature to a more compact form. |
14,907 | def register_type(cls, name):
x = TypeDefinition(name, (cls,), ())
Validator.types_mapping[name] = x | Register `name` as a type to validate as an instance of class `cls`. |
14,908 | def VxLANTunnelState_originator_switch_info_switchIdentifier(self, **kwargs):
config = ET.Element("config")
VxLANTunnelState = ET.SubElement(config, "VxLANTunnelState", xmlns="http://brocade.com/ns/brocade-notification-stream")
originator_switch_info = ET.SubElement(VxLANTunnelState, "originator-switch-info")
switchIdentifier = ET.SubElement(originator_switch_info, "switchIdentifier")
switchIdentifier.text = kwargs.pop('switchIdentifier')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
14,909 | def create_optimizer(name, **kwargs):
if name.lower() in Optimizer.opt_registry:
return Optimizer.opt_registry[name.lower()](**kwargs)
else:
raise ValueError('Cannot find optimizer %s' % name) | Instantiates an optimizer with a given name and kwargs.
.. note:: We can use the alias `create` for ``Optimizer.create_optimizer``.
Parameters
----------
name: str
Name of the optimizer. Should be the name
of a subclass of Optimizer. Case insensitive.
kwargs: dict
Parameters for the optimizer.
Returns
-------
Optimizer
An instantiated optimizer.
Examples
--------
>>> sgd = mx.optimizer.Optimizer.create_optimizer('sgd')
>>> type(sgd)
<class 'mxnet.optimizer.SGD'>
>>> adam = mx.optimizer.create('adam', learning_rate=.1)
>>> type(adam)
<class 'mxnet.optimizer.Adam'> |
14,910 | def isConnected(self, fromName, toName):
for c in self.connections:
if (c.fromLayer.name == fromName and
c.toLayer.name == toName):
return 1
return 0 | Are these two layers connected this way? |
14,911 | def _submit(self, pool, args, callback):
if self.config.args.single_threaded:
callback(self.call_runner(*args))
else:
pool.apply_async(self.call_runner, args=args, callback=callback) | If the caller has passed the magic 'single-threaded' flag, call the
function directly instead of pool.apply_async. The single-threaded flag
is intended for gathering more useful performance information about
what happens beneath `call_runner`, since python's default profiling
tools ignore child threads.
This does still go through the callback path for result collection. |
14,912 | def comments(self):
record_numbers = range(2, self.fward)
if not record_numbers:
return
data = b''.join(self.read_record(n)[0:1000] for n in record_numbers)
try:
return data[:data.find(b'\4')].decode('ascii').replace('\0', '\n')
except IndexError:
raise ValueError()
except UnicodeDecodeError:
raise ValueError() | Return the text inside the comment area of the file. |
14,913 | def _parse_tensor(self, indices=False):
if indices:
self.line = self._skip_lines(1)
tensor = np.zeros((3, 3))
for i in range(3):
tokens = self.line.split()
if indices:
tensor[i][0] = float(tokens[1])
tensor[i][1] = float(tokens[2])
tensor[i][2] = float(tokens[3])
else:
tensor[i][0] = float(tokens[0])
tensor[i][1] = float(tokens[1])
tensor[i][2] = float(tokens[2])
self.line = self._skip_lines(1)
return tensor | Parse a tensor. |
14,914 | def downstream(self, f, n=1):
if f.strand == -1:
return self.left(f, n)
return self.right(f, n) | find n downstream features where downstream is determined by
the strand of the query Feature f
Overlapping features are not considered.
f: a Feature object
n: the number of features to return |
14,915 | def from_file(path):
with open(path) as f:
content = f.readlines()
content = [x.strip() for x in content]
urls = list(filter(None, content))
return NewsPlease.from_urls(urls) | Crawls articles from the urls and extracts relevant information.
:param path: path to file containing urls (each line contains one URL)
:return: A dict containing given URLs as keys, and extracted information as corresponding values. |
14,916 | def get_active_pitch_range(self):
if self.pianoroll.shape[1] < 1:
raise ValueError("Cannot compute the active pitch range for an "
"empty pianoroll")
lowest = 0
highest = 127
while lowest < highest:
if np.any(self.pianoroll[:, lowest]):
break
lowest += 1
if lowest == highest:
raise ValueError("Cannot compute the active pitch range for an "
"empty pianoroll")
while not np.any(self.pianoroll[:, highest]):
highest -= 1
return lowest, highest | Return the active pitch range as a tuple (lowest, highest).
Returns
-------
lowest : int
The lowest active pitch in the pianoroll.
highest : int
The highest active pitch in the pianoroll. |
14,917 | def no_use_pep517_callback(option, opt, value, parser):
if value is not None:
msg =
raise_option_error(parser, option=option, msg=msg)
parser.values.use_pep517 = False | Process a value provided for the --no-use-pep517 option.
This is an optparse.Option callback for the no_use_pep517 option. |
14,918 | def find_page_of_state_m(self, state_m):
for state_identifier, page_info in list(self.tabs.items()):
if page_info['state_m'] is state_m:
return page_info['page'], state_identifier
return None, None | Return the identifier and page of a given state model
:param state_m: The state model to be searched
:return: page containing the state and the state_identifier |
14,919 | def Rizk(mp, dp, rhog, D):
rs equation." Proceedings of Pneumotransport 3,
paper D4. BHRA Fluid Engineering, Cranfield, England (1973)
.. [2] Klinzing, G. E., F. Rizk, R. Marcus, and L. S. Leung. Pneumatic
Conveying of Solids: A Theoretical and Practical Approach.
Springer, 2013.
.. [3] Rhodes, Martin J. Introduction to Particle Technology. Wiley, 2013.
'
alpha = 1440*dp + 1.96
beta = 1100*dp + 2.5
term1 = 1./10**alpha
Frs_sorta = 1/(g*D)**0.5
expression1 = term1*Frs_sorta**beta
expression2 = mp/rhog/(pi/4*D**2)
V = (expression2/expression1)**(1./(1 + beta))
return V | r'''Calculates saltation velocity of the gas for pneumatic conveying,
according to [1]_ as described in [2]_ and many others.
.. math::
\mu=\left(\frac{1}{10^{1440d_p+1.96}}\right)\left(Fr_s\right)^{1100d_p+2.5}
Fr_s = \frac{V_{salt}}{\sqrt{gD}}
\mu = \frac{m_p}{\frac{\pi}{4}D^2V \rho_f}
Parameters
----------
mp : float
Solid mass flow rate, [kg/s]
dp : float
Particle diameter, [m]
rhog : float
Gas density, [kg/m^3]
D : float
Diameter of pipe, [m]
Returns
-------
V : float
Saltation velocity of gas, [m/s]
Notes
-----
Model is rearranged to be explicit in terms of saltation velocity
internally.
Examples
--------
Example is from [3]_.
>>> Rizk(mp=0.25, dp=100E-6, rhog=1.2, D=.078)
9.8833092829357
References
----------
.. [1] Rizk, F. "Pneumatic conveying at optimal operation conditions and a
solution of Bath's equation." Proceedings of Pneumotransport 3,
paper D4. BHRA Fluid Engineering, Cranfield, England (1973)
.. [2] Klinzing, G. E., F. Rizk, R. Marcus, and L. S. Leung. Pneumatic
Conveying of Solids: A Theoretical and Practical Approach.
Springer, 2013.
.. [3] Rhodes, Martin J. Introduction to Particle Technology. Wiley, 2013. |
14,920 | def task_ref_role(name, rawtext, text, lineno, inliner,
options=None, content=None):
node = pending_task_xref(rawsource=text)
return [node], [] | Process a role that references the target nodes created by the
``lsst-task`` directive.
Parameters
----------
name
The role name used in the document.
rawtext
The entire markup snippet, with role.
text
The text marked with the role.
lineno
The line number where ``rawtext`` appears in the input.
inliner
The inliner instance that called us.
options
Directive options for customization.
content
The directive content for customization.
Returns
-------
nodes : `list`
List of nodes to insert into the document.
messages : `list`
List of system messages. |
14,921 | def predict_is(self, h):
result = pd.DataFrame([self.run(h=h)[2]]).T
result.index = self.index[-h:]
return result | Outputs predictions for the Aggregate algorithm on the in-sample data
Parameters
----------
h : int
How many steps to run the aggregating algorithm on
Returns
----------
- pd.DataFrame of ensemble predictions |
14,922 | def save(self, data):
if not self.is_connected:
raise Exception("No database selected")
if not data:
return False
if isinstance(data, dict):
doc = couchdb.Document()
doc.update(data)
self.db.create(doc)
elif isinstance(data, couchdb.Document):
self.db.update(data)
elif isinstance(data, list):
self.db.update(data)
return True | Save a document or list of documents |
14,923 | def _write_color (self, text, color=None):
if color is None:
self.fp.write(text)
else:
write_color(self.fp, text, color) | Print text with given color. If color is None, print text as-is. |
14,924 | def user_field_create(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/user_fields
api_path = "/api/v2/user_fields.json"
return self.call(api_path, method="POST", data=data, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/user_fields#create-user-fields |
14,925 | def tokenize(text, to_lower=False, delimiters=DEFAULT_DELIMITERS):
_raise_error_if_not_sarray(text, "text")
sf = _turicreate.SFrame({'docs': text})
fe = _feature_engineering.Tokenizer(features=['docs'],
to_lower=to_lower,
delimiters=delimiters,
output_column_prefix=None)
tokens = fe.fit_transform(sf)
return tokens['docs'] | Tokenize the input SArray of text strings and return the list of tokens.
Parameters
----------
text : SArray[str]
Input data of strings representing English text. This tokenizer is not
intended to process XML, HTML, or other structured text formats.
to_lower : bool, optional
If True, all strings are converted to lower case before tokenization.
delimiters : list[str], None, optional
Input strings are tokenized using delimiter characters in this list.
Each entry in this list must contain a single character. If set to
`None`, then a Penn treebank-style tokenization is used, which contains
smart handling of punctuations.
Returns
-------
out : SArray[list]
Each text string in the input is mapped to a list of tokens.
See Also
--------
count_words, count_ngrams, tf_idf
References
----------
- `Penn treebank tokenization <https://web.archive.org/web/19970614072242/http://www.cis.upenn.edu:80/~treebank/tokenization.html>`_
Examples
--------
.. sourcecode:: python
>>> import turicreate
>>> docs = turicreate.SArray(['This is the first sentence.',
"This one, it's the second sentence."])
# Default tokenization by space characters
>>> turicreate.text_analytics.tokenize(docs)
dtype: list
Rows: 2
[['This', 'is', 'the', 'first', 'sentence.'],
['This', 'one,', "it's", 'the', 'second', 'sentence.']]
# Penn treebank-style tokenization
>>> turicreate.text_analytics.tokenize(docs, delimiters=None)
dtype: list
Rows: 2
[['This', 'is', 'the', 'first', 'sentence', '.'],
['This', 'one', ',', 'it', "'s", 'the', 'second', 'sentence', '.']] |
14,926 | def end_element (self, tag):
tag = tag.encode(self.encoding, "ignore")
self.fd.write("</%s>" % tag) | Print HTML end element.
@param tag: tag name
@type tag: string
@return: None |
14,927 | def hash_vector(self, v, querying=False):
bucket_keys = []
if querying:
for child_hash in self.child_hashes:
lshash = child_hash[]
if not lshash.hash_name in self.permutation.permutedIndexs:
raise AttributeError( % lshash.hash_name)
for bucket_key in lshash.hash_vector(v, querying):
neighbour_keys = self.permutation.get_neighbour_keys(lshash.hash_name,bucket_key)
for n in neighbour_keys:
bucket_keys.append(lshash.hash_name++n)
else:
for child_hash in self.child_hashes:
lshash = child_hash[]
for bucket_key in lshash.hash_vector(v, querying):
child_hash[][bucket_key] = bucket_key
bucket_keys.append(lshash.hash_name++bucket_key)
return bucket_keys | Hashes the vector and returns the bucket key as string. |
14,928 | def neighbours(self, word, size = 10):
word = word.strip()
v = self.word_vec(word)
[distances], [points] = self.kdt.query(array([v]), k = size, return_distance = True)
assert len(distances) == len(points), "distances and points should be in same shape."
words, scores = [], {}
for (x,y) in zip(points, distances):
w = self.index2word[x]
if w == word: s = 1.0
else: s = cosine(v, self.syn0[x])
if s < 0: s = abs(s)
words.append(w)
scores[w] = min(s, 1.0)
for x in sorted(words, key=scores.get, reverse=True):
yield x, scores[x] | Get nearest words with KDTree, ranking by cosine distance |
14,929 | def save_figure(self, event=None, transparent=False, dpi=600):
file_choices = "PNG (*.png)|*.png|SVG (*.svg)|*.svg|PDF (*.pdf)|*.pdf"
try:
ofile = self.conf.title.strip()
except:
ofile =
if len(ofile) > 64:
ofile = ofile[:63].strip()
if len(ofile) < 1:
ofile =
for c in :
ofile = ofile.replace(c, )
ofile = ofile +
orig_dir = os.path.abspath(os.curdir)
dlg = wx.FileDialog(self, message=,
defaultDir = os.getcwd(),
defaultFile=ofile,
wildcard=file_choices,
style=wx.FD_SAVE|wx.FD_CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
if hasattr(self, ):
self.fig.savefig(path, transparent=transparent, dpi=dpi)
else:
self.canvas.print_figure(path, transparent=transparent, dpi=dpi)
if (path.find(self.launch_dir) == 0):
path = path[len(self.launch_dir)+1:]
self.write_message( % path)
os.chdir(orig_dir) | save figure image to file |
14,930 | def set_num_special_tokens(self, num_special_tokens):
self.transformer.set_num_special_tokens(num_special_tokens)
self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight) | Update input and output embeddings with new embedding matrix
Make sure we are sharing the embeddings |
14,931 | def get_instance(self, payload):
return TaskQueueInstance(self._version, payload, workspace_sid=self._solution['workspace_sid'], ) | Build an instance of TaskQueueInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueInstance |
14,932 | def is_symbol(string):
return (
is_int(string) or is_float(string) or
is_constant(string) or is_unary(string) or
is_binary(string) or
(string == '(') or (string == ')')
) | Return true if the string is a mathematical symbol. |
14,933 | def map_trigger(library, session, trigger_source, trigger_destination, mode):
return library.viMapTrigger(session, trigger_source, trigger_destination, mode) | Map the specified trigger source line to the specified destination line.
Corresponds to viMapTrigger function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param trigger_source: Source line from which to map. (Constants.TRIG*)
:param trigger_destination: Destination line to which to map. (Constants.TRIG*)
:param mode:
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode` |
14,934 | def getAllSystemVariables(self, remote):
variables = {}
if self.remotes[remote]['username'] and self.remotes[remote]['password']:
LOG.debug(
"ServerThread.getAllSystemVariables: Getting all System variables via JSON-RPC")
session = self.jsonRpcLogin(remote)
if not session:
return
try:
params = {"_session_id_": session}
response = self._rpcfunctions.jsonRpcPost(
self.remotes[remote]['ip'], self.remotes[remote].get('jsonport', DEFAULT_JSONPORT), "SysVar.getAll", params)
if response['error'] is None and response['result']:
for var in response['result']:
key, value = self.parseCCUSysVar(var)
variables[key] = value
self.jsonRpcLogout(remote, session)
except Exception as err:
self.jsonRpcLogout(remote, session)
LOG.warning(
"ServerThread.getAllSystemVariables: Exception: %s" % str(err))
else:
try:
variables = self.proxies[
"%s-%s" % (self._interface_id, remote)].getAllSystemVariables()
except Exception as err:
LOG.debug(
"ServerThread.getAllSystemVariables: Exception: %s" % str(err))
return variables | Get all system variables from CCU / Homegear |
14,935 | def build_time(start_time):
diff_time = round(time.time() - start_time, 2)
if diff_time <= 59.99:
sum_time = str(diff_time) + " Sec"
elif diff_time > 59.99 and diff_time <= 3599.99:
sum_time = round(diff_time / 60, 2)
sum_time_list = re.findall(r"\d+", str(sum_time))
sum_time = ("{0} Min {1} Sec".format(sum_time_list[0],
sum_time_list[1]))
elif diff_time > 3599.99:
sum_time = round(diff_time / 3600, 2)
sum_time_list = re.findall(r"\d+", str(sum_time))
sum_time = ("{0} Hours {1} Min".format(sum_time_list[0],
sum_time_list[1]))
return sum_time | Calculate build time per package |
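A standalone sketch of the same formatting rules (re-implemented here so it runs on its own, mirroring the bucketing above):
import re

def format_elapsed(diff_time):
    # diff_time is elapsed seconds; bucket into Sec / Min / Hours like build_time.
    diff_time = round(diff_time, 2)
    if diff_time <= 59.99:
        return "{0} Sec".format(diff_time)
    if diff_time <= 3599.99:
        whole, frac = re.findall(r"\d+", str(round(diff_time / 60, 2)))[:2]
        return "{0} Min {1} Sec".format(whole, frac)
    whole, frac = re.findall(r"\d+", str(round(diff_time / 3600, 2)))[:2]
    return "{0} Hours {1} Min".format(whole, frac)

print(format_elapsed(42.5))   # 42.5 Sec
print(format_elapsed(125))    # 2 Min 08 Sec (the second field is the decimal part, not true seconds)
print(format_elapsed(7200))   # 2 Hours 0 Min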
14,936 | def interfaces_info():
def replace(value):
if value == netifaces.AF_LINK:
return
if value == netifaces.AF_INET:
return
if value == netifaces.AF_INET6:
return
return value
results = {}
for iface in netifaces.interfaces():
addrs = netifaces.ifaddresses(iface)
results[iface] = {replace(k): v for k, v in addrs.items()}
return results | Returns interfaces data. |
14,937 | def _append_array(self, value, _file):
_labs =
_file.write(_labs)
self._tctr += 1
for _item in value:
_cmma = if self._vctr[self._tctr] else
_file.write(_cmma)
self._vctr[self._tctr] += 1
_item = self.object_hook(_item)
_type = type(_item).__name__
_MAGIC_TYPES[_type](self, _item, _file)
self._vctr[self._tctr] = 0
self._tctr -= 1
_labs =
_file.write(_labs) | Call this function to write array contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file |
14,938 | def text(self):
if isinstance(self._value, CommentedMap):
raise TypeError("{0} is a mapping, has no text value.".format(repr(self)))
if isinstance(self._value, CommentedSeq):
raise TypeError("{0} is a sequence, has no text value.".format(repr(self)))
return self._text | Return string value of scalar, whatever value it was parsed as. |
14,939 | def kids(tup_tree):
k = tup_tree[2]
if k is None:
return []
return [x for x in k if type(x) == tuple] | Return a list with the child elements of tup_tree.
The child elements are represented as tupletree nodes.
Child nodes that are not XML elements (e.g. text nodes) in tup_tree are
filtered out. |
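A self-contained illustration of the tupletree shape assumed here (the element names are made up): each node is a (name, attributes, children) tuple, and non-tuple children such as text nodes are filtered out.
def kids(tup_tree):
    k = tup_tree[2]
    if k is None:
        return []
    return [x for x in k if type(x) == tuple]

# Hypothetical tupletree node: (element_name, attributes, children)
node = ("CLASS", {"NAME": "CIM_Foo"},
        ["some text",                        # text node, dropped by kids()
         ("PROPERTY", {"NAME": "Size"}, []),
         ("QUALIFIER", {"NAME": "Key"}, [])])

print([child[0] for child in kids(node)])    # ['PROPERTY', 'QUALIFIER']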
14,940 | def _get_offset_day(self, other):
mstart = datetime(other.year, other.month, 1)
wday = mstart.weekday()
shift_days = (self.weekday - wday) % 7
return 1 + shift_days + self.week * 7 | Find the day in the same month as other that has the same
weekday as self.weekday and is the self.week'th such day in the month.
Parameters
----------
other : datetime
Returns
-------
day : int |
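A standalone worked example of the same arithmetic (the weekday/week values are hypothetical): with weekday=2 (Wednesday), the first and second Wednesdays of May 2024 fall on the 1st and the 8th.
from datetime import datetime

def offset_day(other, weekday, week):
    # Day of other's month that is the (week+1)-th occurrence of `weekday`,
    # mirroring _get_offset_day above.
    mstart = datetime(other.year, other.month, 1)
    shift_days = (weekday - mstart.weekday()) % 7
    return 1 + shift_days + week * 7

print(offset_day(datetime(2024, 5, 20), weekday=2, week=0))  # 1  (first Wednesday)
print(offset_day(datetime(2024, 5, 20), weekday=2, week=1))  # 8  (second Wednesday)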
14,941 | def read_properties(group):
if not in group:
raise IOError()
data = group[][...][0].replace(b, b)
return pickle.loads(data) | Returns properties loaded from a group |
14,942 | def __fork_pty(self):
parent_fd, child_fd = os.openpty()
if parent_fd < 0 or child_fd < 0:
raise ExceptionPexpect, "Error! Could not open pty with os.openpty()."
pid = os.fork()
if pid < 0:
raise ExceptionPexpect, "Error! Failed os.fork()."
elif pid == 0:
os.close(parent_fd)
self.__pty_make_controlling_tty(child_fd)
os.dup2(child_fd, 0)
os.dup2(child_fd, 1)
os.dup2(child_fd, 2)
if child_fd > 2:
os.close(child_fd)
else:
os.close(child_fd)
return pid, parent_fd | This implements a substitute for the forkpty system call. This
should be more portable than the pty.fork() function. Specifically,
this should work on Solaris.
Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
resolve the issue with Python's pty.fork() not supporting Solaris,
particularly ssh. Based on patch to posixmodule.c authored by Noah
Spurrier::
http://mail.python.org/pipermail/python-dev/2003-May/035281.html |
14,943 | def _html(title: str, field_names: List[str]) -> str:
inputs = ''.join(_SINGLE_INPUT_TEMPLATE.substitute(field_name=field_name)
for field_name in field_names)
quoted_field_names = [f"'{field_name}'" for field_name in field_names]
quoted_field_list = f"[{','.join(quoted_field_names)}]"
return _PAGE_TEMPLATE.substitute(title=title,
css=_CSS,
inputs=inputs,
qfl=quoted_field_list) | Returns bare bones HTML for serving up an input form with the
specified fields that can render predictions from the configured model. |
14,944 | def fromstring(text, schema=None):
if schema:
parser = objectify.makeparser(schema = schema.schema)
return objectify.fromstring(text, parser=parser)
else:
return objectify.fromstring(text) | Parses a KML text string
This function parses a KML text string and optionally validates it against
a provided schema object |
14,945 | def is_valid_name_error(name: str, node: Node = None) -> Optional[GraphQLError]:
if not isinstance(name, str):
raise TypeError("Expected string")
if name.startswith("__"):
return GraphQLError(
f"Name {name!r} must not begin with ,"
" which is reserved by GraphQL introspection.",
node,
)
if not re_name.match(name):
return GraphQLError(
f"Names must match /^[_a-zA-Z][_a-zA-Z0-9]*$/ but {name!r} does not.", node
)
return None | Return an Error if a name is invalid. |
14,946 | def start(self):
vms = yield from self.list()
for vm in vms:
if vm["vmname"] == self.vmname:
self._vmx_path = vm["vmx_path"]
break
if not self._vmx_path:
raise GNS3VMError("VMWare VM {} not found".format(self.vmname))
if not os.path.exists(self._vmx_path):
raise GNS3VMError("VMware VMX file {} doesn't exist".format(self._vmx_path))
vmware_tools_state = yield from self._execute("checkToolsState", [self._vmx_path])
if vmware_tools_state not in ("installed", "running"):
raise GNS3VMError("VMware tools are not installed in {}".format(self.vmname))
try:
running = yield from self._is_running()
except VMwareError as e:
raise GNS3VMError("Could not list VMware VMs: {}".format(str(e)))
if not running:
log.info("Update GNS3 VM settings")
yield from self._set_vcpus_ram(self.vcpus, self.ram)
yield from self._set_extra_options()
args = [self._vmx_path]
if self._headless:
args.extend(["nogui"])
yield from self._execute("start", args)
log.info("GNS3 VM has been started")
trial = 120
guest_ip_address = ""
log.info("Waiting for GNS3 VM IP")
while True:
guest_ip_address = yield from self._execute("readVariable", [self._vmx_path, "guestVar", "gns3.eth0"], timeout=120, log_level=logging.DEBUG)
guest_ip_address = guest_ip_address.strip()
if len(guest_ip_address) != 0:
break
trial -= 1
if trial == 0:
log.warning("No IP found for the VM via readVariable fallback to getGuestIPAddress")
guest_ip_address = yield from self._execute("getGuestIPAddress", [self._vmx_path, "-wait"], timeout=120)
break
yield from asyncio.sleep(1)
self.ip_address = guest_ip_address
log.info("GNS3 VM IP address set to {}".format(guest_ip_address))
self.running = True | Starts the GNS3 VM. |
14,947 | def decode_union_old(self, data_type, obj):
val = None
if isinstance(obj, six.string_types):
tag = obj
if data_type.definition._is_tag_present(tag, self.caller_permissions):
val_data_type = data_type.definition._get_val_data_type(tag,
self.caller_permissions)
if not isinstance(val_data_type, (bv.Void, bv.Nullable)):
raise bv.ValidationError(
"expected object for , got symbol" % tag)
else:
if not self.strict and data_type.definition._catch_all:
tag = data_type.definition._catch_all
else:
raise bv.ValidationError("unknown tag " % tag)
elif isinstance(obj, dict):
if len(obj) != 1:
raise bv.ValidationError('expected 1 key, got %s' % len(obj))
tag = list(obj)[0]
raw_val = obj[tag]
if data_type.definition._is_tag_present(tag, self.caller_permissions):
val_data_type = data_type.definition._get_val_data_type(tag,
self.caller_permissions)
if isinstance(val_data_type, bv.Nullable) and raw_val is None:
val = None
elif isinstance(val_data_type, bv.Void):
if raw_val is None or not self.strict:
val = None
else:
raise bv.ValidationError('expected null, got %s' %
bv.generic_type_name(raw_val))
else:
try:
val = self.json_compat_obj_decode_helper(val_data_type, raw_val)
except bv.ValidationError as e:
e.add_parent(tag)
raise
else:
if not self.strict and data_type.definition._catch_all:
tag = data_type.definition._catch_all
else:
raise bv.ValidationError("unknown tag " % tag)
else:
raise bv.ValidationError("expected string or object, got %s" %
bv.generic_type_name(obj))
return data_type.definition(tag, val) | The data_type argument must be a Union.
See json_compat_obj_decode() for argument descriptions. |
14,948 | def __find_sentence_initial_proper_names(self, docs):
sentInitialNames = set()
for doc in docs:
for sentence in doc.divide( layer=WORDS, by=SENTENCES ):
sentencePos = 0
for i in range(len(sentence)):
word = sentence[i]
if all([ a[POSTAG] == for a in word[ANALYSIS] ]) and \
not re.match(, word[TEXT]):
sentencePos = 0
continue
if not re.match(, word[TEXT] ) and \
not re.match(, word[TEXT] ) and \
re.match("^[1234567890.()]*$", word[TEXT]):
sentencePos = 0
continue
if sentencePos == 0:
h_postags = [ a[POSTAG] == for a in word[ANALYSIS] ]
if any( h_postags ) and not all( h_postags ):
for analysis in word[ANALYSIS]:
if analysis[POSTAG] == :
sentInitialNames.add( analysis[ROOT] )
sentencePos += 1
return sentInitialNames | Builds a list of sentence-initial proper names: we look at words that have
both proper-noun analysis(es) and non-proper-noun analysis(es) and that occur
at the start of a sentence or of a numbered list item, and record the
unique lemmas of such words; |
14,949 | def subscribe_multi(self, topics):
if self.sock == NC.INVALID_SOCKET:
return NC.ERR_NO_CONN
self.logger.info("SUBSCRIBE: %s", .join([t for (t,q) in topics]))
return self.send_subscribe(False, [(utf8encode(topic), qos) for (topic, qos) in topics]) | Subscribe to some topics. |
14,950 | def before_after_send_handling(self):
self._init_delivery_statuses_dict()
self.before_send()
try:
yield
finally:
self.after_send()
self._update_dispatches() | Context manager that allows to execute send wrapped
in before_send() and after_send(). |
14,951 | def _set_nsx_controller(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",nsx_controller.nsx_controller, yang_name="nsx-controller", rest_name="nsx-controller", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: u, u: None, u: None, u: None, u: u}}), is_container=, yang_name="nsx-controller", rest_name="nsx-controller", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: None, u: None, u: None, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "list",
: ,
})
self.__nsx_controller = t
if hasattr(self, ):
self._set() | Setter method for nsx_controller, mapped from YANG variable /nsx_controller (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_nsx_controller is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_nsx_controller() directly. |
14,952 | def _validate_geometry(self, geometry):
if geometry is not None and geometry not in self.valid_geometries:
raise InvalidParameterError("{} is not a valid geometry".format(geometry))
return geometry | Validates geometry, raising error if invalid. |
14,953 | def convert_conelp(c, G, h, dims, A = None, b = None, **kwargs):
offsets = dims['l'] + sum(dims['q'])
G_lq = G[:offsets,:]
h_lq = h[:offsets,0]
G_s = G[offsets:,:]
h_s = h[offsets:,0]
G_converted = [G_lq]; h_converted = [h_lq]
G_coupling = []
dims_list = []
symbs = []
offset = 0
block_to_sparse = []
for k, si in enumerate(dims['s']):
G_b = G_s[offset:offset+si**2,:]
h_b = h_s[offset:offset+si**2,0]
offset += si**2
blkk, b2s, F = convert_block(G_b, h_b, si, **kwargs)
G1, h1, G2, blkdims = blkk
G_converted.append(G1)
h_converted.append(h1)
dims_list.extend(blkdims)
block_to_sparse.append(b2s)
symbs.append(F)
if G2 is not None: G_coupling.append(G2)
G1 = sparse(G_converted)
I,J,V = [],[],[]
offset = [G_lq.size[0], 0]
for Gcpl in G_coupling:
I.append(Gcpl.I + offset[0])
J.append(Gcpl.J + offset[1])
V.append(Gcpl.V)
offset[0] += Gcpl.size[0]
offset[1] += Gcpl.size[1]
G2 = spmatrix([v for v in itertools.chain(*V)],
[v for v in itertools.chain(*I)],
[v for v in itertools.chain(*J)],tuple(offset))
if offset[0] == 0 or offset[1] == 0:
G = G1
else:
G = sparse([[G1],[G2]])
ct = matrix([c,matrix(0.0,(G2.size[1],1))])
if A is not None:
return (ct, G, matrix(h_converted),\
{'l':dims['l'],'q':dims['q'],'s':dims_list},\
sparse([[A],[spmatrix([],[],[],(A.size[0],G2.size[1]))]]),\
b), block_to_sparse
else:
return (ct, G, matrix(h_converted),\
{'l':dims['l'],'q':dims['q'],'s':dims_list}), block_to_sparse, symbs | Applies the clique conversion method of Fukuda et al. to the positive semidefinite blocks of a cone LP.
:param c: :py:class:`matrix`
:param G: :py:class:`spmatrix`
:param h: :py:class:`matrix`
:param dims: dictionary
:param A: :py:class:`spmatrix` or :py:class:`matrix`
:param b: :py:class:`matrix`
The following example illustrates how to convert a cone LP:
.. code-block:: python
prob = (c,G,h,dims,A,b)
probc, blk2sparse, symbs = convert_conelp(*prob)
The return value `blk2sparse` is a list of 4-tuples
(`blki,I,J,n`) that each defines a mapping between the sparse
matrix representation and the converted block-diagonal
representation, and `symbs` is a list of symbolic factorizations
corresponding to each of the semidefinite blocks in the original cone LP.
.. seealso::
M. Fukuda, M. Kojima, K. Murota, and K. Nakata, `Exploiting Sparsity
in Semidefinite Programming via Matrix Completion I: General Framework
<http://dx.doi.org/10.1137/S1052623400366218>`_,
SIAM Journal on Optimization, 11:3, 2001, pp. 647-674.
S. Kim, M. Kojima, M. Mevissen, and M. Yamashita, `Exploiting Sparsity
in Linear and Nonlinear Matrix Inequalities via Positive Semidefinite
Matrix Completion <http://dx.doi.org/10.1007/s10107-010-0402-6>`_,
Mathematical Programming, 129:1, 2011, pp.. 33-68. |
14,954 | def mode_reader(self):
code, message = self.command("MODE READER")
if not code in [200, 201]:
raise NNTPReplyError(code, message)
return code == 200 | MODE READER command.
Instructs a mode-switching server to switch modes.
See <http://tools.ietf.org/html/rfc3977#section-5.3>
Returns:
Boolean value indicating whether posting is allowed or not. |
14,955 | def _capture_as_text(capture: Callable[..., Any]) -> str:
if not icontract._represent._is_lambda(a_function=capture):
signature = inspect.signature(capture)
param_names = list(signature.parameters.keys())
return "{}({})".format(capture.__qualname__, ", ".join(param_names))
lines, lineno = inspect.findsource(capture)
filename = inspect.getsourcefile(capture)
decorator_inspection = icontract._represent.inspect_decorator(lines=lines, lineno=lineno, filename=filename)
call_node = decorator_inspection.node
capture_node = None
if len(call_node.args) > 0:
assert isinstance(call_node.args[0], ast.Lambda), \
("Expected the first argument to the snapshot decorator to be a condition as lambda AST node, "
"but got: {}").format(type(call_node.args[0]))
capture_node = call_node.args[0]
elif len(call_node.keywords) > 0:
for keyword in call_node.keywords:
if keyword.arg == "capture":
assert isinstance(keyword.value, ast.Lambda), \
"Expected lambda node as value of the argument to the decorator."
capture_node = keyword.value
break
assert capture_node is not None, "Expected to find a keyword AST node with 'capture' arg, but found none"
else:
raise AssertionError(
"Expected a call AST node of a snapshot decorator to have either args or keywords, but got: {}".format(
ast.dump(call_node)))
capture_text = decorator_inspection.atok.get_text(capture_node.body)
return capture_text | Convert the capture function into its text representation by parsing the source code of the decorator. |
14,956 | def chuid(name, uid):
pre_info = info(name)
if not pre_info:
raise CommandExecutionError(
'User \'{0}\' does not exist'.format(name)
)
if uid == pre_info['uid']:
return True
cmd = [, , , uid, , name]
__salt__['cmd.run'](cmd, python_shell=False)
return info(name).get('uid') == uid | Change the uid for a named user
CLI Example:
.. code-block:: bash
salt '*' user.chuid foo 4376 |
14,957 | def _pys2row_heights(self, line):
split_line = self._split_tidy(line)
key = row, tab = self._get_key(*split_line[:2])
height = float(split_line[2])
shape = self.code_array.shape
try:
if row < shape[0] and tab < shape[2]:
self.code_array.row_heights[key] = height
except ValueError:
pass | Updates row_heights in code_array |
14,958 | def generate_name(self, name=None): | generate a Robot Name for the instance to use, if the user doesn't
supply one. |
14,959 | def batchccn(args):
p = OptionParser(batchccn.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
csvfile, = args
mm = MakeManager()
pf = op.basename(csvfile).split(".")[0]
mkdir(pf)
header = next(open(csvfile))
header = None if header.strip().endswith(".bam") else "infer"
logging.debug("Header={}".format(header))
df = pd.read_csv(csvfile, header=header)
cmd = "perl /mnt/software/ccn_gcn_hg38_script/ccn_gcn_hg38.pl"
cmd += " -n {} -b {}"
cmd += " -o {} -r hg38".format(pf)
for i, (sample_key, bam) in df.iterrows():
cmdi = cmd.format(sample_key, bam)
outfile = "{}/{}/{}.ccn".format(pf, sample_key, sample_key)
mm.add(csvfile, outfile, cmdi)
mm.write() | %prog batchccn test.csv
Run CCN script in batch. Write makefile. |
14,960 | def periodicvar_recovery(fakepfpkl,
simbasedir,
period_tolerance=1.0e-3):
if fakepfpkl.endswith('.gz'):
infd = gzip.open(fakepfpkl, 'rb')
else:
infd = open(fakepfpkl, 'rb')
fakepf = pickle.load(infd)
infd.close()
objectid, lcfbasename = fakepf['objectid'], fakepf['lcfbasename']
lcfpath = os.path.join(simbasedir, 'lightcurves', lcfbasename)
pfres[].append(this_lspval)
pfres[] = np.array(pfres[])
pfres[] = np.array(pfres[])
pfres[] = np.array(pfres[])
pfres[] = np.array(pfres[])
if (actual_vartype and
actual_vartype in PERIODIC_VARTYPES and
np.isfinite(actual_varperiod)):
if pfres[].size > 0:
for ri in range(pfres[].size):
pfres[].append(pfres[][ri] -
np.asscalar(actual_varperiod))
pfres[].append(
check_periodrec_alias(actual_varperiod,
pfres[][ri],
tolerance=period_tolerance)
)
pfres[] = np.array(pfres[])
pfres[] = np.array(pfres[])
rec_absdiff = np.abs(pfres[])
best_recp_ind = rec_absdiff == rec_absdiff.min()
pfres[] = (
pfres[][best_recp_ind]
)
pfres[] = (
pfres[][best_recp_ind]
)
pfres[] = (
pfres[][best_recp_ind]
)
pfres[] = (
pfres[][best_recp_ind]
)
pfres[] = (
pfres[][best_recp_ind]
)
else:
LOGWARNING(
%
fakepfpkl
)
pfres[] = np.array([])
pfres[] = np.array([np.nan])
pfres[] = np.array([np.nan])
pfres[] = np.array([],dtype=np.unicode_)
pfres[] = np.array([],dtype=np.unicode_)
pfres[] = np.array([],dtype=np.unicode_)
pfres[] = np.array([np.nan])
else:
pfres[] = np.array(
[]*pfres[].size
)
pfres[] = np.zeros(pfres[].size)
pfres[] = np.array([np.nan])
pfres[] = np.array([],dtype=np.unicode_)
pfres[] = np.array([],dtype=np.unicode_)
pfres[] = np.array([])
pfres[] = np.array([np.nan])
return pfres | Recovers the periodic variable status/info for the simulated PF result.
- Uses simbasedir and the lcfbasename stored in fakepfpkl to figure out
where the LC for this object is.
- Gets the actual_varparams, actual_varperiod, actual_vartype,
actual_varamplitude elements from the LC.
- Figures out if the current objectid is a periodic variable (using
actual_vartype).
- If it is a periodic variable, gets the canonical period assigned to it.
- Checks if the period was recovered in any of the five best periods
reported by any of the period-finders, checks if the period recovered was
a harmonic of the period.
- Returns the objectid, actual period and vartype, recovered period, and
recovery status.
Parameters
----------
fakepfpkl : str
This is a periodfinding-<objectid>.pkl[.gz] file produced in the
`simbasedir/periodfinding` subdirectory after `run_periodfinding` above
is done.
simbasedir : str
The base directory where all of the fake LCs and period-finding results
are.
period_tolerance : float
The maximum difference that this function will consider between an
actual period (or its aliases) and a recovered period to consider it as
as a 'recovered' period.
Returns
-------
dict
Returns a dict of period-recovery results. |
14,961 | def to_json(self) -> dict:
d = self.__dict__
d['p2th_wif'] = self.p2th_wif
return d | export the Deck object to json-ready format |
14,962 | def _psed(text,
before,
after,
limit,
flags):
atext = text
if limit:
limit = re.compile(limit)
comps = text.split(limit)
atext = ''.join(comps[1:])
count = 1
if 'g' in flags:
count = 0
flags = flags.replace('g', '')
aflags = 0
for flag in flags:
aflags |= RE_FLAG_TABLE[flag]
before = re.compile(before, flags=aflags)
text = re.sub(before, after, atext, count=count)
return text | Does the actual work for file.psed, so that single lines can be passed in |
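A minimal standalone sketch of the flag handling (the flag letters are assumptions modelled on sed/re: 'g' means replace all occurrences, the rest map to re module flags):
import re

RE_FLAG_TABLE = {"I": re.I, "M": re.M, "S": re.S, "X": re.X}   # assumed mapping

def psed_sub(text, before, after, flags=""):
    count = 1
    if "g" in flags:              # 'g' -> global replace, i.e. re.sub count=0
        count = 0
        flags = flags.replace("g", "")
    aflags = 0
    for flag in flags:
        aflags |= RE_FLAG_TABLE[flag]
    return re.sub(re.compile(before, flags=aflags), after, text, count=count)

print(psed_sub("foo bar foo", "foo", "baz"))       # baz bar foo
print(psed_sub("foo bar foo", "foo", "baz", "g"))  # baz bar baz
print(psed_sub("Foo bar", "foo", "baz", "I"))      # baz bar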
14,963 | async def get_soundfield(self) -> List[Setting]:
res = await self.services["audio"]["getSoundSettings"]({"target": "soundField"})
return Setting.make(**res[0]) | Get the current sound field settings. |
14,964 | def _numpy_index_by_percentile(self, data, percentile):
data_perc_low = np.nanpercentile(data, percentile, axis=0, interpolation=self.interpolation)
indices = np.empty(data_perc_low.shape, dtype=np.uint8)
indices[:] = np.nan
abs_diff = np.where(np.isnan(data_perc_low), np.inf, abs(data - data_perc_low))
indices = np.where(np.isnan(data_perc_low), self.max_index, np.nanargmin(abs_diff, axis=0))
return indices | Calculate percentile of numpy stack and return the index of the chosen pixel.
numpy percentile function is used with one of the following interpolations {'linear', 'lower', 'higher',
'midpoint', 'nearest'} |
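A standalone numpy sketch of the selection rule described above: take the per-pixel percentile over the stack axis, then pick the index of the observation closest to it.
import numpy as np

# Toy stack: 4 observations of a 2x2 scene.
data = np.array([[[1., 8.], [3., 2.]],
                 [[5., 6.], [7., 4.]],
                 [[9., 2.], [1., 6.]],
                 [[3., 4.], [5., 8.]]])

perc = np.nanpercentile(data, 25, axis=0)                 # per-pixel 25th percentile
indices = np.nanargmin(np.abs(data - perc), axis=0)       # closest observation per pixel

print(perc)     # shape (2, 2)
print(indices)  # index along the stack axis for each pixel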
14,965 | def _from_dict(cls, _dict):
args = {}
if 'relations' in _dict:
args['relations'] = [
QueryRelationsRelationship._from_dict(x)
for x in (_dict.get('relations'))
]
return cls(**args) | Initialize a QueryRelationsResponse object from a json dictionary. |
14,966 | def getUnitCost(self, CorpNum):
result = self._httpget('/FAX/UnitCost', CorpNum)
return int(result.unitCost) | Check the fax transmission unit cost
args
CorpNum : Popbill member business registration number
return
transmission unit cost, as float
raise
PopbillException |
14,967 | def cli(env, account_id):
manager = SoftLayer.CDNManager(env.client)
origins = manager.get_origins(account_id)
table = formatting.Table([, , , ])
for origin in origins:
table.add_row([origin[],
origin[],
origin.get(, formatting.blank()),
origin[]])
env.fout(table) | List origin pull mappings. |
14,968 | def create_temporaries(self, r=True, f=True):
inverse = isinstance(self, FourierTransformInverse)
if inverse:
rspace = self.range
fspace = self.domain
else:
rspace = self.domain
fspace = self.range
if r:
self._tmp_r = rspace.element().asarray()
if f:
self._tmp_f = fspace.element().asarray() | Allocate and store reusable temporaries.
Existing temporaries are overridden.
Parameters
----------
r : bool, optional
Create temporary for the real space
f : bool, optional
Create temporary for the frequency space
Notes
-----
To save memory, clear the temporaries when the transform is
no longer used.
See Also
--------
clear_temporaries
clear_fftw_plan : can also hold references to the temporaries |
14,969 | def process_header(self, headers):
return [c.name for c in self.source.dest_table.columns][1:] | Ignore the incoming header and replace it with the destination header |
14,970 | def _upload_folder_recursive(local_folder,
parent_folder_id,
leaf_folders_as_items=False,
reuse_existing=False):
if leaf_folders_as_items and _has_only_files(local_folder):
print(.format(local_folder))
_upload_folder_as_item(local_folder, parent_folder_id, reuse_existing)
return
else:
print(.format(local_folder))
new_folder_id = _create_or_reuse_folder(local_folder, parent_folder_id,
reuse_existing)
for entry in sorted(os.listdir(local_folder)):
full_entry = os.path.join(local_folder, entry)
if os.path.islink(full_entry):
continue
elif os.path.isdir(full_entry):
_upload_folder_recursive(full_entry,
new_folder_id,
leaf_folders_as_items,
reuse_existing)
else:
print(.format(full_entry))
_upload_as_item(entry,
new_folder_id,
full_entry,
reuse_existing) | Function to recursively upload a folder and all of its descendants.
:param local_folder: full path to local folder to be uploaded
:type local_folder: string
:param parent_folder_id: id of parent folder on the Midas Server instance,
where the new folder will be added
:type parent_folder_id: int | long
:param leaf_folders_as_items: (optional) whether leaf folders should have
all files uploaded as single items
:type leaf_folders_as_items: bool
:param reuse_existing: (optional) whether to accept an existing item of the
same name in the same location, or create a new one instead
:type reuse_existing: bool |
14,971 | async def i2c_read_request(self, address, register, number_of_bytes,
read_type, cb=None, cb_type=None):
if address not in self.i2c_map:
self.i2c_map[address] = {'value': None, 'callback': cb,
'callback_type': cb_type}
data = [address, read_type, register & 0x7f, (register >> 7) & 0x7f,
number_of_bytes & 0x7f, (number_of_bytes >> 7) & 0x7f]
await self._send_sysex(PrivateConstants.I2C_REQUEST, data) | This method requests the read of an i2c device. Results are retrieved
by a call to i2c_get_read_data(). or by callback.
If a callback method is provided, when data is received from the
device it will be sent to the callback method.
Some devices require that transmission be restarted
(e.g. MMA8452Q accelerometer).
Use Constants.I2C_READ | Constants.I2C_END_TX_MASK for those cases.
:param address: i2c device address
:param register: register number (can be set to zero)
:param number_of_bytes: number of bytes expected to be returned
:param read_type: I2C_READ or I2C_READ_CONTINUOUSLY. I2C_END_TX_MASK
may be OR'ed when required
:param cb: Optional callback function to report i2c data as a
result of read command
:param cb_type: Constants.CB_TYPE_DIRECT = direct call or
Constants.CB_TYPE_ASYNCIO = asyncio coroutine
:returns: No return value. |
14,972 | def write_config_file(self, params, path):
cfgp = ConfigParser()
cfgp.add_section(params[])
for p in params:
if p == :
continue
cfgp.set(params[], p, params[p])
f = open(os.path.join(path, ), )
cfgp.write(f)
f.close() | write a config file for this single exp in the folder path. |
14,973 | def text_bounding_box(self, size_pt, text):
if size_pt == 12:
mult = {"h": 9, "w_digit": 5, "w_space": 2}
elif size_pt == 18:
mult = {"h": 14, "w_digit": 9, "w_space": 2}
num_chars = len(text)
return (num_chars * mult["w_digit"] + (num_chars - 1) * mult["w_space"] + 1, mult["h"]) | Return the bounding box of the given text
at the given font size.
:param int size_pt: the font size in points
:param string text: the text
:rtype: tuple (width, height) |
14,974 | def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
if axis is not None:
axis = self._get_axis_number(axis)
if bool_only and axis == 0:
if hasattr(self, "dtype"):
raise NotImplementedError(
"{}.{} does not implement numeric_only.".format(
self.__name__, "all"
)
)
data_for_compute = self[self.columns[self.dtypes == np.bool]]
return data_for_compute.all(
axis=axis, bool_only=False, skipna=skipna, level=level, **kwargs
)
return self._reduce_dimension(
self._query_compiler.all(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
result = self._reduce_dimension(
self._query_compiler.all(
axis=0, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
)
if isinstance(result, BasePandasDataset):
return result.all(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
return result | Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df. |
14,975 | def get_new_broks(self):
for satellites in [self.schedulers, self.pollers, self.reactionners, self.receivers]:
for satellite_link in list(satellites.values()):
logger.debug("Getting broks from %s", satellite_link)
_t0 = time.time()
try:
tmp_broks = satellite_link.get_broks(self.name)
except LinkError:
logger.warning("Daemon %s connection failed, I could not get the broks!",
satellite_link)
else:
if tmp_broks:
logger.debug("Got %d Broks from %s in %s",
len(tmp_broks), satellite_link.name, time.time() - _t0)
statsmgr.gauge(
% (satellite_link.name), len(tmp_broks))
statsmgr.timer(
% (satellite_link.name), time.time() - _t0)
for brok in tmp_broks:
brok.instance_id = satellite_link.instance_id
self.external_broks.extend(tmp_broks) | Get new broks from our satellites
:return: None |
14,976 | def pytwis_clt():
epilog =
twis, prompt = get_pytwis(epilog)
if twis is None:
return -1
auth_secret = []
while True:
try:
arg_dict = pytwis_command_parser(
input(
\
.format(prompt)))
if arg_dict[pytwis_clt_constants.ARG_COMMAND] == pytwis_clt_constants.CMD_EXIT \
or arg_dict[pytwis_clt_constants.ARG_COMMAND] == pytwis_clt_constants.CMD_QUIT:
if auth_secret[0]:
pytwis_command_processor(twis, auth_secret,
{pytwis_clt_constants.ARG_COMMAND:
pytwis_clt_constants.CMD_LOGOUT})
print()
return 0
except ValueError as excep:
print(.format(str(excep)),
file=sys.stderr)
continue
pytwis_command_processor(twis, auth_secret, arg_dict) | The main routine of this command-line tool. |
14,977 | def _get_field(self, field_name, default=None):
full_field_name = 'extra__grpc__{}'.format(field_name)
if full_field_name in self.extras:
return self.extras[full_field_name]
else:
return default | Fetches a field from extras, and returns it. This is some Airflow
magic. The grpc hook type adds custom UI elements
to the hook page, which allow admins to specify scopes, credential pem files, etc.
They get formatted as shown below. |
14,978 | def connect(self, slot):
if not callable(slot):
raise ValueError("Connection to non-callable object failed" % slot.__class__.__name__)
if (isinstance(slot, partial) or in slot.__name__):
slotSelf = slot.__self__
slotDict = weakref.WeakKeyDictionary()
slotDict[slotSelf] = slot.__func__
if slotDict not in self._slots:
self._slots.append(slotDict)
else:
newSlotRef = weakref.ref(slot)
if newSlotRef not in self._slots:
self._slots.append(newSlotRef) | Connects the signal to any callable object |
14,979 | def get_ip_interface_output_interface_vrf(self, **kwargs):
config = ET.Element("config")
get_ip_interface = ET.Element("get_ip_interface")
config = get_ip_interface
output = ET.SubElement(get_ip_interface, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
vrf = ET.SubElement(interface, "vrf")
vrf.text = kwargs.pop('vrf')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
14,980 | def entity(self):
if not issubclass(self.cls, Base) or issubclass(self.cls, DerivedBase):
raise ValueError("Cannot get entity for non-base-class {}".format(self.cls))
return self.cls(self._client, self.id) | Returns the object this grant is for. The objects type depends on the
type of object this grant is applied to, and the object returned is
not populated (accessing its attributes will trigger an api request).
:returns: This grant's entity
:rtype: Linode, NodeBalancer, Domain, StackScript, Volume, or Longview |
14,981 | def list_namespaced_service_account(self, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_service_account_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_service_account_with_http_info(namespace, **kwargs)
return data | list or watch objects of kind ServiceAccount
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_service_account(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ServiceAccountList
If the method is called asynchronously,
returns the request thread. |
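For context, a hedged usage sketch with the official kubernetes Python client; it assumes a reachable cluster and a local kubeconfig, everything else is standard client boilerplate.
from kubernetes import client, config

config.load_kube_config()                     # or config.load_incluster_config() inside a pod
v1 = client.CoreV1Api()
sa_list = v1.list_namespaced_service_account(namespace="default", limit=10)
for sa in sa_list.items:
    print(sa.metadata.name)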
14,982 | def DeserializeMessage(self, response_type, data):
try:
message = encoding.JsonToMessage(response_type, data)
except (exceptions.InvalidDataFromServerError,
messages.ValidationError, ValueError) as e:
raise exceptions.InvalidDataFromServerError(
    'Error decoding response "%s" as type %s: %s' % (
        data, response_type.__name__, e))
return message | Deserialize the given data as method_config.response_type. |
14,983 | def _update_prx(self):
qx = scipy.ones(N_CODON, dtype='float')
for j in range(3):
for w in range(N_NT):
qx[CODON_NT[j][w]] *= self.phi[w]
frx = self.pi_codon**self.beta
self.prx = frx * qx
with scipy.errstate(divide='raise', under='raise', over='raise', invalid='raise'):
for r in range(self.nsites):
self.prx[r] /= self.prx[r].sum() | Update `prx` from `phi`, `pi_codon`, and `beta`. |
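A self-contained numpy sketch of the same update with toy shapes and a fake codon-to-nucleotide mapping (not the real genetic code), just to show the product-then-normalize structure.
import numpy as np

nsites, n_codon = 2, 4
phi = np.array([0.1, 0.2, 0.3, 0.4])                # nucleotide frequencies
pi_codon = np.random.dirichlet(np.ones(n_codon), size=nsites)
beta = 1.5

qx = np.ones(n_codon)
for j in range(3):                                  # toy mapping: codon x uses nucleotide x at every position
    for x in range(n_codon):
        qx[x] *= phi[x]

prx = (pi_codon ** beta) * qx                       # frx * qx
prx /= prx.sum(axis=1, keepdims=True)               # each site's row now sums to 1
print(prx.sum(axis=1))                              # ~[1. 1.]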
14,984 | def _read_next_line(self):
prev_line = self._line
self._line = self.stream.readline()
return prev_line | Read next line store in self._line and return old one |
14,985 | def summary(model, input_size):
    def register_hook(module):
        def hook(module, input, output):
            class_name = str(module.__class__).split('.')[-1].split("'")[0]
            m_key = '%s-%i' % (class_name, len(summary) + 1)
            summary[m_key] = OrderedDict()
            summary[m_key]['input_shape'] = list(input[0].size())
            summary[m_key]['output_shape'] = list(output.size())
            # the remainder of this entry (per-layer 'weight'/'bias' sizes, the
            # 'trainable' flag, 'nb_params', and the printed table with
            # 'Layer (type)', 'Output Shape', 'Param #' columns) is garbled in
            # the source and not recoverable | Print summary of the model |
14,986 | def can_group_commands(command, next_command):
multi_capable_commands = ('get', 'set', 'delete')
if next_command is None:
return False
name = command.get_name()
if name not in multi_capable_commands:
return False
if name != next_command.get_name():
return False
if grouped_args_for_command(command) != grouped_args_for_command(next_command):
return False
if command.get_kwargs() != next_command.get_kwargs():
return False
return True | Returns a boolean representing whether these commands can be
grouped together or not.
A few things are taken into account for this decision:
For ``set`` commands:
- Are all arguments other than the key/value the same?
For ``delete`` and ``get`` commands:
- Are all arguments other than the key the same? |
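A toy restatement of these grouping rules using plain tuples instead of real nydus command objects; the tuple layout (name, key, extra, kwargs) is an assumption made only for this sketch.
MULTI_CAPABLE = ('get', 'set', 'delete')

def can_group(cmd, nxt):
    # cmd/nxt are (name, key, extra, kwargs); 'extra' stands for the arguments
    # other than the key (and, for 'set', the value)
    if nxt is None:
        return False
    name, _, extra, kwargs = cmd
    next_name, _, next_extra, next_kwargs = nxt
    return (name in MULTI_CAPABLE and name == next_name
            and extra == next_extra and kwargs == next_kwargs)

print(can_group(('set', 'k1', (60,), {}), ('set', 'k2', (60,), {})))    # True: only key/value differ
print(can_group(('set', 'k1', (60,), {}), ('set', 'k2', (300,), {})))   # False: the TTL differs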
14,987 | def add_handler(self, type, actions, **kwargs):
l = self._events.get(type, [])
h = Handler(self, type, kwargs, actions)
l.append(h)
self._events[type] = l
return h | Add an event handler to be processed by this session.
type - The type of the event (pygame.QUIT, pygame.KEYUP, etc.).
actions - The methods which should be called when an event matching this specification is received.
More than one action can be tied to a single event, which allows secondary actions to run alongside already existing actions, such as the down arrow in the List.
You can pass either a single action or a list of actions.
kwargs - An arbitrary number of parameters which must be satisfied in order for the event to match.
The keywords are directly matched with the instance variables found in the current event
Each value for kwargs can optionally be a lambda which must evaluate to True in order for the match to work.
Example:
session.add_handler(pygame.QUIT, session.do_quit)
session.add_handler(pygame.KEYDOWN, lambda: ao2.speak("You pressed the enter key."), key = pygame.K_RETURN) |
14,988 | def literal_struct(cls, elems):
tys = [el.type for el in elems]
return cls(types.LiteralStructType(tys), elems) | Construct a literal structure constant made of the given members. |
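Assuming llvmlite is installed, the public classmethod can be exercised directly:
from llvmlite import ir

i32 = ir.IntType(32)
dbl = ir.DoubleType()
const = ir.Constant.literal_struct([ir.Constant(i32, 7), ir.Constant(dbl, 2.5)])
print(const.type)   # the anonymous literal struct type {i32, double}
print(const)        # textual IR for the constant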
14,989 | def remove_repeat_coordinates(x, y, z):
coords = []
variable = []
for (x_, y_, t_) in zip(x, y, z):
if (x_, y_) not in coords:
coords.append((x_, y_))
variable.append(t_)
coords = np.array(coords)
x_ = coords[:, 0]
y_ = coords[:, 1]
z_ = np.array(variable)
return x_, y_, z_ | r"""Remove all x, y, and z where (x,y) is repeated and keep the first occurrence only.
Will not destroy original values.
Parameters
----------
x: array_like
x coordinate
y: array_like
y coordinate
z: array_like
observation value
Returns
-------
x, y, z
List of coordinate observation pairs without
repeated coordinates. |
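A tiny standalone check of the same idea (keep the first observation for each repeated (x, y) pair); it mirrors the loop above without depending on the function.
import numpy as np

x = np.array([0.0, 1.0, 0.0, 2.0])
y = np.array([0.0, 1.0, 0.0, 2.0])
z = np.array([10.0, 20.0, 30.0, 40.0])

seen, keep = set(), []
for i, (xi, yi) in enumerate(zip(x, y)):
    if (xi, yi) not in seen:
        seen.add((xi, yi))
        keep.append(i)

print(x[keep], y[keep], z[keep])   # (0, 0) is kept once, with z = 10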
14,990 | def compute_best_path(local_asn, path1, path2):
best_path = None
best_path_reason = BPR_UNKNOWN
if best_path is None:
best_path = _cmp_by_reachable_nh(path1, path2)
best_path_reason = BPR_REACHABLE_NEXT_HOP
if best_path is None:
best_path = _cmp_by_highest_wg(path1, path2)
best_path_reason = BPR_HIGHEST_WEIGHT
if best_path is None:
best_path = _cmp_by_local_pref(path1, path2)
best_path_reason = BPR_LOCAL_PREF
if best_path is None:
best_path = _cmp_by_local_origin(path1, path2)
best_path_reason = BPR_LOCAL_ORIGIN
if best_path is None:
best_path = _cmp_by_aspath(path1, path2)
best_path_reason = BPR_ASPATH
if best_path is None:
best_path = _cmp_by_origin(path1, path2)
best_path_reason = BPR_ORIGIN
if best_path is None:
best_path = _cmp_by_med(path1, path2)
best_path_reason = BPR_MED
if best_path is None:
best_path = _cmp_by_asn(local_asn, path1, path2)
best_path_reason = BPR_ASN
if best_path is None:
best_path = _cmp_by_igp_cost(path1, path2)
best_path_reason = BPR_IGP_COST
if best_path is None:
best_path = _cmp_by_router_id(local_asn, path1, path2)
best_path_reason = BPR_ROUTER_ID
if best_path is None:
best_path = _cmp_by_cluster_list(path1, path2)
best_path_reason = BPR_CLUSTER_LIST
if best_path is None:
best_path_reason = BPR_UNKNOWN
return best_path, best_path_reason | Compares given paths and returns best path.
Parameters:
-`local_asn`: asn of local bgpspeaker
-`path1`: first path to compare
-`path2`: second path to compare
Best path processing will involve following steps:
1. Select a path with a reachable next hop.
2. Select the path with the highest weight.
3. If path weights are the same, select the path with the highest
local preference value.
4. Prefer locally originated routes (network routes, redistributed
routes, or aggregated routes) over received routes.
5. Select the route with the shortest AS-path length.
6. If all paths have the same AS-path length, select the path based
on origin: IGP is preferred over EGP; EGP is preferred over
Incomplete.
7. If the origins are the same, select the path with lowest MED
value.
8. If the paths have the same MED values, select the path learned
via EBGP over one learned via IBGP.
9. Select the route with the lowest IGP cost to the next hop.
10. Select the route received from the peer with the lowest BGP
router ID.
11. Select the route received from the peer with the shorter
CLUSTER_LIST length.
Returns None if best-path among given paths cannot be computed else best
path.
Assumes paths from NC has source equal to None. |
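The function is a tie-breaker cascade: each _cmp_by_* helper returns the winning path or None, and the first decisive comparator also fixes the reason. A generic, self-contained sketch of that pattern with placeholder comparators (not the real BGP rules):
def pick_best(path1, path2, comparators):
    for reason, cmp_fn in comparators:
        winner = cmp_fn(path1, path2)
        if winner is not None:
            return winner, reason
    return None, 'unknown'

comparators = [
    ('weight', lambda a, b: a if a['weight'] > b['weight']
                            else b if b['weight'] > a['weight'] else None),
    ('as-path', lambda a, b: a if len(a['as_path']) < len(b['as_path'])
                             else b if len(b['as_path']) < len(a['as_path']) else None),
]
p1 = {'weight': 100, 'as_path': [65001, 65002]}
p2 = {'weight': 100, 'as_path': [65003]}
print(pick_best(p1, p2, comparators))   # p2 wins on the shorter AS path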
14,991 | def getFaxStatsSessions(self):
if not self.hasFax():
return None
info_dict = {}
info_dict[] = 0
fax_types = (, )
fax_operations = (, )
fax_states = (, , ,
, , , ,)
info_dict[] = dict([(k,0) for k in fax_types])
info_dict[] = dict([(k,0) for k in fax_operations])
info_dict[] = dict([(k,0) for k in fax_states])
cmdresp = self.executeCommand('fax show sessions')
sections = cmdresp.strip().split()
if len(sections) >= 3:
for line in sections[1][1:]:
cols = re.split(, line)
if len(cols) == 7:
info_dict[] += 1
if cols[3].lower() in fax_types:
info_dict[][cols[3].lower()] += 1
if cols[4] == :
info_dict[][] += 1
elif cols[4] == :
info_dict[][] += 1
if cols[5].lower() in fax_states:
info_dict[][cols[5].lower()] += 1
return info_dict | Query Asterisk Manager Interface for Fax Stats.
CLI Command - fax show sessions
@return: Dictionary of fax stats. |
14,992 | def _worker_process(self):
while not self.terminated:
try:
key, lpath, fpath, remote_md5, pagealign, lpview = \
self._task_queue.get(True, 0.1)
except queue.Empty:
continue
if lpview is None:
start = None
end = None
size = None
else:
start = lpview.fd_start
end = lpview.fd_end
size = end - start
md5 = blobxfer.operations.md5.compute_md5_for_file_asbase64(
fpath, pagealign, start, end)
logger.debug(.format(
md5, remote_md5, fpath))
self._done_cv.acquire()
self._done_queue.put((key, lpath, size, md5 == remote_md5))
self._done_cv.notify()
self._done_cv.release() | Compute MD5 for local file
:param LocalFileMd5Offload self: this |
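A standard-library sketch of the per-slice MD5-as-base64 comparison the worker performs; hashlib/base64 stand in for blobxfer's helper, and the file path in the commented call is hypothetical.
import base64
import hashlib

def md5_base64(path, start=None, end=None, chunk=4 * 1024 * 1024):
    # hash the whole file, or only the [start, end) slice when a view is given
    h = hashlib.md5()
    with open(path, 'rb') as f:
        if start is not None:
            f.seek(start)
        remaining = None if end is None else end - (start or 0)
        while True:
            size = chunk if remaining is None else min(chunk, remaining)
            data = f.read(size)
            if not data:
                break
            h.update(data)
            if remaining is not None:
                remaining -= len(data)
                if remaining <= 0:
                    break
    return base64.b64encode(h.digest()).decode()

# analogous to the worker's check: md5 == remote_md5
# print(md5_base64('some_local_file.bin') == 'expected-base64-md5==')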
14,993 | def create_project(self, name, **kwargs):
data = self._wrap_dict("project", kwargs)
data["project"]["name"] = name
return self.post("/projects.json", data=data) | Creates a project with a name. All other parameters are optional. They
are: `note`, `customer_id`, `budget`, `budget_type`,
`active_hourly_rate`, `hourly_rate`, `hourly_rates_per_service`, and
`archived`. |
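A small sketch of the JSON body this helper posts to /projects.json, assuming _wrap_dict('project', kwargs) simply nests the keyword arguments under a 'project' key.
kwargs = {"note": "Q3 relaunch", "budget": 5000, "budget_type": "minutes"}
data = {"project": dict(kwargs)}
data["project"]["name"] = "Website relaunch"
print(data)
# {'project': {'note': 'Q3 relaunch', 'budget': 5000,
#              'budget_type': 'minutes', 'name': 'Website relaunch'}}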
14,994 | def _ip_is_usable(self, current_ip):
try:
ipaddress.ip_address(current_ip)
except ValueError:
return False
if current_ip == self.real_ip:
return False
if not self._ip_is_safe(current_ip):
return False
return True | Check if the current Tor's IP is usable.
:argument current_ip: current Tor IP
:type current_ip: str
:returns bool |
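The first check relies on ipaddress.ip_address raising ValueError for anything that is not a literal IP; a quick standalone illustration:
import ipaddress

for candidate in ("203.0.113.7", "2001:db8::1", "not-an-ip"):
    try:
        ipaddress.ip_address(candidate)
        print(candidate, "parses as an IP address")
    except ValueError:
        print(candidate, "rejected")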
14,995 | def iaf_flow(one_hot_assignments,
scale_weights,
scale_bias,
num_codes,
summary=True,
name=None):
with tf.name_scope(name, default_name="iaf"):
padded_assignments = tf.pad(
one_hot_assignments, [[0, 0], [0, 0], [1, 0], [0, 0]])[:, :, :-1, :]
scale_bijector = tfp.distributions.bijectors.Affine(
scale_tril=tfp.distributions.fill_triangular(scale_weights))
scale = scale_bijector.forward(
tf.transpose(padded_assignments, [0, 1, 3, 2]))
scale = tf.transpose(scale, [0, 1, 3, 2])
scale = tf.nn.softplus(scale)
scale = scale + tf.nn.softplus(scale_bias[tf.newaxis, tf.newaxis, ...])
scale = scale[..., :-1]
z = one_hot_assignments[..., :-1]
unnormalized_probs = tf.concat([z * scale,
one_hot_assignments[..., -1, tf.newaxis]],
axis=-1)
normalizer = tf.reduce_sum(unnormalized_probs, axis=-1)
flow_output = unnormalized_probs / (normalizer[..., tf.newaxis])
inverse_log_det_jacobian = (-tf.reduce_sum(tf.log(scale), axis=-1)
+ num_codes * tf.log(normalizer))
if summary:
tf.summary.histogram("iaf/scale", tf.reshape(scale, [-1]))
tf.summary.histogram("iaf/inverse_log_det_jacobian",
tf.reshape(inverse_log_det_jacobian, [-1]))
return flow_output, inverse_log_det_jacobian | Performs a single IAF flow using scale and normalization transformations.
Args:
one_hot_assignments: Assignments Tensor with shape [num_samples, batch_size,
latent_size, num_codes].
scale_weights: Tensor corresponding to lower triangular matrix used to
autoregressively generate scale matrix from assignments. To ensure the
lower-triangular matrix has length of latent_size, scale_weights should
be a rank-one tensor with size latent_size * (latent_size + 1) / 2.
scale_bias: Bias tensor to be added to scale tensor, with shape
[latent_size, num_codes]. If scale weights are zero, initialize scale_bias
to be log(exp(1.) / 2. - 1) so initial transformation is identity.
num_codes: Number of codes in codebook.
summary: Whether to save summaries.
name: String used for name scope.
Returns:
flow_output: Transformed one-hot assignments.
inverse_log_det_jacobian: Inverse log deteriminant of Jacobian corresponding
to transformation. |
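A numpy-only sketch of the shape bookkeeping the docstring describes; no TensorFlow is needed to confirm the sizes line up, and all dimensions here are toy values.
import numpy as np

num_samples, batch_size, latent_size, num_codes = 1, 2, 4, 8
one_hot = np.zeros((num_samples, batch_size, latent_size, num_codes))
one_hot[..., 0] = 1.0                                           # arbitrary assignments
scale_weights = np.zeros(latent_size * (latent_size + 1) // 2)  # lower-triangular entries
scale_bias = np.full((latent_size, num_codes),
                     np.log(np.exp(1.) / 2. - 1.))              # identity-like init per the docstring
print(one_hot.shape, scale_weights.shape, scale_bias.shape)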
14,996 | def prt_gene_aart_details(self, geneids, prt=sys.stdout):
_go2nt = self.sortobj.grprobj.go2nt
patgene = self.datobj.kws["fmtgene2"]
patgo = self.datobj.kws["fmtgo2"]
itemid2name = self.datobj.kws.get("itemid2name")
chr2i = self.datobj.get_chr2idx()
for geneid in geneids:
gos_gene = self.gene2gos[geneid]
symbol = "" if itemid2name is None else itemid2name.get(geneid, "")
prt.write("\n")
prt.write(patgene.format(AART=self.gene2aart[geneid], ID=geneid, NAME=symbol))
go2nt = {go:(_go2nt[go], "".join(self.go2chrs[go])) for go in gos_gene}
for ntgo, abc in sorted(go2nt.values(),
key=lambda t: [chr2i[t[1][:1]], t[0].NS, -1*t[0].dcnt]):
prt.write("{ABC} ".format(ABC=abc))
prt.write(patgo.format(**ntgo._asdict())) | For each gene, print ASCII art which represents its associated GO IDs. |
14,997 | def add_data(self, conf):
self.validate_data(conf)
self.process_data(conf)
self.data.append(conf) | Add data to the graph object. May be called several times to add
additional data sets.
conf should be a dictionary including 'data' and 'title' keys |
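A minimal illustration of the expected conf dictionary; 'graph' stands for any object exposing the add_data method above and is hypothetical here.
graph_conf = {'data': [120, 42, 7], 'title': 'Page views'}
# graph.add_data(graph_conf)   # call again with another dict to add a second data set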
14,998 | def cleanup(self):
for k in self._children:
self._children[k].cleanup()
if self._cleanup:
self.remove(True) | Clean up children and remove the directory.
Directory will only be removed if the cleanup flag is set. |
14,999 | def color_palette(name=None, n_colors=6, desat=None):
seaborn_palettes = dict(
    deep=["#4C72B0", "#55A868", "#C44E52",
          "#8172B2", "#CCB974", "#64B5CD"],
    muted=["#4878CF", "#6ACC65", "#D65F5F",
           "#B47CC7", "#C4AD66", "#77BEDB"],
    pastel=["#92C6FF", "#97F0AA", "#FF9F9A",
            "#D0BBFF", "#FFFEA3", "#B0E0E6"],
    bright=["#003FFF", "#03ED3A", "#E8000B",
            "#8A2BE2", "#FFC400", "#00D7FF"],
    dark=["#001C7F", "#017517", "#8C0900",
          "#7600A1", "#B8860B", "#006374"],
    colorblind=["#0072B2", "#009E73", "#D55E00",
                "#CC79A7", "#F0E442", "#56B4E9"],
)
if name is None:
palette = mpl.rcParams["axes.color_cycle"]
elif not isinstance(name, string_types):
palette = name
elif name == "hls":
palette = hls_palette(n_colors)
elif name == "husl":
palette = husl_palette(n_colors)
elif name in seaborn_palettes:
palette = seaborn_palettes[name]
elif name in dir(mpl.cm):
palette = mpl_palette(name, n_colors)
elif name[:-2] in dir(mpl.cm):
palette = mpl_palette(name, n_colors)
else:
raise ValueError("%s is not a valid palette name" % name)
if desat is not None:
palette = [desaturate(c, desat) for c in palette]
pal_cycle = cycle(palette)
palette = [next(pal_cycle) for _ in range(n_colors)]
try:
palette = map(mpl.colors.colorConverter.to_rgb, palette)
palette = _ColorPalette(palette)
except ValueError:
raise ValueError("Could not generate a palette for %s" % str(name))
return palette | Return a list of colors defining a color palette.
Available seaborn palette names:
deep, muted, bright, pastel, dark, colorblind
Other options:
hls, husl, any matplotlib palette
Matplotlib palettes can be specified as reversed palettes by appending
"_r" to the name or as dark palettes by appending "_d" to the name.
This function can also be used in a ``with`` statement to temporarily
set the color cycle for a plot or set of plots.
Parameters
----------
name: None, string, or sequence
Name of palette or None to return current palette. If a
sequence, input colors are used but possibly cycled and
desaturated.
n_colors : int
Number of colors in the palette. If larger than the number of
colors in the palette, they will cycle.
desat : float
Value to desaturate each color by.
Returns
-------
palette : list of RGB tuples.
Color palette.
Examples
--------
>>> p = color_palette("muted")
>>> p = color_palette("Blues_d", 10)
>>> p = color_palette("Set1", desat=.7)
>>> import matplotlib.pyplot as plt
>>> with color_palette("husl", 8):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_palette : set the default color cycle for all plots.
axes_style : define parameters to set the style of plots
plotting_context : define parameters to scale plot elements |