ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3–1.04M chars)
---|---|---|
py | 7dfc9ff63d343956b2cd4ec172c0cc6a0bc6b667 | from locust import HttpUser, task, between
class QuickstartUser(HttpUser):
wait_time = between(5, 9)
@task
def index_page(self):
self.client.get("/")
|
py | 7dfca07ce6b754f6c74a4b42961c25e16d2edecd | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic HTML parser for anchor links only.
Intended for use on a specific style of output produced by the third-party
`wikiextractor`.
"""
import collections
from html import parser as html_parser
import re
import urllib
from absl import logging
# from dense_representations_for_entity_retrieval.mel.wikinews_extractor import constants
from . import constants
_ANCHOR_EXCLUDE_REGEX = re.compile(
"|".join(constants._ANCHOR_EXCLUDE_PATTERNS), re.UNICODE | re.IGNORECASE
)
# Regex to detect and parse wiki link targets that point to Wikipedia.
# Breakdown of the expression:
# Optional initial colon.
# Required "w".
# Optionally write out "wikipedia".
# Required colon.
# Optional language spec of the form "LANG:", capturing the LANG part.
# Required non-zero length title.
# Example strings (square brackets just for display here):
# [[w:title] / [[:w:title]] / [[:w:de:title]] / [[:wikipedia:de:title]]
# TODO(jabot): Consider also exploiting the d: prefixes that point to WikiData.
_WIKIPEDIA_TITLE_REGEX = re.compile(
r":?w(?:ikipedia)?:(?:(%s):)?(.+)" % "|".join(constants.WIKI_LANG_CODES),
re.IGNORECASE,
)
def parse_title_if_wikipedia(title):
"""Require wikipedia prefix and parse title into language and title."""
m = _WIKIPEDIA_TITLE_REGEX.match(title)
if m:
# Two capturing groups even if the first one was empty; hence language will
# be None here if the language spec was absent.
language, title = m.groups()
if language:
language = language.lower()
return language, title
else:
return None, None
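# Illustrative behaviour (assuming "de" is listed in constants.WIKI_LANG_CODES):
#   parse_title_if_wikipedia(":w:de:Berlin") -> ("de", "Berlin")
#   parse_title_if_wikipedia("w:Berlin")     -> (None, "Berlin")
#   parse_title_if_wikipedia("wikt:Berlin")  -> (None, None)  # wikipedia prefix required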
# Regex to detect blocked terms in link prefixes. The terms appear in a context
# like "[[term:foobar]]".
_LINK_PREFIX_BLOCKLIST_REGEX = re.compile(
"(?:%s):.+" % "|".join(constants.LINK_PREFIX_BLOCKLIST), re.UNICODE | re.IGNORECASE
)
def exclude_link_by_prefix(unquoted_url):
"""Returns whether this URL is in the exclusion list."""
return _LINK_PREFIX_BLOCKLIST_REGEX.match(unquoted_url.lstrip())
class WikiExtractorHTMLParser(html_parser.HTMLParser):
"""Light-weight parser to extract text and linked mentions.
Targets the wiki-extractor's output, which should only have <a> tags. Parsing
will throw an exception when those tags are nested.
The class is stateful, so use a new instance for each document.
It only handles unnested <a> tags and drops any other tags.
Usage:
parser = WikiExtractorHTMLParser()
try:
parser.feed(wiki_doc)
output = parser.output
mentions = parser.mentions
except ValueError:
pass # parse failure
"""
def __init__(self):
super(WikiExtractorHTMLParser, self).__init__()
# Cleaned output text.
self._output = ""
# List of mention dicts extracted.
self._mentions = []
# State information.
# This indicates whether we are inside an <a> tag.
self._in_a_tag = False
# The href attribute for the <a> tag we are inside of.
self._href = ""
# The anchor text for the <a> tag we are inside of.
self._anchor_text = ""
# The character position in output where the <a> tag started.
self._last_pos = 0
# Counter of observed tags; for diagnostics.
self._tags = collections.Counter()
self._found_end = False
@property
def mentions(self):
"""Returns list of mentions extracted so far, as a `dict` per mention."""
return self._mentions
def handle_starttag(self, tag, attrs):
logging.debug(
"Encountered a start tag %s (%s):",
tag,
",".join(["{}={}".format(k, v) for (k, v) in attrs]),
)
if tag == "a":
# Fail on nested a-tags for simplicity.
if self._in_a_tag:
raise ValueError("Handling of nested <a>-tags is not implemented.")
self._in_a_tag = True
self._reset()
# Extract hyperlink target.
for name, value in attrs:
if name == "href":
self._href = urllib.parse.unquote(value)
def handle_data(self, data):
logging.debug("Encountered data: '%s'", data)
# Normalize whitespace by eliminating trailing whitespace and more than
# two consecutive newlines.
self._output = re.sub(r" *\n", r"\n", self._output)
self._output = re.sub(r"\n\n\n*", r"\n\n", self._output)
# Remove preceding whitespace when starting a new line.
if self._output and self._output[-1] == "\n":
data = data.lstrip()
# Capture anchor text if inside an a-tag and track position.
if self._in_a_tag and self._href:
# Completely drop certain anchor texts based on a blocklist.
if _ANCHOR_EXCLUDE_REGEX.search(data):
return
_, title = parse_title_if_wikipedia(self._href)
self._anchor_text = ""
if title:
# If this is a wikipedia link, keep the anchor text and extract
# mention.
mention_text = data.replace(" \n", " ").replace("\n", " ")
self._anchor_text = mention_text
self._last_pos = len(self._output)
self._output += mention_text
logging.debug("Keep link, keep contents: %s", self._href)
elif exclude_link_by_prefix(self._href):
# If the link matches the exclusion list, do not append its anchor text.
logging.debug("Drop link, drop contents: %s", self._href)
pass
else:
# For all other links, ignore the hyperlink but keep the anchor text.
logging.debug("Drop link, keep contents: %s", self._href)
self._output += data
else:
# Just append output.
self._output += data
def _reset(self):
self._anchor_text = "" # reset
self._href = ""
def handle_endtag(self, tag):
logging.debug("Encountered an end tag: '%s'", tag)
self._tags.update((tag,))
if tag == "a":
if not self._in_a_tag:
raise ValueError("Spurious end tag.")
self._in_a_tag = False
# Extract mention if we got a well-formed link with anchor text.
if self._href and self._anchor_text:
self._mentions.append(
{
"mention": self._anchor_text,
"position": self._last_pos,
"length": len(self._anchor_text),
"target": self._href,
}
)
self._reset()
@property
def output(self):
"""Returns output text extracted so far."""
self._output = self._output.rstrip()
return self._output
|
py | 7dfca0ace54abb00223fecd203660ecdb19ed9c9 | class Character():
"""Class for representing a COC character."""
def __init__(self):
"""Constructs an empty character."""
self.basic_info = { "Name": "",
"Player": "",
"Occupation": "",
"Age": 0,
"Sex": "",
"Residence": "",
"Birthplace": ""
}
self.characteristics = { "STR": 0,
"DEX": 0,
"INT": 0,
"CON": 0,
"APP": 0,
"POW": 0,
"SIZ": 0,
"EDU": 0,
"Move Rate": 0}
self.status = { "HP": 0,
"Sanity": 0,
"Luck": 0,
"MP": 0}
self.combact_stat = {"Damage Bonus": "",
"Build": 0,
"Dodge": 0}
self.skills = {}
self.weapons = {}
self.backstory = {"Personal Description": [],
"Ideology/Beliefs": [],
"Significant People": [],
"Meaningful Locations": [],
"Treasured Possessions": [],
"Traits": [],
"Injuries & Scars": [],
"Phobias & Manias": [],
"Arcane Tomes, Spells & Artifacts":[],
"Encounters with Strange Entities": []}
self.inventory = []
self.financial_status = {"Spending Level": 0,
"Cash": 0,
"Unspecified Assets": 0,
"Assets": []}
def update(self):
"""Update stats according to their dependencies."""
pass
def generate(self, generator):
"""Use generator to generate stats.
generator: a generator object; it must implement the generate_* functions of the Generator class."""
generator.generate_char(self)
def dump(self, f, fmt="json"):
"""Dump to file."""
pass
def load(self, f, fmt="json"):
"""Load from file."""
pass
def dumps(self, fmt="json"):
"""Dump to string."""
pass
def loads(self, string, fmt="json"):
"""Load from string."""
pass
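# Sketch of intended usage (hypothetical generator class, not part of this file):
#   gen = RandomGenerator()          # must implement generate_char(character)
#   c = Character()
#   c.generate(gen)                  # fills characteristics/skills via the generator
#   c.dump(open("char.json", "w"))   # dump/load are still unimplemented stubs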
|
py | 7dfca1550aa9b125654f4fffafd5454b17bc8b6c | """ Visualize cumulative SHAP values."""
from __future__ import division, unicode_literals
import warnings
from typing import Union
import numpy as np
try:
import matplotlib.cm as cm
import matplotlib.pyplot as pl
import matplotlib
except ImportError:
warnings.warn("matplotlib could not be loaded!")
pass
from . import colors, labels
from ..common import convert_to_link, hclust_ordering, LogitLink
def __change_shap_base_value(base_value, new_base_value, shap_values) -> np.ndarray:
"""Shift SHAP base value to a new value. This function assumes that `base_value` and `new_base_value` are scalars
and that `shap_values` is a two or three dimensional array.
"""
# matrix of shap_values
if shap_values.ndim == 2:
return shap_values + (base_value - new_base_value) / shap_values.shape[1]
# cube of shap_interaction_values
main_effects = shap_values.shape[1]
all_effects = main_effects * (main_effects + 1) // 2
temp = (base_value - new_base_value) / all_effects / 2 # divided by 2 because interaction effects are halved
shap_values = shap_values + temp
# Add the other half to the main effects on the diagonal
idx = np.diag_indices_from(shap_values[0])
shap_values[:, idx[0], idx[1]] += temp
return shap_values
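# Worked example (assumed values): for a 2-D shap_values array with 4 features,
# shifting base_value 0.5 to new_base_value 0.0 adds (0.5 - 0.0) / 4 = 0.125 to every
# SHAP value, so base + row-sum still reproduces each original model prediction.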
def __decision_plot_matplotlib(
base_value,
cumsum,
ascending,
feature_display_count,
features,
feature_names,
highlight,
plot_color,
axis_color,
y_demarc_color,
xlim,
alpha,
color_bar,
auto_size_plot,
title,
show,
legend_labels,
legend_location,
):
"""matplotlib rendering for decision_plot()"""
# image size
row_height = 0.4
if auto_size_plot:
pl.gcf().set_size_inches(8, feature_display_count * row_height + 1.5)
# draw vertical line indicating center
pl.axvline(x=base_value, color="#999999", zorder=-1)
# draw horizontal dashed lines for each feature contribution
for i in range(1, feature_display_count):
pl.axhline(y=i, color=y_demarc_color, lw=0.5, dashes=(1, 5), zorder=-1)
# initialize highlighting
linestyle = np.array("-", dtype=object)
linestyle = np.repeat(linestyle, cumsum.shape[0])
linewidth = np.repeat(1, cumsum.shape[0])
if highlight is not None:
linestyle[highlight] = "-."
linewidth[highlight] = 2
# plot each observation's cumulative SHAP values.
ax = pl.gca()
ax.set_xlim(xlim)
m = cm.ScalarMappable(cmap=plot_color)
m.set_clim(xlim)
y_pos = np.arange(0, feature_display_count + 1)
lines = []
for i in range(cumsum.shape[0]):
o = pl.plot(
cumsum[i, :],
y_pos,
color=m.to_rgba(cumsum[i, -1], alpha),
linewidth=linewidth[i],
linestyle=linestyle[i]
)
lines.append(o[0])
# determine font size. if ' *\n' character sequence is found (as in interaction labels), use a smaller
# font. we don't shrink the font for all interaction plots because if an interaction term is not
# in the display window there is no need to shrink the font.
s = next((s for s in feature_names if " *\n" in s), None)
fontsize = 13 if s is None else 9
# if there is a single observation and feature values are supplied, print them.
if (cumsum.shape[0] == 1) and (features is not None):
renderer = pl.gcf().canvas.get_renderer()
inverter = pl.gca().transData.inverted()
y_pos = y_pos + 0.5
for i in range(feature_display_count):
v = features[0, i]
if isinstance(v, str):
v = "({})".format(str(v).strip())
else:
v = "({})".format("{0:,.3f}".format(v).rstrip("0").rstrip("."))
t = ax.text(np.max(cumsum[0, i:(i + 2)]), y_pos[i], " " + v, fontsize=fontsize,
horizontalalignment="left", verticalalignment="center_baseline", color="#666666")
bb = inverter.transform_bbox(t.get_window_extent(renderer=renderer))
if bb.xmax > xlim[1]:
t.set_text(v + " ")
t.set_x(np.min(cumsum[0, i:(i + 2)]))
t.set_horizontalalignment("right")
bb = inverter.transform_bbox(t.get_window_extent(renderer=renderer))
if bb.xmin < xlim[0]:
t.set_text(v)
t.set_x(xlim[0])
t.set_horizontalalignment("left")
# style axes
ax.xaxis.set_ticks_position("both")
ax.yaxis.set_ticks_position("none")
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.tick_params(color=axis_color, labelcolor=axis_color, labeltop=True)
pl.yticks(np.arange(feature_display_count) + 0.5, feature_names, fontsize=fontsize)
ax.tick_params("x", labelsize=11)
pl.ylim(0, feature_display_count)
pl.xlabel(labels["MODEL_OUTPUT"], fontsize=13)
# draw the color bar - must come after axes styling
if color_bar:
m = cm.ScalarMappable(cmap=plot_color)
m.set_array(np.array([0, 1]))
# place the colorbar
pl.ylim(0, feature_display_count + 0.25)
ax_cb = ax.inset_axes([xlim[0], feature_display_count, xlim[1] - xlim[0], 0.25], transform=ax.transData)
cb = pl.colorbar(m, ticks=[0, 1], orientation="horizontal", cax=ax_cb)
cb.set_ticklabels([])
cb.ax.tick_params(labelsize=11, length=0)
cb.set_alpha(alpha)
cb.outline.set_visible(False)
# re-activate the main axis for drawing.
pl.sca(ax)
if title:
# TODO decide on style/size
pl.title(title)
if ascending:
pl.gca().invert_yaxis()
if legend_labels is not None:
ax.legend(handles=lines, labels=legend_labels, loc=legend_location)
if show:
pl.show()
class DecisionPlotResult:
"""The optional return value of decision_plot. The class attributes can be used to create multiple decision
plots with the same scale and feature ordering.
"""
def __init__(self, base_value, shap_values, feature_names, feature_idx, xlim):
"""
Example
-------
Plot two decision plots using the same feature order and x-axis.
>>> range1, range2 = range(20), range(20, 40)
>>> r = decision_plot(base, shap_values[range1], features[range1], return_objects=True)
>>> decision_plot(base, shap_values[range2], features[range2], feature_order=r.feature_idx, xlim=r.xlim)
Parameters
----------
base_value : float
The base value used in the plot. For multioutput models,
this will be the mean of the base values. This will inherit `new_base_value` if specified.
shap_values : numpy.ndarray
The `shap_values` passed to decision_plot re-ordered based on `feature_order`. If SHAP interaction values
are passed to decision_plot, `shap_values` is a 2D (matrix) representation of the interactions. See
`feature_names` to locate the feature positions. If `new_base_value` is specified, the SHAP values are
relative to the new base value.
feature_names : list of str
The feature names used in the plot in the order specified in the decision_plot parameter `feature_order`.
feature_idx : numpy.ndarray
The index used to order `shap_values` based on `feature_order`. This attribute can be used to specify
identical feature ordering in multiple decision plots.
xlim : tuple[float, float]
The x-axis limits. This attribute can be used to specify the same x-axis in multiple decision plots.
"""
self.base_value = base_value
self.shap_values = shap_values
self.feature_names = feature_names
self.feature_idx = feature_idx
self.xlim = xlim
def decision_plot(
base_value,
shap_values,
features=None,
feature_names=None,
feature_order="importance",
feature_display_range=None,
highlight=None,
link="identity",
plot_color=None,
axis_color="#333333",
y_demarc_color="#333333",
alpha=None,
color_bar=True,
auto_size_plot=True,
title=None,
xlim=None,
show=True,
return_objects=False,
ignore_warnings=False,
new_base_value=None,
legend_labels=None,
legend_location="best",
) -> Union[DecisionPlotResult, None]:
"""Visualize model decisions using cumulative SHAP values. Each colored line in the plot represents the model
prediction for a single observation. Note that plotting too many samples at once can make the plot unintelligible.
Parameters
----------
base_value : float or numpy.ndarray
This is the reference value that the feature contributions start from. Usually, this is
explainer.expected_value.
shap_values : numpy.ndarray
Matrix of SHAP values (# features) or (# samples x # features) from explainer.shap_values(). Or cube of SHAP
interaction values (# samples x # features x # features) from explainer.shap_interaction_values().
features : numpy.array or pandas.Series or pandas.DataFrame or numpy.ndarray or list
Matrix of feature values (# features) or (# samples x # features). This provides the values of all the
features and, optionally, the feature names.
feature_names : list or numpy.ndarray
List of feature names (# features). If None, names may be derived from the features argument if a Pandas
object is provided. Otherwise, numeric feature names will be generated.
feature_order : str or None or list or numpy.ndarray
Any of "importance" (the default), "hclust" (hierarchical clustering), "none", or a list/array of indices.
feature_display_range: slice or range
The slice or range of features to plot after ordering features by feature_order. A step of 1 or None
will display the features in ascending order. A step of -1 will display the features in descending order. If
feature_display_range=None, slice(-1, -21, -1) is used (i.e. show the last 20 features in descending order).
If shap_values contains interaction values, the number of features is automatically expanded to include all
possible interactions: N(N + 1)/2 where N = shap_values.shape[1].
highlight : Any
Specify which observations to draw in a different line style. All numpy indexing methods are supported. For
example, list of integer indices, or a bool array.
link : str
Use "identity" or "logit" to specify the transformation used for the x-axis. The "logit" link transforms
log-odds into probabilities.
plot_color : str or matplotlib.colors.ColorMap
Color spectrum used to draw the plot lines. If str, a registered matplotlib color name is assumed.
axis_color : str or int
Color used to draw plot axes.
y_demarc_color : str or int
Color used to draw feature demarcation lines on the y-axis.
alpha : float
Alpha blending value in [0, 1] used to draw plot lines.
color_bar : bool
Whether to draw the color bar.
auto_size_plot : bool
Whether to automatically size the matplotlib plot to fit the number of features displayed. If `False`,
specify the plot size using matplotlib before calling this function.
title : str
Title of the plot.
xlim: tuple[float, float]
The extents of the x-axis (e.g. (-1.0, 1.0)). If not specified, the limits are determined by the
maximum/minimum predictions centered around base_value when link='identity'. When link='logit', the
x-axis extents are (0, 1) centered at 0.5. x_lim values are not transformed by the link function. This
argument is provided to simplify producing multiple plots on the same scale for comparison.
show : bool
Whether to automatically display the plot.
return_objects : bool
Whether to return a DecisionPlotResult object containing various plotting features. This can be used to
generate multiple decision plots using the same feature ordering and scale.
ignore_warnings : bool
Plotting many data points or too many features at a time may be slow, or may create very large plots. Set
this argument to `True` to override hard-coded limits that prevent plotting large amounts of data.
new_base_value : float
SHAP values are relative to a base value; by default, the expected value of the model's raw predictions. Use
`new_base_value` to shift the base value to an arbitrary value (e.g. the cutoff point for a binary
classification task).
legend_labels : list of str
List of legend labels. If `None`, legend will not be shown.
legend_location : str
Legend location. Any of "best", "upper right", "upper left", "lower left", "lower right", "right",
"center left", "center right", "lower center", "upper center", "center".
Returns
-------
Returns a DecisionPlotResult object if `return_objects=True`. Returns `None` otherwise (the default).
Example
-------
Plot two decision plots using the same feature order and x-axis.
>>> range1, range2 = range(20), range(20, 40)
>>> r = decision_plot(base, shap_values[range1], features[range1], return_objects=True)
>>> decision_plot(base, shap_values[range2], features[range2], feature_order=r.feature_idx, xlim=r.xlim)
"""
# code taken from force_plot. auto unwrap the base_value
if type(base_value) == np.ndarray and len(base_value) == 1:
base_value = base_value[0]
if isinstance(base_value, list) or isinstance(shap_values, list):
raise TypeError("Looks like multi output. Try base_value[i] and shap_values[i], "
"or use shap.multioutput_decision_plot().")
# validate shap_values
if not isinstance(shap_values, np.ndarray):
raise TypeError("The shap_values arg is the wrong type. Try explainer.shap_values().")
# calculate the various dimensions involved (observations, features, interactions, display, etc.)
if shap_values.ndim == 1:
shap_values = shap_values.reshape(1, -1)
observation_count = shap_values.shape[0]
feature_count = shap_values.shape[1]
# code taken from force_plot. convert features from other types.
if str(type(features)) == "<class 'pandas.core.frame.DataFrame'>":
if feature_names is None:
feature_names = features.columns.to_list()
features = features.values
elif str(type(features)) == "<class 'pandas.core.series.Series'>":
if feature_names is None:
feature_names = features.index.to_list()
features = features.values
elif isinstance(features, list):
if feature_names is None:
feature_names = features
features = None
elif features is not None and features.ndim == 1 and feature_names is None:
feature_names = features.tolist()
features = None
# the above code converts features to either None or np.ndarray. if features is something else at this point,
# there's a problem.
if not isinstance(features, (np.ndarray, type(None))):
raise TypeError("The features arg uses an unsupported type.")
if (features is not None) and (features.ndim == 1):
features = features.reshape(1, -1)
# validate/generate feature_names. at this point, feature_names does not include interactions.
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(feature_count)]
elif len(feature_names) != feature_count:
raise ValueError("The feature_names arg must include all features represented in shap_values.")
elif not isinstance(feature_names, (list, np.ndarray)):
raise TypeError("The feature_names arg requires a list or numpy array.")
# transform interactions cube to a matrix and generate interaction names.
if shap_values.ndim == 3:
# flatten
triu_count = feature_count * (feature_count - 1) // 2
idx_diag = np.diag_indices_from(shap_values[0])
idx_triu = np.triu_indices_from(shap_values[0], 1)
a = np.ndarray((observation_count, feature_count + triu_count), shap_values.dtype)
a[:, :feature_count] = shap_values[:, idx_diag[0], idx_diag[1]]
a[:, feature_count:] = shap_values[:, idx_triu[0], idx_triu[1]] * 2
shap_values = a
# names
a = [None] * shap_values.shape[1]
a[:feature_count] = feature_names
for i, row, col in zip(range(feature_count, shap_values.shape[1]), idx_triu[0], idx_triu[1]):
a[i] = "{0} *\n{1}".format(feature_names[row], feature_names[col])
feature_names = a
feature_count = shap_values.shape[1]
features = None # Can't use feature values for interactions...
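# Example (assumed shapes): with 3 features, the (n, 3, 3) interaction cube becomes an
# (n, 3 + 3) matrix: 3 main-effect columns from the diagonal plus 3*(3-1)/2 = 3 doubled
# off-diagonal interaction columns, labelled like "feat_a *\nfeat_b".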
# determine feature order
if isinstance(feature_order, list):
feature_idx = np.array(feature_order)
elif isinstance(feature_order, np.ndarray):
feature_idx = feature_order
elif (feature_order is None) or (feature_order.lower() == "none"):
feature_idx = np.arange(feature_count)
elif feature_order == "importance":
feature_idx = np.argsort(np.sum(np.abs(shap_values), axis=0))
elif feature_order == "hclust":
feature_idx = np.array(hclust_ordering(shap_values.transpose()))
else:
raise ValueError("The feature_order arg requires 'importance', 'hclust', 'none', or an integer list/array "
"of feature indices.")
if (feature_idx.shape != (feature_count, )) or (not np.issubdtype(feature_idx.dtype, np.integer)):
raise ValueError("A list or array has been specified for the feature_order arg. The length must match the "
"feature count and the data type must be integer.")
# validate and convert feature_display_range to a slice. prevents out of range errors later.
if feature_display_range is None:
feature_display_range = slice(-1, -21, -1) # show last 20 features in descending order.
elif not isinstance(feature_display_range, (slice, range)):
raise TypeError("The feature_display_range arg requires a slice or a range.")
elif feature_display_range.step not in (-1, 1, None):
raise ValueError("The feature_display_range arg supports a step of 1, -1, or None.")
elif isinstance(feature_display_range, range):
# Negative values in a range are not the same as negs in a slice. Consider range(2, -1, -1) == [2, 1, 0],
# but slice(2, -1, -1) == [] when len(features) > 2. However, range(2, -1, -1) == slice(2, -inf, -1) after
# clipping.
a = np.iinfo(np.int64).min
feature_display_range = slice(
feature_display_range.start if feature_display_range.start >= 0 else a, # should never happen, but...
feature_display_range.stop if feature_display_range.stop >= 0 else a,
feature_display_range.step
)
# apply new_base_value
if new_base_value is not None:
shap_values = __change_shap_base_value(base_value, new_base_value, shap_values)
base_value = new_base_value
# use feature_display_range to determine which features will be plotted. convert feature_display_range to
# ascending indices and expand by one in the negative direction. why? we are plotting the change in prediction
# for every feature. this requires that we include the value previous to the first displayed feature
# (i.e. i_0 - 1 to i_n).
a = feature_display_range.indices(feature_count)
ascending = True
if a[2] == -1: # The step
ascending = False
a = (a[1] + 1, a[0] + 1, 1)
feature_display_count = a[1] - a[0]
shap_values = shap_values[:, feature_idx]
if a[0] == 0:
cumsum = np.ndarray((observation_count, feature_display_count + 1), shap_values.dtype)
cumsum[:, 0] = base_value
cumsum[:, 1:] = base_value + np.nancumsum(shap_values[:, 0:a[1]], axis=1)
else:
cumsum = base_value + np.nancumsum(shap_values, axis=1)[:, (a[0] - 1):a[1]]
# Select and sort feature names and features according to the range selected above
feature_names = np.array(feature_names)
feature_names_display = feature_names[feature_idx[a[0]:a[1]]].tolist()
feature_names = feature_names[feature_idx].tolist()
features_display = None if features is None else features[:, feature_idx[a[0]:a[1]]]
# throw large data errors
if not ignore_warnings:
if observation_count > 2000:
raise RuntimeError("Plotting {} observations may be slow. Consider subsampling or set "
"ignore_warnings=True to ignore this message.".format(observation_count))
if feature_display_count > 200:
raise RuntimeError("Plotting {} features may create a very large plot. Set "
"ignore_warnings=True to ignore this "
"message.".format(feature_display_count))
if feature_count * observation_count > 100000000:
raise RuntimeError("Processing SHAP values for {} features over {} observations may be slow. Set "
"ignore_warnings=True to ignore this "
"message.".format(feature_count, observation_count))
# convert values based on link and update x-axis extents
create_xlim = xlim is None
link = convert_to_link(link)
base_value_saved = base_value
if isinstance(link, LogitLink):
base_value = link.finv(base_value)
cumsum = link.finv(cumsum)
if create_xlim:
# Expand [0, 1] limits a little for a visual margin
xlim = (-0.02, 1.02)
elif create_xlim:
xmin = np.min((cumsum.min(), base_value))
xmax = np.max((cumsum.max(), base_value))
# create a symmetric axis around base_value
a, b = (base_value - xmin), (xmax - base_value)
if a > b:
xlim = (base_value - a, base_value + a)
else:
xlim = (base_value - b, base_value + b)
# Adjust xlim to include a little visual margin.
a = (xlim[1] - xlim[0]) * 0.02
xlim = (xlim[0] - a, xlim[1] + a)
# Initialize style arguments
if alpha is None:
alpha = 1.0
if plot_color is None:
plot_color = colors.red_blue
__decision_plot_matplotlib(
base_value,
cumsum,
ascending,
feature_display_count,
features_display,
feature_names_display,
highlight,
plot_color,
axis_color,
y_demarc_color,
xlim,
alpha,
color_bar,
auto_size_plot,
title,
show,
legend_labels,
legend_location,
)
if not return_objects:
return None
return DecisionPlotResult(base_value_saved, shap_values, feature_names, feature_idx, xlim)
def multioutput_decision_plot(base_values, shap_values, row_index, **kwargs) -> Union[DecisionPlotResult, None]:
"""Decision plot for multioutput models. Plots all outputs for a single observation. By default, the plotted base
value will be the mean of base_values unless new_base_value is specified. Supports both SHAP values and SHAP
interaction values.
Parameters
----------
base_values : list of float
This is the reference value that the feature contributions start from. Use explainer.expected_value.
shap_values : list of numpy.ndarray
A multioutput list of SHAP matrices or SHAP cubes from explainer.shap_values() or
explainer.shap_interaction_values(), respectively.
row_index : int
The integer index of the row to plot.
**kwargs : Any
Arguments to be passed on to decision_plot().
Returns
-------
Returns a DecisionPlotResult object if `return_objects=True`. Returns `None` otherwise (the default).
"""
if not (isinstance(base_values, list) and isinstance(shap_values, list)):
raise ValueError("The base_values and shap_values args expect lists.")
# convert arguments to arrays for simpler handling
base_values = np.array(base_values)
if not ((base_values.ndim == 1) or (np.issubdtype(base_values.dtype, np.number))):
raise ValueError("The base_values arg should be a list of scalars.")
shap_values = np.array(shap_values)
if shap_values.ndim not in [3, 4]:
raise ValueError("The shap_values arg should be a list of two or three dimensional SHAP arrays.")
if shap_values.shape[0] != base_values.shape[0]:
raise ValueError("The base_values output length is different than shap_values.")
# shift shap base values to mean of base values
base_values_mean = base_values.mean()
for i in range(shap_values.shape[0]):
shap_values[i] = __change_shap_base_value(base_values[i], base_values_mean, shap_values[i])
# select the feature row corresponding to row_index
if (kwargs is not None) and ("features" in kwargs):
features = kwargs["features"]
if isinstance(features, np.ndarray) and (features.ndim == 2):
kwargs["features"] = features[[row_index]]
elif str(type(features)) == "<class 'pandas.core.frame.DataFrame'>":
kwargs["features"] = features.iloc[row_index]
return decision_plot(base_values_mean, shap_values[:, row_index, :], **kwargs)
|
py | 7dfca2ba228587e03d8f645ab5e652dbd755fdfb | #!/usr/bin/python
from Token import Token
from Queues import SQueue
class Dijkstra( ):
"""
A simple implementation of the Dijkstra shortest
path algorithm. No queue optimization. No checking
the closed queue for better options.
"""
def __init__( self, graph ):
self.graph = graph
self.o_queue = SQueue()
def dijkstra_path( self ):
#Initialize the open queue with the start state
#Note: assumes start state is part of a valid path
self.o_queue.add( Token(0,0.,0.,None) )
#Repeatedly check the queue
while self.o_queue.head().state not in self.graph['F']:
current = self.o_queue.pop( )
for arc in self.graph[current.state]:
score = current.score + arc.score
if arc.nextstate in self.o_queue and \
score < self.o_queue.states[arc.nextstate].score:
self.o_queue.remove(arc.nextstate)
if arc.nextstate not in self.o_queue:
heur = score #For a-star this would be different
self.o_queue.add(
Token(arc.nextstate, score, heur, current)
)
return
def compute_traceback( self ):
"""
Reconstruct the traceback, yielding
the actual shortest path, in order.
"""
head = self.o_queue.head()
traceback = [head]
while head.parent:
head = head.parent
traceback.append(head)
traceback.reverse()
return traceback
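# Assumptions about the collaborating classes (not shown in this file): `graph` maps a
# state id to an iterable of arcs exposing `.nextstate` and `.score`, with graph['F']
# holding the set of final states; `SQueue` keeps Tokens ordered by score, so head()
# and pop() always return the cheapest open state.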
if __name__=="__main__":
from graphs import wiki_graph_dijkstra as graph
import sys, argparse
example = "{0}".format(sys.argv[0])
parser = argparse.ArgumentParser( description=example )
parser.add_argument( "--verbose", "-v", help="Verbose mode.", default=False, action="store_true" )
args = parser.parse_args( )
#Instantiate a searcher, and run search, compute traceback
searcher = Dijkstra( graph )
searcher.dijkstra_path( )
traceback = searcher.compute_traceback( )
for tok in traceback:
print(tok.state, tok.score)
|
py | 7dfca2ee3034c2f531c5d6f22a8ac9f644f28f44 | from collections import Counter
from string import punctuation
def load_text(input_file):
"""Load text from a text file and return as a string.
Parameters
----------
input_file : str
Path to text file.
Returns
-------
str
Text file contents.
Examples
--------
>>> load_text("text.txt")
"""
with open(input_file, "r") as file:
text = file.read()
return text
def clean_text(text):
"""Lowercase and remove punctuation from a string.
Parameters
----------
text : str
Text to clean.
Returns
-------
str
Cleaned text.
Examples
--------
>>> clean_text("Early optimization is the root of all evil!")
'early optimization is the root of all evil'
"""
text = text.lower()
for p in punctuation:
text = text.replace(p, "")
return text
def count_words(input_file):
"""Count words in a text file.
Words are made lowercase and punctuation is removed
before counting.
Parameters
----------
input_file : str
Path to text file.
Returns
-------
collections.Counter
dict-like object where keys are words and values are counts.
Examples
--------
>>> count_words("text.txt")
"""
text = load_text(input_file)
text = clean_text(text)
words = text.split()
return Counter(words) |
py | 7dfca378233a18987d5d95bd2db80caeb92206e6 | from robot.api.deco import keyword
class MockLibrary(object):
def no_args(self):
pass
@keyword(types={'arg1': str, 'arg2': int})
def positional_args(self, arg1, arg2):
"""Some documentation
Multi line docs
"""
pass
@keyword(types=None)
def types_disabled(self, arg=False):
pass
@keyword
def positional_and_default(self, arg1, arg2, named1='string1', named2=123):
pass
def default_only(self, named1='string1', named2=123):
pass
def varargs_kwargs(self, *vargs, **kwargs):
pass
|
py | 7dfca3fbdf79ab2d7a98d7c64932228e16c3b6c2 | import math
import sys
def convert_size(size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
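# Example outputs (1024-based units, rounded to 2 decimals):
#   convert_size(0)       -> "0B"
#   convert_size(1536)    -> "1.5 KB"
#   convert_size(10 ** 9) -> "953.67 MB"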
def get_object_size(obj, seen=None):
"""Recursively finds size of objects"""
# https://goshippo.com/blog/measure-real-size-any-python-object/
size = sys.getsizeof(obj)
if seen is None:
seen = set()
obj_id = id(obj)
if obj_id in seen:
return 0
# Important mark as seen *before* entering recursion to gracefully handle
# self-referential objects
seen.add(obj_id)
if isinstance(obj, dict):
size += sum([get_object_size(v, seen) for v in obj.values()])
size += sum([get_object_size(k, seen) for k in obj.keys()])
elif hasattr(obj, '__dict__'):
size += get_object_size(obj.__dict__, seen)
elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
size += sum([get_object_size(i, seen) for i in obj])
return size
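# Example (assumed values): get_object_size({"a": [1, 2, 3]}) adds the sizes of the dict,
# its keys and values, and each list element; the `seen` id-set ensures shared or
# self-referential objects are only counted once.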
|
py | 7dfca4beb919f14ea36edc61e3093cd20cd9f9bf | from __future__ import annotations
from typing import Generator
class StdReader:
def __init__(
self,
) -> None:
import sys
self.buf = sys.stdin.buffer
self.lines = (
self.async_readlines()
)
self.chunks: Generator
def async_readlines(
self,
) -> Generator:
while True:
gen = self.line_chunks()
yield gen
def line_chunks(
self,
) -> Generator:
ln = self.buf.readline()
for chunk in ln.split():
yield chunk
def __call__(
self,
) -> bytes:
try:
chunk = next(self.chunks)
except:
self.chunks = next(
self.lines,
)
chunk = self()
return chunk
def str(
self,
) -> str:
b = self()
return b.decode()
def int(
self,
) -> int:
return int(self.str())
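# Typical competitive-programming usage (reads whitespace-separated tokens from stdin):
#   reader = StdReader()
#   a = reader.int()   # next token as int
#   s = reader.str()   # next token as str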
from abc import ABC, abstractmethod
class Solver(ABC):
def __init__(self):
self.reader = StdReader()
def __call__(
self,
):
self.prepare()
self.solve()
@abstractmethod
def prepare(self):
...
@abstractmethod
def solve(self):
...
import numpy as np
class Problem(
Solver,
):
def prepare(self):
reader = self.reader
a = reader.int()
b = reader.int()
c = reader.int()
self.a = a
self.b = b
self.c = c
def solve(self):
a = self.a
b = self.b
c = self.c
l = a ** 2 + b ** 2
r = c ** 2
ans = (
'Yes' if l < r else 'No'
)
print(ans)
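# Worked example (assumed stdin "3 4 6"): 3**2 + 4**2 = 25 < 36 = 6**2, so solve()
# prints "Yes"; for "3 4 5" it prints "No" because 25 < 25 is false.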
def main():
t = 1
# t = StdReader().int()
for _ in range(t):
Problem()()
if __name__ == '__main__':
main()
|
py | 7dfca54fbc9be6800adc4412b74a5792dc8a380b | from unittest import TestCase
from deepsweet_models import DeepSweetRF, DeepSweetDNN, DeepSweetTextCNN
from ensemble import Ensemble
class TestEnsemble(TestCase):
def test_ensemble(self):
models_folder_path = "../resources/models/"
molecules = ["CN1CCC[C@H]1C2=CN=CC=C2", "CN1C=NC2=C1C(=O)N(C(=O)N2C)C"]
list_of_models = []
list_of_models.append(DeepSweetRF(models_folder_path, "2d", "SelectFromModelFS"))
list_of_models.append(DeepSweetDNN(models_folder_path, "rdk", "all"))
ensemble = Ensemble(list_of_models, models_folder_path)
predictions, dataset, not_converted_molecules = ensemble.predict(molecules) |
py | 7dfca5d10436a333852371f7a06981426f474916 | from distutils.core import setup
setup(name='oc_screenshots',
version='0.9',
description='OwnCloud Auto Screenshots Uploader',
author='Simon Slangen',
requires=['pyocclient', 'pyperclip', 'pync', 'watchdog'],
) |
py | 7dfca6a4054213ba92b9fc5297523385a231d54e | from os import sep
from dash import dcc
from dash import html
from datetime import datetime, timedelta
import helpers
from website_component import Website_Component
SHOW_HISTORIC_DAYS = 90
USE_DATA = [
'hospitalizations_nl',
'hospitalizations_il',
'hospitalizations_nsw',
'vaccinations_nl',
'vaccinations_il',
'vaccinations_nsw',
'cases_nl',
'cases_il',
'cases_nsw'
] # Defines which dataframes to load for this component
class CurrentSituation(Website_Component): # Uses store_required_data from Website_Component
def __init__(self, data):
start_date = datetime.today() - timedelta(days=SHOW_HISTORIC_DAYS)
self.store_required_data(data, USE_DATA, start_date=start_date) # fills self.data with required data
def get_html(self):
"""
Creates the HTML for the element.
Graphs use the data in self.data
"""
return [
html.Div(
className="container",
children=[
html.Div(
className="titleBar",
children=[
html.H3('Current Situation'),
html.Hr()
]),
html.Div(
className="situationBox negativeColors",
children=[
html.Div(
className="fiveGrid",
children= html.H4('Cases')
),
html.Div(
className="fiveGrid cs_graph",
children=dcc.Graph(id="cs_cases_graph", className="cs_graph", figure={
'data': [
{ 'x': self.data['cases_nl']['date'], 'y': self.data['cases_nl']['cases'], 'type': 'line', 'marker': {'symbol': 'circle'}, 'hovertemplate': '<b>%{y} Cases</b><extra></extra>' }
],
'layout': {
'xaxis': { 'showgrid': False, 'zeroline': False, 'visible': False, 'showticklabels': False },
'yaxis': { 'showgrid': False, 'zeroline': False, 'visible': False, 'showticklabels': False, 'automargin': False },
'autosize': True,
'plot_bgcolor': 'rgba(255, 255, 255, 0)',
'margin': { 'b': 0, 't': 0, 'r': 0, 'l': 0 },
'marker': False,
'fillcolor':'#000000',
'hovermode': 'x unified',
'hoverlabel': {
'bordercolor': 'rgb(229 229 229)',
'bgcolor': 'white',
'font': {
'color': 'black'
}
}
},
}, config={ 'staticPlot': False })
),
html.Div(
className="fiveGrid",
children= [
html.Div(
className="counter",
children= [
html.H5(children= helpers.get_latest_kpi_value(self.data['cases_nl'], 'cases_per_100k')),
html.P("per 100k"),
html.P("since yesterday")
]
)
]
),
html.Div(
className="fiveGrid",
children= helpers.get_kpi_trend(self.data['cases_nl'], 'cases')
),
html.Div(
className="fiveGrid",
children= [
html.Div(
className="compairedCountries",
children= [
html.Span(children= [
html.P("Israël:"),
html.P(className="lightpar", children=helpers.get_latest_kpi_value(self.data['cases_il'], 'cases_per_100k')),
helpers.get_kpi_trend_arrow(self.data['cases_il'], 'cases')
]),
html.Span(children= [
html.P("Australia (NSW):"),
html.P(className="lightpar", children=helpers.get_latest_kpi_value(self.data['cases_nsw'], 'cases_per_100k')),
helpers.get_kpi_trend_arrow(self.data['cases_nsw'], 'cases')
]),
]
)
]
)
]),
html.Div(
className="situationBox negativeColors",
children=[
html.Div(
className="fiveGrid",
children= html.H4('Hospitalizations')
),
html.Div(
className="fiveGrid cs_graph",
children=dcc.Graph(id="cs_hosp_graph", className="cs_graph", figure={
'data': [
{ 'x': self.data['hospitalizations_nl']['date'], 'y': self.data['hospitalizations_nl']['hospitalizations'], 'type': 'line', 'marker': {'symbol': 'circle'}, 'hovertemplate': '<b>%{y} Hospitalizations</b><extra></extra>' }
],
'layout': {
'xaxis': { 'showgrid': False, 'zeroline': False, 'visible': False, 'showticklabels': False },
'yaxis': { 'showgrid': False, 'zeroline': False, 'visible': False, 'showticklabels': False, 'automargin': False },
'autosize': True,
'plot_bgcolor': 'rgba(255, 255, 255, 0)',
'margin': { 'b': 0, 't': 0, 'r': 0, 'l': 0 },
'marker': False,
'hovermode': 'x unified',
'hoverlabel': {
'bordercolor': 'rgb(229 229 229)',
'bgcolor': 'white',
'font': {
'color': 'black'
}
}
}
}, config={ 'staticPlot': False })
),
html.Div(
className="fiveGrid",
children= [
html.Div(
className="counter",
children= [
html.H5(children= helpers.get_latest_kpi_value(self.data['hospitalizations_nl'], 'hospitalizations_per_100k')),
html.P("per 100k"),
html.P("since yesterday")
]
)
]
),
html.Div(
className="fiveGrid",
children= helpers.get_kpi_trend(self.data['hospitalizations_nl'], 'hospitalizations')
),
html.Div(
className="fiveGrid",
children= [
html.Div(
className="compairedCountries",
children= [
html.Span(children= [
html.P("Israël:"),
html.P(className="lightpar", children=helpers.get_latest_kpi_value(self.data['hospitalizations_il'], 'hospitalizations_per_100k')),
helpers.get_kpi_trend_arrow(self.data['hospitalizations_il'], 'hospitalizations')
]),
html.Span(children= [
html.P("Australia (NSW):"),
html.P(className="lightpar", children=helpers.get_latest_kpi_value(self.data['hospitalizations_nsw'], 'hospitalizations_per_100k')),
helpers.get_kpi_trend_arrow(self.data['hospitalizations_nsw'], 'hospitalizations')
]),
]
)
]
)
]),
html.Div(
className="situationBox positiveColors",
children=[
html.Div(
className="fiveGrid",
children= html.H4('Vaccination')
),
html.Div(
className="fiveGrid cs_graph",
children=dcc.Graph(id="cs_vac_graph", className="cs_graph", figure={
'data': [
{ 'x': self.data['vaccinations_nl']['date'], 'y': self.data['vaccinations_nl']['vaccinations'], 'type': 'line', 'marker': {'symbol': 'circle'}, 'hovertemplate': '<b>%{y} Vaccinations</b><extra></extra>' }
],
'layout': {
'xaxis': { 'showgrid': False, 'zeroline': False, 'visible': False, 'showticklabels': False },
'yaxis': { 'showgrid': False, 'zeroline': False, 'visible': False, 'showticklabels': False, 'automargin': False },
'autosize': True,
'plot_bgcolor': 'rgba(255, 255, 255, 0)',
'margin': { 'b': 0, 't': 0, 'r': 0, 'l': 0 },
'marker': False,
'hovermode': 'x unified',
'hoverlabel': {
'bordercolor': 'rgb(229 229 229)',
'bgcolor': 'white',
'font': {
'color': 'black'
}
}
}
}, config={ 'staticPlot': False })
),
html.Div(
className="fiveGrid",
children= [
html.Div(
className="counter",
children= [
html.H5(children= helpers.get_latest_kpi_value(self.data['vaccinations_nl'], 'vaccinations_per_100k')),
html.P("per 100k"),
html.P("since last week")
]
)
]
),
html.Div(
className="fiveGrid",
children= helpers.get_kpi_trend(self.data['vaccinations_nl'], 'vaccinations')
),
html.Div(
className="fiveGrid",
children= [
html.Div(
className="compairedCountries",
children= [
html.Span(children= [
html.P("Israël:"),
html.P(className="lightpar", children=helpers.get_latest_kpi_value(self.data['vaccinations_il'], 'vaccinations_per_100k')),
helpers.get_kpi_trend_arrow(self.data['vaccinations_il'], 'vaccinations')
]),
html.Span(children= [
html.P("Australia (NSW):"),
html.P(className="lightpar", children=helpers.get_latest_kpi_value(self.data['vaccinations_nsw'], 'vaccinations_per_100k')),
helpers.get_kpi_trend_arrow(self.data['vaccinations_nsw'], 'vaccinations')
]),
]
)
]
)
])
]
)] |
py | 7dfca6b6ce045352a985a99e92b6eafb4151e53b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class KoubeiMerchantDeviceCrashinfoUploadResponse(AlipayResponse):
def __init__(self):
super(KoubeiMerchantDeviceCrashinfoUploadResponse, self).__init__()
self._error_code = None
self._error_msg = None
self._retry = None
self._success = None
@property
def error_code(self):
return self._error_code
@error_code.setter
def error_code(self, value):
self._error_code = value
@property
def error_msg(self):
return self._error_msg
@error_msg.setter
def error_msg(self, value):
self._error_msg = value
@property
def retry(self):
return self._retry
@retry.setter
def retry(self, value):
self._retry = value
@property
def success(self):
return self._success
@success.setter
def success(self, value):
self._success = value
def parse_response_content(self, response_content):
response = super(KoubeiMerchantDeviceCrashinfoUploadResponse, self).parse_response_content(response_content)
if 'error_code' in response:
self.error_code = response['error_code']
if 'error_msg' in response:
self.error_msg = response['error_msg']
if 'retry' in response:
self.retry = response['retry']
if 'success' in response:
self.success = response['success']
|
py | 7dfca76229964567685e44c3e5107aa41ae9dfa4 | # -*- coding: utf-8 -*-
#
# Cilium documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 12 18:34:43 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
import re
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.ifconfig',
'sphinx.ext.githubpages',
'sphinxcontrib.openapi',
'sphinx_tabs.tabs']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.md']
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Cilium'
copyright = u'2017, Cilium Authors'
author = u'Cilium Authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
release = open("../VERSION", "r").read().strip()
# Asume the current branch is master but check with VERSION file.
branch = "master"
if not release.endswith(".90"):
semver = release.split(".")
branch = "v{}.{}".format(semver[0], semver[1])
githubusercontent = 'https://raw.githubusercontent.com/cilium/cilium/'
scm_web = githubusercontent + branch
# Store variables in the epilogue so they are globally available.
rst_epilog = """
.. |SCM_WEB| replace:: \{s}
""".format(s = scm_web)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '_themes/**/*.rst']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = ["_themes", ]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images', '_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Ciliumdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
'extraclassoptions': 'openany',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Cilium.tex', u'Cilium Documentation',
u'Cilium Authors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cilium', u'Cilium Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Cilium', u'Cilium Documentation',
author, 'Cilium', 'One line description of project.',
'Miscellaneous'),
]
http_strict_mode = False
# Try as hard as possible to find references
default_role = 'any'
def setup(app):
app.add_css_file('parsed-literal.css')
|
py | 7dfca840e6848fcd6477c0f5b746d7bd8fd2fa65 | #!/usr/bin/env python
import os
import sys
from pathlib import Path
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# openapi_documentor directory.
current_path = Path(__file__).parent.resolve()
sys.path.append(str(current_path / "openapi_documentor"))
execute_from_command_line(sys.argv)
|
py | 7dfca95dc1779cfc8b6c63b0f48e2e4552a374d3 | import warnings
from ._cma import CMA
__all__ = ["CMA"]
warnings.warn(
"This module is deprecated. Please import CMA class from the "
"package root (ex: from cmaes import CMA).",
FutureWarning,
)
|
py | 7dfca9ddec7b224c8eb907c55eb40d1488eb1710 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import flowvision as vision
import flowvision.transforms as transforms
import oneflow.unittest
import oneflow as flow
import oneflow.nn as nn
from data_utils import load_data_mnist
data_dir = os.path.join(
os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data-test"), "mnist-dataset"
)
train_iter, test_iter = load_data_mnist(
batch_size=128,
download=True,
root=data_dir,
source_url="https://oneflow-public.oss-cn-beijing.aliyuncs.com/datasets/mnist/MNIST/",
)
def evaluate_accuracy(data_iter, net, device=None):
n_correct, n_samples = 0.0, 0
net.to(device)
net.eval()
with flow.no_grad():
for images, labels in data_iter:
images = images.reshape(-1, 28 * 28)
images = images.to(device=device)
labels = labels.to(device=device)
n_correct += (net(images).argmax(dim=1).numpy() == labels.numpy()).sum()
n_samples += images.shape[0]
net.train()
return n_correct / n_samples
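# Note: accuracy is (# correct argmax predictions) / (# samples) over the whole iterator;
# gradients are disabled and the model is temporarily put in eval mode, then restored to
# train mode before returning.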
class Net(nn.Module):
def __init__(
self, input_size=784, hidden_size1=128, hidden_size2=64, num_classes=10
):
super(Net, self).__init__()
self.l1 = nn.Linear(input_size, hidden_size1)
self.relu1 = nn.ReLU()
self.l2 = nn.Linear(hidden_size1, hidden_size2)
self.relu2 = nn.ReLU()
self.l3 = nn.Linear(hidden_size2, num_classes)
def forward(self, x):
out = self.l1(x)
out = self.relu1(out)
out = self.l2(out)
out = self.relu2(out)
out = self.l3(out)
return out
def test_train_and_eval(test_case):
if os.getenv("ONEFLOW_TEST_CPU_ONLY"):
device = flow.device("cpu")
else:
device = flow.device("cuda")
model = Net()
model.to(device)
loss = nn.CrossEntropyLoss().to(device)
optimizer = flow.optim.SGD(model.parameters(), lr=0.10)
num_epochs = 1
for epoch in range(num_epochs):
train_loss, n_correct, n_samples = 0.0, 0.0, 0
for images, labels in train_iter:
images = images.reshape(-1, 28 * 28)
images = images.to(device=device)
labels = labels.to(device=device)
features = model(images)
l = loss(features, labels).sum()
optimizer.zero_grad()
l.backward()
optimizer.step()
train_loss += l.numpy()
n_correct += (features.argmax(dim=1).numpy() == labels.numpy()).sum()
n_samples += images.shape[0]
if n_samples > 2000:
break
test_acc = evaluate_accuracy(test_iter, model, device)
train_acc = n_correct / n_samples
print(
"epoch %d, train loss %.4f, train acc %.3f, test acc %.3f"
% (epoch + 1, train_loss / n_samples, train_acc, test_acc)
)
# test_case.assertLess(0.8, test_acc)
@flow.unittest.skip_unless_1n1d()
class TestMnistDataset(flow.unittest.TestCase):
def test_mnist_dataset(test_case):
test_train_and_eval(test_case)
if __name__ == "__main__":
unittest.main()
|
py | 7dfca9ef50e5151b40b0be0a2e15c61b138db625 | import zlib
import brotli
import pytest
import httpx
from httpx._decoders import (
BrotliDecoder,
DeflateDecoder,
GZipDecoder,
IdentityDecoder,
LineDecoder,
TextDecoder,
)
def test_deflate():
"""
Deflate encoding may use either 'zlib' or 'deflate' in the wild.
https://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib#answer-22311297
"""
body = b"test 123"
compressor = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
compressed_body = compressor.compress(body) + compressor.flush()
headers = [(b"Content-Encoding", b"deflate")]
response = httpx.Response(
200,
headers=headers,
content=compressed_body,
)
assert response.content == body
def test_zlib():
"""
Deflate encoding may use either 'zlib' or 'deflate' in the wild.
https://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib#answer-22311297
"""
body = b"test 123"
compressed_body = zlib.compress(body)
headers = [(b"Content-Encoding", b"deflate")]
response = httpx.Response(
200,
headers=headers,
content=compressed_body,
)
assert response.content == body
def test_gzip():
body = b"test 123"
compressor = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)
compressed_body = compressor.compress(body) + compressor.flush()
headers = [(b"Content-Encoding", b"gzip")]
response = httpx.Response(
200,
headers=headers,
content=compressed_body,
)
assert response.content == body
def test_brotli():
body = b"test 123"
compressed_body = brotli.compress(body)
headers = [(b"Content-Encoding", b"br")]
response = httpx.Response(
200,
headers=headers,
content=compressed_body,
)
assert response.content == body
def test_multi():
body = b"test 123"
deflate_compressor = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
compressed_body = deflate_compressor.compress(body) + deflate_compressor.flush()
gzip_compressor = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)
compressed_body = (
gzip_compressor.compress(compressed_body) + gzip_compressor.flush()
)
headers = [(b"Content-Encoding", b"deflate, gzip")]
response = httpx.Response(
200,
headers=headers,
content=compressed_body,
)
assert response.content == body
def test_multi_with_identity():
body = b"test 123"
compressed_body = brotli.compress(body)
headers = [(b"Content-Encoding", b"br, identity")]
response = httpx.Response(
200,
headers=headers,
content=compressed_body,
)
assert response.content == body
headers = [(b"Content-Encoding", b"identity, br")]
response = httpx.Response(
200,
headers=headers,
content=compressed_body,
)
assert response.content == body
@pytest.mark.asyncio
async def test_streaming():
body = b"test 123"
compressor = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)
async def compress(body):
yield compressor.compress(body)
yield compressor.flush()
headers = [(b"Content-Encoding", b"gzip")]
response = httpx.Response(
200,
headers=headers,
content=compress(body),
)
assert not hasattr(response, "body")
assert await response.aread() == body
@pytest.mark.parametrize("header_value", (b"deflate", b"gzip", b"br", b"identity"))
def test_empty_content(header_value):
headers = [(b"Content-Encoding", header_value)]
response = httpx.Response(
200,
headers=headers,
content=b"",
)
assert response.content == b""
@pytest.mark.parametrize(
"decoder", (BrotliDecoder, DeflateDecoder, GZipDecoder, IdentityDecoder)
)
def test_decoders_empty_cases(decoder):
response = httpx.Response(content=b"", status_code=200)
instance = decoder()
assert instance.decode(response.content) == b""
assert instance.flush() == b""
@pytest.mark.parametrize("header_value", (b"deflate", b"gzip", b"br"))
def test_decoding_errors(header_value):
headers = [(b"Content-Encoding", header_value)]
body = b"test 123"
compressed_body = brotli.compress(body)[3:]
with pytest.raises(httpx.DecodingError):
request = httpx.Request("GET", "https://example.org")
httpx.Response(200, headers=headers, content=compressed_body, request=request)
with pytest.raises(ValueError):
httpx.Response(200, headers=headers, content=compressed_body)
@pytest.mark.parametrize(
["data", "encoding"],
[
((b"Hello,", b" world!"), "ascii"),
((b"\xe3\x83", b"\x88\xe3\x83\xa9", b"\xe3", b"\x83\x99\xe3\x83\xab"), "utf-8"),
((b"\x83g\x83\x89\x83x\x83\x8b",) * 64, "shift-jis"),
((b"\x83g\x83\x89\x83x\x83\x8b",) * 600, "shift-jis"),
(
(b"\xcb\xee\xf0\xe5\xec \xe8\xef\xf1\xf3\xec \xe4\xee\xeb\xee\xf0",) * 64,
"MacCyrillic",
),
(
(b"\xa5\xa6\xa5\xa7\xa5\xd6\xa4\xce\xb9\xf1\xba\xdd\xb2\xbd",) * 512,
"euc-jp",
),
],
)
@pytest.mark.asyncio
async def test_text_decoder(data, encoding):
async def iterator():
nonlocal data
for chunk in data:
yield chunk
# Accessing `.text` on a read response.
response = httpx.Response(
200,
content=iterator(),
)
await response.aread()
assert response.text == (b"".join(data)).decode(encoding)
# Streaming `.aiter_text` iteratively.
response = httpx.Response(
200,
content=iterator(),
)
text = "".join([part async for part in response.aiter_text()])
assert text == (b"".join(data)).decode(encoding)
@pytest.mark.asyncio
async def test_text_decoder_known_encoding():
async def iterator():
yield b"\x83g"
yield b"\x83"
yield b"\x89\x83x\x83\x8b"
response = httpx.Response(
200,
headers=[(b"Content-Type", b"text/html; charset=shift-jis")],
content=iterator(),
)
await response.aread()
assert "".join(response.text) == "トラベル"
def test_text_decoder_empty_cases():
decoder = TextDecoder()
assert decoder.flush() == ""
decoder = TextDecoder()
assert decoder.decode(b"") == ""
assert decoder.flush() == ""
def test_line_decoder_nl():
decoder = LineDecoder()
assert decoder.decode("") == []
assert decoder.decode("a\n\nb\nc") == ["a\n", "\n", "b\n"]
assert decoder.flush() == ["c"]
decoder = LineDecoder()
assert decoder.decode("") == []
assert decoder.decode("a\n\nb\nc\n") == ["a\n", "\n", "b\n", "c\n"]
assert decoder.flush() == []
# Issue #1033
decoder = LineDecoder()
assert decoder.decode("") == []
assert decoder.decode("12345\n") == ["12345\n"]
assert decoder.decode("foo ") == []
assert decoder.decode("bar ") == []
assert decoder.decode("baz\n") == ["foo bar baz\n"]
assert decoder.flush() == []
def test_line_decoder_cr():
decoder = LineDecoder()
assert decoder.decode("") == []
assert decoder.decode("a\r\rb\rc") == ["a\n", "\n", "b\n"]
assert decoder.flush() == ["c"]
decoder = LineDecoder()
assert decoder.decode("") == []
assert decoder.decode("a\r\rb\rc\r") == ["a\n", "\n", "b\n"]
assert decoder.flush() == ["c\n"]
# Issue #1033
decoder = LineDecoder()
assert decoder.decode("") == []
assert decoder.decode("12345\r") == []
assert decoder.decode("foo ") == ["12345\n"]
assert decoder.decode("bar ") == []
assert decoder.decode("baz\r") == []
assert decoder.flush() == ["foo bar baz\n"]
def test_line_decoder_crnl():
decoder = LineDecoder()
assert decoder.decode("") == []
assert decoder.decode("a\r\n\r\nb\r\nc") == ["a\n", "\n", "b\n"]
assert decoder.flush() == ["c"]
decoder = LineDecoder()
assert decoder.decode("") == []
assert decoder.decode("a\r\n\r\nb\r\nc\r\n") == ["a\n", "\n", "b\n", "c\n"]
assert decoder.flush() == []
decoder = LineDecoder()
assert decoder.decode("") == []
assert decoder.decode("a\r") == []
assert decoder.decode("\n\r\nb\r\nc") == ["a\n", "\n", "b\n"]
assert decoder.flush() == ["c"]
# Issue #1033
decoder = LineDecoder()
assert decoder.decode("") == []
assert decoder.decode("12345\r\n") == ["12345\n"]
assert decoder.decode("foo ") == []
assert decoder.decode("bar ") == []
assert decoder.decode("baz\r\n") == ["foo bar baz\n"]
assert decoder.flush() == []
def test_invalid_content_encoding_header():
headers = [(b"Content-Encoding", b"invalid-header")]
body = b"test 123"
response = httpx.Response(
200,
headers=headers,
content=body,
)
assert response.content == body
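# Illustrative addition (not from the original test suite): the decoder classes
# imported above can also be exercised directly, without building a Response.
# This sketch assumes only the decode()/flush() API already shown in
# test_decoders_empty_cases.
def test_gzip_decoder_direct():
    body = b"test 123"
    compressor = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)
    compressed_body = compressor.compress(body) + compressor.flush()
    decoder = GZipDecoder()
    # Feed the whole compressed stream at once and flush any remaining bytes.
    assert decoder.decode(compressed_body) + decoder.flush() == body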
|
py | 7dfcaad92ea6cec661a88c986bfd36b126ab4b68 | from functools import partial
from concurrent.futures import ThreadPoolExecutor
import traceback
import threading
import time
InitFunctions = []
stopped = False
ThreadPool = ThreadPoolExecutor(max_workers=64)
def delay_pyle_command(timer, fun):
def timed_cmd():
time.sleep(timer)
fun()
ThreadPool.submit(timed_cmd)
def queue_pyle_command(fun):
ThreadPool.submit(fun)
class PyleCommand:
@staticmethod
def Threaded(func):
cmd = PyleCommand(func)
cmd.threaded = True
return cmd
def __init__(self, func):
self.func = func
self.threaded = False
def __call__(self, *args, **kwargs):
cmd = PyleCommand(
partial(self.func, *args, **kwargs)
)
cmd.threaded = self.threaded
return cmd
def execute_on_thread(self):
try:
self.func()
except Exception as ex:
traceback.print_exc()
def run(self):
if self.threaded:
ThreadPool.submit(self.execute_on_thread)
else:
self.func()
def run_pyle_command(fun):
if isinstance(fun, PyleCommand):
fun.run()
else:
fun()
def PyleInit(fun):
InitFunctions.append(fun)
return fun
def PyleThread(timer):
def TickDecorator(func):
def TickThread():
global stopped
while not stopped:
func()
time.sleep(timer)
def ThreadInit():
threading.Thread(target=TickThread, daemon=True).start()
        InitFunctions.append(ThreadInit)
        # Return the original function so the decorated name still refers to it.
        return func
return TickDecorator
class CommandQueue:
def __init__(self):
self.queuedFunctions = []
self.queue_lock = threading.RLock()
self.queue_event = threading.Event()
self.stopped = False
threading.Thread(target=self.process_commands_thread, daemon=True).start()
def stop(self):
self.stopped = True
self.queue_event.set()
def queue_command(self, fun):
with self.queue_lock:
self.queuedFunctions.append(fun)
self.queue_event.set()
def process_commands_thread(self):
global stopped
while not stopped and not self.stopped:
self.queue_event.wait()
run = None
with self.queue_lock:
run = list(self.queuedFunctions)
self.queuedFunctions = []
self.queue_event.clear()
try:
for cmd in run:
run_pyle_command(cmd)
except Exception as ex:
traceback.print_exc()
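# Illustrative sketch (not part of the original module): one way the helpers
# above fit together. The function name `hello` is hypothetical.
if __name__ == "__main__":
    @PyleCommand.Threaded
    def hello(name):
        print("hello", name)

    queue = CommandQueue()
    # hello("world") returns a PyleCommand; the queue thread executes it via
    # run_pyle_command, which submits it to the thread pool because it is
    # marked as threaded.
    queue.queue_command(hello("world"))
    time.sleep(0.5)
    queue.stop()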
|
py | 7dfcab46a79386501d0b3398a7d8a2145610ea67 | # encoding: utf8
import os
import sys
import unittest
import tkinter as tk
import tkinter.ttk as ttk
pygubu_basedir = os.path.abspath(os.path.dirname(
os.path.dirname(os.path.realpath(sys.argv[0]))))
if pygubu_basedir not in sys.path:
sys.path.insert(0, pygubu_basedir)
import pygubu
import support
class TestTtkCombobox(unittest.TestCase):
def setUp(self):
support.root_deiconify()
xmldata = 'test_ttkcombobox.ui'
self.builder = builder = pygubu.Builder()
builder.add_from_file(xmldata)
self.mainwindow = builder.get_object('mainwindow')
self.widget = builder.get_object('combobox1')
def tearDown(self):
support.root_withdraw()
def test_class(self):
self.assertIsInstance(self.widget, ttk.Combobox)
self.widget.destroy()
def test_validate(self):
prop = 'validate'
expected_value = 'key'
tclobj = self.widget.cget(prop)
value = str(tclobj)
self.assertEqual(expected_value, value)
self.widget.destroy()
def test_validate_command(self):
valid_values = ('option1', 'option2', 'option3')
def cbox_validate(action, newvalue):
valid = False
if action == '1': # 1: insert 0: delete
if newvalue in valid_values:
valid = True
else:
valid = True
return valid
def cbox_invalid(newvalue):
pass
callback = {'cbox_validate': cbox_validate,
'cbox_invalid': cbox_invalid}
self.builder.connect_callbacks(callback)
self.widget.delete('0', tk.END)
self.assertEqual('', self.widget.get())
self.widget.insert('0', valid_values[0])
self.assertEqual(valid_values[0], self.widget.get())
self.widget.delete('0', tk.END)
self.widget.insert('0', 'Invalid value')
self.assertEqual('', self.widget.get())
self.widget.destroy()
def test_invalid_command(self):
invalid_text = []
def cbox_validate(action, newvalue):
valid = False
if action == '1': # 1: insert 0: delete
if newvalue == 'Allowed':
valid = True
else:
valid = True
return valid
def cbox_invalid(newvalue):
invalid_text.append(newvalue)
callback = {
'cbox_validate': cbox_validate,
'cbox_invalid': cbox_invalid
}
self.widget.delete('0', tk.END)
self.builder.connect_callbacks(callback)
self.widget.insert('0', 'Not Allowed')
self.assertEqual(invalid_text[0], 'Not Allowed')
self.widget.destroy()
|
py | 7dfcab6c47a2061132ca57247c5eabcb67328928 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.workflows_v1beta.types import workflows
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-cloud-workflows',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class WorkflowsTransport(abc.ABC):
"""Abstract transport class for Workflows."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
)
DEFAULT_HOST: str = 'workflows.googleapis.com'
def __init__(
self, *,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
# If the credentials are service account credentials, then always try to use self signed JWT.
if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_workflows: gapic_v1.method.wrap_method(
self.list_workflows,
default_timeout=None,
client_info=client_info,
),
self.get_workflow: gapic_v1.method.wrap_method(
self.get_workflow,
default_timeout=None,
client_info=client_info,
),
self.create_workflow: gapic_v1.method.wrap_method(
self.create_workflow,
default_timeout=None,
client_info=client_info,
),
self.delete_workflow: gapic_v1.method.wrap_method(
self.delete_workflow,
default_timeout=None,
client_info=client_info,
),
self.update_workflow: gapic_v1.method.wrap_method(
self.update_workflow,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def list_workflows(self) -> Callable[
[workflows.ListWorkflowsRequest],
Union[
workflows.ListWorkflowsResponse,
Awaitable[workflows.ListWorkflowsResponse]
]]:
raise NotImplementedError()
@property
def get_workflow(self) -> Callable[
[workflows.GetWorkflowRequest],
Union[
workflows.Workflow,
Awaitable[workflows.Workflow]
]]:
raise NotImplementedError()
@property
def create_workflow(self) -> Callable[
[workflows.CreateWorkflowRequest],
Union[
operations_pb2.Operation,
Awaitable[operations_pb2.Operation]
]]:
raise NotImplementedError()
@property
def delete_workflow(self) -> Callable[
[workflows.DeleteWorkflowRequest],
Union[
operations_pb2.Operation,
Awaitable[operations_pb2.Operation]
]]:
raise NotImplementedError()
@property
def update_workflow(self) -> Callable[
[workflows.UpdateWorkflowRequest],
Union[
operations_pb2.Operation,
Awaitable[operations_pb2.Operation]
]]:
raise NotImplementedError()
__all__ = (
'WorkflowsTransport',
)
|
py | 7dfcae3eca0e9adccb4770d6d38d3913a35c96aa | """ Implementing Publisher """
from typing import (TYPE_CHECKING, TypeVar, Type, Tuple, Callable, Optional,
overload)
from broqer import NONE, Disposable
import broqer
if TYPE_CHECKING:
# pylint: disable=cyclic-import
from typing import List
from broqer import Subscriber
class SubscriptionError(ValueError):
    """ Special exception raised when a subscription fails (subscriber
    already subscribed) or on unsubscribe when the subscriber is not subscribed
"""
TInherit = TypeVar('TInherit') # Type to inherited behavior from
TValue = TypeVar('TValue') # Type of publisher state and emitted value
SubscriptionCBT = Callable[[bool], None]
class Publisher:
""" In broqer a subscriber can subscribe to a publisher. After subscription
the subscriber is notified about emitted values from the publisher (
starting with the current state). In other frameworks
*publisher*/*subscriber* are referenced as *observable*/*observer*.
    broqer.NONE is used as the default initialisation. .get() will always
    return the internal state (even when it's broqer.NONE). .subscribe() will
    emit the actual state to the new subscriber only if it is something other
    than broqer.NONE.
    To receive information use the following methods to interact with a
    Publisher:
- ``.subscribe(subscriber)`` to subscribe for events on this publisher
- ``.unsubscribe(subscriber)`` to unsubscribe
- ``.get()`` to get the current state
When implementing a Publisher use the following methods:
- ``.notify(value)`` calls .emit(value) on all subscribers
:param init: the initial state.
:ivar _state: state of the publisher
:ivar _inherited_type: type class for method lookup
:ivar _subscriptions: holding a list of subscribers
:ivar _on_subscription_cb: callback with boolean as argument, telling
if at least one subscription exists
:ivar _dependencies: list with publishers this publisher is (directly or
indirectly) dependent on.
"""
@overload # noqa: F811
def __init__(self, *, type_: Type[TValue] = None):
pass
@overload # noqa: F811
def __init__(self, init: TValue, type_: Type[TValue] = None): # noqa: F811
pass
def __init__(self, init=NONE, type_=None): # noqa: F811
self._state = init
if type_:
self._inherited_type = type_ # type: Optional[Type]
elif init is not NONE:
self._inherited_type = type(init)
else:
self._inherited_type = None
self._subscriptions = list() # type: List[Subscriber]
self._on_subscription_cb = None # type: Optional[SubscriptionCBT]
self._dependencies = () # type: Tuple[Publisher, ...]
def subscribe(self, subscriber: 'Subscriber',
prepend: bool = False) -> 'SubscriptionDisposable':
""" Subscribing the given subscriber.
:param subscriber: subscriber to add
:param prepend: For internal use - usually the subscribers will be
added at the end of a list. When prepend is True, it will be added
        in front of the list. This will have an effect on the order in
        which the subscribers are called.
:raises SubscriptionError: if subscriber already subscribed
"""
# `subscriber in self._subscriptions` is not working because
# tuple.__contains__ is using __eq__ which is overwritten and returns
# a new publisher - not helpful here
if any(subscriber is s for s in self._subscriptions):
raise SubscriptionError('Subscriber already registered')
if not self._subscriptions and self._on_subscription_cb:
self._on_subscription_cb(True)
if prepend:
self._subscriptions.insert(0, subscriber)
else:
self._subscriptions.append(subscriber)
disposable_obj = SubscriptionDisposable(self, subscriber)
if self._state is not NONE:
subscriber.emit(self._state, who=self)
return disposable_obj
def unsubscribe(self, subscriber: 'Subscriber') -> None:
""" Unsubscribe the given subscriber
:param subscriber: subscriber to unsubscribe
:raises SubscriptionError: if subscriber is not subscribed (anymore)
"""
# here is a special implementation which is replacing the more
# obvious one: self._subscriptions.remove(subscriber) - this will not
# work because list.remove(x) is doing comparison for equality.
# Applied to publishers this will return another publisher instead of
# a boolean result
for i, _s in enumerate(self._subscriptions):
if _s is subscriber:
self._subscriptions.pop(i)
if not self._subscriptions and self._on_subscription_cb:
self._on_subscription_cb(False)
return
raise SubscriptionError('Subscriber is not registered')
def get(self) -> TValue:
""" Return the state of the publisher. """
return self._state
def notify(self, value: TValue) -> None:
""" Calling .emit(value) on all subscribers and store state.
:param value: value to be emitted to subscribers
"""
self._state = value
for subscriber in tuple(self._subscriptions):
subscriber.emit(value, who=self)
def reset_state(self) -> None:
""" Resets the state. Calling this method will not trigger a
notification, but will call .reset_state for all subscribers
"""
self._state = NONE
for subscriber in tuple(self._subscriptions):
subscriber.reset_state()
@property
def subscriptions(self) -> Tuple['Subscriber', ...]:
""" Property returning a tuple with all current subscribers """
return tuple(self._subscriptions)
def register_on_subscription_callback(self,
callback: SubscriptionCBT) -> None:
""" This callback will be called, when the subscriptions are changing.
When a subscription is done and no subscription was present the
callback is called with True as argument. When after unsubscribe no
subscription is left, it will be called with False.
:param callback: callback(subscription: bool) to be called.
when `callback` is None the callback will be reset
        :raises ValueError: when a callback is already registered
"""
if callback is None:
self._on_subscription_cb = None
return
if self._on_subscription_cb is not None:
raise ValueError('A callback is already registered')
self._on_subscription_cb = callback
if self._subscriptions:
callback(True)
    def __await__(self):
        """ Makes the publisher awaitable. When the publisher has a state it
        will immediately return that state as the result. Otherwise it will
        wait until the state changes.
"""
future = self.as_future(timeout=None, omit_subscription=False)
return future.__await__()
def as_future(self, timeout: float, omit_subscription: bool = True,
loop=None):
""" Returns a asyncio.Future which will be done on first change of this
publisher.
:param timeout: timeout in seconds. Use None for infinite waiting
:param omit_subscription: if True the first emit (which can be on the
subscription) will be ignored.
:param loop: asyncio loop to be used
:returns: a future returning the emitted value
"""
return broqer.OnEmitFuture(self, timeout, omit_subscription, loop)
    def __bool__(self):
        """ A new Publisher is the result of a comparison between a publisher
and something else (may also be a second publisher). This result should
never be used in a boolean sense (e.g. in `if p1 == p2:`). To prevent
this __bool__ is overwritten to raise a ValueError.
"""
raise ValueError('Evaluation of comparison of publishers is not '
'supported')
def __iter__(self):
""" To prevent iterating over a publisher this method is implemented
        to throw an exception. Otherwise it would fall back to __getitem__.
"""
raise ValueError('Iteration over a publisher is not possible')
def inherit_type(self, type_cls: Optional[Type]) -> None:
""" Enables the usage of method and attribute overloading for this
publisher.
"""
self._inherited_type = type_cls
@property
def inherited_type(self) -> Optional[Type]:
""" Property inherited_type returns used type class (or None) """
return self._inherited_type
@property
def dependencies(self) -> Tuple['Publisher', ...]:
""" Returning a list of publishers this publisher is dependent on. """
return self._dependencies
def add_dependencies(self, *publishers: 'Publisher') -> None:
""" Add publishers which are directly or indirectly controlling the
behavior of this publisher
:param *publishers: variable argument list with publishers
"""
self._dependencies = self._dependencies + publishers
def __dir__(self):
""" Extending __dir__ with inherited type """
attrs = set(super().__dir__())
if self._inherited_type:
attrs.update(set(dir(self._inherited_type)))
return tuple(attrs)
class SubscriptionDisposable(Disposable):
""" This disposable is returned on Publisher.subscribe(subscriber).
:param publisher: publisher the subscription is made to
:param subscriber: subscriber used for subscription
"""
def __init__(self, publisher: 'Publisher', subscriber: 'Subscriber') \
-> None:
self._publisher = publisher
self._subscriber = subscriber
def dispose(self) -> None:
self._publisher.unsubscribe(self._subscriber)
@property
    def publisher(self) -> 'Publisher':
        """ The publisher this subscription was made to """
return self._publisher
@property
def subscriber(self) -> 'Subscriber':
""" Subscriber used in this subscription """
return self._subscriber
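# Illustrative sketch (not part of the original module): a minimal duck-typed
# subscriber exercising the Publisher API defined above. Real code would use
# the Subscriber implementations shipped with broqer instead.
if __name__ == '__main__':
    class _PrintSubscriber:
        def emit(self, value, who):
            print('received', value, 'from', who)

        def reset_state(self):
            print('state was reset')

    publisher = Publisher(init=1)
    disposable = publisher.subscribe(_PrintSubscriber())  # emits the initial state 1
    publisher.notify(2)  # emits 2 to all subscribers and stores it as the new state
    disposable.dispose()  # unsubscribes again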
|
py | 7dfcaf016dec6c0556eafb48a3fb346b7b80fb8e | # Generated by Django 2.0.6 on 2018-06-22 05:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('puskesmas_app', '0005_auto_20180622_1346'),
]
operations = [
migrations.AlterField(
model_name='pemeriksaan',
name='dari_file',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='puskesmas_app.DataPemeriksaan'),
),
migrations.AlterField(
model_name='pemeriksaan',
name='pasien',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='puskesmas_app.Pasien'),
),
migrations.AlterField(
model_name='pemeriksaan',
name='tanggal',
field=models.DateField(null=True),
),
]
|
py | 7dfcafdb0fd33db3c5e7de9f9e06d941dbd91c18 | """
Material Balance Plots
@author: Yohanes Nuwara
@email: [email protected]
"""
def initial_hydrocarbon_in_place(Nfoi, Gfgi, Rv, Rs):
"""
Calculate OOIP and OGIP from Nfoi and Gfgi
And output the result to labels in the plot
"""
import matplotlib.patches as mpl_patches
Rvi, Rsi = Rv[0], Rs[0]
OOIP = Nfoi + Gfgi * Rvi
OGIP = Gfgi + Nfoi * Rsi
labels = []
labels.append("Nfoi = {0:.4g} STB".format(Nfoi))
labels.append("Gfgi = {0:.4g} SCF".format(Gfgi))
labels.append("OOIP = {0:.4g} STB".format(OOIP))
labels.append("OGIP = {0:.4g} SCF".format(OGIP))
handles = [mpl_patches.Rectangle((0, 0), 1, 1, fc="white", ec="white",
lw=0, alpha=0)] * 4
return labels, handles, OOIP, OGIP
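# Worked example (illustrative, hypothetical numbers): with Nfoi = 1e7 STB,
# Gfgi = 5e9 scf, Rvi = 1e-4 STB/scf and Rsi = 500 scf/STB the function gives
#   OOIP = Nfoi + Gfgi * Rvi = 1e7 + 5e9 * 1e-4 = 1.05e7 STB
#   OGIP = Gfgi + Nfoi * Rsi = 5e9 + 1e7 * 500  = 1.0e10 scf
# e.g. labels, handles, OOIP, OGIP = initial_hydrocarbon_in_place(1e7, 5e9, [1e-4], [500])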
class drygas():
"""
Dry-Gas Material Balance Plot
"""
def calculate_params(self, p, Bg, Gp, cf, cw, swi):
"""
        Calculate Material Balance Parameters for Dry-Gas Reservoir
Output: F, Btg, Efw, Eg
"""
import numpy as np
pi = p[0]
Bgi = Bg[0]
# total gas FVF equals the gas FVF itself (for dry-gas)
Btg = Bg
# calculate Efw
Efw = ((cf + cw * swi) / (1 - swi)) * (pi - p)
F = []; Eg = []
for i in range(len(p)):
F_ = Bg[i] * Gp[i]
Eg_ = Btg[i] - Bgi
F.append(F_); Eg.append(Eg_)
F = np.array(F); Eg = np.array(Eg)
return F, Btg, Efw, Eg
def plot(self, p, z, Gp, F, Btg, Efw, Eg):
"""Create Material Balance Plots for Dry-Gas Reservoir"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
# no volatile and vapor component, define Rv as zeros
Rv, Rs = np.zeros(len(p)), np.zeros(len(p))
# plot attributes
title_size = 12
title_pad = 10
# linear function for curve-fit
def linear_zero_intercept(x, m):
y = m * x
return y
def linear_with_intercept(x, m, c):
y = m * x + c
return y
# Plot 1: F vs Eg
plt.subplot(3,2,1)
x1, y1 = Eg, F
plt.plot(x1, y1, '.-')
plt.title('Plot 1: F vs Eg', size=title_size, pad=title_pad)
plt.xlabel('Eg (RB/scf)')
plt.ylabel('F (res ft3)')
## curve-fitting to calculate the slope as OGIP
x1_norm = x1 / max(x1) # normalize x
y1_norm = y1 / max(y1) # normalize y
popt, pcov = curve_fit(linear_zero_intercept, x1_norm, y1_norm)
m = popt[0]
Gfgi = m * max(y1) / max(x1) # denormalize the slope, hence the OGIP
## Output results into text in plot
Nfoi = 0
labels, handles, OOIP, OGIP = initial_hydrocarbon_in_place(Nfoi, Gfgi, Rv, Rs)
## plot the regression line
x1_fit = np.linspace(min(x1), max(x1), 5)
y1_fit = linear_zero_intercept(x1_fit, Gfgi)
plt.plot(x1_fit, y1_fit)
plt.legend(handles, labels, loc='best', fontsize='small',
fancybox=True, framealpha=0.7,
handlelength=0, handletextpad=0)
# Plot 2: p/z vs Gp
plt.subplot(3,2,2)
x2, y2 = Gp, (p / z)
plt.plot(x2, y2, '.-')
plt.title('Plot 2: p/z vs Gp', size=title_size, pad=title_pad)
plt.xlabel('Gp (scf)')
plt.ylabel('p/z (psia)')
## curve-fitting to calculate the slope as OGIP
x2_norm = x2 / max(x2) # normalize x
y2_norm = y2 / max(y2) # normalize y
popt, pcov = curve_fit(linear_with_intercept, x2_norm, y2_norm)
m, c = popt[0], popt[1]
Gfgi = (-c / m) * max(x2) # OGIP is the intercept at x-axis, and denormalized
m = m * max(y2) / max(x2) # denormalize the slope
c = c * max(y2) # denormalize the intercept
## Output results into text in plot
Nfoi = 0
labels, handles, OOIP, OGIP = initial_hydrocarbon_in_place(Nfoi, Gfgi, Rv, Rs)
## plot the regression line
x2_fit = np.linspace(min(x2), max(x2), 5)
y2_fit = linear_with_intercept(x2_fit, m, c)
plt.plot(x2_fit, y2_fit)
plt.legend(handles, labels, loc='best', fontsize='small',
fancybox=True, framealpha=0.7,
handlelength=0, handletextpad=0)
# Plot 3: F/Eg vs Gp
plt.subplot(3,2,3)
x3, y3 = Gp, (F / Eg)
plt.plot(x3, y3, '.-')
plt.title('Plot 3: Waterdrive Diagnostic Plot', size=title_size, pad=title_pad)
plt.xlabel('Gp (scf)')
plt.ylabel('F/Eg (scf)')
## curve-fitting to calculate the slope as OGIP, here [1:] because NaN is removed
x3_norm = x3[1:] / max(x3[1:]) # normalize x
y3_norm = y3[1:] / max(y3[1:]) # normalize y
popt, pcov = curve_fit(linear_with_intercept, x3_norm, y3_norm)
m, c = popt[0], popt[1]
m = m * max(y3[1:]) / max(x3[1:]) # denormalize the slope
Gfgi = c * max(y3[1:]) # denormalize the intercept, hence the OGIP
## Output results into text in plot
Nfoi = 0
labels, handles, OOIP, OGIP = initial_hydrocarbon_in_place(Nfoi, Gfgi, Rv, Rs)
## plot the regression line
x3_fit = np.linspace(min(x3[1:]), max(x3[1:]), 5)
y3_fit = linear_with_intercept(x3_fit, m, Gfgi)
plt.plot(x3_fit, y3_fit)
plt.legend(handles, labels, loc='best', fontsize='small',
fancybox=True, framealpha=0.7,
handlelength=0, handletextpad=0)
# Plot 6: F vs (Eg+Bgi*Efw)
plt.subplot(3,2,4)
Bgi = Btg[0]
x6, y6 = (Eg + Bgi * Efw), F
plt.plot(x6, y6, '.-')
plt.title('Plot 6: F vs (Eg+Bgi*Efw)', size=title_size, pad=title_pad)
plt.xlabel('Eg+Bgi*Efw (res ft3/scf)')
plt.ylabel('F (res ft3)')
## curve-fitting to calculate the slope as OGIP
x6_norm = x6 / max(x6) # normalize x
y6_norm = y6 / max(y6) # normalize y
popt, pcov = curve_fit(linear_zero_intercept, x6_norm, y6_norm)
m = popt[0]
Gfgi = m * max(y6) / max(x6) # denormalize the slope, hence the OGIP
## Output results into text in plot
Nfoi = 0
labels, handles, OOIP, OGIP = initial_hydrocarbon_in_place(Nfoi, Gfgi, Rv, Rs)
## plot the regression line
x6_fit = np.linspace(min(x6), max(x6), 5)
y6_fit = linear_zero_intercept(x6_fit, Gfgi)
plt.plot(x6_fit, y6_fit)
plt.legend(handles, labels, loc='best', fontsize='small',
fancybox=True, framealpha=0.7,
handlelength=0, handletextpad=0)
# Plot 7: ((p/z)*(1-Efw)) vs Gp
plt.subplot(3,2,5)
x7, y7 = Gp, ((p / z) * (1 - Efw))
plt.plot(x7, y7, '.-')
plt.title('Plot 7: ((p/z)*(1-Efw)) vs Gp', size=title_size, pad=title_pad)
plt.xlabel('Gp (scf)')
plt.ylabel('(p/z)*(1-Efw) (psia)')
## curve-fitting to calculate the slope as OGIP
x7_norm = x7 / max(x7) # normalize x
y7_norm = y7 / max(y7) # normalize y
popt, pcov = curve_fit(linear_with_intercept, x7_norm, y7_norm)
m, c = popt[0], popt[1]
Gfgi = (-c / m) * max(x7) # OGIP is the intercept at x-axis, and denormalized
m = m * max(y7) / max(x7) # denormalize the slope
c = c * max(y7) # denormalize the intercept
## Output results into text in plot
Nfoi = 0
labels, handles, OOIP, OGIP = initial_hydrocarbon_in_place(Nfoi, Gfgi, Rv, Rs)
## plot the regression line
x7_fit = np.linspace(min(x7), max(x7), 5)
y7_fit = linear_with_intercept(x7_fit, m, c)
plt.plot(x7_fit, y7_fit)
plt.legend(handles, labels, loc='best', fontsize='small',
fancybox=True, framealpha=0.7,
handlelength=0, handletextpad=0)
plt.tight_layout(pad=1.5)
plt.show()
return F, Eg, Efw
class gascondensate():
"""
Gas-Condensate Material Balance Plot
"""
def calculate_params(self, p, pdew, Bg, Bo, Np, Gp, Gi, cf, cw, swi, Rs, Rv):
"""
        Calculate Material Balance Parameters for Gas-Condensate Reservoir
Output: F, Btg, Efw, Eg
"""
import numpy as np
pi = p[0]
Rvi = Rv[0]
Bgi = Bg[0]
# calculate Efw
Efw = ((cf + cw * swi) / (1 - swi)) * (pi - p)
# calculate F and Btg
F = []; Btg = []; Eg = []
for i in range(len(p)):
if p[i] >= pdew:
# gas-condensate above dewpoint pressure
F_ = Bg[i] * Gp[i]
Btg_ = Bg[i]
Eg_ = Btg_ - Bgi
if p[i] < pdew:
# gas-condensate below dewpoint pressure
F_ = (Np[i] * ((Bo[i] - (Rs[i] * Bg[i])) / (1 - (Rv[i] * Rs[i])))) + ((Gp[i] - Gi[i]) * ((Bg[i] - (Rv[i] * Bo[i])) / (1 - (Rv[i] * Rs[i]))))
                Btg_ = ((Bg[i] * (1 - (Rs[i] * Rvi))) + (Bo[i] * (Rvi - Rv[i]))) / (1 - (Rv[i] * Rs[i])) # in RB/scf
Eg_ = Btg_ - Bgi
F.append(F_); Btg.append(Btg_); Eg.append(Eg_)
F, Btg, Eg = np.array(F), np.array(Btg), np.array(Eg)
return F, Btg, Efw, Eg
    def plot(self, p, z, Gp, F, Btg, Efw, Eg, Rv):
        """Create Material Balance Plots for Gas-Condensate Reservoir"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
# no vapor component, define Rs as zeros
Rs = np.zeros(len(p))
# plot attributes
title_size = 12
title_pad = 10
# linear function for curve-fit
def linear_zero_intercept(x, m):
y = m * x
return y
def linear_with_intercept(x, m, c):
y = m * x + c
return y
# Plot 1: F vs Eg
plt.subplot(3,2,1)
x1, y1 = Eg, F
plt.plot(x1, y1, '.-')
plt.title('Plot 1: F vs Eg', size=title_size, pad=title_pad)
plt.xlabel('Eg (RB/scf)')
plt.ylabel('F (res ft3)')
## curve-fitting to calculate the slope as OGIP
x1_norm = x1 / max(x1) # normalize x
y1_norm = y1 / max(y1) # normalize y
popt, pcov = curve_fit(linear_zero_intercept, x1_norm, y1_norm)
m = popt[0]
Gfgi = m * max(y1) / max(x1) # denormalize the slope, hence the OGIP
## Output results into text in plot
Nfoi = 0
labels, handles, OOIP, OGIP = initial_hydrocarbon_in_place(Nfoi, Gfgi, Rv, Rs)
## plot the regression line
x1_fit = np.linspace(min(x1), max(x1), 5)
y1_fit = linear_zero_intercept(x1_fit, Gfgi)
plt.plot(x1_fit, y1_fit)
plt.legend(handles, labels, loc='best', fontsize='small',
fancybox=True, framealpha=0.7,
handlelength=0, handletextpad=0)
# Plot 2: p/z vs Gp
plt.subplot(3,2,2)
plt.title('Plot 2: p/z vs Gp', size=title_size, pad=title_pad)
plt.xlabel('Gp (scf)')
plt.ylabel('p/z (psia)')
if np.all(z==0) == False:
x2, y2 = Gp, (p / z)
plt.plot(x2, y2, '.-')
## curve-fitting to calculate the slope as OGIP
x2_norm = x2 / max(x2) # normalize x
y2_norm = y2 / max(y2) # normalize y
popt, pcov = curve_fit(linear_with_intercept, x2_norm, y2_norm)
m, c = popt[0], popt[1]
Gfgi = (-c / m) * max(x2) # OGIP is the intercept at x-axis, and denormalized
m = m * max(y2) / max(x2) # denormalize the slope
c = c * max(y2) # denormalize the intercept
## Output results into text in plot
Nfoi = 0
labels, handles, OOIP, OGIP = initial_hydrocarbon_in_place(Nfoi, Gfgi, Rv, Rs)
## plot the regression line
x2_fit = np.linspace(min(x2), max(x2), 5)
y2_fit = linear_with_intercept(x2_fit, m, c)
plt.plot(x2_fit, y2_fit)
plt.legend(handles, labels, loc='best', fontsize='small',
fancybox=True, framealpha=0.7,
handlelength=0, handletextpad=0)
# Plot 3: F/Eg vs Gp
plt.subplot(3,2,3)
x3, y3 = Gp, (F / Eg)
plt.plot(x3, y3, '.-')
plt.title('Plot 3: Waterdrive Diagnostic Plot', size=title_size, pad=title_pad)
plt.xlabel('Gp (scf)')
plt.ylabel('F/Eg (scf)')
## curve-fitting to calculate the slope as OGIP, here [1:] because NaN is removed
x3_norm = x3[1:] / max(x3[1:]) # normalize x
y3_norm = y3[1:] / max(y3[1:]) # normalize y
popt, pcov = curve_fit(linear_with_intercept, x3_norm, y3_norm)
m, c = popt[0], popt[1]
m = m * max(y3[1:]) / max(x3[1:]) # denormalize the slope
Gfgi = c * max(y3[1:]) # denormalize the intercept, hence the OGIP
## Output results into text in plot
Nfoi = 0
labels, handles, OOIP, OGIP = initial_hydrocarbon_in_place(Nfoi, Gfgi, Rv, Rs)
## plot the regression line
x3_fit = np.linspace(min(x3[1:]), max(x3[1:]), 5)
y3_fit = linear_with_intercept(x3_fit, m, Gfgi)
plt.plot(x3_fit, y3_fit)
plt.legend(handles, labels, loc='best', fontsize='small',
fancybox=True, framealpha=0.7,
handlelength=0, handletextpad=0)
# Plot 6: F vs (Eg+Bgi*Efw)
plt.subplot(3,2,4)
Bgi = Btg[0]
x6, y6 = (Eg + Bgi * Efw), F
plt.plot(x6, y6, '.-')
plt.title('Plot 6: F vs (Eg+Bgi*Efw)', size=title_size, pad=title_pad)
plt.xlabel('Eg+Bgi*Efw (res ft3/scf)')
plt.ylabel('F (res ft3)')
## curve-fitting to calculate the slope as OGIP
x6_norm = x6 / max(x6) # normalize x
y6_norm = y6 / max(y6) # normalize y
popt, pcov = curve_fit(linear_zero_intercept, x6_norm, y6_norm)
m = popt[0]
Gfgi = m * max(y6) / max(x6) # denormalize the slope, hence the OGIP
## Output results into text in plot
Nfoi = 0
labels, handles, OOIP, OGIP = initial_hydrocarbon_in_place(Nfoi, Gfgi, Rv, Rs)
## plot the regression line
x6_fit = np.linspace(min(x6), max(x6), 5)
y6_fit = linear_zero_intercept(x6_fit, Gfgi)
plt.plot(x6_fit, y6_fit)
plt.legend(handles, labels, loc='best', fontsize='small',
fancybox=True, framealpha=0.7,
handlelength=0, handletextpad=0)
# Plot 7: ((p/z)*(1-Efw)) vs Gp
plt.subplot(3,2,5)
plt.title('Plot 7: ((p/z)*(1-Efw)) vs Gp', size=title_size, pad=title_pad)
plt.xlabel('Gp (scf)')
plt.ylabel('(p/z)*(1-Efw) (psia)')
if np.all(z==0) == False:
x7, y7 = Gp, ((p / z) * (1 - Efw))
plt.plot(x7, y7, '.-')
## curve-fitting to calculate the slope as OGIP
x7_norm = x7 / max(x7) # normalize x
y7_norm = y7 / max(y7) # normalize y
popt, pcov = curve_fit(linear_with_intercept, x7_norm, y7_norm)
m, c = popt[0], popt[1]
Gfgi = (-c / m) * max(x7) # OGIP is the intercept at x-axis, and denormalized
m = m * max(y7) / max(x7) # denormalize the slope
c = c * max(y7) # denormalize the intercept
## Output results into text in plot
Nfoi = 0
labels, handles, OOIP, OGIP = initial_hydrocarbon_in_place(Nfoi, Gfgi, Rv, Rs)
## plot the regression line
x7_fit = np.linspace(min(x7), max(x7), 5)
y7_fit = linear_with_intercept(x7_fit, m, c)
plt.plot(x7_fit, y7_fit)
plt.legend(handles, labels, loc='best', fontsize='small',
fancybox=True, framealpha=0.7,
handlelength=0, handletextpad=0)
plt.tight_layout(pad=1.5)
plt.show()
return F, Eg, Efw
class oil():
"""
Oil (Undersaturated and saturated; Volatile and Non-volatile) Material Balance Plot
"""
def calculate_params(self, p, Bo, Bg, Rv, Rs, Np, Gp, Gi, cf, cw, swi):
"""
        Calculate Material Balance Parameters for Oil Reservoir
Output: F, Bto, Btg, Efw, Eo, Eg
"""
pi = p[0]
Rsi = Rs[0]
Rvi = Rv[0]
Boi = Bo[0]
Bgi = Bg[0]
# calculate Efw
Efw = ((cf + cw * swi) / (1 - swi)) * (pi - p)
# calculate F, Bto, and Btg
F = (Np * ((Bo - (Rs * Bg)) / (1 - (Rv * Rs)))) + ((Gp - Gi) * ((Bg - (Rv * Bo)) / (1 - (Rv * Rs))))
        Btg = ((Bg * (1 - (Rs * Rvi))) + (Bo * (Rvi - Rv))) / (1 - (Rv * Rs)) # in RB/scf
        Bto = ((Bo * (1 - (Rv * Rsi))) + (Bg * (Rsi - Rs))) / (1 - (Rv * Rs)) # in RB/STB
# calculate Eo and Eg
Eo = Bto - Boi
Eg = Btg - Bgi
return F, Bto, Btg, Efw, Eo, Eg
def gascap(self, Gfgi, Nfoi, Bg, Bo):
"""
Calculate Total Oil+Gas Expansion Factor from known Gas Cap ratio
Gfgi and Nfoi known from volumetrics
"""
Bgi, Boi = Bg[0], Bo[0]
m = (Gfgi * Bgi) / (Nfoi * Boi)
return m
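    # For example (illustrative, hypothetical numbers): with Gfgi = 2e9 scf,
    # Nfoi = 1e7 STB, Bgi = 0.001 RB/scf and Boi = 1.2 RB/STB,
    # m = (2e9 * 0.001) / (1e7 * 1.2) = 2e6 / 1.2e7, roughly 0.167.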
def plot(self, oil_type, F, Bto, Btg, Efw, Eo, Eg, Np, Bo, Rs, Rv, start=0, end=-1, figsize=(10,5)):
"""
Create Material Balance Plots for Oil Reservoir
Input:
oil_type: 'undersaturated' or 'saturated'
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import matplotlib.patches as mpl_patches
# plot attributes
title_size = 15
title_pad = 14
# linear function for curve-fit
def linear_zero_intercept(x, m):
y = m * x
return y
def linear_with_intercept(x, m, c):
y = m * x + c
return y
if oil_type == 'undersaturated':
            plt.figure(figsize=figsize)
            " Plot 1: F vs (Eo+Boi*Efw) "
            plt.subplot(1,2,1)
            Boi = Bo[0]
            # use the oil expansion term Eo, consistent with the axis labels below
            x1, y1 = (Eo + Boi * Efw), F
plt.plot(x1, y1, '.-')
plt.title(r'Plot 1: $F$ vs $(E_o+B_{oi}*E_{fw})$', size=title_size, pad=title_pad)
plt.xlabel(r'$E_o+B_{oi}E_{fw}$ (RB/STB)', size=15)
plt.ylabel(r'$F$ (res bbl)', size=15)
## curve-fitting to calculate the slope as OOIP
x1_norm = x1 / max(x1) # normalize x
y1_norm = y1 / max(y1) # normalize y
x1_norm = x1_norm[start:end]
y1_norm = y1_norm[start:end]
popt, pcov = curve_fit(linear_zero_intercept, x1_norm, y1_norm)
m = popt[0]
Nfoi = m * max(y1) / max(x1) # denormalize the slope, hence the OGIP
## Calculate OOIP and OGIP from Nfoi
Rsi = Rs[0]
Gfgi = 0 # no free gas phase in undersaturated oil
OOIP = Nfoi
OGIP = Nfoi * Rsi
## Output results into text in plot
labels, handles, OOIP, OGIP = initial_hydrocarbon_in_place(Nfoi, Gfgi, Rv, Rs)
## plot the regression line
x1_fit = np.linspace(min(x1), max(x1), 5)
y1_fit = linear_zero_intercept(x1_fit, Nfoi)
plt.plot(x1_fit, y1_fit, label='{} MMSTB'.format(np.round(Nfoi * 1E-6, 3)))
plt.legend(handles, labels, loc='best', fontsize='small',
fancybox=True, framealpha=0.7,
handlelength=0, handletextpad=0)
" Plot 2: F/(Eg+Boi*Efw) vs Np (Waterdrive Diagnostic Plot) "
plt.subplot(1,2,2)
x2, y2 = Np, F / (Eg + Boi * Efw)
plt.plot(x2, y2, '.-')
plt.title('Plot 2: Waterdrive Diagnostic Plot', size=title_size, pad=title_pad)
plt.xlabel(r'$N_p$ (STB)', size=15)
plt.ylabel(r'$\frac{F}{(E_o+B_{oi}E_{fw})}$ (STB)', size=15)
## curve-fitting to calculate the slope as OOIP, here [1:] because NaN is removed
x2_norm = x2[1:] / max(x2[1:]) # normalize x
y2_norm = y2[1:] / max(y2[1:]) # normalize y
popt, pcov = curve_fit(linear_with_intercept, x2_norm, y2_norm)
m, c = popt[0], popt[1]
m = m * max(y2[1:]) / max(x2[1:]) # denormalize the slope
Nfoi = c * max(y2[1:]) # denormalize the intercept, hence the OGIP
## Calculate OOIP and OGIP from Nfoi
Rsi = Rs[0]
Gfgi = 0 # no free gas phase in undersaturated oil
OOIP = Nfoi
OGIP = Nfoi * Rsi
## Output results into text in plot
labels, handles, OOIP, OGIP = initial_hydrocarbon_in_place(Nfoi, Gfgi, Rv, Rs)
## plot the regression line
x2_fit = np.linspace(min(x2[1:]), max(x2[1:]), 5)
y2_fit = linear_with_intercept(x2_fit, m, Nfoi)
plt.plot(x2_fit, y2_fit, label='{} MMSTB'.format(np.round(Nfoi * 1E-6, 3)))
plt.legend(handles, labels, loc='best', fontsize='small',
fancybox=True, framealpha=0.7,
handlelength=0, handletextpad=0)
            plt.tight_layout(pad=1)
plt.show()
if oil_type == 'saturated':
plt.figure(figsize=figsize)
" Plot 1: F/Eo vs Eg/Eo "
plt.subplot(1,3,1)
x1, y1 = (Eg / Eo), (F / Eo)
plt.plot(x1, y1, '.-')
plt.title('Plot 1: F/Eo vs Eg/Eo', size=title_size, pad=title_pad)
plt.xlabel(r'$\frac{Eg}{Eo}$ (STB/scf)', size=15)
plt.ylabel(r'$\frac{F}{Eo}$ (STB)', size=15)
## curve-fitting to calculate the slope as Gfgi, intercept as Nfoi
x1_norm = x1[1:] / max(x1[1:]) # normalize x
y1_norm = y1[1:] / max(y1[1:]) # normalize y
popt, pcov = curve_fit(linear_with_intercept, x1_norm, y1_norm)
m, c = popt[0], popt[1]
Gfgi = m = m * max(y1[1:]) / max(x1[1:]) # denormalize the slope
Nfoi = c = c * max(y1[1:]) # denormalize the intercept
## calculate OOIP and OGIP from Nfoi and Gfgi
Rsi, Rvi = Rs[0], Rv[0]
OOIP = Nfoi + Gfgi * Rvi
OGIP = Gfgi + Nfoi * Rsi
## Output results into text in plot
labels, handles, OOIP, OGIP = initial_hydrocarbon_in_place(Nfoi, Gfgi, Rv, Rs)
## plot the regression line
x1_fit = np.linspace(min(x1[1:]), max(x1[1:]), 5)
y1_fit = linear_with_intercept(x1_fit, m, c)
plt.plot(x1_fit, y1_fit)
plt.legend(handles, labels, loc='best', fontsize='small',
fancybox=True, framealpha=0.7,
handlelength=0, handletextpad=0)
" Plot 2: p/z vs Gp "
plt.subplot(1,3,2)
x2, y2 = (Eo / Eg), (F / Eg)
plt.plot(x2, y2, '.-')
plt.title('Plot 2: F/Eg vs Eo/Eg', size=title_size, pad=title_pad)
plt.xlabel(r'$\frac{Eo}{Eg}$ (scf/STB)', size=15)
plt.ylabel(r'$\frac{F}{Eg}$ (scf)', size=15)
## curve-fitting to calculate the slope as Nfoi, intercept as Gfgi
x2_norm = x2[1:] / max(x2[1:]) # normalize x
y2_norm = y2[1:] / max(y2[1:]) # normalize y
popt, pcov = curve_fit(linear_with_intercept, x2_norm, y2_norm)
m, c = popt[0], popt[1]
Nfoi = m = m * max(y2[1:]) / max(x2[1:]) # denormalize the slope
Gfgi = c = c * max(y2[1:]) # denormalize the intercept
## calculate OOIP and OGIP from Nfoi and Gfgi
Rsi, Rvi = Rs[0], Rv[0]
OOIP = Nfoi + Gfgi * Rvi
OGIP = Gfgi + Nfoi * Rsi
## Output results into text in plot
labels, handles, OOIP, OGIP = initial_hydrocarbon_in_place(Nfoi, Gfgi, Rv, Rs)
## plot the regression line
x2_fit = np.linspace(min(x2[1:]), max(x2[1:]), 5)
y2_fit = linear_with_intercept(x2_fit, m, c)
plt.plot(x2_fit, y2_fit)
plt.legend(handles, labels, loc='best', fontsize='small',
fancybox=True, framealpha=0.7,
handlelength=0, handletextpad=0)
            plt.tight_layout(pad=1)
plt.show()
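# Illustrative sketch (not part of the original module): how the drygas class
# above is typically driven. The arrays below are hypothetical placeholders;
# real input would come from production history and PVT data.
if __name__ == '__main__':
    import numpy as np
    p = np.array([4000., 3500., 3000.])      # pressure, psia
    z = np.array([0.90, 0.88, 0.87])         # gas deviation factor
    Bg = np.array([0.0008, 0.0009, 0.0010])  # gas FVF, RB/scf
    Gp = np.array([0., 1e9, 2e9])            # cumulative gas produced, scf
    cf, cw, swi = 3e-6, 3e-6, 0.25           # compressibilities (1/psi) and initial Sw
    mbal = drygas()
    F, Btg, Efw, Eg = mbal.calculate_params(p, Bg, Gp, cf, cw, swi)
    mbal.plot(p, z, Gp, F, Btg, Efw, Eg)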
|
py | 7dfcb092608d4549478cddde96b34eb5df203eb7 | from django.db import models
from datetime import datetime
# User information model
class Users(models.Model):
    username = models.CharField(max_length=32) # unique constraint
password = models.CharField(max_length=32)
addtime = models.DateTimeField(default=datetime.now)
def toDict(self):
return {'id':self.id,'username':self.username,'password':self.password}
class Meta:
        db_table = "users" # custom table name
# Host information model
class Host(models.Model):
    uid = models.CharField(max_length=32) # username of the logged-in user
    host_user = models.CharField(max_length=32) # unique constraint
host_password = models.CharField(max_length=32)
tag = models.CharField(max_length=32)
ip = models.CharField(max_length=32)
cpu = models.IntegerField()
vir_mem = models.IntegerField()
mem = models.IntegerField()
host_status = models.CharField(max_length=32)
addtime = models.DateTimeField(default=datetime.now)
class Meta:
        db_table = "host" # custom table name
# Host real-time status model
class Monitor(models.Model):
    host_id = models.CharField(max_length=32) # id of the monitored host
cpu_used = models.CharField(max_length=32)
vir_mem_used = models.CharField(max_length=32)
mem_used = models.CharField(max_length=32)
addtime = models.DateTimeField(default=datetime.now)
time = models.CharField(max_length=50)
class Meta:
        db_table = "monitor" # custom table name
|
py | 7dfcb0ab11ccae2a992c21e7f672a8e2b1882878 | import posixpath
import subprocess
import uuid
import pytest
@pytest.fixture
def empty_hdfs_root(onefs_client):
"""Create a temporary directory and make it the HDFS root."""
old_hdfs_root = onefs_client.hdfs_settings()['root_directory']
new_root_name = str(uuid.uuid4())
onefs_client.mkdir(new_root_name, 0o755)
onefs_client.update_hdfs_settings({
'root_directory': posixpath.join(onefs_client.zone_settings()['path'], new_root_name),
})
yield
onefs_client.update_hdfs_settings({'root_directory': old_hdfs_root})
onefs_client.rmdir(new_root_name, recursive=True)
@pytest.mark.usefixtures('empty_hdfs_root')
@pytest.mark.parametrize('script', ['isilon_create_users', 'isilon_create_directories'])
@pytest.mark.parametrize('dist', ['cdh', 'cdp', 'hdp'])
def test_dry_run(script, onefs_client, dist):
subprocess.check_call([
script,
'--append-cluster-name', str(uuid.uuid4()),
'--dist', dist,
'--dry',
'--no-verify',
'--onefs-password', onefs_client.password,
'--onefs-user', onefs_client.username,
'--zone', 'System',
onefs_client.address,
])
|
py | 7dfcb0ad58942624e29d357c503278044bfa89c5 | import gettext
class _(str):
observers = set()
lang = None
def __new__(cls, s):
if _.lang is None:
_.switch_lang('en')
t = _.translate(s)
o = super(_, cls).__new__(cls, t)
o.source_text = s
return o
@staticmethod
def translate(s, *args, **kwargs):
return _.lang(s)
@staticmethod
def bind(label):
try:
_.observers.add(label)
except:
pass
# garbage collection
new = set()
for label in _.observers:
try:
new.add(label)
except:
pass
_.observers = new
@staticmethod
def switch_lang(lang):
# get the right locales directory, and instantiate a gettext
from electrum_blk.i18n import LOCALE_DIR, set_language
locales = gettext.translation('electrum', LOCALE_DIR, languages=[lang], fallback=True)
_.lang = locales.gettext
for label in _.observers:
try:
label.text = _(label.text.source_text)
except:
pass
# Note that all invocations of _() inside the core electrum library
# use electrum.i18n instead of electrum.gui.kivy.i18n, so we should update the
# language there as well:
set_language(lang)
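# Illustrative usage (not part of the original module); the import path is an
# assumption based on the comment above:
#
#     from electrum_blk.gui.kivy.i18n import _
#     label.text = _('Wallet')  # translated with the currently selected language
#     _.bind(label)             # keep the label updated on language switches
#     _.switch_lang('es')       # re-translates all bound labels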
|
py | 7dfcb2535ca831bfe79c12d7f4d9b7904a39b9cd | import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from pathlib import PurePath
import os
import jinja2
import sys
import getopt
import math
arguments = "d:i:t:D:"
long_arguments = [
"directory=",
"index-name=",
"index-template=",
# "depth="
]
opts, extraparams = getopt.getopt(sys.argv[1:], arguments, long_arguments)
args = dict(opts)
SLEEP_SECONDS = 1
INDEX = args["-i"] if "-i" in args else 'index.html'
BASE = os.path.abspath(args["-d"])
INDEX_TEMPLATE = args["-t"] if "-t" in args else (BASE + '/' + 'index.jinja')
# DEPTH = int(args["-D"]) if "-D" in args else None
DEPTH = None # TODO depth to fix
env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath='/'))
template = env.get_template(
INDEX_TEMPLATE
)
class Item():
def __init__(self):
self.is_dir = False
self.path = ""
self.name = ""
self.base = ""
    def __unicode__(self):
        return self.path
def convert_size(size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
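# For example (illustrative): convert_size(0) returns "0B", convert_size(1536)
# returns "1.5 KB" and convert_size(10**9) returns "953.67 MB".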
def generate(dirname, depth=None):
items = []
base = dirname[len(BASE):]
PARENT = PurePath(base).parent
for item in os.listdir(dirname):
if (
item.startswith('.') or
item == INDEX or
item == INDEX_TEMPLATE
):
continue
item_obj = Item()
item_obj.name = item
item_obj.path = os.path.join(dirname, item)
item_obj.size = convert_size(os.path.getsize(item_obj.path))
item_obj.base = base
items.append(item_obj)
if os.path.isdir(item_obj.path):
item_obj.is_dir = True
if depth != 0:
current_depth = depth
if depth:
current_depth = depth - 1
generate(item_obj.path, current_depth)
html = template.render(
{
'parent': PARENT,
'base': base,
'dirname': dirname,
'items': items,
}
)
index = os.path.join(dirname, INDEX)
with open(index, 'w') as f:
f.write(html)
class IndexerHandler(FileSystemEventHandler):
def on_modified(self, event):
generate(BASE, DEPTH)
if __name__ == "__main__":
generate(BASE, DEPTH)
event_handler = IndexerHandler()
observer = Observer()
# observer.schedule(event_handler, path=BASE, recursive=False)
observer.schedule(event_handler, path=BASE, recursive=True)
observer.start()
try:
while True:
time.sleep(SLEEP_SECONDS)
except KeyboardInterrupt:
observer.stop()
observer.join()
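# Example invocation (a sketch; the script name and paths are hypothetical):
#   python indexer.py -d /srv/files -i index.html -t /srv/files/index.jinja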
|
py | 7dfcb379b403382249f9d58fca31fc23986a56a0 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'about.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(555, 605)
Dialog.setMaximumSize(QtCore.QSize(555, 605))
self.pushButton = QtWidgets.QPushButton(Dialog)
self.pushButton.setGeometry(QtCore.QRect(230, 570, 85, 27))
self.pushButton.setObjectName("pushButton")
self.label = QtWidgets.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(20, 10, 511, 41))
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.line = QtWidgets.QFrame(Dialog)
self.line.setGeometry(QtCore.QRect(10, 150, 531, 20))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.line_2 = QtWidgets.QFrame(Dialog)
self.line_2.setGeometry(QtCore.QRect(10, 540, 531, 20))
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.label_2 = QtWidgets.QLabel(Dialog)
self.label_2.setGeometry(QtCore.QRect(160, 90, 251, 41))
font = QtGui.QFont()
font.setPointSize(20)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.textBrowser = QtWidgets.QTextBrowser(Dialog)
self.textBrowser.setGeometry(QtCore.QRect(20, 180, 511, 351))
self.textBrowser.setObjectName("textBrowser")
self.label_3 = QtWidgets.QLabel(Dialog)
self.label_3.setGeometry(QtCore.QRect(30, 80, 111, 61))
self.label_3.setText("")
self.label_3.setPixmap(QtGui.QPixmap("../img/icon.png"))
self.label_3.setObjectName("label_3")
self.retranslateUi(Dialog)
self.pushButton.clicked.connect(Dialog.close)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.pushButton.setText(_translate("Dialog", "Закрыть"))
self.label.setText(_translate("Dialog", "Добро пожаловать в чат-флудилку"))
self.label_2.setText(_translate("Dialog", "\"Finger Balabolka\""))
self.textBrowser.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Noto Sans\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"> <span style=\" font-size:14pt;\">Автор, он же разработчик, он же великдушный диктатор данной флудилки: </span> </p>\n"
"<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"> <span style=\" font-size:14pt; font-weight:600;\">Сухаренко А.Н. </span> <a href=\"https://github.com/Cyxapic\"><span style=\" text-decoration: underline; color:#0000ff;\"> </span></a><a href=\"https://github.com/Cyxapic\"><span style=\" font-size:14pt; font-weight:600; text-decoration: underline; color:#0000ff;\">HTTP: github</span></a></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt; text-decoration: underline; color:#000000;\">Программа разработа под руководством преподователя Geekbrains - </span><span style=\" font-family:\'Roboto,San Francisco,Helvetica Neue,Helvetica,Arial\'; font-size:14pt; text-decoration: underline; color:#3c4144; background-color:#ffffff;\">Леонид Орлов, </span><span style=\" font-family:\'Roboto,San Francisco,Helvetica Neue,Helvetica,Arial\'; font-size:14pt; font-weight:600; text-decoration: underline; color:#00aa00; background-color:#ffffff;\">за что ему отдельная благодарность!</span> </p></body></html>"))
|
py | 7dfcb3ec7660c2335dc40a4f5bb5fcfe2a3943f4 | from copy import copy
import re
import pytest
from app import create_app
from config import Config
from modules.user_account.schema_public import UserAccountSchema
from modules.users.model import User
from fixtures import Fixtures
@pytest.fixture
def app(request):
config = copy(Config)
config.TESTING = True
config.APP_TYPE = 'admin' if 'admin_api' in request.keywords else 'public'
app = create_app(config)
if 'unit' in request.keywords:
yield app
else:
fixtures = Fixtures(app)
fixtures.setup()
yield app
fixtures.teardown()
# INTEGRATION TESTS
@pytest.mark.integration
@pytest.mark.admin_api
def test_user_account_admin_schema_dump(app):
re_datetime = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+\d{4}$")
user2 = User.query.get(2)
output = {
'id': user2.id,
'username': user2.username,
'email': user2.email,
'password_changed_at': user2.password_changed_at,
'is_verified': user2.is_verified,
'first_name': user2.profile.first_name,
'last_name': user2.profile.last_name,
'joined_at': user2.profile.joined_at,
}
result = UserAccountSchema().dump(output)
assert len(result) == 8
assert result['id'] == 2
assert result['username'] == 'user2'
assert result['email'] == '[email protected]'
assert result['first_name'] == 'Lynne'
assert result['last_name'] == 'Harford'
assert bool(re_datetime.match(result['password_changed_at']))
assert result['joined_at'] == '2018-12-07T00:00:00+0000'
assert result['is_verified'] is True
|
py | 7dfcb40f19f97a94298532e93911413b0b5a7c16 | """Test Fill-in-the-blank module"""
import unittest
from pororo import Pororo
class PororoBlankTester(unittest.TestCase):
def test_modules(self):
fill = Pororo(task="fib", lang="ko")
fill_res = fill("아 그거 __으로 보내줘 ㅋㅋ")
self.assertIsInstance(fill_res, list)
fill = Pororo(task="fib", lang="zh")
fill_res = fill("三__男子在街上做同样的舞蹈。")
self.assertIsInstance(fill_res, list)
fill = Pororo(task="fib", lang="ja")
fill_res = fill("文在寅は__の大統領だ。")
self.assertIsInstance(fill_res, list)
if __name__ == "__main__":
unittest.main()
|
py | 7dfcb6ba2f94d6cdb3e4d61ac725c294951c4b6a | #!/usr/bin/env python3
# Pretty print 9p simpletrace log
# Usage: ./analyse-9p-simpletrace <trace-events> <trace-pid>
#
# Author: Harsh Prateek Bora
import os
import simpletrace
symbol_9p = {
6 : 'TLERROR',
7 : 'RLERROR',
8 : 'TSTATFS',
9 : 'RSTATFS',
12 : 'TLOPEN',
13 : 'RLOPEN',
14 : 'TLCREATE',
15 : 'RLCREATE',
16 : 'TSYMLINK',
17 : 'RSYMLINK',
18 : 'TMKNOD',
19 : 'RMKNOD',
20 : 'TRENAME',
21 : 'RRENAME',
22 : 'TREADLINK',
23 : 'RREADLINK',
24 : 'TGETATTR',
25 : 'RGETATTR',
26 : 'TSETATTR',
27 : 'RSETATTR',
30 : 'TXATTRWALK',
31 : 'RXATTRWALK',
32 : 'TXATTRCREATE',
33 : 'RXATTRCREATE',
40 : 'TREADDIR',
41 : 'RREADDIR',
50 : 'TFSYNC',
51 : 'RFSYNC',
52 : 'TLOCK',
53 : 'RLOCK',
54 : 'TGETLOCK',
55 : 'RGETLOCK',
70 : 'TLINK',
71 : 'RLINK',
72 : 'TMKDIR',
73 : 'RMKDIR',
74 : 'TRENAMEAT',
75 : 'RRENAMEAT',
76 : 'TUNLINKAT',
77 : 'RUNLINKAT',
100 : 'TVERSION',
101 : 'RVERSION',
102 : 'TAUTH',
103 : 'RAUTH',
104 : 'TATTACH',
105 : 'RATTACH',
106 : 'TERROR',
107 : 'RERROR',
108 : 'TFLUSH',
109 : 'RFLUSH',
110 : 'TWALK',
111 : 'RWALK',
112 : 'TOPEN',
113 : 'ROPEN',
114 : 'TCREATE',
115 : 'RCREATE',
116 : 'TREAD',
117 : 'RREAD',
118 : 'TWRITE',
119 : 'RWRITE',
120 : 'TCLUNK',
121 : 'RCLUNK',
122 : 'TREMOVE',
123 : 'RREMOVE',
124 : 'TSTAT',
125 : 'RSTAT',
126 : 'TWSTAT',
127 : 'RWSTAT'
}
class VirtFSRequestTracker(simpletrace.Analyzer):
def begin(self):
print("Pretty printing 9p simpletrace log ...")
def v9fs_rerror(self, tag, id, err):
print("RERROR (tag =", tag, ", id =", symbol_9p[id], ", err = \"", os.strerror(err), "\")")
def v9fs_version(self, tag, id, msize, version):
print("TVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")")
def v9fs_version_return(self, tag, id, msize, version):
print("RVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")")
def v9fs_attach(self, tag, id, fid, afid, uname, aname):
print("TATTACH (tag =", tag, ", fid =", fid, ", afid =", afid, ", uname =", uname, ", aname =", aname, ")")
def v9fs_attach_return(self, tag, id, type, version, path):
print("RATTACH (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})")
def v9fs_stat(self, tag, id, fid):
print("TSTAT (tag =", tag, ", fid =", fid, ")")
def v9fs_stat_return(self, tag, id, mode, atime, mtime, length):
print("RSTAT (tag =", tag, ", mode =", mode, ", atime =", atime, ", mtime =", mtime, ", length =", length, ")")
def v9fs_getattr(self, tag, id, fid, request_mask):
print("TGETATTR (tag =", tag, ", fid =", fid, ", request_mask =", hex(request_mask), ")")
def v9fs_getattr_return(self, tag, id, result_mask, mode, uid, gid):
print("RGETATTR (tag =", tag, ", result_mask =", hex(result_mask), ", mode =", oct(mode), ", uid =", uid, ", gid =", gid, ")")
def v9fs_walk(self, tag, id, fid, newfid, nwnames):
print("TWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", nwnames =", nwnames, ")")
def v9fs_walk_return(self, tag, id, nwnames, qids):
print("RWALK (tag =", tag, ", nwnames =", nwnames, ", qids =", hex(qids), ")")
def v9fs_open(self, tag, id, fid, mode):
print("TOPEN (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ")")
def v9fs_open_return(self, tag, id, type, version, path, iounit):
print("ROPEN (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")")
def v9fs_lcreate(self, tag, id, dfid, flags, mode, gid):
print("TLCREATE (tag =", tag, ", dfid =", dfid, ", flags =", oct(flags), ", mode =", oct(mode), ", gid =", gid, ")")
def v9fs_lcreate_return(self, tag, id, type, version, path, iounit):
print("RLCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")")
def v9fs_fsync(self, tag, id, fid, datasync):
print("TFSYNC (tag =", tag, ", fid =", fid, ", datasync =", datasync, ")")
def v9fs_clunk(self, tag, id, fid):
print("TCLUNK (tag =", tag, ", fid =", fid, ")")
def v9fs_read(self, tag, id, fid, off, max_count):
print("TREAD (tag =", tag, ", fid =", fid, ", off =", off, ", max_count =", max_count, ")")
def v9fs_read_return(self, tag, id, count, err):
print("RREAD (tag =", tag, ", count =", count, ", err =", err, ")")
def v9fs_readdir(self, tag, id, fid, offset, max_count):
print("TREADDIR (tag =", tag, ", fid =", fid, ", offset =", offset, ", max_count =", max_count, ")")
def v9fs_readdir_return(self, tag, id, count, retval):
print("RREADDIR (tag =", tag, ", count =", count, ", retval =", retval, ")")
def v9fs_write(self, tag, id, fid, off, count, cnt):
print("TWRITE (tag =", tag, ", fid =", fid, ", off =", off, ", count =", count, ", cnt =", cnt, ")")
def v9fs_write_return(self, tag, id, total, err):
print("RWRITE (tag =", tag, ", total =", total, ", err =", err, ")")
def v9fs_create(self, tag, id, fid, name, perm, mode):
print("TCREATE (tag =", tag, ", fid =", fid, ", perm =", oct(perm), ", name =", name, ", mode =", oct(mode), ")")
def v9fs_create_return(self, tag, id, type, version, path, iounit):
print("RCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")")
def v9fs_symlink(self, tag, id, fid, name, symname, gid):
print("TSYMLINK (tag =", tag, ", fid =", fid, ", name =", name, ", symname =", symname, ", gid =", gid, ")")
def v9fs_symlink_return(self, tag, id, type, version, path):
print("RSYMLINK (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})")
def v9fs_flush(self, tag, id, flush_tag):
print("TFLUSH (tag =", tag, ", flush_tag =", flush_tag, ")")
def v9fs_link(self, tag, id, dfid, oldfid, name):
print("TLINK (tag =", tag, ", dfid =", dfid, ", oldfid =", oldfid, ", name =", name, ")")
def v9fs_remove(self, tag, id, fid):
print("TREMOVE (tag =", tag, ", fid =", fid, ")")
def v9fs_wstat(self, tag, id, fid, mode, atime, mtime):
print("TWSTAT (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", atime =", atime, "mtime =", mtime, ")")
def v9fs_mknod(self, tag, id, fid, mode, major, minor):
print("TMKNOD (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", major =", major, ", minor =", minor, ")")
def v9fs_lock(self, tag, id, fid, type, start, length):
print("TLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")")
def v9fs_lock_return(self, tag, id, status):
print("RLOCK (tag =", tag, ", status =", status, ")")
def v9fs_getlock(self, tag, id, fid, type, start, length):
print("TGETLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")")
def v9fs_getlock_return(self, tag, id, type, start, length, proc_id):
print("RGETLOCK (tag =", tag, "type =", type, ", start =", start, ", length =", length, ", proc_id =", proc_id, ")")
def v9fs_mkdir(self, tag, id, fid, name, mode, gid):
print("TMKDIR (tag =", tag, ", fid =", fid, ", name =", name, ", mode =", mode, ", gid =", gid, ")")
def v9fs_mkdir_return(self, tag, id, type, version, path, err):
print("RMKDIR (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, err =", err, ")")
def v9fs_xattrwalk(self, tag, id, fid, newfid, name):
print("TXATTRWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", xattr name =", name, ")")
def v9fs_xattrwalk_return(self, tag, id, size):
print("RXATTRWALK (tag =", tag, ", xattrsize =", size, ")")
def v9fs_xattrcreate(self, tag, id, fid, name, size, flags):
print("TXATTRCREATE (tag =", tag, ", fid =", fid, ", name =", name, ", xattrsize =", size, ", flags =", flags, ")")
def v9fs_readlink(self, tag, id, fid):
print("TREADLINK (tag =", tag, ", fid =", fid, ")")
def v9fs_readlink_return(self, tag, id, target):
print("RREADLINK (tag =", tag, ", target =", target, ")")
simpletrace.run(VirtFSRequestTracker())
|
py | 7dfcb73be5f4b00d2228108cf287c45c3b48d5e6 | import sqlalchemy
import zeeguu_core
from datetime import datetime, timedelta
from zeeguu_core.model.user import User
db = zeeguu_core.db
# Parameter that controls after how much time (in seconds) the session is expired
EXERCISE_SESSION_TIMEOUT = 21
VERY_FAR_IN_THE_PAST = '2000-01-01T00:00:00'
VERY_FAR_IN_THE_FUTURE = '9999-12-31T23:59:59'
class UserExerciseSession(db.Model):
"""
This class keeps track of the user's exercise sessions.
So we can study how much time and when the user has done exercises.
"""
__table_args__ = dict(mysql_collate="utf8_bin")
__tablename__ = 'user_exercise_session'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
user = db.relationship(User)
start_time = db.Column(db.DateTime)
duration = db.Column(db.Integer) # Duration time in miliseconds
last_action_time = db.Column(db.DateTime)
is_active = db.Column(db.Boolean)
def __init__(self, user_id, start_time, current_time=None):
self.user_id = user_id
self.is_active = True
# When we want to emulate an event happening in a particular moment in the past or in the future,
# we can provide the current_time variable to override the datetime.now()
if current_time is None:
current_time = datetime.now()
self.start_time = start_time
self.last_action_time = current_time
duration = self.last_action_time - self.start_time
self.duration = duration.total_seconds() * 1000
@classmethod
def get_exercise_session_timeout(cls):
return EXERCISE_SESSION_TIMEOUT
@classmethod
def _find_most_recent_session(cls, user_id, db_session):
"""
Queries and returns if there is an open exercise session for that user
Parameters:
user_id = user identifier
db_session = database session
returns: the active exercise_session record for the specific user or None if none is found
"""
query = cls.query
query = query.filter(cls.user_id == user_id)
query = query.filter(cls.is_active == True)
try:
return query.one()
except sqlalchemy.orm.exc.MultipleResultsFound:
# Close all open sessions except last one
            query = query.order_by(cls.last_action_time)
open_sessions = query.with_for_update().all()
for exercise_session in open_sessions[:-1]:
exercise_session._close_exercise_session(db_session)
return open_sessions[-1]
except sqlalchemy.orm.exc.NoResultFound:
return None
    def _is_still_active(self, current_time=None):
"""
Validates if the exercise session is still valid (according to the exercise_session_timeout control variable)
Parameters:
current_time = when this parameter is sent, instead of using the datetime.now() value for the current time
we use the provided value as the system time (only used for filling in historical data)
returns: True if the time between the exercise session's last action and the current time
is less or equal than the exercise_session_timeout
"""
        if current_time is None:
            current_time = datetime.now()
        time_difference = current_time - self.last_action_time
w_exercise_session_timeout = timedelta(seconds=EXERCISE_SESSION_TIMEOUT)
return time_difference <= w_exercise_session_timeout
@staticmethod
def _create_new_session(db_session, user_id, start_time, current_time=None):
"""
Creates a new exercise session
Parameters:
db_session = database session
user_id = user identifier
start_time = the time when the user opened the exercise
current_time = when this parameter is sent, instead of using the datetime.now() value for the current time
we use the provided value as the system time (only used for filling in historical data)
returns: The new exercise session
"""
# If the user left the exercise open for very long, we set the duration of the session to the timeout value
time_difference = current_time - start_time
if time_difference > timedelta(seconds=EXERCISE_SESSION_TIMEOUT):
start_time = current_time - timedelta(seconds=EXERCISE_SESSION_TIMEOUT)
exercise_session = UserExerciseSession(user_id, start_time, current_time)
db_session.add(exercise_session)
db_session.commit()
return exercise_session
    def _update_last_action_time(self, db_session, current_time=None):
"""
Updates the last_action_time field and computes the duration of the exercise session
Parameters:
db_session = database session
current_time = when this parameter is sent, instead of using the datetime.now() value for the current time
we use the provided value as the system time (only used for filling in historical data)
returns: The exercise session
"""
        if current_time is None:
            current_time = datetime.now()
        # Update duration
current_session_length = current_time - self.start_time
self.duration = current_session_length.total_seconds() * 1000 # Convert to miliseconds
self.last_action_time = current_time
db_session.add(self)
db_session.commit()
return self
def _close_exercise_session(self, db_session):
"""
Sets the is_active field to False
Parameters:
db_session = database session
returns: The exercise session if everything went well otherwise probably exceptions related to the DB
"""
self.is_active = False
db_session.add(self)
db_session.commit()
return self
@classmethod
def update_exercise_session(cls, exercise, db_session):
"""
Main callable method that keeps track of the exercise sessions.
It does: If there is an open session for the user and article and the elapsed time is within the
session_timeout range, it updates the session to the current time, otherwise, it closes and creates a new one
Parameters:
user_exercise = exercise instance
db_session = database session
returns: The exercise session or None when no user is found
"""
user_id = exercise.find_user_id(db.session)
if user_id:
current_time = exercise.time
start_time = current_time - timedelta(minutes=(exercise.solving_speed / 60000))
most_recent_exercise_session = cls._find_most_recent_session(user_id, db_session)
if most_recent_exercise_session:
if most_recent_exercise_session._is_still_active(
current_time): # Verify if the session is not expired (according to session timeout)
return most_recent_exercise_session._update_last_action_time(db_session, current_time=current_time)
else: # If the session is expired, close it and create a new one
most_recent_exercise_session._close_exercise_session(db_session)
return cls._create_new_session(db_session, user_id, start_time, current_time=current_time)
else:
return cls._create_new_session(db_session, user_id, start_time, current_time=current_time)
else:
return None
@classmethod
def find_by_user(cls,
user_id,
from_date: str = VERY_FAR_IN_THE_PAST,
to_date: str = VERY_FAR_IN_THE_FUTURE,
is_active: bool = None):
"""
Get exercise sessions by user
return: object or None if not found
"""
query = cls.query
query = query.filter(cls.user_id == user_id)
query = query.filter(cls.start_time >= from_date)
query = query.filter(cls.start_time <= to_date)
if is_active is not None:
query = query.filter(cls.is_active == is_active)
query = query.order_by('start_time')
sessions = query.all()
return sessions
@classmethod
def find_by_cohort(cls,
cohort_id,
from_date: str = VERY_FAR_IN_THE_PAST,
to_date: str = VERY_FAR_IN_THE_FUTURE,
is_active: bool = None):
"""
Get exercise sessions by cohort
return: object or None if not found
"""
query = cls.query.join(User).filter(User.cohort_id == cohort_id)
query = query.filter(cls.start_time >= from_date)
query = query.filter(cls.start_time <= to_date)
if is_active is not None:
query = query.filter(cls.is_active == is_active)
query = query.order_by('start_time')
sessions = query.all()
return sessions
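    # Usage sketch (assumes an `exercise` model instance and an open `db.session`):
    #   session = UserExerciseSession.update_exercise_session(exercise, db.session)
    #   recent = UserExerciseSession.find_by_user(user_id, from_date='2021-01-01T00:00:00')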
|
py | 7dfcb8a657784262292b5f4dc1c719d29f92844d | from django.contrib.contenttypes.models import ContentType
import test_utils
from access.helpers import has_perm, has_perm_or_owns
from access.tests import permission
from forums.tests import ForumTestCase, forum, thread
from sumo.urlresolvers import reverse
from users.tests import user, group
class ForumTestPermissions(ForumTestCase):
"""Make sure access helpers work on the forums."""
def setUp(self):
url = reverse('forums.threads', args=[u'test-forum'])
self.context = {'request': test_utils.RequestFactory().get(url)}
self.group = group(save=True)
# Set up forum_1
f = self.forum_1 = forum(save=True)
ct = ContentType.objects.get_for_model(self.forum_1)
permission(codename='forums_forum.thread_edit_forum', content_type=ct,
object_id=f.id, group=self.group, save=True)
permission(codename='forums_forum.post_edit_forum', content_type=ct,
object_id=f.id, group=self.group, save=True)
permission(codename='forums_forum.post_delete_forum', content_type=ct,
object_id=f.id, group=self.group, save=True)
permission(codename='forums_forum.thread_delete_forum',
content_type=ct, object_id=f.id, group=self.group,
save=True)
permission(codename='forums_forum.thread_sticky_forum',
content_type=ct, object_id=f.id, group=self.group,
save=True)
permission(codename='forums_forum.thread_move_forum', content_type=ct,
object_id=f.id, group=self.group, save=True)
# Set up forum_2
f = self.forum_2 = forum(save=True)
permission(codename='forums_forum.thread_move_forum', content_type=ct,
object_id=f.id, group=self.group, save=True)
def test_has_perm_thread_edit(self):
"""User in group can edit thread in forum_1, but not in forum_2."""
u = user(save=True)
self.group.user_set.add(u)
self.context['request'].user = u
assert has_perm(self.context, 'forums_forum.thread_edit_forum',
self.forum_1)
assert not has_perm(self.context, 'forums_forum.thread_edit_forum',
self.forum_2)
def test_has_perm_or_owns_thread_edit(self):
"""Users can edit their own threads."""
my_t = thread(save=True)
me = my_t.creator
other_t = thread(save=True)
self.context['request'].user = me
perm = 'forums_forum.thread_edit_forum'
assert has_perm_or_owns(self.context, perm, my_t, self.forum_1)
assert not has_perm_or_owns(self.context, perm, other_t, self.forum_1)
def test_has_perm_thread_delete(self):
"""User in group can delete thread in forum_1, but not in forum_2."""
u = user(save=True)
self.group.user_set.add(u)
self.context['request'].user = u
assert has_perm(self.context, 'forums_forum.thread_delete_forum',
self.forum_1)
assert not has_perm(self.context, 'forums_forum.thread_delete_forum',
self.forum_2)
def test_has_perm_thread_sticky(self):
"""
User in group can change sticky status of thread in forum_1, but not
in forum_2.
"""
u = user(save=True)
self.group.user_set.add(u)
self.context['request'].user = u
assert has_perm(self.context, 'forums_forum.thread_sticky_forum',
self.forum_1)
assert not has_perm(self.context, 'forums_forum.thread_sticky_forum',
self.forum_2)
def test_has_perm_thread_locked(self):
"""
Sanity check: user in group has no permission to change locked
status in forum_1.
"""
u = user(save=True)
self.group.user_set.add(u)
self.context['request'].user = u
assert not has_perm(self.context, 'forums_forum.thread_locked_forum',
self.forum_1)
def test_has_perm_post_edit(self):
"""User in group can edit any post in forum_1, but not in forum_2."""
u = user(save=True)
self.group.user_set.add(u)
self.context['request'].user = u
assert has_perm(self.context, 'forums_forum.post_edit_forum',
self.forum_1)
assert not has_perm(self.context, 'forums_forum.post_edit_forum',
self.forum_2)
def test_has_perm_post_delete(self):
"""User in group can delete posts in forum_1, but not in forum_2."""
u = user(save=True)
self.group.user_set.add(u)
self.context['request'].user = u
assert has_perm(self.context, 'forums_forum.post_delete_forum',
self.forum_1)
assert not has_perm(self.context, 'forums_forum.post_delete_forum',
self.forum_2)
def test_no_perm_thread_delete(self):
"""
User not in group cannot delete thread in any forum.
"""
self.context['request'].user = user(save=True)
assert not has_perm(self.context, 'forums_forum.thread_delete_forum',
self.forum_1)
assert not has_perm(self.context, 'forums_forum.thread_delete_forum',
self.forum_2)
|
py | 7dfcb8a9adde78446a5e9d13c2bc25d8651e0d9a | from base_plugin import BasePlugin
class GoodPlugin(BasePlugin):
name = "test_plugin_1" |
py | 7dfcb8ce2a273cae786cb4b785895974e4ed6ba5 | import numpy as np
import torch
import torch.nn.functional as F
from decision_transformer.training.trainer import Trainer
import torch_semiring_einsum
EQUATION = torch_semiring_einsum.compile_equation("iaj,bj->iabj")
def kmeans_cosine_max_loss(centers, seq, mean=False):
assert centers.device == seq.device
# loss = -(torch.einsum("iaj,bj->iabj", [seq, centers]).max(2).values.mean())
if mean:
loss = -(
torch_semiring_einsum.einsum(EQUATION, seq, centers, block_size=5).mean()
)
else:
loss = -(
torch_semiring_einsum.einsum(EQUATION, seq, centers, block_size=5)
.max(2)
.values.mean()
)
return loss
def kmeans_anneal(x):
    return 1 / (1 + np.exp(-(((5000 - x) / (5000 / 10)) - 5)))
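# The anneal weight decays sigmoidally with the training step: roughly 1.0 at step 0,
# about 0.5 around step 2500, and close to 0 by step 5000, so the clustering term is
# emphasised early in training and fades out later.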
class SequenceTrainer(Trainer):
def train_step(self):
(
states,
actions,
rewards,
dones,
rtg,
timesteps,
attention_mask,
) = self.get_batch(self.batch_size)
action_target = torch.clone(actions)
state_preds, action_preds, reward_preds, all_embs = self.model.forward(
states,
actions,
rewards,
rtg[:, :-1],
timesteps,
attention_mask=attention_mask,
)
self.step += 1
act_dim = action_preds.shape[2]
action_preds = action_preds.reshape(-1, act_dim)[attention_mask.reshape(-1) > 0]
action_target = action_target.reshape(-1, act_dim)[
attention_mask.reshape(-1) > 0
]
loss = self.loss_fn(
None,
action_preds,
None,
None,
action_target,
None,
)
if self.args["gpt_kmeans"]:
loss += (
self.args["gpt_kmeans_const"]
* kmeans_anneal(self.step)
* kmeans_cosine_max_loss(
F.normalize(self.model.cluster_centers, dim=-1),
F.normalize(all_embs, dim=-1),
mean=self.args["kmeans_mean"],
)
)
self.optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.25)
self.optimizer.step()
with torch.no_grad():
self.diagnostics["training/action_error"] = (
torch.mean((action_preds - action_target) ** 2).detach().cpu().item()
)
return loss.detach().cpu().item()
|
py | 7dfcb9538f77de9a14e2f355284acbbfb4180b0f | # Exemple given.
# numbers = [10, 15, 3, 7]
# k = 17
numbers = []
numbersString = input("Type a list of numbers (separated by a comma): ")
numbersStringList = numbersString.split(",")
for n in numbersStringList:
numbers.append(int(n))
k = int(input("Type target number: "))
result = False
for i in range(len(numbers)):
for j in range(i+1, len(numbers)):
if (numbers[i] + numbers[j]) == k:
result = True
print(result)
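# A linear-time alternative (a sketch, not part of the original exercise): keep the
# numbers seen so far in a set and stop at the first pair that sums to k.
seen = set()
fast_result = False
for n in numbers:
    if k - n in seen:
        fast_result = True
        break
    seen.add(n)
print(fast_result)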
|
py | 7dfcb9e4a9d9f5155aa25c3a260bae7cef283b75 | ##
## Copyright (c) 2016-2019 Genomics England Ltd.
##
## This file is part of PanelApp
## (see https://panelapp.genomicsengland.co.uk).
##
## Licensed to the Apache Software Foundation (ASF) under one
## or more contributor license agreements. See the NOTICE file
## distributed with this work for additional information
## regarding copyright ownership. The ASF licenses this file
## to you under the Apache License, Version 2.0 (the
## "License"); you may not use this file except in compliance
## with the License. You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing,
## software distributed under the License is distributed on an
## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
## KIND, either express or implied. See the License for the
## specific language governing permissions and limitations
## under the License.
##
import csv
import sys
import djclick as click
from panels.models import GenePanelSnapshot
from panels.models import Evidence
@click.command()
def command():
"""
Generate CSV to check which expert reviews need to be updated
:return:
"""
header = [
"Panel Name",
"Panel ID",
"Panel Version",
"Panel Status",
"Entity Type",
"Entity",
"Status",
"Sources",
"Change Required",
"Expert Review",
]
writer = csv.writer(sys.stdout)
writer.writerow(header)
for gps in (
GenePanelSnapshot.objects.get_active_annotated(True, True, True)
.exclude(is_super_panel=True)
.iterator()
):
panel_info = [gps.panel.name, gps.panel.id, gps.version, gps.panel.status]
for entity in gps.get_all_entities_extra:
status = entity.saved_gel_status
sources = entity.evidence.values_list("name", flat=True)
expert_reviews = [s for s in sources if s in Evidence.EXPERT_REVIEWS]
change_required = len(expert_reviews) > 1
expert_review = ""
if status == 0 and "Expert Review Removed" in expert_reviews:
expert_review = "Expert Review Removed"
elif status == 1 and "Expert Review Red" in expert_reviews:
expert_review = "Expert Review Red"
elif status == 2 and "Expert Review Amber" in expert_reviews:
expert_review = "Expert Review Amber"
elif status > 2 and "Expert Review Green" in expert_reviews:
expert_review = "Expert Review Green"
item = [
*panel_info,
entity.entity_type,
entity.entity_name,
status,
";".join(sources),
change_required,
expert_review,
]
if change_required:
writer.writerow(item)
|
py | 7dfcba62ba779004959cebf2010d283fd8e344cc | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Project Contributors
#
# Distributed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tests for the spyder.config.base module.
"""
# Standard library imports
import os.path as osp
try:
from importlib import reload
except ImportError: # A builtin on Python 2
pass
# Third party imports
import pytest
# Local imports
import spyder.config.base
# ============================================================================
# ---- Tests
# ============================================================================
def test_is_stable_version():
"""Test that stable and non-stable versions are recognized correctly."""
for stable_version in ['3.3.0', '2', ('0', '5')]:
assert spyder.config.base.is_stable_version(stable_version)
for not_stable_version in ['4.0.0b1', '3.3.2.dev0',
'beta', ('2', '0', 'alpha')]:
assert not spyder.config.base.is_stable_version(not_stable_version)
@pytest.mark.parametrize('use_dev_config_dir', [True, False])
def test_get_conf_path(monkeypatch, use_dev_config_dir):
"""Test that the config dir path is set under dev and release builds."""
monkeypatch.setenv('SPYDER_USE_DEV_CONFIG_DIR', str(use_dev_config_dir))
reload(spyder.config.base)
conf_path = spyder.config.base.get_conf_path()
assert conf_path
assert ((osp.basename(conf_path).split('-')[-1] == 'dev')
== use_dev_config_dir)
assert osp.isdir(conf_path)
monkeypatch.undo()
reload(spyder.config.base)
if __name__ == '__main__':
pytest.main()
|
py | 7dfcbb04af64c13e83c2c685c1eaa5a5e9d2846d | # coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
DA_HP = dict(
archi_name=["L4"]
, n_steps=[15000, 25000]
, n_units=[200, 500]
, batch_size=[10000]
, tolerance=[100.0]
)
FF_HP = dict(
feature_id=list(range(28))
, tolerance=[100.0]
)
GB_HP = dict(
max_depth=[3, 6]
, n_estimators=[300, 800]
, learning_rate=[0.1, 0.01]
, tolerance=[100.0]
)
INF_HP = dict(
archi_name=["L4"]
, n_steps=[2000]
, n_units=[200, 500]
, sample_size=[10000]
, tolerance=[100.0]
)
NN_HP = dict(
archi_name=["L4"]
, n_steps=[15000, 25000]
, n_units=[200, 500]
, batch_size=[200, 10000]
, tolerance=[100.0]
)
PIVOT_HP = dict(
archi_name=["L4"]
, n_steps=[15000, 25000]
, n_units=[200, 500]
, trade_off=[1.0, 0.1]
, batch_size=[1000, 10000]
, tolerance=[100.0]
)
REG_HP = dict(
archi_name=["EA3ML3"]
, n_steps=[15000, 25000]
, n_units=[200, 500]
, sample_size=[10000, 50000]
)
FREG_HP = dict(
archi_name=["EA3ML3"]
, n_steps=[15000, 25000]
, n_units=[200, 500]
, sample_size=[10000, 50000]
)
REG_M_HP = dict(
archi_name=["A3ML3"]
, n_steps=[15000, 25000]
, n_units=[100, 200, 500]
, sample_size=[10000, 50000]
)
TP_HP = dict(
archi_name=["L4"]
, n_steps=[15000, 25000]
, n_units=[200, 500]
, trade_off=[1.0, 0.1]
, batch_size=[200]
, tolerance=[100.0]
)
|
py | 7dfcbb39906c0e8d103a608b1225737788440104 | """
Textnet network visualization: draw the Network json file.
NOTE: this requires pydot>=1.0.
"""
import json
import pydot
import argparse
import re
# Internal layer and blob styles.
LAYER_STYLE_DEFAULT = {'shape': 'record', 'fillcolor': '#6495ED',
'style': 'filled'}
NEURON_LAYER_STYLE = {'shape': 'record', 'fillcolor': '#90EE90',
'style': 'filled'}
BLOB_STYLE = {'shape': 'octagon', 'fillcolor': '#E0E0E0',
'style': 'filled'}
Layer2Name = {
0: 'UnkonwnLayer',
# Activation Layer 1-10
1: 'RectifiedLinear',
2: 'Sigmoid',
3: 'Tanh',
# Common Layer 11-50
11: 'FullConnect',
12: 'Flatten',
13: 'Dropout',
14: 'Conv',
15: 'MaxPooling',
16: 'SumPooling',
17: 'AvgPooling',
18: 'Concat',
19: 'ChConcat',
20: 'Split',
21: 'Embedding',
22: 'Cross',
23: 'Match',
24: 'Lstm',
25: 'WholePooling',
26: 'ConvolutionalLstm',
27: 'Recurrent',
28: 'SequenceDimReduction',
29: 'ConvLstmSplit',
30: 'TensorFullConnect',
31: 'MaxRecurrent',
32: 'ConvResultTransform',
33: 'Gate',
34: 'GateAlldim',
35: 'Product',
36: 'TopkPooling',
37: 'SoftmaxFunc',
38: 'SoftmaxFuncVarLen',
39: 'SumByAxis',
40: 'Gating',
41: 'Lr2softmax',
42: 'SwapAxis',
43: 'DynamicPooling',
44: 'DiagRecurrent',
45: 'Dup4lstm',
46: 'PosPredRep',
47: 'LstmAutoencoder',
48: 'NbpGenLstmInput',
49: 'PhraseAveRep',
1000: 'OneHot',
1001: 'MatchTensor',
1002: 'MatchWeightedDot',
1003: 'MatchTensorFact',
1004: 'MatchMulti',
1005: 'BatchCombine',
1006: 'Gru',
1007: 'BatchSelect',
1008: 'BatchSplit',
1009: 'BatchConcat',
1010: 'BatchDuplicate',
10001: 'kMaxDynamicPooling',
# Loss Layer 51-70
51: 'Softmax',
52: 'L2Loss',
53: 'MultiLogistic',
54: 'HingeLoss',
55: 'PairHingeLoss',
56: 'Accuracy',
57: 'CrossEntropyLoss',
58: 'NegativeSampleLoss',
59: 'WordClassSoftmaxLoss',
60: 'LstmAutoencoderSoftmaxLoss',
61: 'ListwiseMeasure',
# Input Layer 71-
71: 'TextData',
72: 'SequenceClassificationData',
73: 'NextBasketData',
74: 'NegativeSample',
75: 'WordClassInput',
76: 'LabelFeatValue',
77: 'LstmAutoencoderInput',
78: 'MatchPhraseRep',
79: 'PairTextData',
80: 'ListTextData',
81: 'QATextData',
}
def parse_args():
"""Parse input arguments
"""
parser = argparse.ArgumentParser(description='Draw a network graph')
parser.add_argument('input_net_json_file',
help='Input network json file')
parser.add_argument('output_image_file',
help='Output image file')
parser.add_argument('--details',
help='Need details of each layer.',
default='True')
parser.add_argument('--rankdir',
help=('One of TB (top-bottom, i.e., vertical), '
'RL (right-left, i.e., horizontal), or another'
'valid dot option; see'
'http://www.graphviz.org/doc/info/attrs.html#k:rankdir'
'(default: LR)'),
default='LR')
args = parser.parse_args()
return args
def get_layer_type_name(layer_name):
if type(layer_name) == int:
if layer_name in Layer2Name:
return Layer2Name[layer_name]
else:
return str(layer_name)
else:
return str(layer_name)
def determine_edge_label_by_layertype(layer, layertype):
"""Define edge label based on layer type
"""
edge_label = '""'
return edge_label
def determine_node_label_by_layertype(layer, layertype, rankdir, need_details = True):
"""Define node label based on layer type
"""
if rankdir in ('TB', 'BT'):
# If graph orientation is vertical, horizontal space is free and
# vertical space is not; separate words with spaces
separator = ' '
else:
# If graph orientation is horizontal, vertical space is free and
# horizontal space is not; separate words with newlines
separator = '\n'
node_label = '%s %s [%s] %s' % \
(layer["layer_name"],
separator,
get_layer_type_name(layer["layer_type"]),
separator)
if need_details:
if layer["setting"] == None:
layer["setting"] = {}
node_label += separator.join(['%s: %s' % p for p in layer['setting'].items()])
node_label = '"%s"' % node_label
return node_label
def choose_color_by_layertype(layertype):
"""Define colors for nodes based on the layer type
"""
color = '#6495ED' # Default
if layertype == 'Conv':
color = '#FF5050'
elif layertype == 'Embedding':
color = '#FF9900'
elif layertype == 'FullConnect':
color = '#CC33FF'
elif layertype == 'MaxPooling' or layertype == 'AvgPooling' or layertype == 'DynamicPooling':
color = '#66CC66'
elif layertype == 'Lstm' or layertype == 'Gru':
color = '#B5E61D'
return color
def get_pydot_graph(text_net, rankdir, label_edges=True, need_details=True):
pydot_graph = pydot.Dot(text_net["net_name"], graph_type='digraph', rankdir=rankdir)
pydot_nodes = {}
pydot_edges = []
for layer in text_net["layers"]:
name = layer["layer_name"]
# TODO - need convert to layer type name
layertype = get_layer_type_name(layer["layer_type"])
node_label = determine_node_label_by_layertype(layer, layertype, rankdir, need_details)
# set None to 0 list
if layer["bottom_nodes"] == None:
layer["bottom_nodes"] = []
if layer["top_nodes"] == None:
layer["top_nodes"] = []
if (len(layer["bottom_nodes"]) == 1 and len(layer["top_nodes"]) == 1 and
layer["bottom_nodes"][0] == layer["top_nodes"][0]):
# We have an in-place neuron layer.
pydot_nodes[name + '_' + layertype] = pydot.Node(
node_label, **NEURON_LAYER_STYLE)
else:
layer_style = LAYER_STYLE_DEFAULT
layer_style['fillcolor'] = choose_color_by_layertype(layertype)
pydot_nodes[name + '_' + layertype] = pydot.Node(
node_label, **layer_style)
for bottom_node in layer["bottom_nodes"]:
pydot_nodes[bottom_node + '_blob'] = pydot.Node(
'%s' % (bottom_node), **BLOB_STYLE)
edge_label = '""'
pydot_edges.append({'src': bottom_node + '_blob',
'dst': name + '_' + layertype,
'label': edge_label})
for top_node in layer["top_nodes"]:
            pydot_nodes[top_node + '_blob'] = pydot.Node(
                '%s' % (top_node), **BLOB_STYLE)
if label_edges:
edge_label = determine_edge_label_by_layertype(layer, layertype)
else:
edge_label = '""'
pydot_edges.append({'src': name + '_' + layertype,
'dst': top_node + '_blob',
'label': edge_label})
# Now, add the nodes and edges to the graph.
for node in pydot_nodes.values():
pydot_graph.add_node(node)
for edge in pydot_edges:
pydot_graph.add_edge(
pydot.Edge(pydot_nodes[edge['src']], pydot_nodes[edge['dst']],
label=edge['label']))
return pydot_graph
def draw_net(text_net, rankdir, ext='png', need_details=True):
return get_pydot_graph(text_net, rankdir, False, need_details).create(format=ext)
def draw_net_to_file(text_net, filename, rankdir='LR', need_details=True):
ext = filename[filename.rfind('.')+1:]
with open(filename, 'wb') as fid:
fid.write(draw_net(text_net, rankdir, ext, need_details))
if __name__ == "__main__":
args = parse_args()
json_file = open(args.input_net_json_file)
json_str = ''
# Remove json comment
comment_ptn = r"//.*"
for line in json_file:
line = re.sub(comment_ptn, '', line)
json_str += line
text_net = json.loads(json_str)
print('Drawing net to %s' % args.output_image_file)
need_details = True
if args.details.lower() == 'true':
need_details = True
elif args.details.lower() == 'false':
need_details = False
draw_net_to_file(text_net, args.output_image_file, args.rankdir, need_details)
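    # Example invocation (a sketch; the script and file names are hypothetical):
    #   python draw_textnet.py net.json net.png --rankdir TB --details False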
|
py | 7dfcbb509cc379effee22d570c35dc6f355f88f1 | # Copyright (c) 2021, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class JasperEncoderConfig:
    def __init__(self) -> None:
self.num_blocks = None
self.num_sub_blocks = None
self.preprocess_block = None
self.block = None
class Jasper10x5EncoderConfig(JasperEncoderConfig):
def __init__(self, num_blocks: int, num_sub_blocks: int) -> None:
        super(Jasper10x5EncoderConfig, self).__init__()
self.num_blocks = num_blocks
self.num_sub_blocks = num_sub_blocks
self.preprocess_block = {
'in_channels': 80,
'out_channels': 256,
'kernel_size': 11,
'stride': 2,
'dilation': 1,
'dropout_p': 0.2,
}
self.block = {
'in_channels': (256, 256, 256, 384, 384, 512, 512, 640, 640, 768),
'out_channels': (256, 256, 384, 384, 512, 512, 640, 640, 768, 768),
'kernel_size': (11, 11, 13, 13, 17, 17, 21, 21, 25, 25),
'dilation': [1] * 10,
'dropout_p': (0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.3, 0.3, 0.3, 0.3),
}
class Jasper5x3EncoderConfig(JasperEncoderConfig):
def __init__(self, num_blocks: int, num_sub_blocks: int) -> None:
        super(Jasper5x3EncoderConfig, self).__init__()
self.num_blocks = num_blocks
self.num_sub_blocks = num_sub_blocks
self.preprocess_block = {
'in_channels': 80,
'out_channels': 256,
'kernel_size': 11,
'stride': 2,
'dilation': 1,
'dropout_p': 0.2,
}
self.block = {
'in_channels': (256, 256, 384, 512, 640),
'out_channels': (256, 384, 512, 640, 768),
'kernel_size': (11, 13, 17, 21, 25),
'dilation': [1] * 5,
'dropout_p': (0.2, 0.2, 0.2, 0.3, 0.3),
}
class JasperDecoderConfig:
def __init__(self, num_classes: int) -> None:
super(JasperDecoderConfig, self).__init__()
self.num_classes = num_classes
self.block = {
'in_channels': (768, 896, 1024),
'out_channels': (896, 1024, num_classes),
'kernel_size': (29, 1, 1),
'dilation': (2, 1, 1),
'dropout_p': (0.4, 0.4, 0.0)
}
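# A usage sketch (argument values are illustrative; pick counts that match your model):
#   encoder_config = Jasper10x5EncoderConfig(num_blocks=10, num_sub_blocks=5)
#   decoder_config = JasperDecoderConfig(num_classes=10)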
from jasper.model import Jasper |
py | 7dfcbba664ae9128847ccb5e04f4c815307a1b35 | ##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
import torch, copy, random
import torch.utils.data as data
class SearchDataset(data.Dataset):
def __init__(self, name, data, train_split, valid_split, check=True):
self.datasetname = name
self.data = data
self.train_split = train_split.copy()
self.valid_split = valid_split.copy()
print(len(train_split),len(valid_split))
if check:
intersection = set(train_split).intersection(set(valid_split))
assert len(intersection) == 0, 'the splitted train and validation sets should have no intersection'
self.length = len(self.train_split)
def __repr__(self):
return ('{name}(name={datasetname}, train={tr_L}, valid={val_L})'.format(name=self.__class__.__name__, datasetname=self.datasetname, tr_L=len(self.train_split), val_L=len(self.valid_split)))
def __len__(self):
return self.length
def __getitem__(self, index):
assert index >= 0 and index < self.length, 'invalid index = {:}'.format(index)
train_index = self.train_split[index]
valid_index = random.choice( self.valid_split )
train_image, train_label = self.data[train_index]
valid_image, valid_label = self.data[valid_index]
return train_image, train_label, valid_image, valid_label
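# Usage sketch (the wrapped dataset and index lists are hypothetical):
#   search_data = SearchDataset('cifar10', train_data, train_idx, valid_idx)
#   loader = data.DataLoader(search_data, batch_size=64, shuffle=True)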
|
py | 7dfcbbc35d3bce78f3c739479b4013ee6f41ec16 | import logging
from flask_restful import Resource
from mtv import model
LOGGER = logging.getLogger(__name__)
def get_dataset(dataset_doc):
return {
'id': str(dataset_doc.id),
'insert_time': dataset_doc.insert_time.isoformat(),
'name': dataset_doc.name,
'entity_id': dataset_doc.entity_id
}
class Dataset(Resource):
def get(self, dataset_name):
"""
@api {get} /datasets/:dataset_name/ Get dataset by name
@apiName GetDataset
@apiGroup Dataset
@apiVersion 1.0.0
@apiParam {String} dataset_name Dataset name.
@apiSuccess {String} id Dataset ID.
@apiSuccess {String} insert_time Dataset creation time.
@apiSuccess {String} name Dataset name.
@apiSuccess {String} entity_id Dataset entity_id.
"""
document = model.Dataset.find_one(name=dataset_name)
if document is None:
LOGGER.exception('Error getting dataset. '
'Dataset %s does not exist.', dataset_name)
return {
'message': 'Dataset {} does not exist'.format(dataset_name)
}, 400
try:
res = get_dataset(document)
except Exception as e:
LOGGER.exception(e)
return {'message': str(e)}, 500
else:
return res
class Datasets(Resource):
def get(self):
"""
@api {get} /datasets/ Get datasets
@apiName GetDatasets
@apiGroup Dataset
@apiVersion 1.0.0
@apiSuccess {Object[]} datasets Dataset list.
@apiSuccess {String} datasets.id Dataset ID.
@apiSuccess {String} datasets.insert_time Dataset creation time.
@apiSuccess {String} datasets.name Dataset name.
@apiSuccess {String} datasets.entity_id Dataset entity_id.
"""
documents = model.Dataset.find()
try:
datasets = [get_dataset(document) for document in documents]
except Exception as e:
LOGGER.exception(e)
return {'message': str(e)}, 500
else:
return {'datasets': datasets}
|
py | 7dfcbc7767fb6f6729e2d9d4f43e95a29792fcda | from django.db import models
from django.contrib.auth.models import AbstractUser, Group
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db.models.signals import post_save
from django.dispatch import receiver
class Organization(models.Model):
code = models.CharField(max_length=10, unique=True)
displayName = models.CharField(max_length=200)
sn_name = models.CharField(max_length=200, null=True, blank=True)
tm_name = models.CharField(max_length=200, null=True, blank=True)
organization_type = models.CharField(max_length=50, null=True, blank=True)
logo = models.FileField(upload_to='organization/', null=True, blank=True)
created_date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return '%s' % (self.displayName)
class Division(models.Model):
code = models.CharField(max_length=100)
organization = models.ForeignKey(Organization, on_delete=models.DO_NOTHING)
division_type = models.CharField(max_length=50)
name = models.CharField(max_length=200)
sn_name = models.CharField(max_length=200, null=True, blank=True)
tm_name = models.CharField(max_length=200, null=True, blank=True)
is_default_division = models.BooleanField(default=False)
is_hq = models.BooleanField(default=False)
created_date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return '%s - %s: %s' % (self.organization, self.division_type, self.name)
class UserLevel(models.Model):
code = models.CharField(max_length=100)
displayName = models.CharField(max_length=100)
organization = models.ForeignKey(Organization, on_delete=models.DO_NOTHING)
parent = models.ForeignKey('UserLevel', on_delete=models.DO_NOTHING, null=True, blank=True)
role = models.ForeignKey(Group, on_delete=models.DO_NOTHING, null=True, blank=True)
created_date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return '%s: %s' % (self.organization, self.displayName)
# custom user model
class User(AbstractUser):
id = models.BigAutoField(primary_key=True)
class Profile(models.Model):
id = models.BigAutoField(primary_key=True)
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
organization = models.ForeignKey(Organization, on_delete=models.DO_NOTHING, null=True, blank=True)
division = models.ForeignKey(Division, on_delete=models.DO_NOTHING, null=True, blank=True)
level = models.ForeignKey(UserLevel, on_delete=models.DO_NOTHING, null=True, blank=True)
def __str__(self):
return '%s' % (self.user)
@receiver(post_save, sender=User)
def create_user_profile(sender, **kwargs):
user = kwargs['instance']
if hasattr(user, 'profile') and user.profile is not None:
return
profile = Profile()
profile.user = user
profile.save() |
py | 7dfcbd815fd05a5d1b34b9e53e2abda20546a018 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: a spider that crawls links
Desc :
"""
import logging
from coolscrapy.items import HuxiuItem
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
class HuxiuCrawlSpider(CrawlSpider):
name = "crawl"
allowed_domains = ["huxiu.com"]
start_urls = [
"http://www.huxiu.com/index.php"
]
rules = (
        # Extract links matching the regex '/group\?f=index_group' (but not 'deny.html')
        # and crawl them recursively (no callback is defined, so follow defaults to True).
        Rule(LinkExtractor(allow=('/group\?f=index_group', ), deny=('deny\.html', ))),
        # Extract links matching '/article/\d+/\d+.html' and parse the downloaded pages with parse_item; do not follow further.
Rule(LinkExtractor(allow=('/article/\d+/\d+\.html', )), callback='parse_item'),
)
def parse_item(self, response):
self.logger.info('Hi, this is an item page! %s', response.url)
detail = response.xpath('//div[@class="article-wrap"]')
item = HuxiuItem()
item['title'] = detail.xpath('h1/text()')[0].extract()
item['link'] = response.url
item['published'] = detail.xpath(
'div[@class="article-author"]/span[@class="article-time"]/text()')[0].extract()
        logging.info('%s %s %s', item['title'], item['link'], item['published'])
yield item
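# To run this spider (a sketch, assuming a standard Scrapy project layout):
#   scrapy crawl crawl -o huxiu_items.json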
|
py | 7dfcbdac529221e2e1a79ff067cce04556df6fc7 | from duckrun.process_result import ProcessResult
import sys
from io import StringIO
import contextlib
# source: https://stackoverflow.com/questions/3906232
@contextlib.contextmanager
def stdoutIO(stdout=None):
old = sys.stdout
if stdout is None:
stdout = StringIO()
sys.stdout = stdout
yield stdout
sys.stdout = old
class PyProcessor:
def execute_process(self, generic_process):
process_result = ProcessResult()
generic_process.process_result = process_result
with stdoutIO() as s:
try:
exec(generic_process.command_text)
process_result.set_end_time()
process_result.output = s.getvalue()
except Exception as e:
print("handled user python script exception", e)
return process_result
def __init__(self):
pass
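    # Usage sketch: `generic_process` is expected to expose `command_text` (a Python
    # snippet to exec) and a writable `process_result` attribute, as used above.
    # For a hypothetical GenericProcess instance:
    #   result = PyProcessor().execute_process(generic_process)
    #   print(result.output)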
|
py | 7dfcbe63c06761795a62e823fbb0edac10165987 | import re
from django import forms
from django.utils.translation import ugettext_lazy as _
from computes.models import Compute
class ComputeAddTcpForm(forms.Form):
name = forms.CharField(error_messages={'required': _('No hostname has been entered')},
max_length=20)
hostname = forms.CharField(error_messages={'required': _('No IP / Domain name has been entered')},
max_length=100)
login = forms.CharField(error_messages={'required': _('No login has been entered')},
max_length=100)
password = forms.CharField(error_messages={'required': _('No password has been entered')},
max_length=100)
def clean_name(self):
name = self.cleaned_data['name']
have_symbol = re.match('[^a-zA-Z0-9._-]+', name)
if have_symbol:
raise forms.ValidationError(_('The host name must not contain any special characters'))
elif len(name) > 20:
raise forms.ValidationError(_('The host name must not exceed 20 characters'))
try:
Compute.objects.get(name=name)
except Compute.DoesNotExist:
return name
raise forms.ValidationError(_('This host is already connected'))
def clean_hostname(self):
hostname = self.cleaned_data['hostname']
have_symbol = re.match('[^a-z0-9.-]+', hostname)
        wrong_ip = re.match(r'^0\.|^255\.', hostname)
if have_symbol:
raise forms.ValidationError(_('Hostname must contain only numbers, or the domain name separated by "."'))
elif wrong_ip:
raise forms.ValidationError(_('Wrong IP address'))
try:
Compute.objects.get(hostname=hostname)
except Compute.DoesNotExist:
return hostname
raise forms.ValidationError(_('This host is already connected'))
class ComputeAddSshForm(forms.Form):
name = forms.CharField(error_messages={'required': _('No hostname has been entered')},
max_length=20)
hostname = forms.CharField(error_messages={'required': _('No IP / Domain name has been entered')},
max_length=100)
login = forms.CharField(error_messages={'required': _('No login has been entered')},
max_length=20)
def clean_name(self):
name = self.cleaned_data['name']
have_symbol = re.match('[^a-zA-Z0-9._-]+', name)
if have_symbol:
raise forms.ValidationError(_('The name of the host must not contain any special characters'))
elif len(name) > 20:
raise forms.ValidationError(_('The name of the host must not exceed 20 characters'))
try:
Compute.objects.get(name=name)
except Compute.DoesNotExist:
return name
raise forms.ValidationError(_('This host is already connected'))
def clean_hostname(self):
hostname = self.cleaned_data['hostname']
have_symbol = re.match('[^a-zA-Z0-9._-]+', hostname)
        wrong_ip = re.match(r'^0\.|^255\.', hostname)
if have_symbol:
raise forms.ValidationError(_('Hostname must contain only numbers, or the domain name separated by "."'))
elif wrong_ip:
raise forms.ValidationError(_('Wrong IP address'))
try:
Compute.objects.get(hostname=hostname)
except Compute.DoesNotExist:
return hostname
raise forms.ValidationError(_('This host is already connected'))
class ComputeAddTlsForm(forms.Form):
name = forms.CharField(error_messages={'required': _('No hostname has been entered')},
max_length=20)
hostname = forms.CharField(error_messages={'required': _('No IP / Domain name has been entered')},
max_length=100)
login = forms.CharField(error_messages={'required': _('No login has been entered')},
max_length=100)
password = forms.CharField(error_messages={'required': _('No password has been entered')},
max_length=100)
def clean_name(self):
name = self.cleaned_data['name']
have_symbol = re.match('[^a-zA-Z0-9._-]+', name)
if have_symbol:
raise forms.ValidationError(_('The host name must not contain any special characters'))
elif len(name) > 20:
raise forms.ValidationError(_('The host name must not exceed 20 characters'))
try:
Compute.objects.get(name=name)
except Compute.DoesNotExist:
return name
raise forms.ValidationError(_('This host is already connected'))
def clean_hostname(self):
hostname = self.cleaned_data['hostname']
have_symbol = re.match('[^a-z0-9.-]+', hostname)
        wrong_ip = re.match(r'^0\.|^255\.', hostname)
if have_symbol:
raise forms.ValidationError(_('Hostname must contain only numbers, or the domain name separated by "."'))
elif wrong_ip:
raise forms.ValidationError(_('Wrong IP address'))
try:
Compute.objects.get(hostname=hostname)
except Compute.DoesNotExist:
return hostname
raise forms.ValidationError(_('This host is already connected'))
class ComputeEditHostForm(forms.Form):
host_id = forms.CharField()
name = forms.CharField(error_messages={'required': _('No hostname has been entered')},
max_length=20)
hostname = forms.CharField(error_messages={'required': _('No IP / Domain name has been entered')},
max_length=100)
login = forms.CharField(error_messages={'required': _('No login has been entered')},
max_length=100)
password = forms.CharField(max_length=100)
def clean_name(self):
name = self.cleaned_data['name']
have_symbol = re.match('[^a-zA-Z0-9._-]+', name)
if have_symbol:
raise forms.ValidationError(_('The name of the host must not contain any special characters'))
elif len(name) > 20:
raise forms.ValidationError(_('The name of the host must not exceed 20 characters'))
return name
def clean_hostname(self):
hostname = self.cleaned_data['hostname']
have_symbol = re.match('[^a-zA-Z0-9._-]+', hostname)
        wrong_ip = re.match(r'^0\.|^255\.', hostname)
        if have_symbol:
            raise forms.ValidationError(_('Hostname must be an IP address or a domain name containing only letters, numbers, ".", "_" and "-"'))
elif wrong_ip:
raise forms.ValidationError(_('Wrong IP address'))
return hostname
class ComputeAddSocketForm(forms.Form):
name = forms.CharField(error_messages={'required': _('No hostname has been entered')},
max_length=20)
    details = forms.CharField(error_messages={'required': _('No details have been entered')},
max_length=50)
def clean_name(self):
name = self.cleaned_data['name']
have_symbol = re.match('[^a-zA-Z0-9._-]+', name)
if have_symbol:
raise forms.ValidationError(_('The host name must not contain any special characters'))
elif len(name) > 20:
raise forms.ValidationError(_('The host name must not exceed 20 characters'))
try:
Compute.objects.get(name=name)
except Compute.DoesNotExist:
return name
raise forms.ValidationError(_('This host is already connected'))
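# Usage sketch (illustrative only; the field values below are made up):
#
#   form = ComputeAddSshForm(data={'name': 'node1',
#                                  'hostname': '192.168.1.10',
#                                  'login': 'admin'})
#   if form.is_valid():
#       ...  # proceed with form.cleaned_data
#   else:
#       ...  # form.errors carries the validation messages defined above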
|
py | 7dfcbf2fdcba38c8f9d1430415a95507defaa0be | # importing "collections" for namedtuple()
from collections import namedtuple
# Declaring namedtuple()
Student = namedtuple('Student',['name','age','DOB'])
# Adding values
S = Student('Nancy','19','2541997')
# using _fields to display all the keynames of namedtuple()
print ("All the fields of students are : ")
print (S._fields)
# using _replace() to return a copy with a changed field (namedtuples are immutable)
print ("The modified namedtuple is : ")
print(S._replace(name = 'Nick')) |
py | 7dfcbf70a030a2f66d262d99a6dda35e799e134c | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# vim: fenc=utf-8
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
#
"""
File name: math.py
Version: 0.1
Author: dhilipsiva <[email protected]>
Date created: 2015-06-09
"""
__author__ = "dhilipsiva"
__status__ = "development"
"""
"""
def add(num1, num2):
"""
    Return the sum of num1 and num2.
    """
    return num1 + num2
|
py | 7dfcc04956b3c176acddbdee39d03480975cb82d | from urllib.parse import urljoin
import threading
import webbrowser
import http.server
from typing import Tuple
ADDRESS : Tuple[str, int] = ('127.0.0.1', 8137)
def start_daemon(doc_root : str) -> None:
class Handler(http.server.SimpleHTTPRequestHandler):
def __init__(self, *args, **kwargs) -> None:
kwargs['directory'] = doc_root
http.server.SimpleHTTPRequestHandler.__init__(self, *args, **kwargs)
httpd = http.server.HTTPServer(ADDRESS, Handler)
thread = threading.Thread(target=httpd.serve_forever, daemon=True)
thread.start()
# TODO: try several ports in case the user runs several instances of Prest
def open_in_browser(rel_url : str) -> None:
host, port = ADDRESS
url = urljoin(f'http://{host}:{port}/', rel_url)
webbrowser.open(url, new=2, autoraise=True) # new=2 -> new tab
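# Usage sketch (illustrative path; assumes the documentation has already been built):
#
#   start_daemon('/path/to/built/docs')
#   open_in_browser('index.html')   # opens http://127.0.0.1:8137/index.html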
|
py | 7dfcc1cebf1626c735a0dcd277038b8dd55cc3ef | """Group (a.k.a Classroom) management."""
import base64
import csv
import io
import string
import api
from api import (
block_before_competition,
check_csrf,
PicoException,
rate_limit,
require_login,
require_teacher,
)
from bs4 import UnicodeDammit
from flask import jsonify
from flask_restplus import Namespace, Resource
from marshmallow import (
fields,
post_load,
pre_load,
RAISE,
Schema,
validate,
validates_schema,
ValidationError,
)
from .schemas import (
batch_registration_req,
group_invite_req,
group_modify_team_req,
group_patch_req,
group_req,
score_progressions_req,
scoreboard_page_req,
)
ns = Namespace("groups", description="Group management")
@ns.route("")
class GroupList(Resource):
"""Get the list of your groups, or create a new group."""
@require_login
@ns.response(200, "Success")
@ns.response(401, "Not logged in")
def get(self):
"""Get the groups of which you are a member."""
curr_tid = api.user.get_user()["tid"]
return jsonify(api.team.get_groups(curr_tid))
@check_csrf
@require_teacher
@rate_limit(limit=20, duration=10)
@ns.response(201, "Classroom added")
@ns.response(400, "Error parsing request")
@ns.response(401, "Not logged in")
@ns.response(
403,
"You do not have permission to create a classroom " + "or CSRF token invalid",
)
@ns.response(409, "You already have a classroom with that name")
@ns.response(429, "Too many requests, slow down!")
@ns.expect(group_req)
def post(self):
"""Create a new group."""
req = group_req.parse_args(strict=True)
req["name"] = req["name"].strip()
curr_user = api.user.get_user()
# Don't create group if teacher already has one with same name
if (
api.group.get_group(name=req["name"], owner_tid=curr_user["tid"])
is not None
):
raise PicoException("You already have a classroom with that name", 409)
if not all(
[
c in string.digits + string.ascii_lowercase + " ()-,#'&"
for c in req["name"].lower()
]
):
raise PicoException(
"Classroom names cannot contain special characters other "
+ "than ()-,#'&",
status_code=400,
)
# Make sure this teacher hasn't already created the max no. of groups
db = api.db.get_conn()
created_group_count = db.groups.count_documents({"owner": curr_user["tid"]})
settings = api.config.get_settings()
if created_group_count >= settings["group_limit"] and not curr_user.get(
"admin", False
):
raise PicoException(
"You have created the maximum number of classrooms. "
+ "Please contact an administrator for assistance.",
status_code=403,
)
gid = api.group.create_group(curr_user["tid"], req["name"])
res = jsonify({"success": True, "gid": gid})
res.status_code = 201
return res
@ns.response(200, "Success")
@ns.response(401, "Not logged in")
@ns.response(403, "Permission denied")
@ns.response(404, "Classroom not found")
@ns.route("/<string:group_id>")
class Group(Resource):
"""Get a specific group."""
def get(self, group_id):
"""Get a specific group."""
group = api.group.get_group(gid=group_id)
if not group:
raise PicoException("Classroom not found", 404)
group_members = [group["owner"]] + group["members"] + group["teachers"]
group_teachers = [group["owner"]] + group["teachers"]
if not api.user.is_logged_in():
# Return group name and settings even if not a member.
# Used for group invite links.
return jsonify({"name": group["name"], "settings": group["settings"]})
curr_user = api.user.get_user()
if curr_user["tid"] not in group_members and not curr_user["admin"]:
return jsonify({"name": group["name"], "settings": group["settings"]})
# Replace the team ids with full team objects if teacher, else remove
if curr_user["tid"] in group_teachers:
full_teachers = []
for tid in group["teachers"]:
full_teachers.append(api.team.get_team_information(tid))
group["teachers"] = full_teachers
full_members = []
for tid in group["members"]:
full_members.append(api.team.get_team_information(tid))
group["members"] = full_members
else:
group.pop("teachers")
group.pop("members")
return jsonify(group)
@ns.response(400, "Error parsing request")
@ns.response(403, "CSRF token incorrect")
@ns.response(422, "Cannot make a previously hidden classroom public")
@ns.expect(group_patch_req)
def patch(self, group_id):
"""Modify a group's settings (other fields are not available)."""
req = group_patch_req.parse_args(strict=True)
group = api.group.get_group(gid=group_id)
if not group:
raise PicoException("Classroom not found", 404)
curr_user = api.user.get_user()
if (
curr_user["tid"] not in ([group["owner"]] + group["teachers"])
and not curr_user["admin"]
):
raise PicoException(
"You do not have permission to modify this classroom.", 403
)
api.group.change_group_settings(group_id, req["settings"])
return jsonify({"success": True})
@check_csrf
def delete(self, group_id):
"""Delete a group. Must be the owner of the group."""
group = api.group.get_group(gid=group_id)
if not group:
raise PicoException("Classroom not found", 404)
curr_user = api.user.get_user()
if curr_user["tid"] != group["owner"] and not curr_user["admin"]:
raise PicoException(
"You do not have permission to delete this classroom.", 403
)
tids_in_group = set()
tids_in_group.update(group["members"])
tids_in_group.update(group["teachers"])
tids_in_group.add(group["owner"])
for tid in tids_in_group:
api.cache.invalidate(api.team.get_groups, tid)
api.group.delete_group(group_id)
return jsonify({"success": True})
@ns.response(200, "Success")
@ns.response(401, "Not logged in")
@ns.response(403, "Permission denied or CSRF token invalid")
@ns.response(404, "Classroom not found")
@ns.response(422, "Specified team is not a member of the classroom")
@ns.route("/<string:group_id>/elevate_team")
class ElevateTeamResponse(Resource):
    """Elevate a team to the teacher role within a group."""
@check_csrf
@require_login
@ns.expect(group_modify_team_req)
def post(self, group_id):
"""
Elevate a specified team within a group to the teacher role.
Requires teacher role within the group.
"""
req = group_modify_team_req.parse_args(strict=True)
group = api.group.get_group(group_id)
if not group:
raise PicoException("Classroom not found", 404)
group_teachers = [group["owner"]] + group["teachers"]
eligible_for_elevation = group["members"]
curr_tid = api.user.get_user()["tid"]
# Ensure the current user has a teacher role within the group
if curr_tid not in group_teachers:
            raise PicoException(
                "You must be a teacher in this classroom to elevate a team.",
status_code=403,
)
# Ensure the specified tid is eligible for elevation
if req["team_id"] not in eligible_for_elevation:
raise PicoException(
"Team is not eligible for elevation to teacher role", status_code=422
)
api.group.elevate_team(group_id, req["team_id"])
return jsonify({"success": True})
@ns.response(200, "Success")
@ns.response(401, "Not logged in")
@ns.response(403, "Permission denied or CSRF token invalid")
@ns.response(404, "Classroom not found")
@ns.response(422, "Specified team is not a member of the classroom")
@ns.route("/<string:group_id>/remove_team")
class RemoveTeamResponse(Resource):
"""
Remove a team from a group.
If the specified team is not your own, requires teacher role within
the group.
"""
@check_csrf
@require_login
def get(self, group_id):
"""Remove your own team from this group."""
group = api.group.get_group(group_id)
if not group:
raise PicoException("Classroom not found", 404)
eligible_for_removal = group["members"] + group["teachers"]
curr_tid = api.user.get_user()["tid"]
if curr_tid not in eligible_for_removal:
raise PicoException(
"Team is not eligible for removal from this classroom", status_code=422
)
api.group.leave_group(group_id, curr_tid)
return jsonify({"success": True})
@check_csrf
@require_login
@ns.expect(group_modify_team_req)
def post(self, group_id):
"""
Remove a specified team from a group.
Requires teacher role within the group.
"""
req = group_modify_team_req.parse_args(strict=True)
group = api.group.get_group(group_id)
if not group:
raise PicoException("Classroom not found", 404)
group_teachers = [group["owner"]] + group["teachers"]
eligible_for_removal = group["members"] + group["teachers"]
curr_tid = api.user.get_user()["tid"]
# Ensure the user has a teacher role within the group
if curr_tid not in group_teachers:
raise PicoException(
"You must be a teacher in this classroom to remove a team.",
status_code=403,
)
# Ensure the specified tid is a member of the group
if req["team_id"] not in eligible_for_removal:
raise PicoException(
"Team is not eligible for removal from this classroom", status_code=422
)
api.group.leave_group(group_id, req["team_id"])
return jsonify({"success": True})
@ns.response(200, "Success")
@ns.response(401, "Not logged in")
@ns.response(403, "Permission denied")
@ns.response(404, "Classroom not found")
@ns.route("/<string:group_id>/invite")
class InviteResponse(Resource):
"""Send an email invite to join this group."""
@rate_limit(limit=1, duration=30)
@ns.response(429, "Too many requests, slow down!")
@ns.expect(group_invite_req)
def post(self, group_id):
"""Send an email invite to join this group."""
req = group_invite_req.parse_args(strict=True)
group = api.group.get_group(gid=group_id)
if not group:
raise PicoException("Classroom not found", 404)
curr_user = api.user.get_user()
if (
curr_user["tid"] not in (group["teachers"] + [group["owner"]])
and not curr_user["admin"]
):
raise PicoException(
"You do not have permission to invite members to " + "this classroom.",
status_code=403,
)
api.email.send_email_invite(group_id, req["email"], req["as_teacher"])
return jsonify({"success": True})
@ns.route("/<string:group_id>/batch_registration")
class BatchRegistrationResponse(Resource):
"""
Register multiple student accounts and assign them to this group.
Demographics for the registered accounts are provided via CSV upload.
"""
@rate_limit(limit=1, duration=30)
@ns.response(200, "Success")
@ns.response(400, "Error parsing CSV")
@ns.response(401, "Not logged in")
@ns.response(403, "Permission denied")
@ns.response(404, "Classroom not found")
@ns.response(429, "Too many requests, slow down!")
@ns.expect(batch_registration_req)
def post(self, group_id):
"""Automatically registers several student accounts based on a CSV."""
group = api.group.get_group(gid=group_id)
if not group:
raise PicoException("Classroom not found", 404)
curr_user = api.user.get_user()
if (
curr_user["tid"] not in (group["teachers"] + [group["owner"]])
and not curr_user["admin"]
):
raise PicoException(
"You do not have permission to batch-register students into "
+ "this classroom.",
status_code=403,
)
# Load in student demographics from CSV
req = batch_registration_req.parse_args(strict=True)
students = []
unicoded_csv = UnicodeDammit(req["csv"].read()) # Forcibly unicodify
csv_reader = csv.DictReader(unicoded_csv.unicode_markup.split("\n"))
try:
for row in csv_reader:
row = {k: v.strip() for k, v in row.items()} # Trim whitespace
students.append(row)
except csv.Error as e:
raise PicoException(
f"Error reading CSV at line {csv_reader.line_num}: {e}", status_code=400
)
# Check whether registering these students would exceed maximum
# batch registrations per teacher account
config = api.config.get_settings()
teacher_metadata = api.token.find_key({"uid": api.user.get_user()["uid"]})
if not teacher_metadata:
existing_batch_count = 0
else:
existing_batch_count = teacher_metadata.get("tokens", {}).get(
"batch_registered_students", 0
)
potential_batch_count = existing_batch_count + len(students)
if potential_batch_count > config["max_batch_registrations"]:
raise PicoException(
"You have exceeded the maximum number of batch-registered "
+ "student accounts. Please contact an administrator.",
403,
)
# Validate demographics
def validate_current_year(s):
try:
n = int(s)
if not (1 <= n <= 12):
raise ValueError
except ValueError:
raise ValidationError(f"Grade must be between 1 and 12 (provided {s})")
class BatchRegistrationUserSchema(Schema):
# Convert empty strings to Nones when doing validation
# to allow optional parent_email value for age 18+,
# but back to '' before storing in database.
@pre_load
def empty_to_none(self, in_data, **kwargs):
for k, v in in_data.items():
if v == "":
in_data[k] = None
return in_data
@post_load
def none_to_empty(self, in_data, **kwargs):
for k, v in in_data.items():
if v is None:
in_data[k] = ""
return in_data
current_year = fields.Str(
data_key="Grade (1-12)", required=True, validate=validate_current_year
)
age = fields.Str(
data_key="Age (13-17 or 18+)",
required=True,
validate=validate.OneOf(choices=["13-17", "18+"]),
)
gender = fields.Str(
data_key="Gender",
required=False,
allow_none=True,
validate=validate.OneOf(
["male", "female", "nb/gf", "nl/no"],
[
"Male",
"Female",
"Non-Binary/Gender-Fluid",
"Not listed/Prefer not to answer",
],
error="If specified, must be one of {labels}. Please use "
"the corresponding code from: {choices}.",
),
)
parent_email = fields.Email(
data_key="Parent Email (if under 18)", required=True, allow_none=True
)
@validates_schema
def validate_parent_email(self, data, **kwargs):
if data["age"] == "13-17" and data["parent_email"] is None:
raise ValidationError(
"Parent email must be specified for students under 18"
)
try:
students = BatchRegistrationUserSchema().load(
students, many=True, unknown=RAISE
)
except ValidationError as err:
raise PicoException(err.messages, status_code=400)
# Batch-register accounts
curr_teacher = api.user.get_user()
created_accounts = api.group.batch_register(students, curr_teacher, group_id)
if len(created_accounts) != len(students):
raise PicoException(
"An error occurred while adding student accounts. "
+ f"The first {len(created_accounts)} were created. "
+ "Please contact an administrator."
)
output = []
for i in range(len(students)):
output.append(
{
"Grade (1-12)": students[i]["current_year"],
"Age (13-17 or 18+)": students[i]["age"],
"Gender": students[i]["gender"],
"Parent Email (if under 18)": students[i]["parent_email"],
"Username": created_accounts[i]["username"],
"Password": created_accounts[i]["password"],
}
)
buffer = io.StringIO()
csv_writer = csv.DictWriter(
buffer,
[
"Grade (1-12)",
"Age (13-17 or 18+)",
"Gender",
"Parent Email (if under 18)",
"Username",
"Password",
],
)
csv_writer.writeheader()
csv_writer.writerows(output)
output_csv_bytes = buffer.getvalue().encode("utf-8")
return jsonify(
{
"success": True,
"accounts": created_accounts,
"as_csv": base64.b64encode(output_csv_bytes).decode("utf-8"),
}
)
@ns.route("/<string:group_id>/scoreboard")
class ScoreboardPage(Resource):
"""
Get a scoreboard page for a group.
If a page is not specified, will attempt to return the page containing the
    current team, falling back to the first page if necessary.
"""
@block_before_competition
@ns.response(200, "Success")
@ns.response(403, "Permission denied")
@ns.response(404, "Classroom not found")
@ns.response(422, "Competition has not started")
@ns.expect(scoreboard_page_req)
def get(self, group_id):
"""Retrieve a scoreboard page for a group."""
group = api.group.get_group(gid=group_id)
if not group:
raise PicoException("Classroom not found", 404)
group_members = [group["owner"]] + group["members"] + group["teachers"]
curr_user = api.user.get_user()
if not curr_user or (
curr_user["tid"] not in group_members and not curr_user["admin"]
):
raise PicoException(
"You do not have permission to " + "view this classroom's scoreboard.",
403,
)
req = scoreboard_page_req.parse_args(strict=True)
if req["search"] is not None:
page = api.stats.get_filtered_scoreboard_page(
{"group_id": group_id}, req["search"], req["page"] or 1
)
else:
page = api.stats.get_scoreboard_page({"group_id": group_id}, req["page"])
return jsonify(
{"scoreboard": page[0], "current_page": page[1], "total_pages": page[2]}
)
@ns.route("/<string:group_id>/score_progressions")
class ScoreProgressionsResult(Resource):
"""Get a list of score progressions for the top n teams in a group."""
@block_before_competition
@ns.response(200, "Success")
@ns.response(403, "Permission denied")
@ns.response(404, "Classroom not found")
@ns.response(422, "Competition has not started")
@ns.expect(score_progressions_req)
def get(self, group_id):
"""Get a list of teams' score progressions."""
req = score_progressions_req.parse_args(strict=True)
group = api.group.get_group(gid=group_id)
if not group:
raise PicoException("Classroom not found", 404)
group_members = [group["owner"]] + group["members"] + group["teachers"]
if not api.user.is_logged_in() or (
api.user.get_user()["tid"] not in group_members
and not api.user.get_user()["admin"]
):
raise PicoException(
"You do not have permission to view this "
+ "classroom's score progressions.",
403,
)
if req["limit"] and (
not api.user.is_logged_in() or not api.user.get_user()["admin"]
):
raise PicoException("Must be admin to specify limit", 403)
return jsonify(
api.stats.get_top_teams_score_progressions(
limit=(req["limit"] or 5), group_id=group_id
)
)
|
py | 7dfcc20494bd9604f1b591f4c4fbf34ca60a013b | import sys
import os
import platform
import fnmatch
import setuptools
with open("README.md", "r", encoding='utf-8') as fh:
long_description = fh.read()
# Replace relative image path with github-hosted one
long_description = long_description.replace(
"docs/img/overview.svg",
"https://raw.githubusercontent.com/SystemRDL/systemrdl-compiler/master/docs/img/overview.svg?sanitize=true"
)
with open(os.path.join("systemrdl", "__about__.py"), encoding='utf-8') as f:
v_dict = {}
exec(f.read(), v_dict)
rdl_version = v_dict['__version__']
target = platform.system().lower()
PLATFORMS = {'windows', 'linux', 'darwin', 'cygwin'}
for known in PLATFORMS:
if target.startswith(known):
target = known
def run_setup(with_binary):
if with_binary:
extra_compile_args = {
'windows': ['/DANTLR4CPP_STATIC', '/Zc:__cplusplus'],
'linux': ['-std=c++11'],
'darwin': ['-std=c++11'],
'cygwin': ['-std=c++11'],
}
# Define an Extension object that describes the Antlr accelerator
parser_ext = setuptools.Extension(
name='systemrdl.parser.sa_systemrdl_cpp_parser',
# Add the Antlr runtime source directory to the include search path
include_dirs=["systemrdl/parser/ext/antlr4-cpp-runtime"],
# Rather than listing each C++ file (Antlr has a lot!), discover them automatically
sources=get_files("systemrdl/parser/ext", "*.cpp"),
depends=get_files("systemrdl/parser/ext", "*.h"),
extra_compile_args=extra_compile_args.get(target, [])
)
ext_modules = [parser_ext]
else:
ext_modules = []
setuptools.setup(
name="systemrdl-compiler",
version=rdl_version,
author="Alex Mykyta",
author_email="[email protected]",
description="Parse and elaborate front-end for SystemRDL 2.0",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/SystemRDL/systemrdl-compiler",
packages=setuptools.find_packages(exclude=["test"]),
include_package_data=True,
ext_modules=ext_modules,
cmdclass={"build_ext": ve_build_ext},
python_requires='>=3.5.2',
install_requires=[
"antlr4-python3-runtime >= 4.9, < 4.10",
"colorama",
"markdown",
],
classifiers=(
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3 :: Only",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
"Topic :: Software Development :: Compilers",
"Topic :: Software Development :: Code Generators",
),
project_urls={
"Documentation": "http://systemrdl-compiler.readthedocs.io",
"Source": "https://github.com/SystemRDL/systemrdl-compiler",
"Tracker": "https://github.com/SystemRDL/systemrdl-compiler/issues",
},
)
#===============================================================================
from setuptools.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError
def get_files(path, pattern):
"""
Recursive file search that is compatible with python3.4 and older
"""
matches = []
for root, _, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
return matches
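# Illustrative call (the same pattern run_setup uses above):
#   get_files("systemrdl/parser/ext", "*.cpp")  # -> list of .cpp paths under that directory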
class BuildFailed(Exception):
pass
class ve_build_ext(build_ext):
"""
This class extends setuptools to fail with a common BuildFailed exception
if a build fails
"""
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError:
raise BuildFailed()
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsExecError, DistutilsPlatformError):
raise BuildFailed()
except ValueError:
# this can happen on Windows 64 bit, see Python issue 7511
if "'path'" in str(sys.exc_info()[1]): # works with Python 2 and 3
raise BuildFailed()
raise
# Detect if an alternate interpreter is being used
is_jython = "java" in sys.platform
is_pypy = hasattr(sys, "pypy_version_info")
# Force using fallback if using an alternate interpreter
using_fallback = is_jython or is_pypy
if not using_fallback:
try:
run_setup(with_binary=True)
except BuildFailed:
if 'SYSTEMRDL_REQUIRE_BINARY_BUILD' in os.environ:
# Force failure if binary build is required
raise
else:
using_fallback = True
if using_fallback:
run_setup(with_binary=False)
|
py | 7dfcc25becdc58a7a3ab5587d7999c095c2f0644 | #!/usr/bin/env python2
#
#
from gppylib.testold.testDriver import TestDriver
from gppylib.programs.clsRecoverSegment import *
from gppylib.operations.buildMirrorSegments import *
from gppylib.mainUtils import *
from gppylib.testold.testUtils import *
from gppylib.system import fileSystemImplTest, fileSystemInterface
from gppylib.gplog import get_default_logger
programName = sys.argv[0]
parserFn = GpRecoverSegmentProgram.createParser
commandFn = GpRecoverSegmentProgram.createProgram
driver = TestDriver()
logger = get_default_logger()
###############
#
# Now the tests:
#
####################################
#
#
def testPortAssigner(args):
driver.initTwoSegmentOneFailedMirrorConfiguration()
confProvider = configInterface.getConfigurationProvider().initializeProvider(5432)
gpArray = confProvider.loadSystemConfig(useUtilityMode=False)
masterSeg = [seg for seg in gpArray.getDbList() if not seg.isSegmentQE()][0]
masterSeg.setSegmentPort(35001) # to make sure master is avoided when assigning ports
masterSeg.setSegmentHostName("first-host") # to make sure master is avoided when assigning ports
portAssigner = PortAssigner(gpArray)
testOutput("")
for (host, replPortOrRegular) in args:
testOutput( portAssigner.findAndReservePort(replPortOrRegular, host, host))
testOutput("")
simple_test("testPortAssigner_0", testPortAssigner, [
("first-host", True),
("first-host", True),
("first-host", True),
("first-host", False),
("first-host", True),
("first-host", False),
],
"""
35002
35003
35004
30001
35005
30002
"""
)
simple_test("testPortAssigner_0", testPortAssigner, [
("first-host", True),
("second-host", True),
("third-host", True),
("fourth-host", True),
("fifth-host", True),
("sixth-host", True),
("fourth-host", True),
("fifth-host", True),
("sixth-host", True),
],
"""
35002
35003
35001
35001
35001
35001
35002
35002
35002
"""
)
#
# Test that recovering from a config file that tells us to recover something that can't be recovered fails
#
def testRecoverFromConfigFileChecksWhetherRecoveryIsPossible(args):
driver.initTwoSegmentOneFailedMirrorConfiguration()
confProvider = configInterface.getConfigurationProvider().initializeProvider(5432)
gpArray = confProvider.loadSystemConfig(useUtilityMode=False)
seg = recoverFrom = None
if args[0] == 0:
seg = [seg for seg in gpArray.getSegDbList() if seg.isSegmentUp()][0]
elif args[0] == 1:
seg = [seg for seg in gpArray.getSegDbList() if seg.isSegmentPrimary(current_role=True)][0]
seg.setSegmentStatus(gparray.STATUS_DOWN)
elif args[0] == 2:
seg = [seg for seg in gpArray.getDbList() if not seg.isSegmentQE()][0]
recoverFrom = seg # dummy just to get further along
if recoverFrom is None:
recoverFrom = gpArray.getDbIdToPeerMap()[seg.getSegmentDbId()]
try:
toBuild = [GpMirrorToBuild(seg, recoverFrom, None, False)]
mirrorBuilder = GpMirrorListToBuild(toBuild, pool=None, quiet=False, parallelDegree=1)
except Exception, e:
testOutput("Validation Error: %s" % e)
simple_test("testRecoverFromConfigFileChecksWhetherRecoveryIsPossible_0", testRecoverFromConfigFileChecksWhetherRecoveryIsPossible, [0],
"""Validation Error: Segment to recover from for content 0 is not a primary"""
)
simple_test("testRecoverFromConfigFileChecksWhetherRecoveryIsPossible_1", testRecoverFromConfigFileChecksWhetherRecoveryIsPossible, [1],
"""Validation Error: Segment to recover from for content 0 is not a primary"""
)
simple_test("testRecoverFromConfigFileChecksWhetherRecoveryIsPossible_2", testRecoverFromConfigFileChecksWhetherRecoveryIsPossible, [2],
"""Validation Error: Segment to recover from for content -1 is not a correct segment (it is a master or standby master)"""
)
def testRecoveryWarningsWhenRecoveringToSameHost(args):
"""
    Test the function that warns when a segment is recovered onto the same host as its primary
"""
driver.initTwoSegmentOneFailedMirrorConfiguration()
confProvider = configInterface.getConfigurationProvider().initializeProvider(5432)
gpArray = confProvider.loadSystemConfig(useUtilityMode=False)
toBuild = []
if args[0] == 0:
pass
elif args[0] == 1:
seg = [seg for seg in gpArray.getSegDbList() if not seg.isSegmentUp()][0]
recoverFrom = gpArray.getDbIdToPeerMap()[seg.getSegmentDbId()]
failoverTarget = seg.copy()
failoverTarget.setSegmentAddress("different-address")
failoverTarget.setSegmentHostName(recoverFrom.getSegmentHostName())
toBuild.append(GpMirrorToBuild(seg, recoverFrom, failoverTarget, False))
else: raise Exception("invalid test option")
mirrorBuilder = GpMirrorListToBuild(toBuild, pool=None, quiet=False, parallelDegree=1)
program = GpRecoverSegmentProgram({})
warnings = program._getRecoveryWarnings(mirrorBuilder)
if warnings:
for w in warnings:
testOutput(w)
else: testOutput("No warnings")
simple_test("testRecoveryWarningsWhenRecoveringToSameHost_0", testRecoveryWarningsWhenRecoveringToSameHost, [0],
"""No warnings"""
)
simple_test("testRecoveryWarningsWhenRecoveringToSameHost_1", testRecoveryWarningsWhenRecoveringToSameHost, [1],
"""Segment is being recovered to the same host as its primary: primary second-host:/second/datadirpathdbfast2/gp1 failover target: different-address:/datadirpathdbfast4/gp1"""
)
# All done tests
printTestResults()
|
py | 7dfcc32592c586db28cca4257fb83e127e6442bd | # Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MeltingPotEnv as a MultiAgentEnv wrapper to interface with RLLib."""
import dm_env
import dmlab2d
from gym import spaces
import numpy as np
from ray.rllib.env import multi_agent_env
import tree
PLAYER_STR_FORMAT = "player_{index}"
def _timestep_to_observations(timestep: dm_env.TimeStep):
gym_observations = {}
for index, observation in enumerate(timestep.observation):
gym_observations[PLAYER_STR_FORMAT.format(index=index)] = {
key: value for key, value in observation.items() if "WORLD" not in key
}
return gym_observations
def _remove_world_observations_from_space(observation: spaces.Dict) -> spaces.Dict:
return spaces.Dict(
{key: observation[key] for key in observation if "WORLD" not in key}
)
def _spec_to_space(spec: tree.Structure[dm_env.specs.Array]) -> spaces.Space:
"""Converts a dm_env nested structure of specs to a Gym Space.
BoundedArray is converted to Box Gym spaces. DiscreteArray is converted to
Discrete Gym spaces. Using Tuple and Dict spaces recursively as needed.
Args:
spec: The nested structure of specs
Returns:
The Gym space corresponding to the given spec.
"""
if isinstance(spec, dm_env.specs.DiscreteArray):
return spaces.Discrete(spec.num_values)
elif isinstance(spec, dm_env.specs.BoundedArray):
return spaces.Box(spec.minimum, spec.maximum, spec.shape, spec.dtype)
elif isinstance(spec, dm_env.specs.Array):
if np.issubdtype(spec.dtype, np.floating):
info = np.finfo(spec.dtype)
else:
info = np.iinfo(spec.dtype)
return spaces.Box(info.min, info.max, spec.shape, spec.dtype)
elif isinstance(spec, (list, tuple)):
return spaces.Tuple([_spec_to_space(s) for s in spec])
elif isinstance(spec, dict):
return spaces.Dict({key: _spec_to_space(s) for key, s in spec.items()})
else:
raise ValueError("Unexpected spec: {}".format(spec))
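# Illustrative conversions (values are arbitrary examples):
#
#   _spec_to_space(dm_env.specs.DiscreteArray(num_values=7))
#   # -> spaces.Discrete(7)
#   _spec_to_space(dm_env.specs.BoundedArray(shape=(3,), dtype=np.float32,
#                                            minimum=0.0, maximum=1.0))
#   # -> spaces.Box(0.0, 1.0, (3,), np.float32)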
class MeltingPotEnv(multi_agent_env.MultiAgentEnv):
"""An adapter between the Melting Pot substrates and RLLib MultiAgentEnv."""
def __init__(self, env: dmlab2d.Environment):
self._env = env
self._num_players = len(self._env.observation_spec())
def reset(self):
"""See base class."""
timestep = self._env.reset()
return _timestep_to_observations(timestep)
def step(self, action):
"""See base class."""
actions = [
action[PLAYER_STR_FORMAT.format(index=index)]
for index in range(self._num_players)
]
timestep = self._env.step(actions)
rewards = {
PLAYER_STR_FORMAT.format(index=index): timestep.reward[index]
for index in range(self._num_players)
}
done = {"__all__": True if timestep.last() else False}
info = {}
observations = _timestep_to_observations(timestep)
return observations, rewards, done, info
def close(self):
"""See base class."""
self._env.close()
def single_player_observation_space(self) -> spaces.Space:
"""The observation space for a single player in this environment."""
return _remove_world_observations_from_space(
_spec_to_space(self._env.observation_spec()[0])
)
def single_player_action_space(self):
"""The action space for a single player in this environment."""
return _spec_to_space(self._env.action_spec()[0])
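# Usage sketch (illustrative; `build_substrate()` is a stand-in for however the
# dmlab2d environment is constructed elsewhere, not part of this module):
#
#   env = MeltingPotEnv(build_substrate())
#   observations = env.reset()
#   actions = {player: 0 for player in observations}
#   observations, rewards, done, info = env.step(actions)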
|
py | 7dfcc338b7e591d0a3d24d7340d2c9417530f983 | import getopt
import logging
import os
import sys
import time
from app.hello import __version__
from app.hello.hello_cfg import HelloConfig
from base.data_source_config import DataSourceConfig
from base.log_handle import LogHandle
from component.table_user import TableUser
from dao.dao_user import DaoUser
def parse_args():
if len(sys.argv) == 2 and \
(sys.argv[1] == "--version" or sys.argv[1] == "-v"):
print("{}".format(__version__.__version__))
sys.exit(0)
opts, args = getopt.getopt(sys.argv[1:], "f:", ["config"])
config_file = None
for opt, arg in opts:
if opt in ("-f", "--config"):
config_file = arg
if config_file is None:
print("usage: {} -f config_file".format(sys.argv[0]))
sys.exit(1)
return config_file
def new_users(source, batch):
"""
    Insert test users
:param source:
:param batch:
:return:
"""
users = [
DaoUser(name="foo0", user_type=1, status=1),
DaoUser(name="foo1", user_type=1, status=1),
DaoUser(name="foo2", user_type=1, status=1),
DaoUser(name="foo3", user_type=1, status=1),
DaoUser(name="foo4", user_type=1, status=1),
DaoUser(name="foo5", user_type=1, status=1),
DaoUser(name="foo6", user_type=1, status=1),
DaoUser(name="foo7", user_type=1, status=1),
DaoUser(name="foo8", user_type=1, status=1),
DaoUser(name="foo9", user_type=1, status=1),
]
table_user = TableUser(table_name="t_user")
table_user.insert(source=source, daos=users, batch_cnt=batch)
def sync(source_from, source_to):
"""
    Synchronize the two tables
:param source_from:
:param source_to:
:return:
"""
table_user = TableUser(table_name="t_user")
daos = table_user.fetch_all(source=source_from)
backup_table_user = TableUser(table_name="t_backup_user")
backup_table_user.insert(source=source_to, daos=daos, batch_cnt=cfg.options_batch)
if __name__ == "__main__":
    # Get the path of the config file
config_filepath = parse_args()
if not os.path.exists(config_filepath):
print("error - failed find config file: {}".format(config_filepath))
sys.exit(1)
    # Read the config file
cfg = HelloConfig()
cfg.load(filepath=config_filepath)
    # Initialize logging
log_filepath = os.path.join(cfg.log_path, "hello.log")
LogHandle.init_log(log_filepath, console_level=cfg.log_console_level, file_level=cfg.log_file_level)
logging.info("-----------------------------")
logging.info("start hello example")
    # Load the data source config
datasource_config = DataSourceConfig()
datasource_config.load(filepath=cfg.data_source_cfg)
    # Data source names
source_product = cfg.db_map["product"]
source_backup = cfg.db_map["backup"]
db_product = datasource_config.get(source=source_product)
if db_product is None:
        logging.error("failed to get product data source: name='product', source='{}'".format(source_product))
db_backup = datasource_config.get(source=source_backup)
if db_backup is None:
        logging.error("failed to get backup data source: name='backup', source='{}'".format(source_backup))
    # Insert users into the product source
new_users(source=source_product, batch=cfg.options_batch)
    # Read all users from product and insert them into backup
sync(source_from=source_product, source_to=source_backup)
|
py | 7dfcc3466cefc70d8504925701716529fb0e807a | """Base classes for Builders."""
import logging
import os
import shutil
from functools import wraps
from readthedocs.projects.models import Feature
log = logging.getLogger(__name__)
def restoring_chdir(fn):
# XXX:dc: This would be better off in a neutral module
@wraps(fn)
def decorator(*args, **kw):
try:
path = os.getcwd()
return fn(*args, **kw)
finally:
os.chdir(path)
return decorator
class BaseBuilder:
"""
The Base for all Builders. Defines the API for subclasses.
Expects subclasses to define ``old_artifact_path``, which points at the
directory where artifacts should be copied from.
"""
_force = False
ignore_patterns = []
old_artifact_path = None
def __init__(self, build_env, python_env, force=False):
self.build_env = build_env
self.python_env = python_env
self.version = build_env.version
self.project = build_env.project
self.config = python_env.config if python_env else None
self._force = force
self.project_path = self.project.checkout_path(self.version.slug)
self.target = self.project.artifact_path(
version=self.version.slug,
type_=self.type,
)
def get_final_doctype(self):
"""Some builders may have a different doctype at build time."""
return self.config.doctype
def force(self, **__):
"""An optional step to force a build even when nothing has changed."""
log.info('Forcing a build')
self._force = True
def append_conf(self):
"""Set custom configurations for this builder."""
pass
def build(self):
"""Do the actual building of the documentation."""
raise NotImplementedError
def move(self, **__):
"""Move the generated documentation to its artifact directory."""
if os.path.exists(self.old_artifact_path):
if os.path.exists(self.target):
shutil.rmtree(self.target)
log.info('Copying %s on the local filesystem', self.type)
log.debug('Ignoring patterns %s', self.ignore_patterns)
shutil.copytree(
self.old_artifact_path,
self.target,
ignore=shutil.ignore_patterns(*self.ignore_patterns),
)
else:
log.warning('Not moving docs, because the build dir is unknown.')
def clean(self, **__):
"""Clean the path where documentation will be built."""
if os.path.exists(self.old_artifact_path):
shutil.rmtree(self.old_artifact_path)
log.info('Removing old artifact path: %s', self.old_artifact_path)
def docs_dir(self, docs_dir=None, **__):
"""Handle creating a custom docs_dir if it doesn't exist."""
if docs_dir:
return docs_dir
for doc_dir_name in ['docs', 'doc', 'Doc', 'book']:
possible_path = os.path.join(self.project_path, doc_dir_name)
if os.path.exists(possible_path):
return possible_path
return self.project_path
def create_index(self, extension='md', **__):
"""Create an index file if it needs it."""
docs_dir = self.docs_dir()
index_filename = os.path.join(
docs_dir,
'index.{ext}'.format(ext=extension),
)
if not os.path.exists(index_filename):
readme_filename = os.path.join(
docs_dir,
'README.{ext}'.format(ext=extension),
)
if os.path.exists(readme_filename):
return 'README'
if not self.project.has_feature(Feature.DONT_CREATE_INDEX):
index_file = open(index_filename, 'w+')
index_text = """
Welcome to Read the Docs
------------------------
This is an autogenerated index file.
Please create an ``index.{ext}`` or ``README.{ext}`` file with your own content
under the root (or ``/docs``) directory in your repository.
If you want to use another markup, choose a different builder in your settings.
Check out our `Getting Started Guide
<https://docs.readthedocs.io/en/latest/getting_started.html>`_ to become more
familiar with Read the Docs.
"""
index_file.write(index_text.format(dir=docs_dir, ext=extension))
index_file.close()
return 'index'
def run(self, *args, **kwargs):
"""Proxy run to build environment."""
return self.build_env.run(*args, **kwargs)
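# Minimal subclass sketch (illustrative only; real builders elsewhere in this package
# define `type` and override `build` similarly, but the command below is made up):
#
#   class ExampleHTMLBuilder(BaseBuilder):
#       type = 'example_html'
#       def build(self):
#           return self.run('make', 'html', cwd=self.project_path)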
|
py | 7dfcc39722a1452bfbeb230edad7cbce34d4d114 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Manta(CMakePackage):
"""Structural variant and indel caller for mapped sequencing data"""
homepage = "https://github.com/Illumina/manta"
url = "https://github.com/Illumina/manta/releases/download/v1.3.2/manta-1.3.2.release_src.tar.bz2"
version('1.6.0', sha256='c846d61b02483265c09d58bd85dacf5326a94f38179b5ae4f70694be96e1368f')
version('1.5.0', sha256='9aa1a59c9cb8d2dd33724a42959c9398aff7840c5bf3c895d2483a8093b3d2dc')
version('1.4.0', sha256='4f8f827485e3ad9a12318bfcbf62fa622263378767514eb938bc02ad5ad74f10')
version('1.3.2', sha256='eb346d1a44aff1180732dcd03864b89efc1245652e1993107fb60da4ad739f79')
version('1.3.1', sha256='9ba943623088e552a4b45bccea48125a0553905f4cc7ea86a9de567e155a5888')
version('1.3.0', sha256='3db4b5475c33e3aeeb5435969c74364af9d2d77dd3bcf8dc70bf44a851e450dd')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:2.7.999', type=('build', 'run'))
depends_on('zlib')
patch('for_aarch64.patch', when='target=aarch64:')
|
py | 7dfcc3ab143a2a900074f70918020213bac37623 | import ud, wizards, usuario |
py | 7dfcc3cee04014f6d59818d0f1b09e0a01a30308 | """This module contains the general information for VersionApplication ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class VersionApplicationConsts():
pass
class VersionApplication(ManagedObject):
"""This is VersionApplication class."""
consts = VersionApplicationConsts()
naming_props = set([])
mo_meta = MoMeta("VersionApplication", "versionApplication", "application", VersionMeta.Version211a, "InputOutput", 0x1f, [], ["read-only"], [u'versionEp'], [], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"detail": MoPropertyMeta("detail", "detail", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"time": MoPropertyMeta("time", "time", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"version": MoPropertyMeta("version", "version", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
}
prop_map = {
"childAction": "child_action",
"detail": "detail",
"dn": "dn",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"time": "time",
"version": "version",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.detail = None
self.sacl = None
self.status = None
self.time = None
self.version = None
ManagedObject.__init__(self, "VersionApplication", parent_mo_or_dn, **kwargs)
|
py | 7dfcc3d9c75cae9f425849220bd08e32cfad67ca | # Generated by Django 2.1.15 on 2021-12-19 11:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('ingredient', models.ManyToManyField(to='core.Ingredient')),
('tag', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
py | 7dfcc3e8f05ff06fb477be5b0a76ea8b2a0e5b78 | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'btjbp90vz-^=m(==oz1q8s1cwi%!w6w9=0*fv_7_cjl(l^$wod'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS')
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = 'vol/web/static'
MEDIA_ROOT = '/vol/web/media'
AUTH_USER_MODEL = 'core.User' |
py | 7dfcc3fba81948f820e732bc9b12666811b1018e | """Adds constants for SQL integration."""
import re
from homeassistant.const import Platform
DOMAIN = "sql"
PLATFORMS = [Platform.SENSOR]
CONF_COLUMN_NAME = "column"
CONF_QUERIES = "queries"
CONF_QUERY = "query"
DB_URL_RE = re.compile("//.*:.*@")
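# Illustrative use of the pattern above (an assumption about how credentials get
# redacted, not taken from this file):
#   DB_URL_RE.sub("//****:****@", "postgresql://user:secret@db-host/homeassistant")
#   # -> 'postgresql://****:****@db-host/homeassistant'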
|
py | 7dfcc513827578670f1dbe97d6a8ae743b7cbcdb | import logging
from asyncio import (
sleep,
)
from collections.abc import (
Hashable,
)
from typing import (
Optional,
)
import aiopg
from aiomisc.pool import (
ContextManager,
)
from aiopg import (
Connection,
)
from psycopg2 import (
OperationalError,
)
from ..injections import (
Injectable,
)
from ..locks import (
LockPool,
)
from ..pools import (
Pool,
)
from .locks import (
PostgreSqlLock,
)
logger = logging.getLogger(__name__)
@Injectable("postgresql_pool")
class PostgreSqlPool(Pool[ContextManager]):
"""Postgres Pool class."""
def __init__(
self,
database: str,
host: Optional[str] = None,
port: Optional[int] = None,
user: Optional[str] = None,
password: Optional[str] = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
if host is None:
host = "localhost"
if port is None:
port = 5432
if user is None:
user = "postgres"
if password is None:
password = ""
self.database = database
self.host = host
self.port = port
self.user = user
self.password = password
@classmethod
def _from_config(cls, *args, config, **kwargs):
return cls(*args, **config.get_default_database(), **kwargs)
async def _create_instance(self) -> Optional[Connection]:
try:
connection = await aiopg.connect(
host=self.host, port=self.port, dbname=self.database, user=self.user, password=self.password
)
except OperationalError as exc:
logger.warning(f"There was an {exc!r} while trying to get a database connection.")
await sleep(1)
return None
logger.info(f"Created {self.database!r} database connection identified by {id(connection)}!")
return connection
async def _destroy_instance(self, instance: Connection):
if not instance.closed:
await instance.close()
logger.info(f"Destroyed {self.database!r} database connection identified by {id(instance)}!")
async def _check_instance(self, instance: Optional[Connection]) -> bool:
if instance is None:
return False
try:
# This operation connects to the database and raises an exception if something goes wrong.
instance.isolation_level
except OperationalError:
return False
return not instance.closed
class PostgreSqlLockPool(LockPool, PostgreSqlPool):
"""Postgres Locking Pool class."""
def acquire(self, key: Hashable, *args, **kwargs) -> PostgreSqlLock:
"""Acquire a new lock.
:param key: The key to be used for locking.
:return: A ``PostgreSqlLock`` instance.
"""
return PostgreSqlLock(super().acquire(), key, *args, **kwargs)
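# Usage sketch (illustrative; connection parameters are made up, and the lock is
# assumed to be usable as an async context manager):
#
#   pool = PostgreSqlLockPool(database="example_db", user="postgres")
#   async with pool.acquire("resource-key"):
#       ...  # critical section guarded by the PostgreSqlLock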
|
py | 7dfcc52fb85de09d22ee8c38e0e99aa1d46e1f74 | import komand
from .schema import SubmitInput, SubmitOutput
# Custom imports below
import requests
import base64
import io
import magic
class Submit(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="submit",
description="Submit a malicious file or hash",
input=SubmitInput(),
output=SubmitOutput(),
)
def run(self, params={}): # noqa: MC0001
_type = None
url = "https://submit.symantec.com/websubmit/bcs.cgi"
# Formatted with None and tuples so requests sends form-data properly
# Request Payload
# ------WebKitFormBoundaryEczE3Txu7oImEgyU
# Content-Disposition: form-data; name="mode"
#
# 2
# ------WebKitFormBoundaryEczE3Txu7oImEgyU
# Content-Disposition: form-data; name="fname"
#
# Jon
# ------WebKitFormBoundaryEczE3Txu7oImEgyU
# Content-Disposition: form-data; name="lname"
#
# Schipp
# ------WebKitFormBoundaryEczE3Txu7oImEgyU
# Content-Disposition: form-data; name="cname"
#
# Rapid7
# ------WebKitFormBoundaryEczE3Txu7oImEgyU
# Content-Disposition: form-data; name="email"
#
# [email protected]
# ------WebKitFormBoundaryEczE3Txu7oImEgyU
# Content-Disposition: form-data; name="email2"
#
# [email protected]
# ------WebKitFormBoundaryEczE3Txu7oImEgyU
# Content-Disposition: form-data; name="pin"
#
# 12345
# ------WebKitFormBoundaryEczE3Txu7oImEgyU
# Content-Disposition: form-data; name="stype"
#
# upfile
# ------WebKitFormBoundaryEczE3Txu7oImEgyU
# Content-Disposition: form-data; name="upfile"; filename="setup.exe"
# Content-Type: application/x-msdownload
stype = params.get("stype")
headers = {
"Origin": "https://submit.symantec.com",
"Referer": "https://submit.symantec.com/websubmit/bcs.cgi",
}
req = {
"mode": (None, "2"),
"fname": (None, params.get("fname")),
"lname": (None, params.get("lname")),
"cname": (None, params.get("cname")),
"email": (None, params.get("email")),
"email2": (None, params.get("email")),
"pin": (None, params.get("pin")),
"stype": (None, stype),
"comments": (None, params.get("comments")),
}
        if params.get("critical") is True:
req["critical"] = (None, "on")
data = params.get("data", "")
if stype == "url":
self.logger.info("URL specified")
if not data.startswith("http://") and not data.startswith("https://") and not data.startswith("ftp://"):
self.logger.error("The URLs must start with http:// or https:// or ftp://")
raise Exception("Invalid URL format")
req["url"] = (None, data)
else:
req["url"] = (None, "")
if stype == "hash":
self.logger.info("Hash specified")
if len(data) != 32 and len(data) != 64:
self.logger.error("The hash provided should be in the MD5 or SHA256 format only")
raise Exception("Invalid hash format")
req["hash"] = (None, data)
else:
req["hash"] = (None, "")
if stype == "upfile":
self.logger.info("File specified")
filename = params.get("filename")
if not filename:
filename = "komand-uploaded.file"
self.logger.info("Filename: %s", filename)
try:
                file_obj = io.BytesIO(base64.b64decode(data))
            except:
                self.logger.error("Invalid file bytes input")
                raise
            try:
                _type = magic.Magic(mime=True).from_buffer(file_obj.read(1024))
                self.logger.info("MIME Content Type: %s", _type)
            except:
                _type = "application/octet-stream"
                self.logger.info("Unable to determine MIME Content Type of file, using %s:", _type)
                pass
            # Reset file counter to beginning of file since read 1024 bytes for magic number above
            file_obj.seek(0)
            f = file_obj.read()
if len(f) > 0:
req["upfile"] = (filename, f, _type)
else:
req["upfile"] = ("", "", _type)
else:
req["upfile"] = (None, "")
try:
r = requests.post(url, headers=headers, files=req)
r.raise_for_status()
out = base64.b64encode(r.content)
except requests.exceptions.HTTPError as e:
self.logger.error("HTTP error occurred. Error: " + str(e))
raise
except requests.exceptions.ConnectionError as e:
self.logger.error("A network problem occurred. Error: " + str(e))
raise
except requests.exceptions.Timeout as e:
self.logger.error("Timeout occurred. Error: " + str(e))
raise
except requests.exceptions.TooManyRedirects as e:
self.logger.error("Too many redirects! Error: " + str(e))
raise
except Exception as e:
self.logger.error("Error: " + str(e))
raise
# Debugging
# self.logger.info(r.request.headers)
# self.logger.info(r.request.body)
return {"response": out.decode()}
def test(self):
url = "https://submit.symantec.com/websubmit/bcs.cgi"
try:
r = requests.get(url)
r.raise_for_status()
out = base64.b64encode(r.content)
except requests.exceptions.HTTPError as e:
self.logger.error("HTTP error occurred. Error: " + str(e))
raise
except requests.exceptions.ConnectionError as e:
self.logger.error("A network problem occurred. Error: " + str(e))
raise
except requests.exceptions.Timeout as e:
self.logger.error("Timeout occurred. Error: " + str(e))
raise
except requests.exceptions.TooManyRedirects as e:
self.logger.error("Too many redirects! Error: " + str(e))
raise
except Exception as e:
self.logger.error("Error: " + str(e))
raise
return {"response": out.decode()}
|
py | 7dfcc5d7ce80192f624983439ca768736be13c12 | import commands
from time import sleep
syn_recv_num = []
for i in range(70):
x = commands.getoutput("netstat -n | grep SYN_RECV | wc -l")
#num = commands.getoutput("netstat -n | grep ESTA | wc -l")
print("time: %s, utilization of syn queue: %s") %(i,str(float(int(x)/5.0)*100))
#syn_recv_num.append([x,num])
sleep(1)
# f = open('esta1.txt','w')
# for item in syn_recv_num:
# #f.write(str(float(int(item)/5.0)))
# f.write(item[0])
# f.write(" ")
# f.write(item[1])
# f.write("\n")
# f.close()
|
py | 7dfcc62b8b26ea384ba251c9c3d3a5d692f93182 | def print_test():
print('print_test')
|
py | 7dfcc664c442c3a77f8ee21ac02781d9bd5f1aa1 | # -*- coding: utf-8 -*-
import six
import itertools
import os
import shutil
import requests
import pytest
from verta.registry.entities import RegisteredModels
from verta.tracking.entities._deployable_entity import _CACHE_DIR
from . import utils
import verta
from verta._internal_utils import _utils
import json
from verta.external.six.moves.urllib.parse import urlparse # pylint: disable=import-error, no-name-in-module
KWARGS = {
'desc': [None, "A test."],
'tags': [None, ['test']],
'attrs': [None, {'is_test': True}],
}
KWARGS_COMBOS = [dict(zip(KWARGS.keys(), values))
for values
in itertools.product(*KWARGS.values())
if values.count(None) != len(values)]
# for `tags` typecheck tests
TAG = "my-tag"
class TestClient:
@pytest.mark.skipif(not all(env_var in os.environ for env_var in ('VERTA_HOST', 'VERTA_EMAIL', 'VERTA_DEV_KEY')), reason="insufficient Verta credentials")
def test_config_file(self):
self.config_file_with_type_util(connect = False)
@pytest.mark.skipif(not all(env_var in os.environ for env_var in ('VERTA_HOST', 'VERTA_EMAIL', 'VERTA_DEV_KEY')), reason="insufficient Verta credentials")
def test_config_file_connect(self):
self.config_file_with_type_util(connect = True)
def config_file_with_type_util(self, connect):
PROJECT_NAME = _utils.generate_default_name()
DATASET_NAME = _utils.generate_default_name()
EXPERIMENT_NAME = _utils.generate_default_name()
CONFIG_FILENAME = "verta_config.json"
HOST_KEY, EMAIL_KEY, DEV_KEY_KEY = "VERTA_HOST", "VERTA_EMAIL", "VERTA_DEV_KEY"
HOST, EMAIL, DEV_KEY = os.environ[HOST_KEY], os.environ[EMAIL_KEY], os.environ[DEV_KEY_KEY]
try:
del os.environ[HOST_KEY], os.environ[EMAIL_KEY], os.environ[DEV_KEY_KEY]
try:
with open(CONFIG_FILENAME, 'w') as f:
json.dump(
{
'host': HOST,
'email': EMAIL, 'dev_key': DEV_KEY,
'project': PROJECT_NAME,
'experiment': EXPERIMENT_NAME,
'dataset': DATASET_NAME,
},
f,
)
client = verta.Client(_connect=connect)
conn = client._conn
back_end_url = urlparse(HOST)
socket = back_end_url.netloc + back_end_url.path.rstrip('/')
assert conn.socket == socket
assert conn.auth['Grpc-Metadata-email'] == EMAIL
assert conn.auth['Grpc-Metadata-developer_key'] == DEV_KEY
assert conn.auth['Grpc-Metadata-developer-key'] == DEV_KEY
if connect:
try:
assert client.set_experiment_run()
assert client.proj.name == PROJECT_NAME
assert client.expt.name == EXPERIMENT_NAME
finally:
if client.proj is not None:
client.proj.delete()
dataset = client.set_dataset()
try:
assert dataset.name == DATASET_NAME
finally:
dataset.delete()
else:
assert client._set_from_config_if_none(None, "project") == PROJECT_NAME
assert client._set_from_config_if_none(None, "experiment") == EXPERIMENT_NAME
assert client._set_from_config_if_none(None, "dataset") == DATASET_NAME
finally:
if os.path.exists(CONFIG_FILENAME):
os.remove(CONFIG_FILENAME)
finally:
os.environ[HOST_KEY], os.environ[EMAIL_KEY], os.environ[DEV_KEY_KEY] = HOST, EMAIL, DEV_KEY
class TestEntities:
def test_cache(self, client, strs):
client.set_project()
client.set_experiment()
entities = (
client.set_experiment_run(),
)
for entity in entities:
filename = strs[0]
filepath = os.path.join(_CACHE_DIR, filename)
contents = six.ensure_binary(strs[1])
assert not os.path.isfile(filepath)
assert not entity._get_cached_file(filename)
try:
assert entity._cache_file(filename, contents) == filepath
assert os.path.isfile(filepath)
assert entity._get_cached_file(filename)
with open(filepath, 'rb') as f:
assert f.read() == contents
finally:
shutil.rmtree(_CACHE_DIR, ignore_errors=True)
def test_context(self, client, strs):
strs = iter(strs)
def assert_new_run_in_proj():
assert client.get_or_create_experiment_run()._msg.project_id == proj.id
assert client.create_experiment_run()._msg.project_id == proj.id
proj = client.create_project()
assert_new_run_in_proj()
client.get_or_create_registered_model().delete()
client.create_registered_model().delete()
assert_new_run_in_proj()
client.get_or_create_dataset().delete()
client.create_dataset().delete()
assert_new_run_in_proj()
assert client.get_or_create_experiment()._msg.project_id == proj.id
assert client.create_experiment()._msg.project_id == proj.id
assert_new_run_in_proj()
class TestProject:
def test_create(self, client):
assert client.set_project()
assert client.proj is not None
name = _utils.generate_default_name()
assert client.create_project(name)
assert client.proj is not None
with pytest.raises(requests.HTTPError) as excinfo:
assert client.create_project(name)
excinfo_value = str(excinfo.value).strip()
assert "409" in excinfo_value
assert "already exists" in excinfo_value
with pytest.warns(UserWarning, match='.*already exists.*'):
client.get_or_create_project(name=name, tags=["tag1", "tag2"])
def test_get(self, client):
name = _utils.generate_default_name()
with pytest.raises(ValueError):
client.get_project(name)
proj = client.set_project(name)
assert proj.id == client.get_project(proj.name).id
assert proj.id == client.get_project(id=proj.id).id
def test_get_by_name(self, client):
proj = client.set_project()
client.set_project() # in case get erroneously fetches latest
assert proj.id == client.set_project(proj.name).id
def test_get_by_id(self, client):
proj = client.set_project()
client.set_project() # in case get erroneously fetches latest
assert proj.id == client.set_project(id=proj.id).id
def test_get_nonexistent_id(self, client):
with pytest.raises(ValueError):
client.set_project(id="nonexistent_id")
@pytest.mark.parametrize("tags", [TAG, [TAG]])
def test_tags_is_list_of_str(self, client, tags):
proj = client.set_project(tags=tags)
endpoint = "{}://{}/api/v1/modeldb/project/getProjectTags".format(
client._conn.scheme,
client._conn.socket,
)
response = _utils.make_request("GET", endpoint, client._conn, params={'id': proj.id})
_utils.raise_for_http_error(response)
assert response.json().get('tags', []) == [TAG]
class TestExperiment:
def test_create(self, client):
client.set_project()
assert client.set_experiment()
assert client.expt is not None
name = _utils.generate_default_name()
assert client.create_experiment(name)
assert client.expt is not None
with pytest.raises(requests.HTTPError) as excinfo:
assert client.create_experiment(name)
excinfo_value = str(excinfo.value).strip()
assert "409" in excinfo_value
assert "already exists" in excinfo_value
with pytest.warns(UserWarning, match='.*already exists.*'):
client.set_experiment(name=name, attrs={"a": 123})
def test_get(self, client):
proj = client.set_project()
name = _utils.generate_default_name()
with pytest.raises(ValueError):
client.get_experiment(name)
expt = client.set_experiment(name)
assert expt.id == client.get_experiment(expt.name).id
assert expt.id == client.get_experiment(id=expt.id).id
# test parents are restored
client.set_project()
client.get_experiment(id=expt.id)
assert client.proj.id == proj.id
assert client.expt.id == expt.id
def test_get_by_name(self, client):
client.set_project()
expt = client.set_experiment()
client.set_experiment() # in case get erroneously fetches latest
assert expt.id == client.set_experiment(expt.name).id
def test_get_by_id(self, client):
proj = client.set_project()
expt = client.set_experiment()
client.set_experiment() # in case get erroneously fetches latest
assert expt.id == client.set_experiment(id=expt.id).id
assert proj.id == client.proj.id
def test_get_nonexistent_id_error(self, client):
with pytest.raises(ValueError):
client.set_experiment(id="nonexistent_id")
@pytest.mark.parametrize("tags", [TAG, [TAG]])
def test_tags_is_list_of_str(self, client, tags):
client.set_project()
expt = client.set_experiment(tags=tags)
endpoint = "{}://{}/api/v1/modeldb/experiment/getExperimentTags".format(
client._conn.scheme,
client._conn.socket,
)
response = _utils.make_request("GET", endpoint, client._conn, params={'id': expt.id})
_utils.raise_for_http_error(response)
assert response.json().get('tags', []) == [TAG]
class TestExperimentRun:
def test_create(self, client):
client.set_project()
client.set_experiment()
assert client.set_experiment_run()
name = _utils.generate_default_name()
assert client.create_experiment_run(name)
with pytest.raises(requests.HTTPError) as excinfo:
assert client.create_experiment_run(name)
excinfo_value = str(excinfo.value).strip()
assert "409" in excinfo_value
assert "already exists" in excinfo_value
with pytest.warns(UserWarning, match='.*already exists.*'):
client.set_experiment_run(name=name, attrs={"a": 123})
def test_get(self, client):
proj = client.set_project()
expt = client.set_experiment()
name = _utils.generate_default_name()
with pytest.raises(ValueError):
client.get_experiment_run(name)
run = client.set_experiment_run(name)
assert run.id == client.get_experiment_run(run.name).id
assert run.id == client.get_experiment_run(id=run.id).id
# test parents are restored by first setting new, unrelated ones
client.set_project()
client.set_experiment()
client.get_experiment_run(id=run.id)
assert client.proj.id == proj.id
assert client.expt.id == expt.id
def test_get_by_name(self, client):
client.set_project()
client.set_experiment()
run = client.set_experiment_run()
client.set_experiment_run() # in case get erroneously fetches latest
assert run.id == client.set_experiment_run(run.name).id
def test_get_by_id(self, client):
proj = client.set_project()
expt = client.set_experiment()
expt_run = client.set_experiment_run()
client.set_experiment_run() # in case get erroneously fetches latest
assert expt_run.id == client.set_experiment_run(id=expt_run.id).id
assert proj.id == client.proj.id
assert expt.id == client.expt.id
def test_get_nonexistent_id_error(self, client):
with pytest.raises(ValueError):
client.set_experiment_run(id="nonexistent_id")
def test_no_experiment_error(self, client):
with pytest.raises(AttributeError):
client.set_experiment_run()
@pytest.mark.parametrize("tags", [TAG, [TAG]])
def test_tags_is_list_of_str(self, client, tags):
client.set_project()
client.set_experiment()
run = client.set_experiment_run(tags=tags)
endpoint = "{}://{}/api/v1/modeldb/experiment-run/getExperimentRunTags".format(
client._conn.scheme,
client._conn.socket,
)
response = _utils.make_request("GET", endpoint, client._conn, params={'id': run.id})
_utils.raise_for_http_error(response)
assert response.json().get('tags', []) == [TAG]
def test_clone(self, experiment_run):
expt_run = experiment_run
expt_run._conf.use_git = False
expt_run.log_hyperparameters({"hpp1" : 1, "hpp2" : 2, "hpp3" : "hpp3"})
expt_run.log_metrics({"metric1" : 0.5, "metric2" : 0.6})
expt_run.log_tags(["tag1", "tag2"])
expt_run.log_attributes({"attr1" : 10, "attr2" : {"abc": 1}})
expt_run.log_artifact("my-artifact", "README.md")
expt_run.log_code()
# set various things in the run
new_run_no_art = expt_run.clone()
old_run_msg = expt_run._get_proto_by_id(expt_run._conn, expt_run.id)
new_run_no_art_msg = new_run_no_art._get_proto_by_id(new_run_no_art._conn, new_run_no_art.id)
# ensure basic data is the same
assert expt_run.id != new_run_no_art_msg.id
assert old_run_msg.description == new_run_no_art_msg.description
assert old_run_msg.tags == new_run_no_art_msg.tags
assert old_run_msg.metrics == new_run_no_art_msg.metrics
assert old_run_msg.hyperparameters == new_run_no_art_msg.hyperparameters
assert old_run_msg.observations == new_run_no_art_msg.observations
assert old_run_msg.artifacts == new_run_no_art_msg.artifacts
def test_clone_into_expt(self, client):
expt1 = client.set_experiment()
expt2 = client.set_experiment()
assert expt1.id != expt2.id # of course, but just to be sure
old_run = client.set_experiment_run()
assert old_run._msg.experiment_id == expt2.id # of course, but just to be sure
old_run.log_hyperparameters({"hpp1" : 1, "hpp2" : 2, "hpp3" : "hpp3"})
old_run.log_metrics({"metric1" : 0.5, "metric2" : 0.6})
old_run.log_tags(["tag1", "tag2"])
old_run.log_attributes({"attr1" : 10, "attr2" : {"abc": 1}})
old_run.log_artifact("my-artifact", "README.md")
new_run = old_run.clone(experiment_id=expt1.id)
old_run_msg = old_run._get_proto_by_id(old_run._conn, old_run.id)
new_run_msg = new_run._get_proto_by_id(new_run._conn, new_run.id)
assert old_run_msg.id != new_run_msg.id
assert new_run_msg.experiment_id == expt1.id
assert old_run_msg.description == new_run_msg.description
assert old_run_msg.tags == new_run_msg.tags
assert old_run_msg.metrics == new_run_msg.metrics
assert old_run_msg.hyperparameters == new_run_msg.hyperparameters
assert old_run_msg.observations == new_run_msg.observations
assert old_run_msg.artifacts == new_run_msg.artifacts
def test_log_attribute_overwrite(self, client):
initial_attrs = {"str-attr": "attr", "int-attr": 4, "float-attr": 0.5}
new_attrs = {"str-attr": "new-attr", "int-attr": 5, "float-attr": 0.3, "bool-attr": False}
single_new_attr = new_attrs.popitem()
experiment_run = client.set_experiment_run(attrs=initial_attrs)
with pytest.raises(ValueError) as excinfo:
experiment_run.log_attribute("str-attr", "some-attr")
assert "already exists" in str(excinfo.value)
experiment_run.log_attribute(*single_new_attr, overwrite=True)
experiment_run.log_attributes(new_attrs, True)
expected_attrs = initial_attrs.copy()
expected_attrs.update([single_new_attr])
expected_attrs.update(new_attrs)
assert experiment_run.get_attributes() == expected_attrs
class TestExperimentRuns:
def test_getitem(self, client):
client.set_project()
expt_runs = client.set_experiment().expt_runs
local_run_ids = set(client.set_experiment_run().id for _ in range(3))
assert expt_runs[1].id in local_run_ids
def test_negative_indexing(self, client):
client.set_project()
expt_runs = client.set_experiment().expt_runs
local_run_ids = set(client.set_experiment_run().id for _ in range(3))
assert expt_runs[-1].id in local_run_ids
def test_index_out_of_range_error(self, client):
client.set_project()
expt_runs = client.set_experiment().expt_runs
[client.set_experiment_run() for _ in range(3)]
with pytest.raises(IndexError):
expt_runs[6]
with pytest.raises(IndexError):
expt_runs[-6]
def test_iter(self, client):
client.set_project()
expt_runs = client.set_experiment().expt_runs
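# Shrink the page size so iterating over 6 runs spans multiple paginated requests.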
expt_runs._ITER_PAGE_LIMIT = 3
local_run_ids = set(client.set_experiment_run().id for _ in range(6))
# iterate through all 6 runs
assert local_run_ids == set(run.id for run in expt_runs)
# don't fail ungracefully while runs are added
for i, _ in enumerate(expt_runs):
if i == 4:
[client.set_experiment_run() for _ in range(3)]
def test_len(self, client):
client.set_project()
expt_runs = client.set_experiment().expt_runs
assert len([client.set_experiment_run().id for _ in range(3)]) == len(expt_runs)
def test_as_dataframe(self, client, strs):
np = pytest.importorskip("numpy")
pytest.importorskip("pandas")
# initialize entities
client.set_project()
expt = client.set_experiment()
for _ in range(3):
client.set_experiment_run()
# log metadata
hpp1, hpp2, metric1, metric2 = strs[:4]
for run in expt.expt_runs:
run.log_hyperparameters({
hpp1: np.random.random(),
hpp2: np.random.random(),
})
run.log_metrics({
metric1: np.random.random(),
metric2: np.random.random(),
})
# verify that DataFrame matches
df = expt.expt_runs.as_dataframe()
assert set(df.index) == set(run.id for run in expt.expt_runs)
for run in expt.expt_runs:
row = df.loc[run.id]
assert row['hpp.'+hpp1] == run.get_hyperparameter(hpp1)
assert row['hpp.'+hpp2] == run.get_hyperparameter(hpp2)
assert row['metric.'+metric1] == run.get_metric(metric1)
assert row['metric.'+metric2] == run.get_metric(metric2)
def test_find(self, client):
client.set_project()
expt = client.set_experiment()
tag = "some-tag"
diff_tag = "diff-tag"
run_with_diff_tag = client.set_experiment_run("run-with-diff-tag")
run_with_diff_tag.log_tag(diff_tag)
runs_with_tag = []
for _ in range(5):
runs_with_tag.append(client.set_experiment_run())
runs_with_tag[-1].log_tag(tag)
found_runs = expt.expt_runs.find("tags ~= {}".format(tag))
assert len(found_runs) == len(runs_with_tag)
runs_with_tag[-1].log_hyperparameter("some-hyper", 1)
# compound conditions:
assert len(expt.expt_runs.find(["tags ~= {}".format(tag), "hyperparameters.some-hyper == 1"])) == 1 # old syntax
assert len(expt.expt_runs.find("tags ~= {}".format(tag), "hyperparameters.some-hyper == 1")) == 1 # new syntax
# if any predicate is not string, should fail:
with pytest.raises(TypeError, match="predicates must all be strings"):
expt.expt_runs.find("tag ~= {}".format(tag), 1234)
@pytest.mark.skip("functionality removed")
def test_add(self, client):
client.set_project()
expt1 = client.set_experiment()
local_expt1_run_ids = set(client.set_experiment_run().id for _ in range(3))
expt2 = client.set_experiment()
local_expt2_run_ids = set(client.set_experiment_run().id for _ in range(3))
# simple concatenation
assert local_expt1_run_ids | local_expt2_run_ids == set(run.id for run in expt1.expt_runs + expt2.expt_runs)
# ignore duplicates
assert local_expt1_run_ids == set(run.id for run in expt1.expt_runs + expt1.expt_runs)
|
py | 7dfcc6b9fcae64da553434e4ddc5a4f4908f293c | import pytest
import os
from great_expectations.data_context.util import safe_mmkdir
from great_expectations.datasource.generator import SubdirReaderGenerator
from great_expectations.exceptions import BatchKwargsError
def test_subdir_reader_path_partitioning(tmp_path_factory):
base_directory = str(tmp_path_factory.mktemp("test_folder_connection_path"))
mock_files = [
"asset_1/20190101__asset_1.csv",
"asset_1/20190102__asset_1.csv",
"asset_1/20190103__asset_1.csv",
"asset_2/20190101__asset_2.csv",
"asset_2/20190102__asset_2.csv"
]
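# Lay the mock data out on disk: one subdirectory per asset, each holding empty CSV files.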
for file in mock_files:
safe_mmkdir(os.path.join(base_directory, file.split("/")[0]))
open(os.path.join(base_directory, file), "w").close()
subdir_reader_generator = SubdirReaderGenerator("test_generator", base_directory=base_directory)
# We should see two assets
known_assets = subdir_reader_generator.get_available_data_asset_names()
# Use set in test to avoid order issues
assert set(known_assets) == {"asset_1", "asset_2"}
# We should see three partitions for the first:
known_partitions = subdir_reader_generator.get_available_partition_ids("asset_1")
assert set(known_partitions) == {
"20190101__asset_1",
"20190102__asset_1",
"20190103__asset_1"
}
asset_1_kwargs = [kwargs for kwargs in subdir_reader_generator.get_iterator("asset_1")]
asset_2_kwargs = [kwargs for kwargs in subdir_reader_generator.get_iterator("asset_2")]
with pytest.raises(BatchKwargsError):
not_an_asset_kwargs = [kwargs for kwargs in subdir_reader_generator.get_iterator("not_an_asset")]
assert len(asset_1_kwargs) == 3
paths = [kwargs["path"] for kwargs in asset_1_kwargs]
assert set(paths) == {
os.path.join(base_directory, "asset_1/20190101__asset_1.csv"),
os.path.join(base_directory, "asset_1/20190102__asset_1.csv"),
os.path.join(base_directory, "asset_1/20190103__asset_1.csv")
}
partitions = [kwargs["partition_id"] for kwargs in asset_1_kwargs]
# SubdirReaderGenerator uses filenames from subdirectories to generate partition names
assert set(partitions) == {
"20190101__asset_1",
"20190102__asset_1",
"20190103__asset_1"
}
assert len(asset_1_kwargs[0].keys()) == 3
assert len(asset_2_kwargs) == 2
paths = [kwargs["path"] for kwargs in asset_2_kwargs]
assert set(paths) == {
os.path.join(base_directory, "asset_2/20190101__asset_2.csv"),
os.path.join(base_directory, "asset_2/20190102__asset_2.csv")
}
partitions = [kwargs["partition_id"] for kwargs in asset_2_kwargs]
assert set(partitions) == {
"20190101__asset_2",
"20190102__asset_2"
}
assert len(asset_2_kwargs[0].keys()) == 3
def test_subdir_reader_file_partitioning(tmp_path_factory):
base_directory = str(tmp_path_factory.mktemp("test_folder_connection_path"))
mock_files = [
"20190101__asset_1.csv",
"20190102__asset_1.csv",
"20190103__asset_1.csv",
"asset_2/20190101__asset_2.csv",
"asset_2/20190102__asset_2.csv"
]
for file in mock_files:
if "/" in file:
safe_mmkdir(os.path.join(base_directory, file.split("/")[0]))
open(os.path.join(base_directory, file), "w").close()
# If we have files, we should see them as individual assets
subdir_reader_generator = SubdirReaderGenerator("test_generator", base_directory=base_directory)
known_assets = subdir_reader_generator.get_available_data_asset_names()
assert set(known_assets) == {
"20190101__asset_1",
"20190102__asset_1",
"20190103__asset_1",
"asset_2"
}
# SubdirReaderGenerator uses the filename as partition name for root files
known_partitions = subdir_reader_generator.get_available_partition_ids("20190101__asset_1")
assert set(known_partitions) == {"20190101__asset_1"}
kwargs = subdir_reader_generator.build_batch_kwargs_from_partition_id("20190101__asset_1", "20190101__asset_1")
assert kwargs["path"] == os.path.join(base_directory, "20190101__asset_1.csv")
# We should also be able to pass a limit
kwargs = subdir_reader_generator.build_batch_kwargs_from_partition_id("20190101__asset_1", "20190101__asset_1",
limit=10)
assert kwargs["path"] == os.path.join(base_directory, "20190101__asset_1.csv")
assert kwargs["limit"] == 10
def test_subdir_reader_configurable_reader_method(tmp_path_factory):
base_directory = str(tmp_path_factory.mktemp("test_folder_connection_path"))
mock_files = [
"20190101__asset_1.dat",
"20190102__asset_1.dat",
"20190103__asset_1.dat",
"asset_2/20190101__asset_2.dat",
"asset_2/20190102__asset_2.dat"
]
for file in mock_files:
if "/" in file:
safe_mmkdir(os.path.join(base_directory, file.split("/")[0]))
open(os.path.join(base_directory, file), "w").close()
# If we have files, we should see them as individual assets
subdir_reader_generator = SubdirReaderGenerator("test_generator", base_directory=base_directory,
reader_method='csv', known_extensions=['.dat'])
batch_kwargs = next(subdir_reader_generator.get_iterator('asset_2'))
assert batch_kwargs['reader_method'] == 'csv'
|
py | 7dfcc70bbb5cf1a1896cb87ea5163d7f17e189c0 | import sqlalchemy as sa
from sqlalchemy import CheckConstraint
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import UniqueConstraint
from sqlalchemy import util
from sqlalchemy.ext import declarative as decl
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.declarative import synonym_for
from sqlalchemy.ext.declarative.base import _DeferredMapperConfig
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import backref
from sqlalchemy.orm import class_mapper
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import close_all_sessions
from sqlalchemy.orm import column_property
from sqlalchemy.orm import composite
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import create_session
from sqlalchemy.orm import deferred
from sqlalchemy.orm import descriptor_props
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm.events import MapperEvents
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assertions
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.util import with_metaclass
Base = None
User = Address = None
class DeclarativeTestBase(
fixtures.TestBase,
testing.AssertsExecutionResults,
testing.AssertsCompiledSQL,
):
__dialect__ = "default"
def setup(self):
global Base
Base = decl.declarative_base(testing.db)
def teardown(self):
close_all_sessions()
clear_mappers()
Base.metadata.drop_all()
class DeclarativeTest(DeclarativeTestBase):
def test_basic(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
addresses = relationship("Address", backref="user")
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column(String(50), key="_email")
user_id = Column(
"user_id", Integer, ForeignKey("users.id"), key="_user_id"
)
Base.metadata.create_all()
eq_(Address.__table__.c["id"].name, "id")
eq_(Address.__table__.c["_email"].name, "email")
eq_(Address.__table__.c["_user_id"].name, "user_id")
u1 = User(
name="u1", addresses=[Address(email="one"), Address(email="two")]
)
sess = create_session()
sess.add(u1)
sess.flush()
sess.expunge_all()
eq_(
sess.query(User).all(),
[
User(
name="u1",
addresses=[Address(email="one"), Address(email="two")],
)
],
)
a1 = sess.query(Address).filter(Address.email == "two").one()
eq_(a1, Address(email="two"))
eq_(a1.user, User(name="u1"))
def test_deferred_reflection_default_error(self):
class MyExt(object):
@classmethod
def prepare(cls):
"sample prepare method"
to_map = _DeferredMapperConfig.classes_for_base(cls)
for thingy in to_map:
thingy.map()
@classmethod
def _sa_decl_prepare(cls):
pass
class User(MyExt, Base):
__tablename__ = "user"
id = Column(Integer, primary_key=True)
assert_raises_message(
orm_exc.UnmappedClassError,
"Class test.ext.declarative.test_basic.User has a deferred "
"mapping on it. It is not yet usable as a mapped class.",
Session().query,
User,
)
User.prepare()
self.assert_compile(
Session().query(User), 'SELECT "user".id AS user_id FROM "user"'
)
def test_unicode_string_resolve(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
addresses = relationship(util.u("Address"), backref="user")
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column(String(50), key="_email")
user_id = Column(
"user_id", Integer, ForeignKey("users.id"), key="_user_id"
)
assert User.addresses.property.mapper.class_ is Address
def test_unicode_string_resolve_backref(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column(String(50), key="_email")
user_id = Column(
"user_id", Integer, ForeignKey("users.id"), key="_user_id"
)
user = relationship(
User,
backref=backref("addresses", order_by=util.u("Address.email")),
)
assert Address.user.property.mapper.class_ is User
def test_no_table(self):
def go():
class User(Base):
id = Column("id", Integer, primary_key=True)
assert_raises_message(
sa.exc.InvalidRequestError, "does not have a __table__", go
)
def test_table_args_empty_dict(self):
class MyModel(Base):
__tablename__ = "test"
id = Column(Integer, primary_key=True)
__table_args__ = {}
def test_table_args_empty_tuple(self):
class MyModel(Base):
__tablename__ = "test"
id = Column(Integer, primary_key=True)
__table_args__ = ()
def test_cant_add_columns(self):
t = Table(
"t",
Base.metadata,
Column("id", Integer, primary_key=True),
Column("data", String),
)
def go():
class User(Base):
__table__ = t
foo = Column(Integer, primary_key=True)
# can't specify new columns not already in the table
assert_raises_message(
sa.exc.ArgumentError,
"Can't add additional column 'foo' when " "specifying __table__",
go,
)
# regular re-mapping works tho
class Bar(Base):
__table__ = t
some_data = t.c.data
assert (
class_mapper(Bar).get_property("some_data").columns[0] is t.c.data
)
def test_lower_case_c_column_warning(self):
with assertions.expect_warnings(
r"Attribute 'x' on class <class .*Foo.* appears to be a "
r"non-schema 'sqlalchemy.sql.column\(\)' object; "
):
class Foo(Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
x = sa.sql.expression.column(Integer)
y = Column(Integer)
class MyMixin(object):
x = sa.sql.expression.column(Integer)
y = Column(Integer)
with assertions.expect_warnings(
r"Attribute 'x' on class <class .*MyMixin.* appears to be a "
r"non-schema 'sqlalchemy.sql.column\(\)' object; "
):
class Foo2(MyMixin, Base):
__tablename__ = "foo2"
id = Column(Integer, primary_key=True)
with assertions.expect_warnings(
r"Attribute 'x' on class <class .*Foo3.* appears to be a "
r"non-schema 'sqlalchemy.sql.column\(\)' object; "
):
class Foo3(Base):
__tablename__ = "foo3"
id = Column(Integer, primary_key=True)
@declared_attr
def x(cls):
return sa.sql.expression.column(Integer)
y = Column(Integer)
with assertions.expect_warnings(
r"Attribute 'x' on class <class .*Foo4.* appears to be a "
r"non-schema 'sqlalchemy.sql.column\(\)' object; "
):
class MyMixin2(object):
@declared_attr
def x(cls):
return sa.sql.expression.column(Integer)
y = Column(Integer)
class Foo4(MyMixin2, Base):
__tablename__ = "foo4"
id = Column(Integer, primary_key=True)
def test_column_named_twice(self):
def go():
class Foo(Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
x = Column("x", Integer)
y = Column("x", Integer)
assert_raises_message(
sa.exc.SAWarning,
"On class 'Foo', Column object 'x' named directly multiple times, "
"only one will be used: x, y",
go,
)
def test_column_repeated_under_prop(self):
def go():
class Foo(Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
x = Column("x", Integer)
y = column_property(x)
z = Column("x", Integer)
assert_raises_message(
sa.exc.SAWarning,
"On class 'Foo', Column object 'x' named directly multiple times, "
"only one will be used: x, y, z",
go,
)
def test_using_explicit_prop_in_schema_objects(self):
class Foo(Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
cprop = column_property(Column(Integer))
__table_args__ = (UniqueConstraint(cprop),)
uq = [
c
for c in Foo.__table__.constraints
if isinstance(c, UniqueConstraint)
][0]
is_(uq.columns.cprop, Foo.__table__.c.cprop)
class Bar(Base):
__tablename__ = "bar"
id = Column(Integer, primary_key=True)
cprop = deferred(Column(Integer))
__table_args__ = (CheckConstraint(cprop > sa.func.foo()),)
ck = [
c
for c in Bar.__table__.constraints
if isinstance(c, CheckConstraint)
][0]
is_(ck.columns.cprop, Bar.__table__.c.cprop)
if testing.requires.python3.enabled:
# test the existing failure case in case something changes
def go():
class Bat(Base):
__tablename__ = "bat"
id = Column(Integer, primary_key=True)
cprop = deferred(Column(Integer))
# we still can't do an expression like
# "cprop > 5" because the column property isn't
# a full blown column
__table_args__ = (CheckConstraint(cprop > 5),)
assert_raises(TypeError, go)
def test_relationship_level_msg_for_invalid_callable(self):
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(Integer, ForeignKey("a.id"))
a = relationship("a")
assert_raises_message(
sa.exc.ArgumentError,
"relationship 'a' expects a class or a mapper "
"argument .received: .*Table",
configure_mappers,
)
def test_relationship_level_msg_for_invalid_object(self):
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(Integer, ForeignKey("a.id"))
a = relationship(A.__table__)
assert_raises_message(
sa.exc.ArgumentError,
"relationship 'a' expects a class or a mapper "
"argument .received: .*Table",
configure_mappers,
)
def test_difficult_class(self):
"""test no getattr() errors with a customized class"""
# metaclass to mock the way zope.interface breaks getattr()
class BrokenMeta(type):
def __getattribute__(self, attr):
if attr == "xyzzy":
raise AttributeError("xyzzy")
else:
return object.__getattribute__(self, attr)
# even though this class has an xyzzy attribute, getattr(cls,"xyzzy")
# fails
class BrokenParent(with_metaclass(BrokenMeta)):
xyzzy = "magic"
# _as_declarative() inspects obj.__class__.__bases__
class User(BrokenParent, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
decl.instrument_declarative(User, {}, Base.metadata)
def test_reserved_identifiers(self):
def go1():
class User1(Base):
__tablename__ = "user1"
id = Column(Integer, primary_key=True)
metadata = Column(Integer)
def go2():
class User2(Base):
__tablename__ = "user2"
id = Column(Integer, primary_key=True)
metadata = relationship("Address")
for go in (go1, go2):
assert_raises_message(
exc.InvalidRequestError,
"Attribute name 'metadata' is reserved "
"for the MetaData instance when using a "
"declarative base class.",
go,
)
def test_undefer_column_name(self):
# TODO: not sure if there was an explicit
# test for this elsewhere
foo = Column(Integer)
eq_(str(foo), "(no name)")
eq_(foo.key, None)
eq_(foo.name, None)
decl.base._undefer_column_name("foo", foo)
eq_(str(foo), "foo")
eq_(foo.key, "foo")
eq_(foo.name, "foo")
def test_recompile_on_othermapper(self):
"""declarative version of the same test in mappers.py"""
from sqlalchemy.orm import mapperlib
class User(Base):
__tablename__ = "users"
id = Column("id", Integer, primary_key=True)
name = Column("name", String(50))
class Address(Base):
__tablename__ = "addresses"
id = Column("id", Integer, primary_key=True)
email = Column("email", String(50))
user_id = Column("user_id", Integer, ForeignKey("users.id"))
user = relationship(
"User", primaryjoin=user_id == User.id, backref="addresses"
)
assert mapperlib.Mapper._new_mappers is True
u = User() # noqa
assert User.addresses
assert mapperlib.Mapper._new_mappers is False
def test_string_dependency_resolution(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
addresses = relationship(
"Address",
order_by="desc(Address.email)",
primaryjoin="User.id==Address.user_id",
foreign_keys="[Address.user_id]",
backref=backref(
"user",
primaryjoin="User.id==Address.user_id",
foreign_keys="[Address.user_id]",
),
)
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column(String(50))
user_id = Column(Integer) # note no foreign key
Base.metadata.create_all()
sess = create_session()
u1 = User(
name="ed",
addresses=[
Address(email="abc"),
Address(email="def"),
Address(email="xyz"),
],
)
sess.add(u1)
sess.flush()
sess.expunge_all()
eq_(
sess.query(User).filter(User.name == "ed").one(),
User(
name="ed",
addresses=[
Address(email="xyz"),
Address(email="def"),
Address(email="abc"),
],
),
)
class Foo(Base, fixtures.ComparableEntity):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
rel = relationship("User", primaryjoin="User.addresses==Foo.id")
assert_raises_message(
exc.InvalidRequestError,
"'addresses' is not an instance of " "ColumnProperty",
configure_mappers,
)
def test_string_dependency_resolution_synonym(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
Base.metadata.create_all()
sess = create_session()
u1 = User(name="ed")
sess.add(u1)
sess.flush()
sess.expunge_all()
eq_(sess.query(User).filter(User.name == "ed").one(), User(name="ed"))
class Foo(Base, fixtures.ComparableEntity):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
_user_id = Column(Integer)
rel = relationship(
"User",
uselist=False,
foreign_keys=[User.id],
primaryjoin="Foo.user_id==User.id",
)
@synonym_for("_user_id")
@property
def user_id(self):
return self._user_id
foo = Foo()
foo.rel = u1
assert foo.rel == u1
def test_string_dependency_resolution_orm_descriptor(self):
from sqlalchemy.ext.hybrid import hybrid_property
class User(Base):
__tablename__ = "user"
id = Column(Integer, primary_key=True)
firstname = Column(String(50))
lastname = Column(String(50))
game_id = Column(Integer, ForeignKey("game.id"))
@hybrid_property
def fullname(self):
return self.firstname + " " + self.lastname
class Game(Base):
__tablename__ = "game"
id = Column(Integer, primary_key=True)
name = Column(String(50))
users = relationship("User", order_by="User.fullname")
s = Session()
self.assert_compile(
s.query(Game).options(joinedload(Game.users)),
"SELECT game.id AS game_id, game.name AS game_name, "
"user_1.id AS user_1_id, user_1.firstname AS user_1_firstname, "
"user_1.lastname AS user_1_lastname, "
"user_1.game_id AS user_1_game_id "
'FROM game LEFT OUTER JOIN "user" AS user_1 ON game.id = '
"user_1.game_id ORDER BY "
"user_1.firstname || :firstname_1 || user_1.lastname",
)
def test_string_dependency_resolution_asselectable(self):
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
b_id = Column(ForeignKey("b.id"))
d = relationship(
"D",
secondary="join(B, D, B.d_id == D.id)."
"join(C, C.d_id == D.id)",
primaryjoin="and_(A.b_id == B.id, A.id == C.a_id)",
secondaryjoin="D.id == B.d_id",
)
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
d_id = Column(ForeignKey("d.id"))
class C(Base):
__tablename__ = "c"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
d_id = Column(ForeignKey("d.id"))
class D(Base):
__tablename__ = "d"
id = Column(Integer, primary_key=True)
s = Session()
self.assert_compile(
s.query(A).join(A.d),
"SELECT a.id AS a_id, a.b_id AS a_b_id FROM a JOIN "
"(b AS b_1 JOIN d AS d_1 ON b_1.d_id = d_1.id "
"JOIN c AS c_1 ON c_1.d_id = d_1.id) ON a.b_id = b_1.id "
"AND a.id = c_1.a_id JOIN d ON d.id = b_1.d_id",
)
def test_string_dependency_resolution_no_table(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
class Bar(Base, fixtures.ComparableEntity):
__tablename__ = "bar"
id = Column(Integer, primary_key=True)
rel = relationship("User", primaryjoin="User.id==Bar.__table__.id")
assert_raises_message(
AttributeError,
"does not have a mapped column named " "'__table__'",
configure_mappers,
)
def test_string_w_pj_annotations(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column(String(50))
user_id = Column(Integer)
user = relationship(
"User", primaryjoin="remote(User.id)==foreign(Address.user_id)"
)
eq_(
Address.user.property._join_condition.local_remote_pairs,
[(Address.__table__.c.user_id, User.__table__.c.id)],
)
def test_string_dependency_resolution_no_magic(self):
"""test that full tinkery expressions work as written"""
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
addresses = relationship(
"Address",
primaryjoin="User.id==Address.user_id.prop.columns[0]",
)
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey("users.id"))
configure_mappers()
eq_(
str(User.addresses.prop.primaryjoin),
"users.id = addresses.user_id",
)
def test_string_dependency_resolution_module_qualified(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
addresses = relationship(
"%s.Address" % __name__,
primaryjoin="%s.User.id==%s.Address.user_id.prop.columns[0]"
% (__name__, __name__),
)
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey("users.id"))
configure_mappers()
eq_(
str(User.addresses.prop.primaryjoin),
"users.id = addresses.user_id",
)
def test_string_dependency_resolution_in_backref(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
name = Column(String(50))
addresses = relationship(
"Address",
primaryjoin="User.id==Address.user_id",
backref="user",
)
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(Integer, primary_key=True)
email = Column(String(50))
user_id = Column(Integer, ForeignKey("users.id"))
configure_mappers()
eq_(
str(User.addresses.property.primaryjoin),
str(Address.user.property.primaryjoin),
)
def test_string_dependency_resolution_tables(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
name = Column(String(50))
props = relationship(
"Prop",
secondary="user_to_prop",
primaryjoin="User.id==user_to_prop.c.u" "ser_id",
secondaryjoin="user_to_prop.c.prop_id=" "=Prop.id",
backref="users",
)
class Prop(Base, fixtures.ComparableEntity):
__tablename__ = "props"
id = Column(Integer, primary_key=True)
name = Column(String(50))
user_to_prop = Table(
"user_to_prop",
Base.metadata,
Column("user_id", Integer, ForeignKey("users.id")),
Column("prop_id", Integer, ForeignKey("props.id")),
)
configure_mappers()
assert (
class_mapper(User).get_property("props").secondary is user_to_prop
)
def test_string_dependency_resolution_schemas(self):
Base = decl.declarative_base()
class User(Base):
__tablename__ = "users"
__table_args__ = {"schema": "fooschema"}
id = Column(Integer, primary_key=True)
name = Column(String(50))
props = relationship(
"Prop",
secondary="fooschema.user_to_prop",
primaryjoin="User.id==fooschema.user_to_prop.c.user_id",
secondaryjoin="fooschema.user_to_prop.c.prop_id==Prop.id",
backref="users",
)
class Prop(Base):
__tablename__ = "props"
__table_args__ = {"schema": "fooschema"}
id = Column(Integer, primary_key=True)
name = Column(String(50))
user_to_prop = Table(
"user_to_prop",
Base.metadata,
Column("user_id", Integer, ForeignKey("fooschema.users.id")),
Column("prop_id", Integer, ForeignKey("fooschema.props.id")),
schema="fooschema",
)
configure_mappers()
assert (
class_mapper(User).get_property("props").secondary is user_to_prop
)
def test_string_dependency_resolution_annotations(self):
Base = decl.declarative_base()
class Parent(Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
name = Column(String)
children = relationship(
"Child",
primaryjoin="Parent.name=="
"remote(foreign(func.lower(Child.name_upper)))",
)
class Child(Base):
__tablename__ = "child"
id = Column(Integer, primary_key=True)
name_upper = Column(String)
configure_mappers()
eq_(
Parent.children.property._calculated_foreign_keys,
set([Child.name_upper.property.columns[0]]),
)
def test_shared_class_registry(self):
reg = {}
Base1 = decl.declarative_base(testing.db, class_registry=reg)
Base2 = decl.declarative_base(testing.db, class_registry=reg)
class A(Base1):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
class B(Base2):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
aid = Column(Integer, ForeignKey(A.id))
as_ = relationship("A")
assert B.as_.property.mapper.class_ is A
def test_uncompiled_attributes_in_relationship(self):
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column(String(50))
user_id = Column(Integer, ForeignKey("users.id"))
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
addresses = relationship(
"Address",
order_by=Address.email,
foreign_keys=Address.user_id,
remote_side=Address.user_id,
)
# get the mapper for User. User mapper will compile,
# "addresses" relationship will call upon Address.user_id for
# its clause element. Address.user_id is a _CompileOnAttr,
# which then calls class_mapper(Address). But ! We're already
# "in compilation", but class_mapper(Address) needs to
# initialize regardless, or COA's assertion fails and things
# generally go downhill from there.
class_mapper(User)
Base.metadata.create_all()
sess = create_session()
u1 = User(
name="ed",
addresses=[
Address(email="abc"),
Address(email="xyz"),
Address(email="def"),
],
)
sess.add(u1)
sess.flush()
sess.expunge_all()
eq_(
sess.query(User).filter(User.name == "ed").one(),
User(
name="ed",
addresses=[
Address(email="abc"),
Address(email="def"),
Address(email="xyz"),
],
),
)
def test_nice_dependency_error(self):
class User(Base):
__tablename__ = "users"
id = Column("id", Integer, primary_key=True)
addresses = relationship("Address")
class Address(Base):
__tablename__ = "addresses"
id = Column(Integer, primary_key=True)
foo = sa.orm.column_property(User.id == 5)
# this used to raise an error when accessing User.id but that's
# no longer the case since we got rid of _CompileOnAttr.
assert_raises(sa.exc.ArgumentError, configure_mappers)
def test_nice_dependency_error_works_with_hasattr(self):
class User(Base):
__tablename__ = "users"
id = Column("id", Integer, primary_key=True)
addresses = relationship("Address")
# hasattr() on a compile-loaded attribute
try:
hasattr(User.addresses, "property")
except exc.InvalidRequestError:
assert sa.util.compat.py3k
# the exception is preserved. Remains the
# same through repeated calls.
for i in range(3):
assert_raises_message(
sa.exc.InvalidRequestError,
"^One or more mappers failed to initialize"
" - can't proceed with initialization of other mappers. "
r"Triggering mapper: 'mapped class User->users'. "
"Original exception was: When initializing.*",
configure_mappers,
)
def test_custom_base(self):
class MyBase(object):
def foobar(self):
return "foobar"
Base = decl.declarative_base(cls=MyBase)
assert hasattr(Base, "metadata")
assert Base().foobar() == "foobar"
def test_uses_get_on_class_col_fk(self):
# test [ticket:1492]
class Master(Base):
__tablename__ = "master"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
class Detail(Base):
__tablename__ = "detail"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
master_id = Column(None, ForeignKey(Master.id))
master = relationship(Master)
Base.metadata.create_all()
configure_mappers()
assert class_mapper(Detail).get_property("master").strategy.use_get
m1 = Master()
d1 = Detail(master=m1)
sess = create_session()
sess.add(d1)
sess.flush()
sess.expunge_all()
d1 = sess.query(Detail).first()
m1 = sess.query(Master).first()
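# The many-to-one uses the get() strategy and m1 is already in the identity map, so accessing d1.master should emit no SQL.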
def go():
assert d1.master
self.assert_sql_count(testing.db, go, 0)
def test_index_doesnt_compile(self):
class User(Base):
__tablename__ = "users"
id = Column("id", Integer, primary_key=True)
name = Column("name", String(50))
error = relationship("Address")
i = Index("my_index", User.name)
# compile fails due to the nonexistent Addresses relationship
assert_raises(sa.exc.InvalidRequestError, configure_mappers)
# index configured
assert i in User.__table__.indexes
assert User.__table__.c.id not in set(i.columns)
assert User.__table__.c.name in set(i.columns)
# tables create fine
Base.metadata.create_all()
def test_add_prop(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
User.name = Column("name", String(50))
User.addresses = relationship("Address", backref="user")
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
Address.email = Column(String(50), key="_email")
Address.user_id = Column(
"user_id", Integer, ForeignKey("users.id"), key="_user_id"
)
Base.metadata.create_all()
eq_(Address.__table__.c["id"].name, "id")
eq_(Address.__table__.c["_email"].name, "email")
eq_(Address.__table__.c["_user_id"].name, "user_id")
u1 = User(
name="u1", addresses=[Address(email="one"), Address(email="two")]
)
sess = create_session()
sess.add(u1)
sess.flush()
sess.expunge_all()
eq_(
sess.query(User).all(),
[
User(
name="u1",
addresses=[Address(email="one"), Address(email="two")],
)
],
)
a1 = sess.query(Address).filter(Address.email == "two").one()
eq_(a1, Address(email="two"))
eq_(a1.user, User(name="u1"))
def test_alt_name_attr_subclass_column_inline(self):
# [ticket:2900]
class A(Base):
__tablename__ = "a"
id = Column("id", Integer, primary_key=True)
data = Column("data")
class ASub(A):
brap = A.data
assert ASub.brap.property is A.data.property
assert isinstance(
ASub.brap.original_property, descriptor_props.SynonymProperty
)
def test_alt_name_attr_subclass_relationship_inline(self):
# [ticket:2900]
class A(Base):
__tablename__ = "a"
id = Column("id", Integer, primary_key=True)
b_id = Column(Integer, ForeignKey("b.id"))
b = relationship("B", backref="as_")
class B(Base):
__tablename__ = "b"
id = Column("id", Integer, primary_key=True)
configure_mappers()
class ASub(A):
brap = A.b
assert ASub.brap.property is A.b.property
assert isinstance(
ASub.brap.original_property, descriptor_props.SynonymProperty
)
ASub(brap=B())
def test_alt_name_attr_subclass_column_attrset(self):
# [ticket:2900]
class A(Base):
__tablename__ = "a"
id = Column("id", Integer, primary_key=True)
data = Column("data")
A.brap = A.data
assert A.brap.property is A.data.property
assert isinstance(
A.brap.original_property, descriptor_props.SynonymProperty
)
def test_alt_name_attr_subclass_relationship_attrset(self):
# [ticket:2900]
class A(Base):
__tablename__ = "a"
id = Column("id", Integer, primary_key=True)
b_id = Column(Integer, ForeignKey("b.id"))
b = relationship("B", backref="as_")
A.brap = A.b
class B(Base):
__tablename__ = "b"
id = Column("id", Integer, primary_key=True)
assert A.brap.property is A.b.property
assert isinstance(
A.brap.original_property, descriptor_props.SynonymProperty
)
A(brap=B())
def test_eager_order_by(self):
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column("email", String(50))
user_id = Column("user_id", Integer, ForeignKey("users.id"))
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
addresses = relationship("Address", order_by=Address.email)
Base.metadata.create_all()
u1 = User(
name="u1", addresses=[Address(email="two"), Address(email="one")]
)
sess = create_session()
sess.add(u1)
sess.flush()
sess.expunge_all()
eq_(
sess.query(User).options(joinedload(User.addresses)).all(),
[
User(
name="u1",
addresses=[Address(email="one"), Address(email="two")],
)
],
)
def test_order_by_multi(self):
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column("email", String(50))
user_id = Column("user_id", Integer, ForeignKey("users.id"))
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
addresses = relationship(
"Address", order_by=(Address.email, Address.id)
)
Base.metadata.create_all()
u1 = User(
name="u1", addresses=[Address(email="two"), Address(email="one")]
)
sess = create_session()
sess.add(u1)
sess.flush()
sess.expunge_all()
u = sess.query(User).filter(User.name == "u1").one()
u.addresses
def test_as_declarative(self):
class User(fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
addresses = relationship("Address", backref="user")
class Address(fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column("email", String(50))
user_id = Column("user_id", Integer, ForeignKey("users.id"))
reg = {}
decl.instrument_declarative(User, reg, Base.metadata)
decl.instrument_declarative(Address, reg, Base.metadata)
Base.metadata.create_all()
u1 = User(
name="u1", addresses=[Address(email="one"), Address(email="two")]
)
sess = create_session()
sess.add(u1)
sess.flush()
sess.expunge_all()
eq_(
sess.query(User).all(),
[
User(
name="u1",
addresses=[Address(email="one"), Address(email="two")],
)
],
)
def test_custom_mapper_attribute(self):
def mymapper(cls, tbl, **kwargs):
m = sa.orm.mapper(cls, tbl, **kwargs)
m.CHECK = True
return m
base = decl.declarative_base()
class Foo(base):
__tablename__ = "foo"
__mapper_cls__ = mymapper
id = Column(Integer, primary_key=True)
eq_(Foo.__mapper__.CHECK, True)
def test_custom_mapper_argument(self):
def mymapper(cls, tbl, **kwargs):
m = sa.orm.mapper(cls, tbl, **kwargs)
m.CHECK = True
return m
base = decl.declarative_base(mapper=mymapper)
class Foo(base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
eq_(Foo.__mapper__.CHECK, True)
def test_no_change_to_all_descriptors(self):
base = decl.declarative_base()
class Foo(base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
eq_(Foo.__mapper__.all_orm_descriptors.keys(), ["id"])
def test_oops(self):
with testing.expect_warnings(
"Ignoring declarative-like tuple value of " "attribute 'name'"
):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column("id", Integer, primary_key=True)
name = (Column("name", String(50)),)
def test_table_args_no_dict(self):
class Foo1(Base):
__tablename__ = "foo"
__table_args__ = (ForeignKeyConstraint(["id"], ["foo.bar"]),)
id = Column("id", Integer, primary_key=True)
bar = Column("bar", Integer)
assert Foo1.__table__.c.id.references(Foo1.__table__.c.bar)
def test_table_args_type(self):
def err():
class Foo1(Base):
__tablename__ = "foo"
__table_args__ = ForeignKeyConstraint(["id"], ["foo.id"])
id = Column("id", Integer, primary_key=True)
assert_raises_message(
sa.exc.ArgumentError, "__table_args__ value must be a tuple, ", err
)
def test_table_args_none(self):
class Foo2(Base):
__tablename__ = "foo"
__table_args__ = None
id = Column("id", Integer, primary_key=True)
assert Foo2.__table__.kwargs == {}
def test_table_args_dict_format(self):
class Foo2(Base):
__tablename__ = "foo"
__table_args__ = {"mysql_engine": "InnoDB"}
id = Column("id", Integer, primary_key=True)
assert Foo2.__table__.kwargs["mysql_engine"] == "InnoDB"
def test_table_args_tuple_format(self):
class Foo2(Base):
__tablename__ = "foo"
__table_args__ = {"mysql_engine": "InnoDB"}
id = Column("id", Integer, primary_key=True)
class Bar(Base):
__tablename__ = "bar"
__table_args__ = (
ForeignKeyConstraint(["id"], ["foo.id"]),
{"mysql_engine": "InnoDB"},
)
id = Column("id", Integer, primary_key=True)
assert Bar.__table__.c.id.references(Foo2.__table__.c.id)
assert Bar.__table__.kwargs["mysql_engine"] == "InnoDB"
def test_table_cls_attribute(self):
class Foo(Base):
__tablename__ = "foo"
@classmethod
def __table_cls__(cls, *arg, **kw):
name = arg[0]
return Table(name + "bat", *arg[1:], **kw)
id = Column(Integer, primary_key=True)
eq_(Foo.__table__.name, "foobat")
def test_table_cls_attribute_return_none(self):
from sqlalchemy.schema import Column, PrimaryKeyConstraint
class AutoTable(object):
@declared_attr
def __tablename__(cls):
return cls.__name__
@classmethod
def __table_cls__(cls, *arg, **kw):
for obj in arg[1:]:
if (
isinstance(obj, Column) and obj.primary_key
) or isinstance(obj, PrimaryKeyConstraint):
return Table(*arg, **kw)
return None
class Person(AutoTable, Base):
id = Column(Integer, primary_key=True)
class Employee(Person):
employee_name = Column(String)
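# Employee declares no primary key of its own, so __table_cls__ returns None and the subclass maps onto Person's table (single-table inheritance).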
is_(inspect(Employee).local_table, Person.__table__)
def test_expression(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
addresses = relationship("Address", backref="user")
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column("email", String(50))
user_id = Column("user_id", Integer, ForeignKey("users.id"))
User.address_count = sa.orm.column_property(
sa.select([sa.func.count(Address.id)])
.where(Address.user_id == User.id)
.scalar_subquery()
)
Base.metadata.create_all()
u1 = User(
name="u1", addresses=[Address(email="one"), Address(email="two")]
)
sess = create_session()
sess.add(u1)
sess.flush()
sess.expunge_all()
eq_(
sess.query(User).all(),
[
User(
name="u1",
address_count=2,
addresses=[Address(email="one"), Address(email="two")],
)
],
)
def test_useless_declared_attr(self):
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column("email", String(50))
user_id = Column("user_id", Integer, ForeignKey("users.id"))
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
addresses = relationship("Address", backref="user")
@declared_attr
def address_count(cls):
# this doesn't really gain us anything, but if
# one is used, let's have it function as expected...
return sa.orm.column_property(
sa.select([sa.func.count(Address.id)])
.where(Address.user_id == cls.id)
.scalar_subquery()
)
Base.metadata.create_all()
u1 = User(
name="u1", addresses=[Address(email="one"), Address(email="two")]
)
sess = create_session()
sess.add(u1)
sess.flush()
sess.expunge_all()
eq_(
sess.query(User).all(),
[
User(
name="u1",
address_count=2,
addresses=[Address(email="one"), Address(email="two")],
)
],
)
def test_declared_on_base_class(self):
class MyBase(Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
@declared_attr
def somecol(cls):
return Column(Integer)
class MyClass(MyBase):
__tablename__ = "bar"
id = Column(Integer, ForeignKey("foo.id"), primary_key=True)
# previously, the 'somecol' declared_attr would be ignored
# by the mapping and would remain unused. now we take
# it as part of MyBase.
assert "somecol" in MyBase.__table__.c
assert "somecol" not in MyClass.__table__.c
def test_decl_cascading_warns_non_mixin(self):
with expect_warnings(
"Use of @declared_attr.cascading only applies to "
"Declarative 'mixin' and 'abstract' classes. "
"Currently, this flag is ignored on mapped class "
"<class '.*.MyBase'>"
):
class MyBase(Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
@declared_attr.cascading
def somecol(cls):
return Column(Integer)
def test_column(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
User.a = Column("a", String(10))
User.b = Column(String(10))
Base.metadata.create_all()
u1 = User(name="u1", a="a", b="b")
eq_(u1.a, "a")
eq_(User.a.get_history(u1), (["a"], (), ()))
sess = create_session()
sess.add(u1)
sess.flush()
sess.expunge_all()
eq_(sess.query(User).all(), [User(name="u1", a="a", b="b")])
def test_column_properties(self):
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column(String(50))
user_id = Column(Integer, ForeignKey("users.id"))
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
adr_count = sa.orm.column_property(
sa.select(
[sa.func.count(Address.id)], Address.user_id == id
).scalar_subquery()
)
addresses = relationship(Address)
Base.metadata.create_all()
u1 = User(
name="u1", addresses=[Address(email="one"), Address(email="two")]
)
sess = create_session()
sess.add(u1)
sess.flush()
sess.expunge_all()
eq_(
sess.query(User).all(),
[
User(
name="u1",
adr_count=2,
addresses=[Address(email="one"), Address(email="two")],
)
],
)
def test_column_properties_2(self):
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(Integer, primary_key=True)
email = Column(String(50))
user_id = Column(Integer, ForeignKey("users.id"))
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column("id", Integer, primary_key=True)
name = Column("name", String(50))
# this is not "valid" but we want to test that Address.id
# doesn't get stuck into user's table
adr_count = Address.id
eq_(set(User.__table__.c.keys()), set(["id", "name"]))
eq_(set(Address.__table__.c.keys()), set(["id", "email", "user_id"]))
def test_deferred(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = sa.orm.deferred(Column(String(50)))
Base.metadata.create_all()
sess = create_session()
sess.add(User(name="u1"))
sess.flush()
sess.expunge_all()
u1 = sess.query(User).filter(User.name == "u1").one()
assert "name" not in u1.__dict__
def go():
eq_(u1.name, "u1")
self.assert_sql_count(testing.db, go, 1)
def test_composite_inline(self):
class AddressComposite(fixtures.ComparableEntity):
def __init__(self, street, state):
self.street = street
self.state = state
def __composite_values__(self):
return [self.street, self.state]
class User(Base, fixtures.ComparableEntity):
__tablename__ = "user"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
address = composite(
AddressComposite,
Column("street", String(50)),
Column("state", String(2)),
)
Base.metadata.create_all()
sess = Session()
sess.add(User(address=AddressComposite("123 anywhere street", "MD")))
sess.commit()
eq_(
sess.query(User).all(),
[User(address=AddressComposite("123 anywhere street", "MD"))],
)
def test_composite_separate(self):
class AddressComposite(fixtures.ComparableEntity):
def __init__(self, street, state):
self.street = street
self.state = state
def __composite_values__(self):
return [self.street, self.state]
class User(Base, fixtures.ComparableEntity):
__tablename__ = "user"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
street = Column(String(50))
state = Column(String(2))
address = composite(AddressComposite, street, state)
Base.metadata.create_all()
sess = Session()
sess.add(User(address=AddressComposite("123 anywhere street", "MD")))
sess.commit()
eq_(
sess.query(User).all(),
[User(address=AddressComposite("123 anywhere street", "MD"))],
)
def test_mapping_to_join(self):
users = Table(
"users", Base.metadata, Column("id", Integer, primary_key=True)
)
addresses = Table(
"addresses",
Base.metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("users.id")),
)
usersaddresses = sa.join(
users, addresses, users.c.id == addresses.c.user_id
)
class User(Base):
__table__ = usersaddresses
__table_args__ = {"primary_key": [users.c.id]}
# need to use column_property for now
user_id = column_property(users.c.id, addresses.c.user_id)
address_id = addresses.c.id
assert User.__mapper__.get_property("user_id").columns[0] is users.c.id
assert (
User.__mapper__.get_property("user_id").columns[1]
is addresses.c.user_id
)
def test_synonym_inline(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
_name = Column("name", String(50))
def _set_name(self, name):
self._name = "SOMENAME " + name
def _get_name(self):
return self._name
name = sa.orm.synonym(
"_name", descriptor=property(_get_name, _set_name)
)
Base.metadata.create_all()
sess = create_session()
u1 = User(name="someuser")
eq_(u1.name, "SOMENAME someuser")
sess.add(u1)
sess.flush()
eq_(
sess.query(User).filter(User.name == "SOMENAME someuser").one(), u1
)
def test_synonym_no_descriptor(self):
from sqlalchemy.orm.properties import ColumnProperty
class CustomCompare(ColumnProperty.Comparator):
__hash__ = None
def __eq__(self, other):
return self.__clause_element__() == other + " FOO"
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
_name = Column("name", String(50))
name = sa.orm.synonym("_name", comparator_factory=CustomCompare)
Base.metadata.create_all()
sess = create_session()
u1 = User(name="someuser FOO")
sess.add(u1)
sess.flush()
eq_(sess.query(User).filter(User.name == "someuser").one(), u1)
def test_synonym_added(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
_name = Column("name", String(50))
def _set_name(self, name):
self._name = "SOMENAME " + name
def _get_name(self):
return self._name
name = property(_get_name, _set_name)
User.name = sa.orm.synonym("_name", descriptor=User.name)
Base.metadata.create_all()
sess = create_session()
u1 = User(name="someuser")
eq_(u1.name, "SOMENAME someuser")
sess.add(u1)
sess.flush()
eq_(
sess.query(User).filter(User.name == "SOMENAME someuser").one(), u1
)
def test_reentrant_compile_via_foreignkey(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
addresses = relationship("Address", backref="user")
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column("email", String(50))
user_id = Column("user_id", Integer, ForeignKey(User.id))
# previous versions would force a re-entrant mapper compile via
# the User.id inside the ForeignKey but this is no longer the
# case
sa.orm.configure_mappers()
eq_(
list(Address.user_id.property.columns[0].foreign_keys)[0].column,
User.__table__.c.id,
)
Base.metadata.create_all()
u1 = User(
name="u1", addresses=[Address(email="one"), Address(email="two")]
)
sess = create_session()
sess.add(u1)
sess.flush()
sess.expunge_all()
eq_(
sess.query(User).all(),
[
User(
name="u1",
addresses=[Address(email="one"), Address(email="two")],
)
],
)
def test_relationship_reference(self):
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column("email", String(50))
user_id = Column("user_id", Integer, ForeignKey("users.id"))
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
addresses = relationship(
"Address", backref="user", primaryjoin=id == Address.user_id
)
User.address_count = sa.orm.column_property(
sa.select([sa.func.count(Address.id)])
.where(Address.user_id == User.id)
.scalar_subquery()
)
Base.metadata.create_all()
u1 = User(
name="u1", addresses=[Address(email="one"), Address(email="two")]
)
sess = create_session()
sess.add(u1)
sess.flush()
sess.expunge_all()
eq_(
sess.query(User).all(),
[
User(
name="u1",
address_count=2,
addresses=[Address(email="one"), Address(email="two")],
)
],
)
def test_pk_with_fk_init(self):
class Bar(Base):
__tablename__ = "bar"
id = sa.Column(
sa.Integer, sa.ForeignKey("foo.id"), primary_key=True
)
ex = sa.Column(sa.Integer, primary_key=True)
class Foo(Base):
__tablename__ = "foo"
id = sa.Column(sa.Integer, primary_key=True)
bars = sa.orm.relationship(Bar)
assert Bar.__mapper__.primary_key[0] is Bar.__table__.c.id
assert Bar.__mapper__.primary_key[1] is Bar.__table__.c.ex
def test_with_explicit_autoloaded(self):
meta = MetaData(testing.db)
t1 = Table(
"t1",
meta,
Column("id", String(50), primary_key=True),
Column("data", String(50)),
)
meta.create_all()
try:
class MyObj(Base):
__table__ = Table("t1", Base.metadata, autoload=True)
sess = create_session()
m = MyObj(id="someid", data="somedata")
sess.add(m)
sess.flush()
eq_(t1.select().execute().fetchall(), [("someid", "somedata")])
finally:
meta.drop_all()
def test_synonym_for(self):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
@decl.synonym_for("name")
@property
def namesyn(self):
return self.name
Base.metadata.create_all()
sess = create_session()
u1 = User(name="someuser")
eq_(u1.name, "someuser")
eq_(u1.namesyn, "someuser")
sess.add(u1)
sess.flush()
rt = sess.query(User).filter(User.namesyn == "someuser").one()
eq_(rt, u1)
def test_duplicate_classes_in_base(self):
class Test(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
assert_raises_message(
sa.exc.SAWarning,
"This declarative base already contains a class with ",
lambda: type(Base)(
"Test",
(Base,),
dict(__tablename__="b", id=Column(Integer, primary_key=True)),
),
)
@testing.teardown_events(MapperEvents)
def test_instrument_class_before_instrumentation(self):
# test #3388
canary = mock.Mock()
@event.listens_for(mapper, "instrument_class")
def instrument_class(mp, cls):
canary.instrument_class(mp, cls)
@event.listens_for(object, "class_instrument")
def class_instrument(cls):
canary.class_instrument(cls)
class Test(Base):
__tablename__ = "test"
id = Column(Integer, primary_key=True)
eq_(
canary.mock_calls,
[
mock.call.instrument_class(Test.__mapper__, Test),
mock.call.class_instrument(Test),
],
)
def test_cls_docstring(self):
class MyBase(object):
"""MyBase Docstring"""
Base = decl.declarative_base(cls=MyBase)
eq_(Base.__doc__, MyBase.__doc__)
def test_delattr_mapped_raises(self):
Base = decl.declarative_base()
class Foo(Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
data = Column(String)
def go():
del Foo.data
assert_raises_message(
NotImplementedError,
"Can't un-map individual mapped attributes on a mapped class.",
go,
)
def test_delattr_hybrid_fine(self):
Base = decl.declarative_base()
class Foo(Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
data = Column(String)
@hybrid_property
def data_hybrid(self):
return self.data
assert "data_hybrid" in Foo.__mapper__.all_orm_descriptors.keys()
del Foo.data_hybrid
assert "data_hybrid" not in Foo.__mapper__.all_orm_descriptors.keys()
assert not hasattr(Foo, "data_hybrid")
def test_setattr_hybrid_updates_descriptors(self):
Base = decl.declarative_base()
class Foo(Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
data = Column(String)
assert "data_hybrid" not in Foo.__mapper__.all_orm_descriptors.keys()
@hybrid_property
def data_hybrid(self):
return self.data
Foo.data_hybrid = data_hybrid
assert "data_hybrid" in Foo.__mapper__.all_orm_descriptors.keys()
del Foo.data_hybrid
assert "data_hybrid" not in Foo.__mapper__.all_orm_descriptors.keys()
assert not hasattr(Foo, "data_hybrid")
def _produce_test(inline, stringbased):
class ExplicitJoinTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global User, Address
Base = decl.declarative_base(metadata=metadata)
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50))
class Address(Base, fixtures.ComparableEntity):
__tablename__ = "addresses"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column(String(50))
user_id = Column(Integer, ForeignKey("users.id"))
if inline:
if stringbased:
user = relationship(
"User",
primaryjoin="User.id==Address.user_id",
backref="addresses",
)
else:
user = relationship(
User,
primaryjoin=User.id == user_id,
backref="addresses",
)
if not inline:
configure_mappers()
if stringbased:
Address.user = relationship(
"User",
primaryjoin="User.id==Address.user_id",
backref="addresses",
)
else:
Address.user = relationship(
User,
primaryjoin=User.id == Address.user_id,
backref="addresses",
)
@classmethod
def insert_data(cls, connection):
params = [
dict(list(zip(("id", "name"), column_values)))
for column_values in [
(7, "jack"),
(8, "ed"),
(9, "fred"),
(10, "chuck"),
]
]
connection.execute(User.__table__.insert(), params)
connection.execute(
Address.__table__.insert(),
[
dict(list(zip(("id", "user_id", "email"), column_values)))
for column_values in [
(1, 7, "[email protected]"),
(2, 8, "[email protected]"),
(3, 8, "[email protected]"),
(4, 8, "[email protected]"),
(5, 9, "[email protected]"),
]
],
)
def test_aliased_join(self):
# this query will screw up if the aliasing enabled in
# query.join() gets applied to the right half of the join
# condition inside the any(). the join condition inside of
# any() comes from the "primaryjoin" of the relationship,
# and should not be annotated with _orm_adapt.
# PropertyLoader.Comparator will annotate the left side with
# _orm_adapt, though.
sess = create_session()
eq_(
sess.query(User)
.join(User.addresses, aliased=True)
.filter(Address.email == "[email protected]")
.filter(User.addresses.any(Address.email == "[email protected]"))
.all(),
[],
)
ExplicitJoinTest.__name__ = "ExplicitJoinTest%s%s" % (
inline and "Inline" or "Separate",
stringbased and "String" or "Literal",
)
return ExplicitJoinTest
for inline in True, False:
for stringbased in True, False:
testclass = _produce_test(inline, stringbased)
exec("%s = testclass" % testclass.__name__)
del testclass
|
py | 7dfcc8960e6fdf3fe62b72445b561791571df24b | from django.apps import AppConfig
class UsersAppConfig(AppConfig):
name = "outpass.users"
verbose_name = "Users"
def ready(self):
try:
import users.signals # noqa F401
except ImportError:
pass
|
py | 7dfcc8ff27ffcb2aae9277870cfc970f9ecf8745 | import datetime
from cal_setup import get_calendar_service
def main():
service = get_calendar_service()
# Call the Calendar API
now = datetime.datetime.utcnow().isoformat() + "Z" # 'Z' indicates UTC time
print("Getting List o 10 events")
events_result = (
service.events()
.list(calendarId="primary", timeMin=now, maxResults=10, singleEvents=True, orderBy="startTime")
.execute()
)
events = events_result.get("items", [])
if not events:
print("No upcoming events found.")
for event in events:
start = event["start"].get("dateTime", event["start"].get("date"))
print(start, event["summary"])
if __name__ == "__main__":
main()
|
py | 7dfcc975eab66c9cc8ded5da283bc35c55c71bce | # -*- coding: utf-8 -*-
# Author: Henry Lin <[email protected]>
# Tom Dupré la Tour
# License: BSD
import numbers
import numpy as np
import warnings
from . import OneHotEncoder
from ..base import BaseEstimator, TransformerMixin
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import check_random_state
from ..utils.validation import _check_feature_names_in
from ..utils.validation import check_scalar
from ..utils.validation import _check_sample_weight
from ..utils.stats import _weighted_percentile
from ..utils import _safe_indexing
class KBinsDiscretizer(TransformerMixin, BaseEstimator):
"""
Bin continuous data into intervals.
Read more in the :ref:`User Guide <preprocessing_discretization>`.
.. versionadded:: 0.20
Parameters
----------
n_bins : int or array-like of shape (n_features,), default=5
The number of bins to produce. Raises ValueError if ``n_bins < 2``.
encode : {'onehot', 'onehot-dense', 'ordinal'}, default='onehot'
Method used to encode the transformed result.
- 'onehot': Encode the transformed result with one-hot encoding
and return a sparse matrix. Ignored features are always
stacked to the right.
- 'onehot-dense': Encode the transformed result with one-hot encoding
and return a dense array. Ignored features are always
stacked to the right.
- 'ordinal': Return the bin identifier encoded as an integer value.
strategy : {'uniform', 'quantile', 'kmeans'}, default='quantile'
Strategy used to define the widths of the bins.
- 'uniform': All bins in each feature have identical widths.
- 'quantile': All bins in each feature have the same number of points.
- 'kmeans': Values in each bin have the same nearest center of a 1D
k-means cluster.
dtype : {np.float32, np.float64}, default=None
The desired data-type for the output. If None, output dtype is
consistent with input dtype. Only np.float32 and np.float64 are
supported.
.. versionadded:: 0.24
subsample : int or None (default='warn')
Maximum number of samples, used to fit the model, for computational
efficiency. Used when `strategy="quantile"`.
`subsample=None` means that all the training samples are used when
computing the quantiles that determine the binning thresholds.
Since quantile computation relies on sorting each column of `X` and
that sorting has an `n log(n)` time complexity,
it is recommended to use subsampling on datasets with a
very large number of samples.
.. deprecated:: 1.1
In version 1.3 and onwards, `subsample=2e5` will be the default.
random_state : int, RandomState instance or None, default=None
Determines random number generation for subsampling.
Pass an int for reproducible results across multiple function calls.
See the `subsample` parameter for more details.
See :term:`Glossary <random_state>`.
.. versionadded:: 1.1
Attributes
----------
bin_edges_ : ndarray of ndarray of shape (n_features,)
The edges of each bin. Contain arrays of varying shapes ``(n_bins_, )``
Ignored features will have empty arrays.
n_bins_ : ndarray of shape (n_features,), dtype=np.int_
Number of bins per feature. Bins whose width are too small
(i.e., <= 1e-8) are removed with a warning.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
Binarizer : Class used to bin values as ``0`` or
``1`` based on a parameter ``threshold``.
Notes
-----
In bin edges for feature ``i``, the first and last values are used only for
``inverse_transform``. During transform, bin edges are extended to::
np.concatenate([-np.inf, bin_edges_[i][1:-1], np.inf])
You can combine ``KBinsDiscretizer`` with
:class:`~sklearn.compose.ColumnTransformer` if you only want to preprocess
part of the features.
``KBinsDiscretizer`` might produce constant features (e.g., when
``encode = 'onehot'`` and certain bins do not contain any data).
These features can be removed with feature selection algorithms
(e.g., :class:`~sklearn.feature_selection.VarianceThreshold`).
Examples
--------
>>> from sklearn.preprocessing import KBinsDiscretizer
>>> X = [[-2, 1, -4, -1],
... [-1, 2, -3, -0.5],
... [ 0, 3, -2, 0.5],
... [ 1, 4, -1, 2]]
>>> est = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform')
>>> est.fit(X)
KBinsDiscretizer(...)
>>> Xt = est.transform(X)
>>> Xt # doctest: +SKIP
array([[ 0., 0., 0., 0.],
[ 1., 1., 1., 0.],
[ 2., 2., 2., 1.],
[ 2., 2., 2., 2.]])
Sometimes it may be useful to convert the data back into the original
feature space. The ``inverse_transform`` function converts the binned
data into the original feature space. Each value will be equal to the mean
of the two bin edges.
>>> est.bin_edges_[0]
array([-2., -1., 0., 1.])
>>> est.inverse_transform(Xt)
array([[-1.5, 1.5, -3.5, -0.5],
[-0.5, 2.5, -2.5, -0.5],
[ 0.5, 3.5, -1.5, 0.5],
[ 0.5, 3.5, -1.5, 1.5]])
"""
def __init__(
self,
n_bins=5,
*,
encode="onehot",
strategy="quantile",
dtype=None,
subsample="warn",
random_state=None,
):
self.n_bins = n_bins
self.encode = encode
self.strategy = strategy
self.dtype = dtype
self.subsample = subsample
self.random_state = random_state
def fit(self, X, y=None, sample_weight=None):
"""
Fit the estimator.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to be discretized.
y : None
Ignored. This parameter exists only for compatibility with
:class:`~sklearn.pipeline.Pipeline`.
sample_weight : array-like shape (n_samples,) default = None
Weights for each sample, used to calculate quantiles if
'strategy = "quantile"' else ignored.
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(X, dtype="numeric")
supported_dtype = (np.float64, np.float32)
if self.dtype in supported_dtype:
output_dtype = self.dtype
elif self.dtype is None:
output_dtype = X.dtype
else:
raise ValueError(
"Valid options for 'dtype' are "
f"{supported_dtype + (None,)}. Got dtype={self.dtype} "
" instead."
)
n_samples, n_features = X.shape
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if self.strategy != "quantile" and sample_weight is not None:
warnings.warn(
"sample_weight parameter is ignored when the strategy is not `quantile`"
)
if self.strategy == "quantile" and self.subsample is not None:
if self.subsample == "warn":
if n_samples > 2e5:
warnings.warn(
"In version 1.3 onwards, subsample=2e5 "
"will be used by default. Set subsample explicitly to "
"silence this warning in the mean time. Set "
"subsample=None to disable subsampling explicitly.",
FutureWarning,
)
else:
self.subsample = check_scalar(
self.subsample, "subsample", numbers.Integral, min_val=1
)
rng = check_random_state(self.random_state)
if n_samples > self.subsample:
subsample_idx = rng.choice(
n_samples, size=self.subsample, replace=False
)
X = _safe_indexing(X, subsample_idx)
elif self.strategy != "quantile" and isinstance(
self.subsample, numbers.Integral
):
raise ValueError(
f"Invalid parameter for `strategy`: {self.strategy}. "
'`subsample` must be used with `strategy="quantile"`.'
)
valid_encode = ("onehot", "onehot-dense", "ordinal")
if self.encode not in valid_encode:
raise ValueError(
"Valid options for 'encode' are {}. Got encode={!r} instead.".format(
valid_encode, self.encode
)
)
valid_strategy = ("uniform", "quantile", "kmeans")
if self.strategy not in valid_strategy:
raise ValueError(
"Valid options for 'strategy' are {}. "
"Got strategy={!r} instead.".format(valid_strategy, self.strategy)
)
n_features = X.shape[1]
n_bins = self._validate_n_bins(n_features)
bin_edges = np.zeros(n_features, dtype=object)
for jj in range(n_features):
column = X[:, jj]
col_min, col_max = column.min(), column.max()
if col_min == col_max:
warnings.warn(
"Feature %d is constant and will be replaced with 0." % jj
)
n_bins[jj] = 1
bin_edges[jj] = np.array([-np.inf, np.inf])
continue
if self.strategy == "uniform":
bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)
elif self.strategy == "quantile":
quantiles = np.linspace(0, 100, n_bins[jj] + 1)
if sample_weight is None:
percentile = np.percentile(column, quantiles)
else:
percentile = np.array(
[
_weighted_percentile(column, sample_weight, quantile)
for quantile in quantiles
]
)
bin_edges[jj] = np.asarray(percentile)
elif self.strategy == "kmeans":
from ..cluster import KMeans # fixes import loops
# Deterministic initialization with uniform spacing
uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1)
init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5
# 1D k-means procedure
km = KMeans(n_clusters=n_bins[jj], init=init, n_init=1)
centers = km.fit(column[:, None]).cluster_centers_[:, 0]
# Must sort, centers may be unsorted even with sorted init
centers.sort()
bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5
bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max]
# Remove bins whose width are too small (i.e., <= 1e-8)
if self.strategy in ("quantile", "kmeans"):
mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8
bin_edges[jj] = bin_edges[jj][mask]
if len(bin_edges[jj]) - 1 != n_bins[jj]:
warnings.warn(
"Bins whose width are too small (i.e., <= "
"1e-8) in feature %d are removed. Consider "
"decreasing the number of bins." % jj
)
n_bins[jj] = len(bin_edges[jj]) - 1
self.bin_edges_ = bin_edges
self.n_bins_ = n_bins
if "onehot" in self.encode:
self._encoder = OneHotEncoder(
categories=[np.arange(i) for i in self.n_bins_],
sparse=self.encode == "onehot",
dtype=output_dtype,
)
# Fit the OneHotEncoder with toy datasets
# so that it's ready for use after the KBinsDiscretizer is fitted
self._encoder.fit(np.zeros((1, len(self.n_bins_))))
return self
def _validate_n_bins(self, n_features):
"""Returns n_bins_, the number of bins per feature."""
orig_bins = self.n_bins
if isinstance(orig_bins, numbers.Number):
if not isinstance(orig_bins, numbers.Integral):
raise ValueError(
"{} received an invalid n_bins type. "
"Received {}, expected int.".format(
KBinsDiscretizer.__name__, type(orig_bins).__name__
)
)
if orig_bins < 2:
raise ValueError(
"{} received an invalid number "
"of bins. Received {}, expected at least 2.".format(
KBinsDiscretizer.__name__, orig_bins
)
)
return np.full(n_features, orig_bins, dtype=int)
n_bins = check_array(orig_bins, dtype=int, copy=True, ensure_2d=False)
if n_bins.ndim > 1 or n_bins.shape[0] != n_features:
raise ValueError("n_bins must be a scalar or array of shape (n_features,).")
bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins)
violating_indices = np.where(bad_nbins_value)[0]
if violating_indices.shape[0] > 0:
indices = ", ".join(str(i) for i in violating_indices)
raise ValueError(
"{} received an invalid number "
"of bins at indices {}. Number of bins "
"must be at least 2, and must be an int.".format(
KBinsDiscretizer.__name__, indices
)
)
return n_bins
def transform(self, X):
"""
Discretize the data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to be discretized.
Returns
-------
Xt : {ndarray, sparse matrix}, dtype={np.float32, np.float64}
Data in the binned space. Will be a sparse matrix if
`self.encode='onehot'` and ndarray otherwise.
"""
check_is_fitted(self)
# check input and attribute dtypes
dtype = (np.float64, np.float32) if self.dtype is None else self.dtype
Xt = self._validate_data(X, copy=True, dtype=dtype, reset=False)
bin_edges = self.bin_edges_
for jj in range(Xt.shape[1]):
Xt[:, jj] = np.searchsorted(bin_edges[jj][1:-1], Xt[:, jj], side="right")
if self.encode == "ordinal":
return Xt
dtype_init = None
if "onehot" in self.encode:
dtype_init = self._encoder.dtype
self._encoder.dtype = Xt.dtype
try:
Xt_enc = self._encoder.transform(Xt)
finally:
# revert the initial dtype to avoid modifying self.
self._encoder.dtype = dtype_init
return Xt_enc
def inverse_transform(self, Xt):
"""
Transform discretized data back to original feature space.
Note that this function does not regenerate the original data
due to discretization rounding.
Parameters
----------
Xt : array-like of shape (n_samples, n_features)
Transformed data in the binned space.
Returns
-------
Xinv : ndarray, dtype={np.float32, np.float64}
Data in the original feature space.
"""
check_is_fitted(self)
if "onehot" in self.encode:
Xt = self._encoder.inverse_transform(Xt)
Xinv = check_array(Xt, copy=True, dtype=(np.float64, np.float32))
n_features = self.n_bins_.shape[0]
if Xinv.shape[1] != n_features:
raise ValueError(
"Incorrect number of features. Expecting {}, received {}.".format(
n_features, Xinv.shape[1]
)
)
for jj in range(n_features):
bin_edges = self.bin_edges_[jj]
bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5
Xinv[:, jj] = bin_centers[np.int_(Xinv[:, jj])]
return Xinv
def get_feature_names_out(self, input_features=None):
"""Get output feature names.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then names are generated: `[x0, x1, ..., x(n_features_in_)]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
input_features = _check_feature_names_in(self, input_features)
return self._encoder.get_feature_names_out(input_features)
|
py | 7dfcca58e862213c0c066adbe21472529f198235 | #
# DRAGONS
#
# mappers.recipeMapper.py
# ------------------------------------------------------------------------------
import pkgutil
from importlib import import_module
from .baseMapper import Mapper
from ..utils.errors import ModeError
from ..utils.errors import RecipeNotFound
from ..utils.mapper_utils import dotpath
from ..utils.mapper_utils import find_user_recipe
from ..utils.mapper_utils import RECIPEMARKER
# ------------------------------------------------------------------------------
class RecipeMapper(Mapper):
"""
Retrieve the appropriate recipe for a dataset, using all defined defaults:
>>> ad = astrodata.open(<fitsfile>)
>>> dtags = set(list(ad.tags)[:])
>>> instpkg = ad.instrument(generic=True).lower()
>>> rm = RecipeMapper(dtags, instpkg)
>>> recipe = rm.get_applicable_recipe()
>>> recipe.__name__
'qaReduce'
"""
def get_applicable_recipe(self):
recipefn = find_user_recipe(self.recipename)
if recipefn is None:
tag_match, recipefn = self._retrieve_recipe()
if recipefn is None:
raise RecipeNotFound("Recipe '{}' not found.".format(self.recipename))
return recipefn
# --------------------------------------------------------------------------
# Recipe search cascade
def _retrieve_recipe(self):
"""
Start of the recipe library search cascade.
Returns
-------
TagSet
Best matched tagset.
function
Recipe with the associated best matched TagSet.
"""
matched_set = (set(), None)
for rlib in self._get_tagged_recipes():
if rlib is None:
break
if hasattr(rlib, 'recipe_tags'):
if self.tags.issuperset(rlib.recipe_tags):
isect = rlib.recipe_tags
l1 = len(isect)
l2 = len(matched_set[0])
matched_set = (isect, rlib) if l1 > l2 else matched_set
else:
continue
else:
continue
isection, rlib = matched_set
try:
recipe_actual = getattr(rlib, self.recipename)
except AttributeError:
recipe_actual = None
return isection, recipe_actual
def _get_tagged_recipes(self):
try:
loaded_pkg = import_module(self.dotpackage)
except Exception:
yield None
return
for rmod, ispkg in self._generate_recipe_modules(loaded_pkg):
if not ispkg:
importmod = dotpath(self.dotpackage, rmod)
yield import_module(importmod)
else:
continue
def _generate_recipe_modules(self, pkg, recipedir=RECIPEMARKER):
ppath = pkg.__path__[0]
pkg_importer = pkgutil.ImpImporter(ppath)
for pkgname, ispkg in pkg_importer.iter_modules():
if ispkg and pkgname == recipedir:
break
else:
continue
loaded_pkg = import_module(dotpath(self.dotpackage, pkgname))
for mode_pkg, ispkg in self._generate_mode_pkg(loaded_pkg):
yield dotpath(pkgname, mode_pkg), ispkg
def _generate_mode_pkg(self, pkg):
found = False
ppath = pkg.__path__[0]
pkg_importer = pkgutil.ImpImporter(ppath)
for pkgname, ispkg in pkg_importer.iter_modules():
if ispkg and pkgname in self.mode:
found = True
break
else:
continue
if not found:
cerr = "No recipe mode package matched '{}'"
raise ModeError(cerr.format(self.mode))
loaded_pkg = import_module(dotpath(pkg.__name__, pkgname))
for mod, ispkg in self._generate_mode_libs(loaded_pkg):
yield dotpath(pkgname, mod), ispkg
def _generate_mode_libs(self, pkg):
ppath = pkg.__path__[0]
pkg_importer = pkgutil.ImpImporter(ppath)
for pkgname, ispkg in pkg_importer.iter_modules():
if not ispkg:
yield pkgname, ispkg
else:
continue
|
py | 7dfccafc3726186012e4b980324994ddfa7538f8 | #!/usr/bin/env python
#
# Copyright (C) 2013 Umea Universitet, Sweden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import sys
from io import open
from setuptools import setup
from setuptools.command.test import test as TestCommand
__author__ = 'rohe0002'
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
tests_requires = ['responses', 'testfixtures', 'pytest', 'freezegun']
with open('src/oic/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
setup(
name="oic",
version=version,
description="Python implementation of OAuth2 and OpenID Connect",
long_description=open('README.rst', encoding='utf-8').read(),
author="Roland Hedberg",
author_email="[email protected]",
license="Apache 2.0",
url='https://github.com/OpenIDC/pyoidc/',
packages=[
"oic", "oic/oauth2", "oic/oic", "oic/utils", "oic/utils/authn",
"oic/utils/userinfo", 'oic/utils/rp', 'oic/extension'
],
entry_points={
'console_scripts': [
'oic-client-management = oic.utils.client_management:run'
]
},
package_dir={"": "src"},
package_data={"oic": ["py.typed"]},
include_package_data=True,
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Software Development :: Libraries :: Python Modules"],
python_requires='~=3.5',
extras_require={
'develop': ["cherrypy==3.2.4", "pyOpenSSL"],
'testing': tests_requires,
'docs': ['Sphinx', 'sphinx-autobuild', 'alabaster'],
'quality': ['pylama', 'isort', 'eradicate', 'mypy', 'black', 'bandit', 'readme_renderer[md]'],
'types': ['types-requests'],
'ldap_authn': ['pyldap'],
},
install_requires=[
"requests",
"pycryptodomex",
"pyjwkest>=1.3.6",
"mako",
"beaker",
"cryptography",
"defusedxml",
"typing_extensions",
],
tests_require=tests_requires,
long_description_content_type="text/x-rst",
zip_safe=False,
cmdclass={'test': PyTest},
)
|
py | 7dfccbd569d515ef20a0ae0a5a0942d32199839c | """Utility functions for arithmetic operations."""
__all__ = ["clip", "frange", "discretize", "counter"]
import itertools
import math
from typing import Generator
from ..typing import Literal
def clip(
value: float, minimum: float = None, maximum: float = None, *, absmax: float = None
) -> float:
"""Limit the ``value`` to the range [``minimum``, ``maximum``].
Parameters
----------
value
Arbitrary parameter.
minimum
Lower limit of ``value``.
maximum
Upper limit of ``value``.
absmax
Upper limit of absolute value of ``value``.
Examples
--------
>>> neclib.utils.clip(1.2, 0, 1)
1
>>> neclib.utils.clip(41, 0, 100)
41
>>> neclib.utils.clip(-4, absmax=3)
-3
"""
if absmax is not None:
minimum, maximum = -1 * abs(absmax), abs(absmax)
if minimum > maximum:
raise ValueError("Minimum should be less than maximum.")
return min(max(minimum, value), maximum)
def frange(
start: float, stop: float, step: float = 1.0, *, inclusive: bool = False
) -> Generator[float, None, None]:
"""Float version of built-in ``range``, with support for including stop value.
Parameters
----------
start
First value to be yielded.
stop
Last value to be yielded never exceeds this limit.
step
Difference between successive 2 values to be yielded.
inclusive
If ``True``, ``stop`` value can be yielded when ``stop - start`` is multiple of
``step``.
Notes
-----
Because of floating point overflow, errors may appear when ``print``-ing the result,
but it's the same as almost equivalent function ``numpy.arange``.
Examples
--------
>>> list(neclib.utils.frange(0, 1, 0.2))
[0, 0.2, 0.4, 0.6, 0.8]
>>> list(neclib.utils.frange(0, 1, 0.2, inclusive=True))
[0, 0.2, 0.4, 0.6, 0.8, 1]
"""
if inclusive:
num = -1 * math.ceil((start - stop) / step) + 1
# HACK: ``-1 * ceil(x) + 1`` is ceiling function, but if ``x`` is integer,
# return ``ceil(x) + 1``, so no ``x`` satisfies ``quasi_ceil(x) == x``.
else:
num = math.ceil((stop - start) / step)
for i in range(num):
yield start + (step * i)
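# Illustrative worked example (added commentary, not from the original module):
# with start=0, stop=1, step=0.25 and inclusive=True, the quasi-ceiling above
# gives num = -ceil((0 - 1) / 0.25) + 1 = -(-4) + 1 = 5, so frange yields
# 0, 0.25, 0.5, 0.75 and 1.0 -- the endpoint is included as documented.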
def discretize(
value: float,
start: float = 0.0,
step: float = 1.0,
*,
method: Literal["nearest", "ceil", "floor"] = "nearest",
) -> float:
"""Convert ``value`` to nearest element of arithmetic sequence.
Parameters
----------
value
Parameter to discretize.
start
First element of element of arithmetic sequence.
step
Difference between the consecutive 2 elements of the sequence.
method
Discretizing method.
Examples
--------
>>> neclib.utils.discretize(3.141592)
3
>>> neclib.utils.discretize(3.141592, step=10)
0
>>> neclib.utils.discretize(3.141592, method="ceil")
4
>>> neclib.utils.discretize(3.141592, start=2.5, step=0.7)
3.2
"""
discretizer = {"nearest": round, "ceil": math.ceil, "floor": math.floor}
return discretizer[method]((value - start) / step) * step + start
def counter(stop: int = None, allow_infty: bool = False) -> Generator[int, None, None]:
"""Generate integers from 0 to ``stop``.
Parameters
----------
stop
Number of yielded values.
allow_infty
If ``True``, the counter counts up to infinity. Listing such object will cause
memory leak, so use caution.
Examples
--------
>>> list(neclib.utils.counter(5))
[0, 1, 2, 3, 4]
>>> list(neclib.utils.counter())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ...] # -> memory leak
"""
if (stop is None) and (not allow_infty):
raise ValueError("Specify ``stop`` value, unless ``allow_infty`` is set True.")
elif stop is None:
yield from itertools.count()
elif stop < 0:
raise ValueError("Stop value should be non-negative.")
else:
yield from range(stop)
|
py | 7dfccd70c9c95b59c5974f9ce67986183958c9cf | """Tests for the SQLFluff integration with the "diff-quality" tool."""
import sys
import pytest
try:
from sqlfluff import diff_quality_plugin
except ImportError:
pass
@pytest.mark.parametrize(
"sql_path,expected_violations_lines",
[
("linter/indentation_errors.sql", list(range(2, 7))),
("linter/parse_error.sql", {1}),
# NB: This version of the file is in a directory configured
# to ignore parsing errors.
("linter/diffquality/parse_error.sql", []),
],
)
@pytest.mark.skipif(
sys.version_info[:2] == (3, 4),
reason="requires diff_cover package, which does not support python3.4",
)
def test_diff_quality_plugin(sql_path, expected_violations_lines, monkeypatch):
"""Test the plugin at least finds errors on the expected lines."""
monkeypatch.chdir("test/fixtures/")
violation_reporter = diff_quality_plugin.diff_cover_report_quality()
violations = violation_reporter.violations(sql_path)
assert isinstance(violations, list)
if expected_violations_lines:
assert len(violations) > 0
violations_lines = {v.line for v in violations}
for expected_line in expected_violations_lines:
assert expected_line in violations_lines
else:
assert len(violations) == 0
|
py | 7dfcce12914a399d2009959de5f5f988246fcd48 | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Ai/Machine Learning/Python course project',
author='Kalesh Kaladharan',
license='MIT',
)
|
py | 7dfcce9857adc479a1d5994b748e7ac8ec4edeb5 | import numpy as np
import pytest
import pykooh
from . import DATA_PATH
try:
import Cython # noqa: F401
has_cython = True
except ImportError:
has_cython = False
use_cython = (
[True, False]
if has_cython
else [
False,
]
)
# Load test data
data = np.load(str(DATA_PATH / "test_data.npz"))
freqs = data["freqs"]
raw_amps = data["fourier_amps"]
smooth_amps = data["ko_amps"]
b = data["b"]
@pytest.mark.parametrize("use_cython", use_cython)
def test_smooth(use_cython):
    calculated = pykooh.smooth(freqs, freqs, raw_amps, b, use_cython=use_cython)
np.testing.assert_allclose(calculated, smooth_amps, rtol=1e-3)
|
py | 7dfccefc16719d72824a791f689402370197ed3a | #
# Copyright (c) 2019. Asutosh Nayak ([email protected])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
from collections import deque
import threading
import os
import time
class Logger:
LOG_QUEUE_SIZE = 50
def __init__(self, log_file_path, log_file_name_prefix, log_queue_size=LOG_QUEUE_SIZE):
if not os.path.exists(log_file_path):
os.makedirs(log_file_path)
        self.LOG_QUEUE_SIZE = log_queue_size
self.LOGGING_LEVELS = {0: "INFO", 1: "DEBUG", 2: "WARN", 3: "ERROR", 4: "CRITICAL"}
self.init_file_writing(log_file_path, log_file_name_prefix)
def init_file_writing(self, LOG_PATH, LOG_FILE_NAME_PREFIX):
self.file_path = self.cleanup_file_path(LOG_PATH+os.sep+LOG_FILE_NAME_PREFIX+
"_"+self.get_readable_ctime()+".log")
self.log_queue = deque([])
self.append_log("Initialized logging at path {}".format(self.file_path), self.LOGGING_LEVELS[0])
def cleanup_file_path(self, path):
return path.replace('\\', '/').replace(" ", "_").replace(':', '_')
def get_log_prefix_format(self, level=None):
level = self.LOGGING_LEVELS[1] if level is None else level
return " ".join([self.get_readable_ctime(), threading.current_thread().name, level])
def append_log(self, text, level=None):
level = self.LOGGING_LEVELS[1] if level is None else level
log_str = self.get_log_prefix_format(level)+r"\ "+text
self.log_queue.append(log_str)
        print(len(self.log_queue), ")", log_str)
if len(self.log_queue) >= self.LOG_QUEUE_SIZE:
log_file = open(self.cleanup_file_path(self.file_path), "a+")
while len(self.log_queue) > 0:
log_file.write(self.log_queue.popleft()+"\n")
log_file.close()
print("logs written...")
def flush(self):
print("test", (self.cleanup_file_path(self.file_path)))
log_file = open(self.cleanup_file_path(self.file_path), "a+")
while len(self.log_queue) > 0:
log_file.write(self.log_queue.popleft()+"\n")
log_file.close()
print("logs flushed...")
def get_readable_ctime(self):
return time.strftime("%d-%m-%Y %H_%M_%S")
|
py | 7dfcd0301897b16c2c4dd23a1aa40dca22a65d32 | from abc import ABC
class Shape(ABC):
def __str__(self):
return ""
class Circle(Shape):
def __init__(self, radius=0.0):
self.radius = radius
def resize(self, factor):
self.radius *= factor
def __str__(self):
return f"A circle of radius {self.radius}"
class Square(Shape):
def __init__(self, side):
self.side = side
def __str__(self):
return f"A square with side {self.side}"
class ColoredShape(Shape):
def __init__(self, shape, color):
if isinstance(shape, ColoredShape):
raise Exception("Cannot apply ColoredDecorator twice")
self.shape = shape
self.color = color
def __str__(self):
return f"{self.shape} has the color {self.color}"
class TransparentShape(Shape):
def __init__(self, shape, transparency):
self.shape = shape
self.transparency = transparency
def __str__(self):
return f"{self.shape} has {self.transparency * 100.0}% transparency"
if __name__ == "__main__":
circle = Circle(2)
print(circle)
red_circle = ColoredShape(circle, "red")
print(red_circle)
# ColoredShape doesn't have resize()
# red_circle.resize(3)
red_half_transparent_square = TransparentShape(red_circle, 0.5)
print(red_half_transparent_square)
    # direct double application is rejected by the guard in ColoredShape
    try:
        mixed = ColoredShape(ColoredShape(Circle(3), "red"), "blue")
        print(mixed)
    except Exception as e:
        print(e)
|
py | 7dfcd06e9b7d393182f128d8fc0b9c64f57233b5 | def much(x):
return x+1 |
py | 7dfcd15e1991e1e1c12d64d31e2c392aacec7b88 | class TolValidtor:
def __init__(self, profile_id, fields, data, errors, warnings, flag, **kwargs):
self.profile_id = profile_id
self.fields = fields
self.data = data
self.errors = errors
self.warnings = warnings
self.flag = flag
self.kwargs = kwargs
def validate(self):
        raise NotImplementedError
|
py | 7dfcd182a962401bb43d8d8305ec4157cc04509b | import json, os
import requests
CONFIG_FILE = "{0}/.okta_cmd".format(os.getenv("HOME"))
REQUIRED_LIST = ['url', 'token']
def get_config():
with open(CONFIG_FILE) as f:
data = json.load(f)
check_config(data)
return data
def set_config(data):
with open(CONFIG_FILE, 'w+') as f:
json.dump(data, f)
def check_config(data):
for name in REQUIRED_LIST:
if not data.get(name):
raise Exception("Config File missing parameter: {0}, Please use the init command to initialize".format(name))
def request_session(session, url, method, **kwargs):
result = getattr(session, method)(url, **kwargs)
if result.status_code >= 400:
raise requests.HTTPError(json.dumps(result.json()))
return result
|
py | 7dfcd1c32e291ea7d49ac02f655da6d809a28ccb | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from marshmallow import fields
class UUID(fields.UUID):
"""A UUID field."""
def _serialize(self, value, attr, obj, **kwargs):
validated = str(self._validated(value).hex) if value is not None else None
return super(fields.String, self)._serialize(validated, attr, obj) # noqa
|
py | 7dfcd285660d6d3c42496f1c174c9573d5645f0c | # Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""NDB model classes used to model AuthDB relations.
Overview
--------
Models defined here are used by central authentication service (that stores all
groups and secrets) and by services that implement some concrete functionality
protected with ACLs (like isolate and swarming services).
Applications that use auth component may work in 3 modes:
1. Standalone. Application is self contained and manages its own groups.
Useful when developing a new service or for simple installations.
2. Replica. Application uses a central authentication service. An application
can be dynamically switched from Standalone to Replica mode.
3. Primary. Application IS a central authentication service. Only 'auth'
service is running in this mode. 'configure_as_primary' call during startup
switches application to that mode.
Central authentication service (Primary) holds authoritative copy of all auth
related information (groups, secrets, etc.) and acts as a single source of truth
for it. All other services (Replicas) hold copies of a relevant subset of
this information (that they use to perform authorization checks).
Primary service is responsible for updating replicas' configuration via
service-to-service push based replication protocol.
AuthDB holds a list of groups. Each group has a unique name and is defined
as union of 3 sets:
1) Explicit enumeration of particular Identities e.g. 'user:[email protected]'
2) Set of glob-like identity patterns e.g. 'user:*@example.com'
3) Set of nested Groups.
Identity defines an actor making an action (it can be a real person, a bot,
an AppEngine application or special 'anonymous' identity).
In addition to that, AuthDB stores small amount of authentication related
configuration data, such as OAuth2 client_id and client_secret and various
secret keys.
Audit trail
-----------
Each change to AuthDB has an associated revision number (that monotonically
increases with each change). All entities modified by a change are copied to
an append-only log under an entity key associated with the revision (see
historical_revision_key below). Removals are marked by a special auth_db_deleted
flag in entities in the log. This is enough to recover a snapshot of all groups
at some specific moment in time, or to produce a diff between two revisions.
Note that entities in the historical log are not used by online queries. At any
moment in time the most recent version of an AuthDB entity exists in two copies:
1) Main copy used for online queries. It is mutated in-place with each change.
2) Most recent record in the historical log. Read only.
To reduce the possibility of misuse of historical copies in online transactions,
history log entity classes are suffixed with a 'History' suffix. They also have
all indexes stripped.
This mechanism is enabled only on services in Standalone or Primary mode.
Replicas do not keep track of AuthDB revisions and do not keep any historical
log.
"""
import collections
import logging
import os
import re
from google.appengine.api import app_identity
from google.appengine.ext import ndb
from components import datastore_utils
from components import utils
from . import globmatch
from . import ipaddr
from .proto import realms_pb2
# Part of public API of 'auth' component, exposed by this module.
__all__ = [
'ADMIN_GROUP',
'Anonymous',
'bootstrap_group',
'bootstrap_ip_whitelist',
'bootstrap_loopback_ips',
'bots_ip_whitelist',
'configure_as_primary',
'find_group_dependency_cycle',
'find_referencing_groups',
'get_auth_db_revision',
'get_missing_groups',
'get_service_self_identity',
'group_key',
'Identity',
'IDENTITY_ANONYMOUS',
'IDENTITY_BOT',
'IDENTITY_PROJECT',
'IDENTITY_SERVICE',
'IDENTITY_USER',
'IdentityGlob',
'IdentityProperty',
'ip_whitelist_key',
'IP_WHITELISTED_BOT_ID',
'is_empty_group',
'is_external_group_name',
'is_primary',
'is_replica',
'is_standalone',
'is_valid_group_name',
'is_valid_ip_whitelist_name',
'replicate_auth_db',
]
# Name of a group whose members have access to Group management UI. It's the
# only group needed to bootstrap everything else.
ADMIN_GROUP = 'administrators'
# No identity information is provided. Identity name is always 'anonymous'.
IDENTITY_ANONYMOUS = 'anonymous'
# Using bot credentials. Used primarily by Swarming. Identity encodes bot's id.
IDENTITY_BOT = 'bot'
# Using X-Luci-Project header in an internal RPC. Identity name is project name.
IDENTITY_PROJECT = 'project'
# Using App Engine X-Appengine-Inbound-Appid header. Identity name is app name.
IDENTITY_SERVICE = 'service'
# Using user credentials (cookies or access tokens). Identity name is email.
IDENTITY_USER = 'user'
# All allowed identity kinds + regexps to validate identity name.
ALLOWED_IDENTITY_KINDS = {
IDENTITY_ANONYMOUS: re.compile(r'^anonymous$'),
IDENTITY_BOT: re.compile(r'^[0-9a-zA-Z_\-\.@]+$'),
# See also PROJECT_ID_RGX in components/config/common.py.
IDENTITY_PROJECT: re.compile(r'^[a-z0-9\-_]+$'),
IDENTITY_SERVICE: re.compile(r'^[0-9a-zA-Z_\-\:\.]+$'),
IDENTITY_USER: re.compile(r'^[0-9a-zA-Z_\-\.\+\%]+@[0-9a-zA-Z_\-\.]+$'),
}
# Regular expression that matches group names. Lowercase ASCII only plus some
# special symbols to allow imported google groups.
GROUP_NAME_RE = re.compile(r'^([a-z\-]+/)?[0-9a-z_\-\.@]{1,100}$')
# Special group name that means 'All possible users' (including anonymous!).
GROUP_ALL = '*'
# Regular expression for IP whitelist name.
IP_WHITELIST_NAME_RE = re.compile(r'^[0-9a-zA-Z_\-\+\.\ ]{2,200}$')
# Configuration of Primary service, set by 'configure_as_primary'.
_replication_callback = None
# Root ndb keys of various models. They can't be defined as a module level
# constants because ndb.Key implicitly includes current APPLICATION_ID. And in
# testing environment it is '_' during module loading time. Trying to use such
# key from within a testbed test case results in the following error:
# BadRequestError: app "testbed-test" cannot access app "_"'s data
def root_key():
"""Global root key of auth models entity group."""
return ndb.Key('AuthGlobalConfig', 'root')
def replication_state_key():
"""Key of AuthReplicationState entity."""
return ndb.Key('AuthReplicationState', 'self', parent=root_key())
def ip_whitelist_assignments_key():
"""Key of AuthIPWhitelistAssignments entity."""
return ndb.Key('AuthIPWhitelistAssignments', 'default', parent=root_key())
def historical_revision_key(auth_db_rev):
"""Key for entity subgroup that holds changes done in a concrete revision."""
return ndb.Key('Rev', auth_db_rev, parent=root_key())
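# Illustrative note (added commentary, not from the original source): because the
# parent is root_key(), historical_revision_key(42) is equivalent to the flattened
# key ndb.Key('AuthGlobalConfig', 'root', 'Rev', 42), assuming standard ndb key
# flattening; all revision history therefore lives in the same entity group.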
################################################################################
## Identity & IdentityGlob.
class Identity(
datastore_utils.BytesSerializable,
collections.namedtuple('Identity', 'kind, name')):
"""Represents a caller that makes requests. Immutable.
A tuple of (kind, name) where 'kind' is one of IDENTITY_* constants and
  the meaning of 'name' depends on the kind (see comments for IDENTITY_*).
It generalizes accounts of real people, bot accounts and service-to-service
accounts.
  It's pure identity information. Any additional information that may be
related to an identity (e.g. registration date, last access time, etc.) should
be stored elsewhere using Identity.to_bytes() as a key.
"""
# Inheriting from tuple requires use of __new__ instead of __init__. __init__
# is called with object already 'frozen', so it's not possible to modify its
# attributes in __init__.
# See http://docs.python.org/2/reference/datamodel.html#object.__new__
def __new__(cls, kind, name):
if isinstance(name, unicode):
try:
name = name.encode('ascii')
except UnicodeEncodeError:
raise ValueError('Identity has invalid format: only ASCII is allowed')
if (kind not in ALLOWED_IDENTITY_KINDS or
not ALLOWED_IDENTITY_KINDS[kind].match(name)):
raise ValueError('Identity has invalid format: %s' % name)
return super(Identity, cls).__new__(cls, str(kind), name)
def to_bytes(self):
"""Serializes this identity to byte buffer."""
return '%s:%s' % (self.kind, self.name)
@classmethod
def from_bytes(cls, byte_buf):
"""Given a byte buffer returns corresponding Identity object."""
kind, sep, name = byte_buf.partition(':')
if not sep:
raise ValueError('Missing \':\' separator in Identity string')
return cls(kind, name)
@property
def is_anonymous(self):
"""True if this object represents anonymous identity."""
return self.kind == IDENTITY_ANONYMOUS
@property
def is_bot(self):
"""True if this object represents bot account."""
return self.kind == IDENTITY_BOT
@property
def is_project(self):
"""True if this object represents a LUCI project."""
return self.kind == IDENTITY_PROJECT
@property
def is_service(self):
"""True if this object represents an appengine app."""
return self.kind == IDENTITY_SERVICE
@property
def is_user(self):
"""True if this object represents user account."""
return self.kind == IDENTITY_USER
# Predefined Anonymous identity.
Anonymous = Identity(IDENTITY_ANONYMOUS, 'anonymous')
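# Illustrative sketch (added commentary, not from the original source): Identity
# values round-trip through their byte form, which is what IdentityProperty
# stores; the email below is a made-up example.
#
#   ident = Identity(IDENTITY_USER, 'alice@example.com')
#   ident.to_bytes()                              # -> 'user:alice@example.com'
#   Identity.from_bytes('user:alice@example.com') == ident  # -> True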
# Identity assigned to callers that make unauthenticated calls from IPs
# belonging to the '<appid>-bots' IP whitelist. Note that the same bot may appear
# to use different IP addresses (this happens with some NATs), so we can't put the IP
# address into the bot identity string and instead hardcode some arbitrary
# name (defined here).
#
# TODO(vadimsh): Get rid of this. Blocked on Swarming and Isolate switching
# to service accounts.
IP_WHITELISTED_BOT_ID = Identity(IDENTITY_BOT, 'whitelisted-ip')
class IdentityProperty(datastore_utils.BytesSerializableProperty):
"""NDB model property for Identity values.
Identities are stored as indexed short blobs internally.
"""
_value_type = Identity
_indexed = True
class IdentityGlob(
datastore_utils.BytesSerializable,
collections.namedtuple('IdentityGlob', 'kind, pattern')):
"""Glob-like pattern that matches subset of identities. Immutable.
Tuple (kind, glob) where 'kind' is is one of IDENTITY_* constants and 'glob'
defines pattern that identity names' should match. For example, IdentityGlob
that matches all bots is (IDENTITY_BOT, '*') which is also can be written
as 'bot:*'.
The pattern language only supports '*' currently.
"""
# See comment for Identity.__new__ regarding use of __new__ here.
def __new__(cls, kind, pattern):
if isinstance(pattern, unicode):
try:
pattern = pattern.encode('ascii')
except UnicodeEncodeError:
raise ValueError('Invalid IdentityGlob pattern: only ASCII is allowed')
if not pattern:
raise ValueError('No pattern is given')
if '\n' in pattern:
raise ValueError('Multi-line patterns are not allowed')
if kind not in ALLOWED_IDENTITY_KINDS:
raise ValueError('Invalid Identity kind: %s' % kind)
return super(IdentityGlob, cls).__new__(cls, str(kind), pattern)
def to_bytes(self):
"""Serializes this identity glob to byte buffer."""
return '%s:%s' % (self.kind, self.pattern)
@classmethod
def from_bytes(cls, byte_buf):
"""Given a byte buffer returns corresponding IdentityGlob object."""
kind, sep, pattern = byte_buf.partition(':')
if not sep:
raise ValueError('Missing \':\' separator in IdentityGlob string')
return cls(kind, pattern)
def match(self, identity):
"""Return True if |identity| matches this pattern."""
if identity.kind != self.kind:
return False
return globmatch.match(identity.name, self.pattern)
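# Illustrative matching sketch, following the IdentityGlob docstring above:
#
#   all_bots = IdentityGlob(IDENTITY_BOT, '*')
#   assert all_bots.match(IP_WHITELISTED_BOT_ID)   # same kind, '*' matches any name
#   assert not all_bots.match(Anonymous)           # different kind, never matches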
class IdentityGlobProperty(datastore_utils.BytesSerializableProperty):
"""NDB model property for IdentityGlob values.
IdentityGlobs are stored as short indexed blobs internally.
"""
_value_type = IdentityGlob
_indexed = True
################################################################################
## Singleton entities and replication related models.
def configure_as_primary(replication_callback):
"""Registers a callback to be called when AuthDB changes.
Should be called during Primary application startup. The callback will be
  called as 'replication_callback(AuthReplicationState)' from inside a
  transaction on the root_key() entity group whenever replicate_auth_db() is
  called (i.e. on every change to the auth db that should be replicated to
  replicas).
"""
global _replication_callback
_replication_callback = replication_callback
def is_primary():
"""Returns True if current application was configured as Primary."""
return bool(_replication_callback)
def is_replica():
"""Returns True if application is in Replica mode."""
return not is_primary() and not is_standalone()
def is_standalone():
"""Returns True if application is in Standalone mode."""
ent = get_replication_state()
return not ent or not ent.primary_id
def get_replication_state():
"""Returns AuthReplicationState singleton entity if it exists."""
return replication_state_key().get()
def get_auth_db_revision():
"""Returns current revision of AuthDB, it increases with each change."""
state = get_replication_state()
return state.auth_db_rev if state else 0
def get_service_self_identity():
"""Returns Identity that correspond to the current GAE app itself."""
return Identity(IDENTITY_SERVICE, app_identity.get_application_id())
class AuthVersionedEntityMixin(object):
"""Mixin class for entities that keep track of when they change.
  Entities that have this mixin are supposed to be updated in get()/put() or
  get()/delete() transactions. Caller must call record_revision(...) sometime
during the transaction (but before put()). Similarly a call to
record_deletion(...) is expected sometime before delete().
replicate_auth_db will store a copy of the entity in the revision log when
committing a transaction.
A pair of properties auth_db_rev and auth_db_prev_rev are used to implement
a linked list of versions of this entity (e.g. one can take most recent entity
version and go back in time by following auth_db_prev_rev links).
"""
# When the entity was modified last time. Do not use 'auto_now' property since
# such property overrides any explicitly set value with now() during put. It's
# undesired when storing a copy of entity received from Primary (Replica
# should have modified_ts to be same as on Primary).
modified_ts = ndb.DateTimeProperty()
# Who modified the entity last time.
modified_by = IdentityProperty()
# Revision of Auth DB at which this entity was updated last time.
auth_db_rev = ndb.IntegerProperty()
# Revision of Auth DB of previous version of this entity or None.
auth_db_prev_rev = ndb.IntegerProperty()
def record_revision(self, modified_by, modified_ts=None, comment=None):
"""Updates the entity to record Auth DB revision of the current transaction.
Stages the entity to be copied to historical log.
    Must be called sometime before 'put' (not necessarily right before it). Note
that NDB hooks are not used because they are buggy. See docstring for
replicate_auth_db for more info.
Args:
modified_by: Identity that made the change.
modified_ts: datetime when the change was made (or None for current time).
comment: optional comment to put in the revision log.
"""
_get_pending_auth_db_transaction().record_change(
entity=self,
deletion=False,
modified_by=modified_by,
modified_ts=modified_ts or utils.utcnow(),
comment=comment)
def record_deletion(self, modified_by, modified_ts=None, comment=None):
"""Marks entity as being deleted in the current transaction.
Stages the entity to be copied to historical log (with 'auth_db_deleted'
flag set). The entity must not be mutated between 'get' and AuthDB commit.
    Must be called sometime before 'delete' (not necessarily right before it).
Note that NDB hooks are not used because they are buggy. See docstring for
replicate_auth_db for more info.
Args:
modified_by: Identity that made the change.
modified_ts: datetime when the change was made (or None for current time).
comment: optional comment to put in the revision log.
"""
_get_pending_auth_db_transaction().record_change(
entity=self,
deletion=True,
modified_by=modified_by,
modified_ts=modified_ts or utils.utcnow(),
comment=comment)
## Internal interface. Do not use directly unless you know what you are doing.
@classmethod
def get_historical_copy_class(cls):
"""Returns entity class for historical copies of original entity.
Has all the same properties, but unindexed (not needed), unvalidated
(original entity is already validated) and not cached.
The name of the new entity class is "<original name>History" (to make sure
it doesn't show up in indexes for original entity class).
"""
existing = getattr(cls, '_auth_db_historical_copy_cls', None)
if existing:
return existing
props = {}
for name, prop in cls._properties.items():
# Whitelist supported property classes. Better to fail loudly when
# encountering something new, rather than silently produce (possibly)
# incorrect result. Note that all AuthDB classes are instantiated in
# unit tests, so there should be no unexpected asserts in production.
assert prop.__class__ in (
datastore_utils.ProtobufProperty,
IdentityGlobProperty,
IdentityProperty,
ndb.BlobProperty,
ndb.BooleanProperty,
ndb.DateTimeProperty,
ndb.IntegerProperty,
ndb.LocalStructuredProperty,
ndb.StringProperty,
ndb.TextProperty,
), prop.__class__
kwargs = {
'name': prop._name,
'indexed': False,
'required': False,
'repeated': prop._repeated,
}
if prop.__class__ == datastore_utils.ProtobufProperty:
kwargs.update({
'message_class': prop._message_class,
'compressed': prop._compressed,
})
elif prop.__class__ == ndb.LocalStructuredProperty:
kwargs['modelclass'] = prop._modelclass
props[name] = prop.__class__(**kwargs)
new_cls = type(
'%sHistory' % cls.__name__, (_AuthDBHistoricalEntity,), props)
cls._auth_db_historical_copy_cls = new_cls
return new_cls
def make_historical_copy(self, deleted, comment):
"""Returns an entity to put in the historical log.
It's a copy of the original entity, but stored under another key and with
indexes removed. It also has a bunch of additional properties (defined
in _AuthDBHistoricalEntity). See 'get_historical_copy_class'.
The key is derived from auth_db_rev and class and ID of the original entity.
For example, AuthGroup "admins" modified at rev 123 will be copied to
the history as ('AuthGlobalConfig', 'root', 'Rev', 123, 'AuthGroupHistory',
'admins'), where the key prefix (first two pairs) is obtained with
historical_revision_key(...).
"""
assert self.key.parent() == root_key() or self.key == root_key(), self.key
cls = self.get_historical_copy_class()
entity = cls(
id=self.key.id(),
parent=historical_revision_key(self.auth_db_rev))
for prop in self._properties:
setattr(entity, prop, getattr(self, prop))
entity.auth_db_deleted = deleted
entity.auth_db_change_comment = comment
entity.auth_db_app_version = utils.get_app_version()
return entity
class AuthGlobalConfig(ndb.Model, AuthVersionedEntityMixin):
"""Acts as a root entity for auth models.
There should be only one instance of this model in Datastore, with a key set
to root_key(). A change to an entity group rooted at this key is a signal that
AuthDB has to be refetched (see 'fetch_auth_db' in api.py).
Entities that change often or associated with particular bot or user
MUST NOT be in this entity group.
Content of this particular entity is replicated from Primary service to all
Replicas.
Entities that belong to this entity group are:
* AuthGroup
* AuthIPWhitelist
* AuthIPWhitelistAssignments
* AuthReplicationState
* AuthSecret
"""
# Disable useless in-process per-request cache.
_use_cache = False
# OAuth2 client_id to use to mint new OAuth2 tokens.
oauth_client_id = ndb.StringProperty(indexed=False, default='')
# OAuth2 client secret. Not so secret really, since it's passed to clients.
oauth_client_secret = ndb.StringProperty(indexed=False, default='')
# Additional OAuth2 client_ids allowed to access the services.
oauth_additional_client_ids = ndb.StringProperty(repeated=True, indexed=False)
# URL of a token server to use to generate delegation tokens.
token_server_url = ndb.StringProperty(indexed=False, default='')
# Serialized security_config_pb2.SecurityConfig, see security_config.proto.
security_config = ndb.BlobProperty()
class AuthReplicationState(ndb.Model, datastore_utils.SerializableModelMixin):
"""Contains state used to control Primary -> Replica replication.
  It's a singleton entity with key replication_state_key() (in the same entity
  group as root_key()). This entity should be small since it is updated
(auth_db_rev is incremented) whenever AuthDB changes.
Exists in any AuthDB (on Primary and Replicas). Primary updates it whenever
changes to AuthDB are made, Replica updates it whenever it receives a push
from Primary.
"""
# How to convert this entity to or from serializable dict.
serializable_properties = {
'primary_id': datastore_utils.READABLE,
'primary_url': datastore_utils.READABLE,
'auth_db_rev': datastore_utils.READABLE,
'modified_ts': datastore_utils.READABLE,
}
# For services in Standalone mode it is None.
# For services in Primary mode: own GAE application ID.
# For services in Replica mode it is a GAE application ID of Primary.
primary_id = ndb.StringProperty(indexed=False)
# For services in Replica mode, root URL of Primary, i.e https://<host>.
primary_url = ndb.StringProperty(indexed=False)
# Revision of auth DB. Increased by 1 with every change that should be
  # propagated to replicas. Only services in Standalone or Primary mode
# update this property by themselves. Replicas receive it from Primary.
auth_db_rev = ndb.IntegerProperty(default=0, indexed=False)
# Time when auth_db_rev was created (by Primary clock). For informational
# purposes only. See comment at AuthGroup.modified_ts for explanation why
# auto_now is not used.
modified_ts = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
# For services in Replica mode, contains IDs of AuthDBSnapshotShard entities
# that together (when concatenated) hold a deflated AuthDB proto message with
# all groups, IP whitelists, etc. This property is used together with
# primary_url and auth_db_rev to reconstruct full keys of AuthDBSnapshotShard
# entities. See replication.py for code that deals with this property.
shard_ids = ndb.StringProperty(repeated=True, indexed=False)
class AuthDBSnapshotShard(ndb.Model):
"""Entity with a blob that contains a portion of deflated AuthDB message.
Services in Replica mode cache "fully assembled" AuthDB in such entities, so
that processes can fetch it all via a few memcache calls instead of doing a
bunch of expensive transactional datastore queries.
The parent key is AuthDBShapshotRoot(id="<primary_url>,<auth_db_rev>"), i.e.
all shards of some single auth_db_rev live in a single entity group. Note that
  AuthDBShapshotRoot itself doesn't exist (nor is it even defined as an ndb.Model).
Entity ID is "<hex_sha256(blob)[:16]>".
See replication.py for code that deals with this entity.
Immutable.
"""
# Disable useless in-process per-request cache.
_use_cache = False
# A shard of zlib-deflated serialized AuthDB proto.
#
  # Note that 'blob' by itself is not valid zlib-deflated data. We compress
# AuthDB first, and then split it into shards. Thus compressed=True is not
# used here.
blob = ndb.BlobProperty()
def snapshot_shard_key(primary_url, auth_db_rev, shard_id):
"""Returns ndb.Key of some AuthDBSnapshotShard entity."""
return ndb.Key(
'AuthDBShapshotRoot', '%s,%d' % (primary_url, auth_db_rev),
AuthDBSnapshotShard, shard_id)
def replicate_auth_db():
"""Increments auth_db_rev, updates historical log, triggers replication.
Must be called once from inside a transaction (right before exiting it).
  Should only be called for services in Standalone or Primary modes. Will raise
  ValueError if called on a Replica. When called for a service in Standalone
  mode, it will update auth_db_rev but won't kick off any replication. For
  services in Primary mode it will also initiate replication by calling the
  callback set in 'configure_as_primary'. The callback usually transactionally
  enqueues a task (to gracefully handle transaction rollbacks).
WARNING: This function relies on a valid transaction context. NDB hooks and
asynchronous operations are known to be buggy in this regard: NDB hook for
an async operation in a transaction may be called with a wrong context
(main event loop context instead of transaction context). One way to work
around that is to monkey patch NDB (as done here: https://goo.gl/1yASjL).
Another is to not use hooks at all. There's no way to differentiate between
sync and async modes of an NDB operation from inside a hook. And without a
  strict assert it's very easy to forget about the "Do not use put_async" warning.
For that reason _post_put_hook is NOT used and replicate_auth_db() should be
called explicitly whenever relevant part of root_key() entity group is
updated.
Returns:
New AuthDB revision number.
"""
assert ndb.in_transaction()
txn = _get_pending_auth_db_transaction()
txn.commit()
if is_primary():
_replication_callback(txn.replication_state)
return txn.replication_state.auth_db_rev
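# Illustrative usage sketch (the helper below is hypothetical and assumes the
# group already exists); a typical mutation runs get() -> record_revision() ->
# put() -> replicate_auth_db() inside a single transaction:
#
#   @ndb.transactional
#   def set_group_description(group, description, who):  # 'who' is an Identity
#     ent = group_key(group).get()
#     ent.description = description
#     ent.record_revision(modified_by=who, comment='Updated description')
#     ent.put()
#     return replicate_auth_db()  # returns the new AuthDB revision number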
################################################################################
## Auth DB transaction details (used for historical log of changes).
_commit_callbacks = []
def commit_callback(cb):
"""Adds a callback that's called before AuthDB transaction is committed.
Can be used as decorator. Adding a callback second time is noop.
Args:
cb: function that takes single auth_db_rev argument as input.
"""
if cb not in _commit_callbacks:
_commit_callbacks.append(cb)
return cb
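# Illustrative example of the decorator form mentioned above (the callback
# itself is hypothetical):
#
#   @commit_callback
#   def log_auth_db_change(auth_db_rev):
#     logging.info('AuthDB changed, new revision is %d', auth_db_rev)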
def _get_pending_auth_db_transaction():
"""Used internally to keep track of changes done in the transaction.
Returns:
Instance of _AuthDBTransaction (stored in the transaction context).
"""
# Use transaction context to store the object. Note that each transaction
# retry gets its own new transaction context which is what we need,
# see ndb/context.py, 'transaction' tasklet, around line 982 (for SDK 1.9.6).
assert ndb.in_transaction()
ctx = ndb.get_context()
txn = getattr(ctx, '_auth_db_transaction', None)
if txn:
return txn
# Prepare next AuthReplicationState (auth_db_rev +1).
state = replication_state_key().get()
if not state:
primary_id = app_identity.get_application_id() if is_primary() else None
state = AuthReplicationState(
key=replication_state_key(),
primary_id=primary_id,
auth_db_rev=0)
# Assert Primary or Standalone. Replicas can't increment auth db revision.
if not is_primary() and state.primary_id:
raise ValueError('Can\'t modify Auth DB on Replica')
state.auth_db_rev += 1
state.modified_ts = utils.utcnow()
# Store the state in the transaction context. Used in replicate_auth_db(...)
# later.
txn = _AuthDBTransaction(state)
ctx._auth_db_transaction = txn
return txn
class _AuthDBTransaction(object):
"""Keeps track of entities updated or removed in current transaction."""
_Change = collections.namedtuple('_Change', 'entity deletion comment')
def __init__(self, replication_state):
self.replication_state = replication_state
self.changes = [] # list of _Change tuples
self.committed = False
def record_change(self, entity, deletion, modified_by, modified_ts, comment):
assert not self.committed
assert isinstance(entity, AuthVersionedEntityMixin)
assert all(entity.key != c.entity.key for c in self.changes)
# Mutate the main entity (the one used to serve online requests).
entity.modified_by = modified_by
entity.modified_ts = modified_ts
entity.auth_db_prev_rev = entity.auth_db_rev # can be None for new entities
entity.auth_db_rev = self.replication_state.auth_db_rev
# Keep a historical copy. Delay make_historical_copy call until the commit.
# Here (in 'record_change') entity may not have all the fields updated yet.
self.changes.append(self._Change(entity, deletion, comment))
def commit(self):
assert not self.committed
puts = [
c.entity.make_historical_copy(c.deletion, c.comment)
for c in self.changes
]
ndb.put_multi(puts + [self.replication_state])
for cb in _commit_callbacks:
cb(self.replication_state.auth_db_rev)
self.committed = True
class _AuthDBHistoricalEntity(ndb.Model):
"""Base class for *History magic class in AuthVersionedEntityMixin.
In addition to properties defined here the child classes (*History) also
always inherit (for some definition of "inherit") properties from
AuthVersionedEntityMixin.
See get_historical_copy_class().
"""
# Historical entities are not intended to be read often, and updating the
# cache will make AuthDB transactions only slower.
_use_cache = False
_use_memcache = False
# True if entity was deleted in the given revision.
auth_db_deleted = ndb.BooleanProperty(indexed=False)
# Comment string passed to record_revision or record_deletion.
auth_db_change_comment = ndb.StringProperty(indexed=False)
# A GAE module version that committed the change.
auth_db_app_version = ndb.StringProperty(indexed=False)
def get_previous_historical_copy_key(self):
"""Returns ndb.Key of *History entity matching auth_db_prev_rev revision."""
if self.auth_db_prev_rev is None:
return None
return ndb.Key(
self.__class__, self.key.id(),
parent=historical_revision_key(self.auth_db_prev_rev))
################################################################################
## Groups.
class AuthGroup(
ndb.Model,
AuthVersionedEntityMixin,
datastore_utils.SerializableModelMixin):
"""A group of identities, entity id is a group name.
Parent is AuthGlobalConfig entity keyed at root_key().
Primary service holds authoritative list of Groups, that gets replicated to
all Replicas.
"""
# Disable useless in-process per-request cache.
_use_cache = False
# How to convert this entity to or from serializable dict.
serializable_properties = {
'members': datastore_utils.READABLE | datastore_utils.WRITABLE,
'globs': datastore_utils.READABLE | datastore_utils.WRITABLE,
'nested': datastore_utils.READABLE | datastore_utils.WRITABLE,
'description': datastore_utils.READABLE | datastore_utils.WRITABLE,
'owners': datastore_utils.READABLE | datastore_utils.WRITABLE,
'created_ts': datastore_utils.READABLE,
'created_by': datastore_utils.READABLE,
'modified_ts': datastore_utils.READABLE,
'modified_by': datastore_utils.READABLE,
}
# List of members that are explicitly in this group. Indexed.
members = IdentityProperty(repeated=True)
# List of identity-glob expressions (like 'user:*@example.com'). Indexed.
globs = IdentityGlobProperty(repeated=True)
# List of nested group names. Indexed.
nested = ndb.StringProperty(repeated=True)
# Human readable description.
description = ndb.TextProperty(default='')
# A name of the group that can modify or delete this group.
owners = ndb.StringProperty(default=ADMIN_GROUP)
# When the group was created.
created_ts = ndb.DateTimeProperty()
# Who created the group.
created_by = IdentityProperty()
def group_key(group):
"""Returns ndb.Key for AuthGroup entity."""
return ndb.Key(AuthGroup, group, parent=root_key())
def is_empty_group(group):
"""Returns True if group is missing or completely empty."""
group = group_key(group).get()
return not group or not(group.members or group.globs or group.nested)
def is_valid_group_name(name):
"""True if string looks like a valid group name."""
return bool(GROUP_NAME_RE.match(name))
def is_external_group_name(name):
"""True if group is imported from outside and is not writable."""
return is_valid_group_name(name) and '/' in name
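# Illustrative example: a plain name like 'administrators' is a regular
# (writable) group, while a name containing '/', e.g. the hypothetical
# 'externalsource/some-group', is treated as imported from an external source.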
@ndb.transactional
def bootstrap_group(group, identities, description=''):
"""Makes a group (if not yet exists) and adds |identities| to it as members.
Returns True if modified the group, False if identities are already there.
"""
key = group_key(group)
entity = key.get()
if entity and all(i in entity.members for i in identities):
return False
now = utils.utcnow()
if not entity:
entity = AuthGroup(
key=key,
description=description,
created_ts=now,
created_by=get_service_self_identity())
for i in identities:
if i not in entity.members:
entity.members.append(i)
entity.record_revision(
modified_by=get_service_self_identity(),
modified_ts=now,
comment='Bootstrap')
entity.put()
replicate_auth_db()
return True
def find_referencing_groups(group):
"""Finds groups that reference the specified group as nested group or owner.
Used to verify that |group| is safe to delete, i.e. no other group is
depending on it.
Returns:
Set of names of referencing groups.
"""
nesting_groups = AuthGroup.query(
AuthGroup.nested == group,
ancestor=root_key()).fetch_async(keys_only=True)
owned_groups = AuthGroup.query(
AuthGroup.owners == group,
ancestor=root_key()).fetch_async(keys_only=True)
refs = set()
refs.update(key.id() for key in nesting_groups.get_result())
refs.update(key.id() for key in owned_groups.get_result())
return refs
def get_missing_groups(groups):
"""Given a list of group names, returns a list of groups that do not exist."""
# We need to iterate over |groups| twice. It won't work if |groups|
# is a generator. So convert to list first.
groups = list(groups)
entities = ndb.get_multi(group_key(name) for name in groups)
return [name for name, ent in zip(groups, entities) if not ent]
def find_group_dependency_cycle(group):
"""Searches for dependency cycle between nested groups.
Traverses the dependency graph starting from |group|, fetching all necessary
groups from datastore along the way.
Args:
group: instance of AuthGroup to start traversing from. It doesn't have to be
committed to Datastore itself (but all its nested groups should be
there already).
Returns:
List of names of groups that form a cycle or empty list if no cycles.
"""
# It is a depth-first search on a directed graph with back edge detection.
# See http://www.cs.nyu.edu/courses/summer04/G22.1170-001/6a-Graphs-More.pdf
# Cache of already fetched groups.
groups = {group.key.id(): group}
# List of groups that are completely explored (all subtree is traversed).
visited = []
  # Stack of groups that are being explored now. In case a cycle is detected
  # it will contain that cycle.
visiting = []
def visit(group):
"""Recursively explores |group| subtree, returns True if finds a cycle."""
assert group not in visiting
assert group not in visited
# Load bodies of nested groups not seen so far into |groups|.
entities = ndb.get_multi(
group_key(name) for name in group.nested if name not in groups)
groups.update({entity.key.id(): entity for entity in entities if entity})
visiting.append(group)
for nested in group.nested:
obj = groups.get(nested)
# Do not crash if non-existent group is referenced somehow.
if not obj:
continue
# Cross edge. Can happen in diamond-like graph, not a cycle.
if obj in visited:
continue
# Back edge: |group| references its own ancestor -> cycle.
if obj in visiting:
return True
# Explore subtree.
if visit(obj):
return True
visiting.pop()
visited.append(group)
return False
visit(group)
return [group.key.id() for group in visiting]
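# Illustrative behavior sketch: if group 'a' nests 'b' and 'b' nests 'a' back,
# visit() hits the back edge while both are still on the |visiting| stack and
# the function returns ['a', 'b']; for an acyclic (e.g. diamond-shaped) graph
# |visiting| is fully unwound and an empty list is returned.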
################################################################################
## Secrets store.
# TODO(vadimsh): Move secrets outside of AuthGlobalConfig entity group and
# encrypt them.
class AuthSecretScope(ndb.Model):
"""Entity to act as parent entity for AuthSecret.
Parent is AuthGlobalConfig entity keyed at root_key().
  The id of this entity defines the scope of secret keys that have this entity
  as a parent. Always 'local' currently.
"""
class AuthSecret(ndb.Model):
"""Some service-wide named secret blob.
Parent entity is always Key(AuthSecretScope, 'local', parent=root_key()) now.
  There should be only a very limited number of AuthSecret entities around.
  AuthDB fetches them all at once. Do not use this entity for per-user secrets.
  Holds the most recent value of a secret as well as several previous values.
  The most recent value is used to generate new tokens, previous values may be
  used to validate existing tokens. That way a secret can be rotated without
  invalidating any existing outstanding tokens.
"""
# Disable useless in-process per-request cache.
_use_cache = False
# Last several values of a secret, with current value in front.
values = ndb.BlobProperty(repeated=True, indexed=False)
# When secret was modified last time.
modified_ts = ndb.DateTimeProperty(auto_now_add=True)
# Who modified the secret last time.
modified_by = IdentityProperty()
@classmethod
def bootstrap(cls, name, length=32):
"""Creates a secret if it doesn't exist yet.
Args:
name: name of the secret.
length: length of the secret to generate if secret doesn't exist yet.
Returns:
Instance of AuthSecret (creating it if necessary) with random secret set.
"""
# Note that 'get_or_insert' is a bad fit here. With 'get_or_insert' we'd
# have to call os.urandom every time we want to get a key. It's a waste of
# time and entropy.
key = ndb.Key(
cls, name,
parent=ndb.Key(AuthSecretScope, 'local', parent=root_key()))
entity = key.get()
if entity is not None:
return entity
@ndb.transactional
def create():
entity = key.get()
if entity is not None:
return entity
logging.info('Creating new secret key %s', name)
entity = cls(
key=key,
values=[os.urandom(length)],
modified_by=get_service_self_identity())
entity.put()
return entity
return create()
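# Illustrative usage sketch (the secret name is hypothetical):
#
#   secret = AuthSecret.bootstrap('some_hmac_key', length=32)
#   current = secret.values[0]  # most recent value is stored in front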
################################################################################
## IP whitelist.
class AuthIPWhitelistAssignments(ndb.Model, AuthVersionedEntityMixin):
"""A singleton entity with "identity -> AuthIPWhitelist to use" mapping.
Entity key is ip_whitelist_assignments_key(). Parent entity is root_key().
See AuthIPWhitelist for more info about IP whitelists.
"""
# Disable useless in-process per-request cache.
_use_cache = False
class Assignment(ndb.Model):
# Identity name to limit by IP whitelist. Unique key in 'assignments' list.
identity = IdentityProperty()
# Name of IP whitelist to use (see AuthIPWhitelist).
ip_whitelist = ndb.StringProperty()
# Why the assignment was created.
comment = ndb.StringProperty()
# When the assignment was created.
created_ts = ndb.DateTimeProperty()
# Who created the assignment.
created_by = IdentityProperty()
# Holds all the assignments.
assignments = ndb.LocalStructuredProperty(Assignment, repeated=True)
class AuthIPWhitelist(
ndb.Model,
AuthVersionedEntityMixin,
datastore_utils.SerializableModelMixin):
"""A named set of whitelisted IPv4 and IPv6 subnets.
Can be assigned to individual user accounts to forcibly limit them only to
particular IP addresses, e.g. it can be used to enforce that specific service
account is used only from some known IP range. The mapping between accounts
and IP whitelists is stored in AuthIPWhitelistAssignments.
Entity id is a name of the whitelist. Parent entity is root_key().
"""
# Disable useless in-process per-request cache.
_use_cache = False
# How to convert this entity to or from serializable dict.
serializable_properties = {
'subnets': datastore_utils.READABLE | datastore_utils.WRITABLE,
'description': datastore_utils.READABLE | datastore_utils.WRITABLE,
'created_ts': datastore_utils.READABLE,
'created_by': datastore_utils.READABLE,
'modified_ts': datastore_utils.READABLE,
'modified_by': datastore_utils.READABLE,
}
# The list of subnets. The validator is used only as a last measure. JSON API
# handler should do validation too.
subnets = ndb.StringProperty(
repeated=True, validator=lambda _, val: ipaddr.normalize_subnet(val))
# Human readable description.
description = ndb.TextProperty(default='')
# When the list was created.
created_ts = ndb.DateTimeProperty()
# Who created the list.
created_by = IdentityProperty()
def ip_whitelist_key(name):
"""Returns ndb.Key for AuthIPWhitelist entity given its name."""
return ndb.Key(AuthIPWhitelist, name, parent=root_key())
def is_valid_ip_whitelist_name(name):
"""True if string looks like a valid IP whitelist name."""
return bool(IP_WHITELIST_NAME_RE.match(name))
def bots_ip_whitelist():
"""Returns a name of a special IP whitelist that controls IP-based auth.
Requests without authentication headers coming from IPs in this whitelist
are authenticated as coming from IP_WHITELISTED_BOT_ID ('bot:whitelisted-ip').
DEPRECATED.
"""
return '%s-bots' % app_identity.get_application_id()
@ndb.transactional
def bootstrap_ip_whitelist(name, subnets, description=''):
"""Adds subnets to an IP whitelist if not there yet.
Can be used on local dev appserver to add 127.0.0.1 to IP whitelist during
startup. Should not be used from request handlers.
Args:
name: IP whitelist name to add a subnet to.
    subnets: IP subnets to add (as a list of strings).
description: description of IP whitelist (if new entity is created).
Returns:
True if entry was added, False if it is already there or subnet is invalid.
"""
assert isinstance(subnets, (list, tuple))
try:
subnets = [ipaddr.normalize_subnet(s) for s in subnets]
except ValueError:
return False
key = ip_whitelist_key(name)
entity = key.get()
if entity and all(s in entity.subnets for s in subnets):
return False
now = utils.utcnow()
if not entity:
entity = AuthIPWhitelist(
key=key,
description=description,
created_ts=now,
created_by=get_service_self_identity())
for s in subnets:
if s not in entity.subnets:
entity.subnets.append(s)
entity.record_revision(
modified_by=get_service_self_identity(),
modified_ts=now,
comment='Bootstrap')
entity.put()
replicate_auth_db()
return True
def bootstrap_loopback_ips():
"""Adds 127.0.0.1 and ::1 to '<appid>-bots' IP whitelist.
Useful on local dev server and in tests. Must not be used in production.
Returns list of corresponding bot Identities.
"""
# See api.py, AuthDB.verify_ip_whitelisted for IP -> Identity conversion.
assert utils.is_local_dev_server()
bootstrap_ip_whitelist(
bots_ip_whitelist(), ['127.0.0.1', '::1'], 'Local bots')
return [IP_WHITELISTED_BOT_ID]
@ndb.transactional
def bootstrap_ip_whitelist_assignment(identity, ip_whitelist, comment=''):
"""Sets a mapping "identity -> IP whitelist to use" for some account.
Replaces existing assignment. Can be used on local dev appserver to configure
IP whitelist assignments during startup or in tests. Should not be used from
request handlers.
Args:
identity: Identity to modify.
ip_whitelist: name of AuthIPWhitelist to assign.
comment: comment to set.
Returns:
True if IP whitelist assignment was modified, False if it was already set.
"""
entity = (
ip_whitelist_assignments_key().get() or
AuthIPWhitelistAssignments(key=ip_whitelist_assignments_key()))
found = False
for assignment in entity.assignments:
if assignment.identity == identity:
if assignment.ip_whitelist == ip_whitelist:
return False
assignment.ip_whitelist = ip_whitelist
assignment.comment = comment
found = True
break
now = utils.utcnow()
if not found:
entity.assignments.append(
AuthIPWhitelistAssignments.Assignment(
identity=identity,
ip_whitelist=ip_whitelist,
comment=comment,
created_ts=now,
created_by=get_service_self_identity()))
entity.record_revision(
modified_by=get_service_self_identity(),
modified_ts=now,
comment='Bootstrap')
entity.put()
replicate_auth_db()
return True
def fetch_ip_whitelists():
"""Fetches AuthIPWhitelistAssignments and all AuthIPWhitelist entities.
Returns:
(AuthIPWhitelistAssignments, list of AuthIPWhitelist).
"""
assign_fut = ip_whitelist_assignments_key().get_async()
whitelists_fut = AuthIPWhitelist.query(ancestor=root_key()).fetch_async()
assignments = (
assign_fut.get_result() or
AuthIPWhitelistAssignments(key=ip_whitelist_assignments_key()))
whitelists = sorted(whitelists_fut.get_result(), key=lambda x: x.key.id())
return assignments, whitelists
################################################################################
## Realms entities.
class AuthRealmsGlobals(ndb.Model, AuthVersionedEntityMixin):
"""A singleton entity with global portions of realms configuration.
Data here does not relate to any individual realm or project. Currently
contains a list of all defined permissions (with their metadata).
Entity key is realms_globals_key(). Parent entity is root_key().
"""
# Disable useless in-process per-request cache.
_use_cache = False
# All globally defined permissions, in alphabetical order.
permissions = datastore_utils.ProtobufProperty(
realms_pb2.Permission, repeated=True)
class AuthProjectRealms(ndb.Model, AuthVersionedEntityMixin):
"""All realms of some single LUCI project.
They are defined as realms_pb2.Realms proto message reduced to a single
project:
* Only project's realms are listed in `realms` field.
* Only permissions used by the project are listed in `permissions` field.
* Permissions have their metadata stripped, they have only names.
Entity key is project_realms_key(...). Parent entity is root_key().
"""
# Disable useless in-process per-request cache.
_use_cache = False
# All realms of a project, see the AuthProjectRealms doc string above.
realms = datastore_utils.ProtobufProperty(realms_pb2.Realms, compressed=True)
# The git revision the config was picked up from.
config_rev = ndb.StringProperty(indexed=False)
# Revision of permissions DB used to expand roles.
perms_rev = ndb.StringProperty(indexed=False)
def realms_globals_key():
"""The key of AuthRealmsGlobals singleton entity."""
return ndb.Key(AuthRealmsGlobals, 'globals', parent=root_key())
def project_realms_key(project_id):
"""An ndb.Key for an AuthProjectRealms entity."""
return ndb.Key(AuthProjectRealms, project_id, parent=root_key())
################################################################################
## Dev config. Used only on dev server or '-dev' instances.
class AuthDevConfig(ndb.Model):
"""Authentication related configuration for development or tests.
Meant to be updated via Cloud Console Datastore UI.
ID is 'dev_config'.
"""
# Disable memcache to simplify editing of this entity through datastore UI.
_use_cache = False
_use_memcache = False
# A custom endpoint to validate OAuth tokens to use as a fallback.
#
# E.g. "https://www.googleapis.com/oauth2/v1/tokeninfo".
token_info_endpoint = ndb.StringProperty(indexed=False, default='')
@utils.cache_with_expiration(60)
def get_dev_config():
"""Returns an instance of AuthDevConfig (possibly uninitialized).
Asserts that it is used only on dev instance.
"""
assert utils.is_local_dev_server() or utils.is_dev()
k = ndb.Key('AuthDevConfig', 'dev_config')
e = k.get()
if not e:
logging.warning('Initializing AuthDevConfig entity')
e = AuthDevConfig(key=k)
e.put() # there's a race condition here, but we don't care
return e
|
py | 7dfcd2f5d59cd84622234aca9bba2ab690cad0f4 | #############################
## Onion Skinning Operators
#############################
import bpy
from bpy.app.handlers import persistent
from bpy.types import Operator, PropertyGroup
import gpu
import bgl
from gpu_extras.batch import batch_for_shader
import numpy as np
from mathutils import Vector, Matrix
# ########################################################## #
# Data (storing it in the object or scene doesn't work well) #
# ########################################################## #
shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')
frame_data = dict([])
batches = dict([])
extern_data = dict([])
# ################ #
# Functions #
# ################ #
def frame_get_set(_obj, frame):
scn = bpy.context.scene
anmx = scn.anmx_data
# Show from viewport > keep off this allows in_front to work
# if "_animextras" in scn.collection.children:
# vlayer = scn.view_layers['View Layer']
# vlayer.layer_collection.children['_animextras'].hide_viewport = False
if _obj.type == 'EMPTY':
if anmx.is_linked:
bpy.ops.object.duplicate_move_linked(OBJECT_OT_duplicate={"linked":True})
# Hide original but keep it able to render
_obj.hide_viewport = True
if "_animextras" in scn.collection.children:
bpy.data.collections['_animextras'].objects.link(bpy.data.objects[anmx.onion_object])
# bpy.ops.object.move_to_collection(collection_index=0, is_new=True, new_collection_name="_animextras")
_obj = bpy.context.active_object
if not "_animextras" in scn.collection.children:
bpy.ops.object.move_to_collection(collection_index=0, is_new=True, new_collection_name="_animextras")
# bpy.data.collections['_animextras'].hide_viewport = True
# bpy.data.scenes["Scene"].view_layers[0].layer_collection.collection.children["_animextras"].hide_viewport = False
bpy.data.collections['_animextras'].hide_render = True
_obj = bpy.context.selected_objects[0]
# print("_obj %s" % _obj)
if anmx.is_linked:
bpy.ops.object.make_override_library()
for i in bpy.data.collections['_animextras'].children[0].objects:
if i.type == 'MESH':
new_onion = i.name
i.hide_render = True
scn.anmx_data.onion_object = new_onion
anmx.is_linked = False
# Return duplicated linked rig made local
_obj = bpy.data.objects[anmx.onion_object]
# Make object active so panel shows
bpy.context.view_layer.objects.active = _obj
# Select active
# bpy.context.scene.objects["Body"].select_set(True)
# Gets all of the data from a mesh on a certain frame
tmpobj = _obj
# Setting the frame to get an accurate reading of the object on the selected frame
scn = bpy.context.scene
scn.frame_set(frame)
    # Getting the Dependency Graph and the evaluated object
depsgraph = bpy.context.evaluated_depsgraph_get()
    eval_obj = tmpobj.evaluated_get(depsgraph)  # 'eval_obj' avoids shadowing the builtin eval()
    # Making a new mesh from the object.
    mesh = eval_obj.to_mesh()
mesh.update()
# Getting the object's world matrix
mat = Matrix(_obj.matrix_world)
# This moves the mesh by the object's world matrix, thus making everything global space. This is much faster than getting each vertex individually and doing a matrix multiplication on it
mesh.transform(mat)
mesh.update()
# loop triangles are needed to properly draw the mesh on screen
mesh.calc_loop_triangles()
mesh.update()
    # Creating empty arrays so that all of the verts and indices can be gathered all at once in the next step
vertices = np.empty((len(mesh.vertices), 3), 'f')
indices = np.empty((len(mesh.loop_triangles), 3), 'i')
    # Getting all of the vertices and indices all at once (from: https://docs.blender.org/api/current/gpu.html#mesh-with-random-vertex-colors)
mesh.vertices.foreach_get(
"co", np.reshape(vertices, len(mesh.vertices) * 3))
mesh.loop_triangles.foreach_get(
"vertices", np.reshape(indices, len(mesh.loop_triangles) * 3))
args = [vertices, indices]
# Hide from viewport > keep off this allows in_front to work
# if "_animextras" in scn.collection.children:
# vlayer = scn.view_layers['View Layer']
# vlayer.layer_collection.children['_animextras'].hide_viewport = True
return args
def set_to_active(_obj):
""" Sets the object that is being used for the onion skinning """
scn = bpy.context.scene
anmx = scn.anmx_data
# Clear all data > caused double drawing with mode switch
# Old clear method caused issues when using a rig
# Still see handler issue
frame_data.clear()
batches.clear()
extern_data.clear()
# skip clear if we are linked
if hasattr(anmx,"link_parent"):
if not anmx.link_parent == "":
clear_active(clrRig=False)
anmx.onion_object = _obj.name
anmx.is_linked = True if _obj.type == 'EMPTY' else False
if anmx.is_linked:
if hasattr(anmx,"link_parent"):
if not anmx.link_parent:
anmx.link_parent = _obj.name
bake_frames()
make_batches()
def clear_active(clrRig):
""" clrRig will do complete clear, sued with linked Rigs, allows to update it without deleting everuthing """
""" Clears the active object """
scn = bpy.context.scene
anmx = scn.anmx_data
name = anmx.onion_object
# Clears all the data needed to store onion skins on the previously selected object
frame_data.clear()
batches.clear()
extern_data.clear()
    # Clear localized rigs & overridden linked items
if clrRig:
if hasattr(anmx,"link_parent"):
if not anmx.link_parent == "":
bpy.data.collections["_animextras"].children[0].objects.unlink(bpy.data.objects[name])
bpy.data.collections.remove(bpy.data.collections[anmx.link_parent])
bpy.data.collections.remove(bpy.data.collections["_animextras"])
# Show original linked rig again
bpy.data.objects[anmx.link_parent].hide_viewport = False
anmx.link_parent = ""
# Gets rid of the selected object
anmx.onion_object = ""
def make_batches():
# Custom OSL shader could be set here
scn = bpy.context.scene
anmx = scn.anmx_data
_obj = bpy.data.objects[anmx.onion_object]
for key in frame_data:
arg = frame_data[key] # Dictionaries are used rather than lists or arrays so that frame numbers are a given
vertices = arg[0]
indices = arg[1]
batch = batch_for_shader(shader, 'TRIS', {"pos": vertices}, indices=indices)
batches[key] = batch
def bake_frames():
# Needs to do the following:
    # 1. Bake the data for every frame and store it in the object's frame_data entries
scn = bpy.context.scene
anmx = scn.anmx_data
_obj = bpy.data.objects[anmx.onion_object]
curr = scn.frame_current
step = anmx.skin_step
# Getting the first and last frame of the animation
keyobj = _obj
if _obj.parent is not None:
keyobj = _obj.parent
# Check if obj is linked rig
elif hasattr(_obj.instance_collection, "all_objects"):
keyobj = _obj.instance_collection.all_objects[_obj.name]
# print(keyobj)
# keyobj = _obj.parent
keyframes = []
for fc in keyobj.animation_data.action.fcurves:
for k in fc.keyframe_points:
keyframes.append(int(k.co[0]))
keyframes = np.unique(keyframes)
start = int(np.min(keyframes))
end = int(np.max(keyframes)) + 1
if anmx.onion_mode == "PF":
for f in range(start, end):
arg = frame_get_set(_obj, f)
frame_data[str(f)] = arg
extern_data.clear()
elif anmx.onion_mode == "PFS":
for f in range(start, end, step):
arg = frame_get_set(_obj, f)
frame_data[str(f)] = arg
extern_data.clear()
elif anmx.onion_mode == "DC":
for fkey in keyframes:
arg = frame_get_set(_obj, fkey)
frame_data[str(fkey)] = arg
extern_data.clear()
elif anmx.onion_mode == "INB":
for f in range(start, end):
arg = frame_get_set(_obj, f)
frame_data[str(f)] = arg
extern_data.clear()
for fkey in keyframes:
extern_data[str(fkey)] = fkey
scn.frame_set(curr)
# ################ #
# Properties #
# ################ #
class ANMX_data(PropertyGroup):
# Custom update function for the toggle
def toggle_update(self, context):
if self.toggle:
bpy.ops.anim_extras.draw_meshes('INVOKE_DEFAULT')
return
def inFront(self,context):
scn = bpy.context.scene
if self.onion_object:
obj = bpy.context.view_layer.objects.active = bpy.data.objects[self.onion_object]
obj.show_in_front = True if scn["anmx_data"]["in_front"] else False
if "use_xray" in scn["anmx_data"]:
if scn["anmx_data"]["use_xray"]:
scn["anmx_data"]["use_xray"] = False if scn["anmx_data"]["in_front"] else True
return
modes = [
("PF", "Per-Frame", "Shows the amount of frames in the future and past", 1),
("PFS", "Per-Frame Stepped", "Shows the amount of frames in the future and past with option to step-over frames. This allows to see futher but still have a clear overview what is happening", 2),
("DC", "Direct Keys", "Show onion only on inserted keys using amount as frame when keys are visible", 3),
("INB", "Inbetweening", " Inbetweening, lets you see frames with direct keyframes in a different color than interpolated frames", 4)
]
# Onion Skinning Properties
skin_count: bpy.props.IntProperty(name="Count", description="Number of frames we see in past and future", default=1, min=1)
    skin_step: bpy.props.IntProperty(name="Step", description="Number of frames to skip in conjunction with Count", default=1, min=1)
onion_object: bpy.props.StringProperty(name="Onion Object", default="")
onion_mode: bpy.props.EnumProperty(name="", get=None, set=None, items=modes)
use_xray: bpy.props.BoolProperty(name="Use X-Ray", description="Draws the onion visible through the object", default=False)
    use_flat: bpy.props.BoolProperty(name="Flat Colors", description="Flat colors ignore opacity, showing 100% of the color", default=False)
in_front: bpy.props.BoolProperty(name="In Front", description="Draws the selected object in front of the onion skinning", default=False, update=inFront)
toggle: bpy.props.BoolProperty(name="Draw", description="Toggles onion skinning on or off", default=False, update=toggle_update)
# Linked settings
is_linked: bpy.props.BoolProperty(name="Is linked", default=False)
link_parent: bpy.props.StringProperty(name="Link Parent", default="")
# Past settings
past_color: bpy.props.FloatVectorProperty(name="Past Color", min=0, max=1, size=3, default=(1., .1, .1), subtype='COLOR')
past_opacity_start: bpy.props.FloatProperty(name="Starting Opacity", min=0, max=1, precision=2, default=0.5)
past_opacity_end: bpy.props.FloatProperty(name="Ending Opacity", min=0, max=1, precision=2, default=0.1)
past_enabled: bpy.props.BoolProperty(name="Enabled?", default=True)
# Future settings
future_color: bpy.props.FloatVectorProperty(name="Future Color", min=0, max=1, size=3, default=(.1, .4, 1.), subtype='COLOR')
future_opacity_start: bpy.props.FloatProperty(name="Starting Opacity", min=0, max=1,precision=2, default=0.5)
future_opacity_end: bpy.props.FloatProperty(name="Ending Opacity", min=0, max=1,precision=2, default=0.1)
future_enabled: bpy.props.BoolProperty(name="Enabled?", default=True)
# ################ #
# Operators #
# ################ #
def check_selected(context):
obj = context.active_object
return context.selected_objects != []
# return True
# Need workaround so we can pose and still do updates
# return ((obj.type == 'MESH') and hasattr(obj.animation_data,"action") or (obj.type=='EMPTY') or (obj.type == 'MESH') and hasattr(obj.parent.animation_data,"action"))
# if ((obj.type == 'MESH') and hasattr(obj.animation_data,"action") or (obj.type=='EMPTY')):
# return True
# else:
# return False
class ANMX_set_onion(Operator):
bl_idname = "anim_extras.set_onion"
bl_label = "Set Onion To Selected"
bl_description = "Sets the selected object to be the onion object"
bl_options = {'REGISTER', 'UNDO' }
@classmethod
def poll(cls, context):
obj = context.active_object
if context.selected_objects != []:
if (hasattr(obj.parent,"animation_data") and (obj.type == 'MESH')):
if (hasattr(obj.parent.animation_data,"action")):
return True
if hasattr(obj.animation_data,"action"):
if hasattr(obj.animation_data.action,"fcurves"):
return ((obj.type == 'MESH') and hasattr(obj.animation_data,"action") or (obj.type=='EMPTY'))
if hasattr(obj.instance_collection, "all_objects"):
return True
def execute(self, context):
obj = context.active_object
scn = context.scene
anmx = scn.anmx_data
#Extra check for the shortcuts
if not check_selected(context):
self.report({'INFO'}, "Onion needs animated active selection ")
return {'CANCELLED'}
anmx.toggle = False if anmx.toggle else True
if obj == None:
return {"CANCELLED"}
if obj.parent is None:
try:
obj.animation_data.action.fcurves
except AttributeError:
pass
# return {"CANCELLED"}
else:
try:
# This right here needs to change for allowing linked rigs
# obj.parent.animation_data.action.fcurves
dObj = bpy.data.objects[obj.name]
hasattr(dObj.instance_collection, "all_objects")
except AttributeError:
return {"CANCELLED"}
# Or check if it is linked empty
if ((obj.type == 'MESH') or (obj.type=='EMPTY')):
set_to_active(obj)
return {"FINISHED"}
class ANMX_clear_onion(Operator):
bl_idname = "anim_extras.clear_onion"
bl_label = "Clear Selected Onion"
bl_description = "Clears the path of the onion object"
bl_options = {'REGISTER', 'UNDO' }
def execute(self, context):
#Extra check for the shortcuts
if not check_selected(context):
self.report({'INFO'}, "Onion needs animated active selection")
return {'CANCELLED'}
clear_active(clrRig=True)
return {"FINISHED"}
class ANMX_toggle_onion(Operator):
""" Operator for toggling the onion object so we can shortcut it"""
bl_idname = "anim_extras.toggle_onion"
bl_label = "Toggle Onion"
bl_description = "Toggles onion ON/OFF"
bl_options = {'REGISTER', 'UNDO' }
def execute(self, context):
context.scene.anmx_data.toggle = False if context.scene.anmx_data.toggle else True
return {"FINISHED"}
class ANMX_add_clear_onion(Operator):
""" Toggle for clearing and adding"""
bl_idname = "anim_extras.add_clear_onion"
bl_label = "Add/Toggle Onion"
bl_description = "Add/Toggles onion ON/OFF"
bl_options = {'REGISTER', 'UNDO' }
def execute(self, context):
#Extra check for the shortcuts
if not check_selected(context):
self.report({'INFO'}, "Onion needs animated active selection")
return {'CANCELLED'}
anmx = context.scene.anmx_data
if anmx.onion_object=="":
bpy.ops.anim_extras.set_onion()
else:
bpy.ops.anim_extras.clear_onion()
return {"FINISHED"}
class ANMX_update_onion(Operator):
bl_idname = "anim_extras.update_onion"
bl_label = "Update Selected Onion"
bl_description = "Updates the path of the onion object"
bl_options = {'REGISTER', 'UNDO' }
def execute(self, context):
#Extra check for the shortcuts
if not check_selected(context):
self.report({'INFO'}, "Onion needs active selection")
return {'CANCELLED'}
# This allows to update, also pose mode
if context.scene.anmx_data.onion_object in bpy.data.objects:
set_to_active(bpy.data.objects[context.scene.anmx_data.onion_object])
return {"FINISHED"}
# Uses a list formatted in the following way to draw the meshes:
# [[vertices, indices, colors], [vertices, indices, colors]]
class ANMX_draw_meshes(Operator):
bl_idname = "anim_extras.draw_meshes"
bl_label = "Draw"
bl_description = "Draws a set of meshes without creating objects"
bl_options = {'REGISTER', 'UNDO' }
def __init__(self):
print("#### __INIT__ DRAW MESHES ####")
self.handler = None
self.timer = None
self.mode = None
def __del__(self):
""" unregister when done, helps when reopening other scenes """
print("#### UNREGISTER HANDLERS ####")
self.finish(bpy.context)
print("#### HANDLER %s ####" % self.handler)
def invoke(self, context, event):
self.register_handlers(context)
context.window_manager.modal_handler_add(self)
self.mode = context.scene.anmx_data.onion_mode
return {'RUNNING_MODAL'}
def register_handlers(self, context):
self.timer = context.window_manager.event_timer_add(0.1, window=context.window)
self.handler = bpy.types.SpaceView3D.draw_handler_add(self.draw_callback, (context,), 'WINDOW', 'POST_VIEW')
def unregister_handlers(self, context):
context.scene.anmx_data.toggle = False
context.window_manager.event_timer_remove(self.timer)
if self.handler != None:
bpy.types.SpaceView3D.draw_handler_remove(self.handler, 'WINDOW')
self.handler = None
def modal(self, context, event):
if context.scene.anmx_data.onion_object not in bpy.data.objects:
self.unregister_handlers(context)
return {'CANCELLED'}
if context.scene.anmx_data.toggle is False or self.mode != context.scene.anmx_data.onion_mode:
self.unregister_handlers(context)
return {'CANCELLED'}
return {'PASS_THROUGH'}
def finish(self, context):
self.unregister_handlers(context)
return {'FINISHED'}
def draw_callback(self, context):
scn = context.scene
ac = scn.anmx_data
f = scn.frame_current
pc = ac.past_color
fc = ac.future_color
override = False
color = (0, 0, 0, 0)
threshold = ac.skin_count
if context.space_data.overlay.show_overlays == False:
return
for key in batches:
f_dif = abs(f-int(key))
# Getting the color if the batch is in the past
if len(extern_data) == 0:
if f > int(key):
if ac.past_enabled:
color = (pc[0], pc[1], pc[2], ac.past_opacity_start-((ac.past_opacity_start-ac.past_opacity_end)/ac.skin_count) * f_dif)
else:
override = True
# Getting the color if the batch is in the future
else:
if ac.future_enabled:
color = (fc[0], fc[1], fc[2], ac.future_opacity_start-((ac.future_opacity_start-ac.future_opacity_end)/ac.skin_count) * f_dif)
else:
override = True
else:
if key in extern_data:
color = (fc[0], fc[1], fc[2], ac.future_opacity_start-((ac.future_opacity_start-ac.future_opacity_end)/ac.skin_count) * f_dif)
else:
color = (pc[0], pc[1], pc[2], ac.past_opacity_start-((ac.past_opacity_start-ac.past_opacity_end)/ac.skin_count) * f_dif)
# Only draws if the frame is not the current one, it is within the skin limits, and there has not been an override
if f != int(key) and f_dif <= ac.skin_count and not override:
shader.bind()
shader.uniform_float("color", color)
# Theres gotta be a better way to do this. Seems super inefficient
if not ac.use_flat:
bgl.glEnable(bgl.GL_BLEND)
bgl.glEnable(bgl.GL_CULL_FACE)
if not ac.use_xray:
bgl.glEnable(bgl.GL_DEPTH_TEST)
batches[key].draw(shader)
bgl.glDisable(bgl.GL_BLEND)
bgl.glDisable(bgl.GL_CULL_FACE)
bgl.glDisable(bgl.GL_DEPTH_TEST)
override = False |
py | 7dfcd360f743dc3303d3c7e71f4a4a9562725517 | import numpy as np
from ndfinance.brokers.base import OHLCVT
def apply_n_percent_rule(value, n_percent=5, loss_cut_percent=20):
return (value * n_percent / 100) / (loss_cut_percent / 100)
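# Worked example (numbers are illustrative): with value=10000, n_percent=5 and
# loss_cut_percent=20, the result is (10000 * 5 / 100) / (20 / 100) = 2500,
# i.e. a 20% loss cut on a 2500 position loses 500, which is 5% of the value.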
class Universe(object):
def __init__(self, broker=None, data_provider=None):
self.broker = broker
self.data_provider = data_provider
def set_broker(self, broker):
self.broker = broker
self.data_provider = broker.data_provider
def __call__(self):
return self.broker.assets
class AvailableStockUniverse(Universe):
def __init__(self, *args, **kwargs):
super(AvailableStockUniverse, self).__init__(*args, **kwargs)
def set_broker(self, *args, **kwargs):
        # Delegate to Universe.set_broker so data_provider is set as well.
        super(AvailableStockUniverse, self).set_broker(*args, **kwargs)
self.indexer = self.broker.indexer
def __call__(self):
ret = {}
for ticker in self.broker.assets.keys():
timestamp = self.broker.data_provider.get_ohlcvt_current(ticker, OHLCVT.timestamp)
if timestamp is None: continue
ret[ticker] = self.broker.assets[ticker]
return ret
|
py | 7dfcd3b08107c6bf01de67311b706432a98c29b8 | dataset_defaults = {
'amazon': {
'split_scheme': 'official',
'model': 'distilbert-base-uncased',
'transform': 'bert',
'max_token_length': 512,
'loss_function': 'cross_entropy',
'algo_log_metric': 'accuracy',
'batch_size': 8,
'unlabeled_batch_size': 8,
'lr': 1e-5,
'weight_decay': 0.01,
'n_epochs': 3,
'n_groups_per_batch': 2,
'irm_lambda': 1.0,
'coral_penalty_weight': 1.0,
'loader_kwargs': {
'num_workers': 1,
'pin_memory': True,
},
'process_outputs_function': 'multiclass_logits_to_pred',
},
'bdd100k': {
'split_scheme': 'official',
'model': 'resnet50',
'model_kwargs': {'pretrained': True},
'loss_function': 'multitask_bce',
'val_metric': 'acc_all',
'val_metric_decreasing': False,
'optimizer': 'SGD',
'optimizer_kwargs': {'momentum': 0.9},
'batch_size': 32,
'lr': 0.001,
'weight_decay': 0.0001,
'n_epochs': 10,
'algo_log_metric': 'multitask_binary_accuracy',
'transform': 'image_base',
'process_outputs_function': 'binary_logits_to_pred',
},
'camelyon17': {
'split_scheme': 'official',
'model': 'densenet121',
'model_kwargs': {'pretrained': False},
'transform': 'image_base',
'target_resolution': (96, 96),
'loss_function': 'cross_entropy',
'groupby_fields': ['hospital'],
'val_metric': 'acc_avg',
'val_metric_decreasing': False,
'optimizer': 'SGD',
'optimizer_kwargs': {'momentum': 0.9},
'scheduler': None,
'batch_size': 32,
'unlabeled_batch_size': 32,
'lr': 0.001,
'weight_decay': 0.01,
'n_epochs': 5,
'n_groups_per_batch': 2,
'irm_lambda': 1.0,
'coral_penalty_weight': 0.1,
'algo_log_metric': 'accuracy',
'process_outputs_function': 'multiclass_logits_to_pred',
},
'celebA': {
'split_scheme': 'official',
'model': 'resnet50',
'transform': 'image_base',
'eval_transform': 'image_base',
'loss_function': 'cross_entropy',
'groupby_fields': ['male', 'y'],
'val_metric': 'acc_wg',
'val_metric_decreasing': False,
'optimizer': 'SGD',
'optimizer_kwargs': {'momentum': 0.9},
'scheduler': None,
'batch_size': 64,
'lr': 0.001,
'weight_decay': 0.0,
'n_epochs': 200,
'algo_log_metric': 'accuracy',
'process_outputs_function': 'multiclass_logits_to_pred',
},
'civilcomments': {
'split_scheme': 'official',
'model': 'distilbert-base-uncased',
'transform': 'bert',
'loss_function': 'cross_entropy',
'groupby_fields': ['black', 'y'],
'val_metric': 'acc_wg',
'val_metric_decreasing': False,
'batch_size': 16,
'unlabeled_batch_size': 16,
'lr': 1e-5,
'weight_decay': 0.01,
'n_epochs': 5,
'n_groups_per_batch': 1,
'algo_log_metric': 'accuracy',
'max_token_length': 300,
'irm_lambda': 1.0,
'coral_penalty_weight': 10.0,
'loader_kwargs': {
'num_workers': 1,
'pin_memory': True,
},
'process_outputs_function': 'multiclass_logits_to_pred',
},
'fmow': {
'split_scheme': 'official',
'dataset_kwargs': {
'seed': 111
},
'model': 'densenet121',
'model_kwargs': {'pretrained': True},
'transform': 'image_base',
'loss_function': 'cross_entropy',
'groupby_fields': ['year',],
'val_metric': 'acc_worst_region',
'val_metric_decreasing': False,
'optimizer': 'Adam',
'scheduler': 'StepLR',
'scheduler_kwargs': {'gamma': 0.96},
'batch_size': 32,
'unlabeled_batch_size': 32,
'lr': 0.0001,
'weight_decay': 0.0,
'n_epochs': 50,
'n_groups_per_batch': 8,
'irm_lambda': 1.0,
'coral_penalty_weight': 0.1,
'algo_log_metric': 'accuracy',
'process_outputs_function': 'multiclass_logits_to_pred',
},
'iwildcam': {
'loss_function': 'cross_entropy',
'val_metric': 'F1-macro_all',
'model_kwargs': {'pretrained': True},
'transform': 'image_base',
'target_resolution': (448, 448),
'val_metric_decreasing': False,
'algo_log_metric': 'accuracy',
'model': 'resnet50',
'lr': 3e-5,
'weight_decay': 0.0,
'batch_size': 16,
'unlabeled_batch_size': 16,
'n_epochs': 12,
'optimizer': 'Adam',
'split_scheme': 'official',
'scheduler': None,
'groupby_fields': ['location',],
'n_groups_per_batch': 2,
'irm_lambda': 1.,
'coral_penalty_weight': 10.,
'no_group_logging': True,
'process_outputs_function': 'multiclass_logits_to_pred'
},
'ogb-molpcba': {
'split_scheme': 'official',
'model': 'gin-virtual',
'model_kwargs': {'dropout':0.5}, # include pretrained
'loss_function': 'multitask_bce',
'groupby_fields': ['scaffold',],
'val_metric': 'ap',
'val_metric_decreasing': False,
'optimizer': 'Adam',
'batch_size': 32,
'unlabeled_batch_size': 32,
'lr': 1e-03,
'weight_decay': 0.,
'n_epochs': 100,
'n_groups_per_batch': 4,
'irm_lambda': 1.,
'coral_penalty_weight': 0.1,
'no_group_logging': True,
'process_outputs_function': None,
'algo_log_metric': 'multitask_binary_accuracy',
},
'py150': {
'split_scheme': 'official',
'model': 'code-gpt-py',
'loss_function': 'lm_cross_entropy',
'val_metric': 'acc',
'val_metric_decreasing': False,
'optimizer': 'AdamW',
'optimizer_kwargs': {'eps':1e-8},
'lr': 8e-5,
'weight_decay': 0.,
'n_epochs': 3,
'batch_size': 6,
'groupby_fields': ['repo',],
'n_groups_per_batch': 2,
'irm_lambda': 1.,
'coral_penalty_weight': 1.,
'no_group_logging': True,
'algo_log_metric': 'multitask_accuracy',
'process_outputs_function': 'multiclass_logits_to_pred',
},
'poverty': {
'split_scheme': 'official',
'dataset_kwargs': {
'no_nl': False,
'fold': 'A'
},
'model': 'resnet18_ms',
'model_kwargs': {'num_channels': 8},
'transform': 'poverty',
'loss_function': 'mse',
'groupby_fields': ['country',],
'val_metric': 'r_wg',
'val_metric_decreasing': False,
'algo_log_metric': 'mse',
'optimizer': 'Adam',
'scheduler': 'StepLR',
'scheduler_kwargs': {'gamma': 0.96},
'batch_size': 64,
'unlabeled_batch_size': 64,
'lr': 0.001,
'weight_decay': 0.0,
'n_epochs': 200,
'n_groups_per_batch': 8,
'irm_lambda': 1.0,
'coral_penalty_weight': 0.1,
'process_outputs_function': None,
},
'waterbirds': {
'split_scheme': 'official',
'model': 'resnet50',
'transform': 'image_resize_and_center_crop',
'resize_scale': 256.0/224.0,
'model_kwargs': {'pretrained': True},
'loss_function': 'cross_entropy',
'groupby_fields': ['background', 'y'],
'val_metric': 'acc_wg',
'val_metric_decreasing': False,
'algo_log_metric': 'accuracy',
'optimizer': 'SGD',
'optimizer_kwargs': {'momentum':0.9},
'scheduler': None,
'batch_size': 128,
'lr': 1e-5,
'weight_decay': 1.0,
'n_epochs': 300,
'process_outputs_function': 'multiclass_logits_to_pred',
},
'yelp': {
'split_scheme': 'official',
'model': 'bert-base-uncased',
'transform': 'bert',
'max_token_length': 512,
'loss_function': 'cross_entropy',
'algo_log_metric': 'accuracy',
'batch_size': 8,
'lr': 2e-6,
'weight_decay': 0.01,
'n_epochs': 3,
'n_groups_per_batch': 2,
'process_outputs_function': 'multiclass_logits_to_pred',
},
'sqf': {
'split_scheme': 'all_race',
'model': 'logistic_regression',
'transform': None,
'model_kwargs': {'in_features': 104},
'loss_function': 'cross_entropy',
'groupby_fields': ['y'],
'val_metric': 'precision_at_global_recall_all',
'val_metric_decreasing': False,
'algo_log_metric': 'accuracy',
'optimizer': 'Adam',
'optimizer_kwargs': {},
'scheduler': None,
'batch_size': 4,
'lr': 5e-5,
'weight_decay': 0,
'n_epochs': 4,
'process_outputs_function': None,
},
}
##########################################
### Split-specific defaults for Amazon ###
##########################################
amazon_split_defaults = {
'official':{
'groupby_fields': ['user'],
'val_metric': '10th_percentile_acc',
'val_metric_decreasing': False,
'no_group_logging': True,
},
'user':{
'groupby_fields': ['user'],
'val_metric': '10th_percentile_acc',
'val_metric_decreasing': False,
'no_group_logging': True,
},
'time':{
'groupby_fields': ['year'],
'val_metric': 'acc_avg',
'val_metric_decreasing': False,
},
'time_baseline':{
'groupby_fields': ['year'],
'val_metric': 'acc_avg',
'val_metric_decreasing': False,
},
}
user_baseline_splits = [
'A1CNQTCRQ35IMM_baseline', 'A1NE43T0OM6NNX_baseline', 'A1UH21GLZTYYR5_baseline', 'A20EEWWSFMZ1PN_baseline',
'A219Y76LD1VP4N_baseline', 'A37BRR2L8PX3R2_baseline', 'A3JVZY05VLMYEM_baseline', 'A9Q28YTLYREO7_baseline',
'ASVY5XSYJ1XOE_baseline', 'AV6QDP8Q0ONK4_baseline'
]
for split in user_baseline_splits:
amazon_split_defaults[split] = {
'groupby_fields': ['user'],
'val_metric': 'acc_avg',
'val_metric_decreasing': False,
}
category_splits = [
'arts_crafts_and_sewing_generalization', 'automotive_generalization',
'books,movies_and_tv,home_and_kitchen,electronics_generalization', 'books_generalization', 'category_subpopulation',
'cds_and_vinyl_generalization', 'cell_phones_and_accessories_generalization', 'clothing_shoes_and_jewelry_generalization',
'digital_music_generalization', 'electronics_generalization', 'grocery_and_gourmet_food_generalization',
'home_and_kitchen_generalization', 'industrial_and_scientific_generalization', 'kindle_store_generalization',
'luxury_beauty_generalization', 'movies_and_tv,books,home_and_kitchen_generalization', 'movies_and_tv,books_generalization',
'movies_and_tv_generalization', 'musical_instruments_generalization', 'office_products_generalization',
'patio_lawn_and_garden_generalization', 'pet_supplies_generalization', 'prime_pantry_generalization',
'sports_and_outdoors_generalization', 'tools_and_home_improvement_generalization', 'toys_and_games_generalization',
'video_games_generalization',
]
for split in category_splits:
amazon_split_defaults[split] = {
'groupby_fields': ['category'],
'val_metric': 'acc_avg',
'val_metric_decreasing': False,
}
########################################
### Split-specific defaults for Yelp ###
########################################
yelp_split_defaults = {
'official':{
'groupby_fields': ['year'],
'val_metric': 'acc_avg',
'val_metric_decreasing': False,
},
'user':{
'groupby_fields': ['user'],
'val_metric': '10th_percentile_acc',
'val_metric_decreasing': False,
'no_group_logging': True,
},
'time':{
'groupby_fields': ['year'],
'val_metric': 'acc_avg',
'val_metric_decreasing': False,
},
'time_baseline':{
'groupby_fields': ['year'],
'val_metric': 'acc_avg',
'val_metric_decreasing': False,
},
}
###############################
### Split-specific defaults ###
###############################
split_defaults = {
'amazon': amazon_split_defaults,
'yelp': yelp_split_defaults,
}
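# Illustrative helper (a sketch, not part of the original configuration file):
# one common way these dictionaries are consumed is to layer dataset-level
# defaults, then split-specific defaults, then caller overrides. The function
# name and signature below are hypothetical.
def resolve_config(dataset, split_scheme, overrides=None):
    config = dict(dataset_defaults.get(dataset, {}))
    config.update(split_defaults.get(dataset, {}).get(split_scheme, {}))
    # explicit caller overrides win over both levels of defaults
    config.update(overrides or {})
    return config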
|
py | 7dfcd450253b47c628d15d822a56414cacb4fca5 | from typing import Optional
from os import path
import pygame
from skyjump.widgets import XAlign, YAlign
from skyjump.colors import WHITE
from skyjump.config import DATA_DIR
class Label:
"""a simple label"""
def __init__(
self,
caption: str,
x: float,
y: float,
width: float = 0.15,
height: float = 0.1,
textsize: float = 3,
font_file: str = path.join(DATA_DIR, "fonts", "carobtn.TTF"),
color: pygame.Color = WHITE,
bg_color: Optional[pygame.Color] = None,
xalign: XAlign = XAlign.CENTER,
yalign: YAlign = YAlign.CENTER,
line_spacing: float = 1.25,
):
"""create a new label
:param caption: the caption of the label
:param x: x position of the label, relative to the surface on render
:param y: y position of the label, relative to the surface on render
:param width: width of the label, relative to the surface on render
:param height: height of the label, relative to the surface on render
:param textsize: textsize (in percent) of the caption,
relative to the surface on render
:param font_file: file of the font
:param color: color of the text
:param bg_color: color of the background
:param xalign: the x alignment of the text
:param yalign: the y alignment of the text
:param line_spacing: the spacing between two lines
"""
self.caption = caption
self.x = x
self.y = y
self.width = width
self.height = height
self.font_file = font_file
# textsize is given in percent
self.textsize = textsize / 100
self.color = color
self.bg_color = bg_color
self.xalign = xalign
self.yalign = yalign
self.line_spacing = line_spacing
# font rendering is a bottleneck in pygame
# so cache the real size and font
self._font: Optional[pygame.font.Font] = None
self._fontsize: int = -1
def set_caption(self, caption: str):
"""set a new caption
:param caption: the new caption of the label
"""
self.caption = caption
def set_bg_color(self, bg_color: Optional[pygame.Color]):
"""set a new background color
:param bg_color: the new background color of the label
or None for a transparent background
"""
self.bg_color = bg_color
def render(self, out_surface: pygame.Surface):
"""render the label
:param out_surface: the surface on which the label should be rendered
"""
# create absolute position and size values
x = round(self.x * out_surface.get_width())
y = round(self.y * out_surface.get_height())
width = round(self.width * out_surface.get_width())
height = round(self.height * out_surface.get_height())
# create a temporary surface
surface = pygame.Surface((width, height), pygame.SRCALPHA)
# fill the background if there is one
if self.bg_color:
surface.fill(self.bg_color)
# calculate the fontsize
fontsize = round(self.textsize * out_surface.get_width())
self._render_text(surface, width, height, fontsize)
# blit the temporary surface
out_surface.blit(surface, (x, y))
def _render_text(
self,
out_surface: pygame.Surface,
width: int,
height: int,
fontsize: int,
):
"""render the text
:param out_surface: the surface on which the text should be rendered
:param width: the width of the label
:param height: the height of the label
:param fontsize: the textsize in px
"""
# if the window size changes, the fontsize will be changed as well
if self._fontsize != fontsize:
self._fontsize = fontsize
self._font = pygame.font.Font(self.font_file, fontsize)
# get all lines of the text
lines = self.caption.splitlines()
# create a temporary surface for the text
# determine the height of the text from sample characters
font_height = self._font.size("xX")[1]
# calculate the full height of the text
text_height = font_height * ((len(lines) - 1) * self.line_spacing + 1)
surface = pygame.Surface((width, height), pygame.SRCALPHA)
# render each line to the surface
for i, line in enumerate(lines):
# create the text from the font with the given caption and color
text = self._font.render(line, True, self.color)
# determine the width of the rendered text
font_width = self._font.size(line)[0]
# calculate the x position from the x alignment
if self.xalign == XAlign.LEFT:
font_x: int = 0
elif self.xalign == XAlign.CENTER:
font_x: int = (width - font_width) // 2
else:
font_x: int = width - font_width
font_y = round(i * self.line_spacing * font_height)
surface.blit(text, (font_x, font_y, font_width, font_height))
# calculate the y position from the y alignment
if self.yalign == YAlign.TOP:
font_y: int = 0
elif self.yalign == YAlign.CENTER:
font_y: int = round((height - text_height) / 2)
else:
font_y: int = round(height - text_height)
# blit the font to the temporary surface
out_surface.blit(surface, (0, font_y, width, text_height))
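# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes the bundled font referenced via DATA_DIR is available; a real
# program would also run an event loop instead of exiting immediately.
if __name__ == "__main__":
    pygame.init()
    screen = pygame.display.set_mode((800, 600))
    demo = Label("Hello\nSkyjump", x=0.25, y=0.4, width=0.5, height=0.2)
    screen.fill((0, 0, 0))
    demo.render(screen)
    pygame.display.flip()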
|
py | 7dfcd5013d51f708cd6b5d86c9ad49c20f62523a | from dataclasses import dataclass, field
from datetime import datetime
from typing import List, Optional
from .allowance import Allowance
from .allowance_limit_enforcement import AllowanceLimitEnforcement
from .allowance_reset_type import AllowanceResetType
from .instance_status import InstanceStatus
@dataclass
class AccountAllowance(Allowance):
type: str = field(default="co.yellowdog.platform.model.AccountAllowance", init=False)
id: Optional[str] = field(default=None, init=False)
createdById: Optional[str] = field(default=None, init=False)
effectiveFrom: datetime
resetType: AllowanceResetType
limitEnforcement: AllowanceLimitEnforcement
monitoredStatuses: List[InstanceStatus]
description: Optional[str] = None
effectiveUntil: Optional[datetime] = None
allowedHours: int = 0
remainingHours: float = 0
boostHours: Optional[int] = None
resetInterval: Optional[int] = None
hardLimitGraceMinutes: Optional[int] = None
|
py | 7dfcd53478ab1ca2865ec5178e7d9e750cca29a4 | """=============================================================
~/fn_portal/fn_portal/tests/api/test_FN125LampreyReadonly.py
Created: 07 Jun 2021 16:36:52
DESCRIPTION:
The FN125Lamprey readonly endpoint should return a list of lamprey
wounds that have either been observed on sampled fish. The
fn125Lamprey_list end point accepts a large number of filters
(url-parameters) assocaited with the lamprey wound, the sampled fish, the
catch, effort, or attributes of the net and project. Only diet items
matching those criteria should be returned when query parameters are
provided.
=============================================================
"""
import pytest
from django.urls import reverse
from rest_framework import status
from ...tests.fixtures import api_client
from ...tests.factories import (
FN011Factory,
FN121Factory,
FN122Factory,
FN123Factory,
FN125Factory,
FN125LampreyFactory,
SpeciesFactory,
)
@pytest.fixture
def fn125lamprey_records():
fn011 = FN011Factory(prj_cd="LHA_IA10_123")
fn121 = FN121Factory(project=fn011, sam=1)
eff1 = FN122Factory(sample=fn121, eff="001")
spc = SpeciesFactory(
spc="331", spc_nmco="Yellow Perch", spc_nmsc="Perca flavescens"
)
catch = FN123Factory(effort=eff1, species=spc, grp="55")
fish = FN125Factory(catch=catch, flen=350, gon=10)
lamprey1 = FN125LampreyFactory(fish=fish, lamijc_type="A1", lamijc_size=30)
lamprey2 = FN125LampreyFactory(fish=fish, lamijc_type="B2", lamijc_size=25)
return [lamprey1, lamprey2]
@pytest.mark.django_db
def test_FN125lampreyReadonly_list(api_client, fn125lamprey_records):
"""when we access the readonly endpoint for FN125lamprey objects, it should
return a paginated list of lamprey wounds that includes all of the FN125lamprey
objects in the database (ie. un-filtered).
"""
url = reverse("fn_portal_api:fn125lamprey_list")
response = api_client.get(url)
assert response.status_code == status.HTTP_200_OK
payload = response.data["results"]
assert len(payload) == 2
observed = set([x["slug"] for x in payload])
for record in fn125lamprey_records:
assert record.slug in observed
def test_FN125lampreyReadonly_only_get_allowed(api_client):
"""Only get requests are allowed on the FN125lamprey readonly endpoint. This
test verifies that other methods are denied.
"""
url = reverse("fn_portal_api:fn125lamprey_list")
response = api_client.post(url, data={})
assert response.status_code == status.HTTP_403_FORBIDDEN
response = api_client.put(url, data={})
assert response.status_code == status.HTTP_403_FORBIDDEN
response = api_client.patch(url, data={})
assert response.status_code == status.HTTP_403_FORBIDDEN
response = api_client.delete(url, data={})
assert response.status_code == status.HTTP_403_FORBIDDEN
@pytest.mark.xfail
def test_FN125lampreyReadonly_filters():
"""The readonly api endpoint for FN125lamprey objects accepts a large number
of potential parameters as filters. This test will verify that only
lamprey matching the specified criteria are returned.
This test will be parameterized with a list of two element tuples,
the filter to apply, and a list of the FN125lamprey slugs expected in the
response.
"""
assert 0 == 1
|
py | 7dfcd5b00f984e9d088fb3d910bf0ce45559317b | from gym_risk.envs.game.ai import AI
import random
import collections
class RandomAI(AI):
"""
RandomAI: Plays a completely random game, randomly choosing and reinforcing
territories, and attacking wherever it can without any considerations of wisdom.
"""
def initial_placement(self, empty):
if empty:
return random.choice(empty)
else:
t = random.choice(list(self.player.territories))
return t
def attack(self):
for t in self.player.territories:
for a in self.player.world.connections[t]:
if self.world.owners[a] != self.player:
if self.world.forces[t] > self.world.forces[a]:
yield (t, a, None, None)
def reinforce(self, available):
# TODO: restrict to border territories ([t for t in self.player.territories if t.border]) once t.border works
border = [t for t in self.player.territories]
result = collections.defaultdict(int)
for i in range(available):
t = random.choice(border)
result[t] += 1
return result
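# Illustrative note on the reinforce() contract above (comment only, not part
# of the original module): the returned mapping always distributes exactly
# `available` troops, e.g.
#   placements = ai.reinforce(available=5)   # `ai` built by gym_risk's game loop
#   assert sum(placements.values()) == 5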
|
py | 7dfcd5b4df746ffb7bf4c305f03efbacdf479d92 | # Copyright (c) 2018 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
from .swisignature import prepareSwi, signSwi
from .verifyswi import verifySwi
|
py | 7dfcd5d80fddbaae2652901959529181e775c11b | #!/usr/bin/env python
"""Module that contains API to perform filesystem operations on a GRR client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
from typing import Text, Sequence
from grr_api_client import client
from grr_api_client import errors as api_errors
from grr_colab import _timeout
from grr_colab import errors
from grr_colab import representer
from grr_colab import vfs
from grr_response_proto import flows_pb2
from grr_response_proto import jobs_pb2
class FileSystem(object):
"""Wrapper for filesystem operations on GRR Client.
Attributes:
id: Id of the client.
cached: A VFS instance that allows working with filesystem data saved on the
server; it may not be up-to-date but is much faster.
"""
def __init__(self, client_,
path_type):
self._client = client_
self._path_type = path_type
@property
def id(self):
return self._client.client_id
@property
def cached(self):
return vfs.VFS(self._client, self._path_type)
def ls(self, path, max_depth = 1):
"""Lists contents of a given directory.
Args:
path: A path to the directory to list the contents of.
max_depth: Max depth of subdirectories to explore. If max_depth is >1,
then the results will also include the contents of subdirectories (and
sub-subdirectories and so on).
Returns:
A sequence of stat entries.
"""
if max_depth > 1:
args = flows_pb2.RecursiveListDirectoryArgs()
args.pathspec.path = path
args.pathspec.pathtype = self._path_type
args.max_depth = max_depth
try:
ls = self._client.CreateFlow(name='RecursiveListDirectory', args=args)
except api_errors.AccessForbiddenError as e:
raise errors.ApprovalMissingError(self.id, e)
else:
args = flows_pb2.ListDirectoryArgs()
args.pathspec.path = path
args.pathspec.pathtype = self._path_type
try:
ls = self._client.CreateFlow(name='ListDirectory', args=args)
except api_errors.AccessForbiddenError as e:
raise errors.ApprovalMissingError(self.id, e)
_timeout.await_flow(ls)
return representer.StatEntryList([_.payload for _ in ls.ListResults()])
def glob(self, path):
"""Globs for files on the given client.
Args:
path: A glob expression (that may include `*` and `**`).
Returns:
A sequence of stat entries to the found files.
"""
args = flows_pb2.GlobArgs()
args.paths.append(path)
args.pathtype = self._path_type
try:
glob = self._client.CreateFlow(name='Glob', args=args)
except api_errors.AccessForbiddenError as e:
raise errors.ApprovalMissingError(self.id, e)
_timeout.await_flow(glob)
return representer.StatEntryList([_.payload for _ in glob.ListResults()])
def grep(self, path,
pattern):
"""Greps for given content on the specified path.
Args:
path: A path to a file to be searched.
pattern: A regular expression to search for.
Returns:
A list of buffer references to the matched content.
"""
args = flows_pb2.FileFinderArgs()
args.paths.append(path)
args.pathtype = self._path_type
cond = args.conditions.add()
cond.condition_type = \
flows_pb2.FileFinderCondition.Type.CONTENTS_REGEX_MATCH
cond.contents_regex_match.mode = \
flows_pb2.FileFinderContentsRegexMatchCondition.ALL_HITS
cond.contents_regex_match.regex = pattern
args.action.action_type = flows_pb2.FileFinderAction.Action.STAT
try:
ff = self._client.CreateFlow(name='FileFinder', args=args)
except api_errors.AccessForbiddenError as e:
raise errors.ApprovalMissingError(self.id, e)
_timeout.await_flow(ff)
return representer.BufferReferenceList(
[list(_.payload.matches)[0] for _ in ff.ListResults()])
def fgrep(self, path,
literal):
"""Greps for given content on the specified path.
Args:
path: A path to a file to be searched.
literal: A literal expression to search for.
Returns:
A list of buffer references to the matched content.
"""
args = flows_pb2.FileFinderArgs()
args.paths.append(path)
args.pathtype = self._path_type
cond = args.conditions.add()
cond.condition_type = \
flows_pb2.FileFinderCondition.Type.CONTENTS_LITERAL_MATCH
cond.contents_literal_match.mode = \
flows_pb2.FileFinderContentsLiteralMatchCondition.Mode.ALL_HITS
cond.contents_literal_match.literal = literal
args.action.action_type = flows_pb2.FileFinderAction.Action.STAT
try:
ff = self._client.CreateFlow(name='FileFinder', args=args)
except api_errors.AccessForbiddenError as e:
raise errors.ApprovalMissingError(self.id, e)
_timeout.await_flow(ff)
return representer.BufferReferenceList(
[list(_.payload.matches)[0] for _ in ff.ListResults()])
def wget(self, path):
"""Downloads a file and returns a link to it.
Args:
path: A path to download.
Returns:
A link to the file.
"""
self._collect_file(path)
return self.cached.wget(path)
def open(self, path):
"""Opens a file object corresponding to the given path on the client.
The returned file object is read-only.
Args:
path: A path to the file to open.
Returns:
A file-like object (implementing standard IO interface).
"""
self._collect_file(path)
return self.cached.open(path)
def _collect_file(self, path):
"""Save file from client to VFS.
Args:
path: A path to the file to collect.
Returns:
Nothing.
"""
args = flows_pb2.GetFileArgs()
args.pathspec.path = path
args.pathspec.pathtype = self._path_type
try:
gf = self._client.CreateFlow(name='GetFile', args=args)
except api_errors.AccessForbiddenError as e:
raise errors.ApprovalMissingError(self.id, e)
_timeout.await_flow(gf)
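def _example_usage(api_client):
  """Illustrative sketch only (not part of grr_colab): list, search and read
  files on a client.  `api_client` is assumed to be an already-initialised
  grr_api_client client object for which the caller holds a valid approval.
  """
  fs = FileSystem(api_client, jobs_pb2.PathSpec.OS)
  entries = fs.ls('/etc', max_depth=2)
  matches = fs.fgrep('/var/log/syslog', b'error')
  contents = fs.open('/etc/hostname').read()
  return entries, matches, contents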
|
py | 7dfcd65f118036ea84c041693ff760c1ed9fa430 | # coding: utf-8
n, x = [int(i) for i in input().split()]
current = 1
ans = 0
for i in range(n):
l, r = [int(i) for i in input().split()]
ans += (l-current)%x + (r-l+1)
current = r+1
print(ans)
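# Worked trace of the loop above (illustrative comment): with x = 3,
# current = 1 and a single interval l, r = 5, 7 the update is
#   ans += (5 - 1) % 3 + (7 - 5 + 1)  ->  ans += 1 + 3
#   current = 8
# so 4 is printed for that input.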
|
py | 7dfcd6c1a03869cdcaa53ffcdccf31c54502b590 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 11 11:10:55 2019
Turns data (in JSON format) into a pandas DataFrame. This allows
for easy reading, plotting, and writing of results.
This script takes either the single node or multinode results and turns
the results into a pandas DataFrame. A pandas DataFrame is a table of
values. It can find some statistics such as the mean or max of specific
values (similar to Excel Spreadsheets). You can plot the data, read it in,
or write out the results themselves or any changes that you have done.
The functions below perform behind-the-scenes data cleaning to prepare
the data to be used elsewhere (i.e. creating analysis reports)
@author: barn553
"""
import numpy as np
import json
import pandas as pd
from functools import reduce
import sys
import logging
# Setting up logger
logger = logging.getLogger(__name__)
# Creating benchmarks dataframe.
def make_dataframe1(json_results):
"""This function creates a meta data frame to be used for
post-processing the single node benchmark Results.
Args:
json_results (str or dict): path to a json results file, or an already-loaded results dict, used to create the dataframe
Returns:
meta_df: a data frame of all benchmark information
"""
# TDH (2019-12-23): Doing some datatype checking to figure out if we were
# passed in a json dictionary (in which case we really need to do little
# else) or something else. I assume "something else" is a path as a string
# to a JSON file that contains the results.
logging.info('checking what "type" json_results is')
if isinstance(json_results, dict):
logging.info('json_results is a dictionary')
dct = json_results
else:
logging.info('json_results is not a dictionary; changing it to one')
with open(json_results, 'r') as f:
dct = json.load(f)
# Getting the benchmarks columns and creating a data frame.
logging.info('concatenating all the benchmark results data')
lists1 = []
columns1 = [
'cpu_time', 'core_type', 'filter_location', 'iterations',
'name', 'real_time', 'repetition_index', 'repetitions',
'run_name', 'run_type', 'threads', 'time_unit',
'federate_count', 'interface_count', 'EvCount', 'message_count',
'message_size'
]
for f, f_dict in dct.items():
for i, b_dict in enumerate(f_dict['benchmarks']):
c_list1 = []
for c in columns1:
# If any of the benchmark dictionaries don't have a
# column in our columns list, then set the value to np.nan.
v = b_dict.get(c, np.nan)
# Append to the list of np.nans.
c_list1.append(v)
# Append to lists with our list of np.nans, and columns with
# values that belong to our columns list; in other words, if
# there IS a value in 'federate_count', append that to lists,
# along with c_list.
lists1.append(np.concatenate([[f, i], c_list1]))
bmk_df = pd.DataFrame(
lists1,
columns=np.concatenate([['identifier_id', 'benchmark_id'], columns1]))
# Getting the cache data and creating a data frame.
logging.info('concatenating all the cache data')
lists2 = []
columns2 = ['type', 'level', 'size', 'num_sharing']
for f, f_dict in dct.items():
for i, b_dict in enumerate(f_dict['caches']):
# If any of the caches dictionaries don't have a column in
# our columns list, then set the value to np.nan
c_list2 = []
for c in columns2:
# Append to the list of np.nans.
v = b_dict.get(c, np.nan)
c_list2.append(v)
# Append to lists with our list of np.nans, and columns with
# values that belong to our columns list; in other words,
# if there IS a value in 'type', append that to lists, along
# with c_list.
lists2.append(np.concatenate([[f, i], c_list2]))
cache_df = pd.DataFrame(
lists2,
columns=np.concatenate([['identifier_id', 'cache_id'], columns2]))
# Getting the general benchmark info and creating a data frame.
logging.info('concatenating all the meta-data')
lists3 = []
columns3 = [
'filename', 'zmq_version_string', 'benchmark', 'benchmark_type',
'helics_version_string', 'helics_version', 'zmq_version', 'path',
'compiler_info_string', 'generator', 'system', 'system_version',
'platform', 'cxx_compiler', 'cxx_compiler_version', 'load_avg',
'host_processor', 'host_processor_string', 'date', 'host_name',
'executable', 'num_cpus', 'mhz_per_cpu', 'cpu_scaling_enabled',
'library_build_type', 'run_id', 'build_flags_string'
]
for f, f_dict in dct.items():
c_list3 = []
for c in columns3:
# If any of the identifier dictionaries don't have a column
# in our columns list, then set the value to np.nan.
v = f_dict.get(c, np.nan)
# Append to the list of np.nans.
c_list3.append(v)
# Append to lists with our list of np.nans, and columns with
# values that belong to our columns list; in other words,
# if there IS a value in 'date', append that to lists, along
# with c_list.
lists3.append(np.concatenate([[f, i], c_list3]))
info_df = pd.DataFrame(
lists3,
columns=np.concatenate([['identifier_id', 'info_id'], columns3]))
# Concatenating all three data frames into one meta data frame
# and sending to csv.
logging.info('condensing benchmark, cache, and meta data to one dataframe')
meta_bmk_df = reduce(lambda x, y: pd.merge(
x, y, on='identifier_id', how='outer'), [info_df, cache_df, bmk_df])
# Converting 'real_time' from nanoseconds or milliseconds to seconds.
logging.info('changing values to the correct units or format')
df1 = meta_bmk_df[meta_bmk_df.time_unit == 'ns']
df2 = meta_bmk_df[meta_bmk_df.time_unit == 'ms']
df3 = meta_bmk_df[meta_bmk_df.time_unit == 's']
df1.real_time = df1.real_time.apply(lambda x: float(x)*10**(-9))
df1.cpu_time = df1.cpu_time.apply(lambda x: float(x)*10**(-9))
df2.real_time = df2.real_time.apply(lambda x: float(x)*10**(-3))
df2.cpu_time = df2.cpu_time.apply(lambda x: float(x)*10**(-3))
df3.real_time = df3.real_time.apply(lambda x: float(x)*1)
df3.cpu_time = df3.cpu_time.apply(lambda x: float(x)*1)
meta_bmk_df = pd.concat([df1, df2, df3], axis=0, ignore_index=True)
# Making sure results have the correct units and are the correct
# data type
meta_bmk_df = meta_bmk_df.replace({'time_unit': {'ns': 's',
'ms': 's'}}).reset_index()
meta_bmk_df['date'] = pd.to_datetime(meta_bmk_df.date)
meta_bmk_df['date'] = meta_bmk_df['date'].astype(str)
meta_bmk_df['federate_count'] = meta_bmk_df['federate_count'].astype(float)
meta_bmk_df['interface_count'] = meta_bmk_df[
'interface_count'].astype(float)
meta_bmk_df['EvCount'] = meta_bmk_df['EvCount'].astype(float)
meta_bmk_df['message_size'] = meta_bmk_df['message_size'].astype(float)
meta_bmk_df['message_count'] = meta_bmk_df['message_count'].astype(float)
logging.info('successfully changed single node data into a dataframe.')
return meta_bmk_df
def make_dataframe2(json_results):
"""This function creates a meta data frame to be used for
post-processing the multinode benchmark Results.
Args:
json_results (str or dict): path to a json results file, or an already-loaded results dict, used to create the dataframe
Returns:
meta_df: a data frame of all benchmark information
"""
logging.info('checking if json_results is a dictionary')
if isinstance(json_results, dict):
logging.info('json_results is a dictionary')
dct = json_results
else:
logging.info('json_results is not a dictionary; changing it to one')
with open(json_results, 'r') as f:
dct = json.load(f)
# Getting the general benchmark info and creating a data frame.
logging.info('concatenating the meta and benchmark data')
lists = []
columns = [
'filename', 'zmq_version_string', 'mhz_per_cpu', 'cluster',
'topology', 'number_of_leaves', 'num_nodes', 'federate_count',
'message_size', 'message_count', 'benchmark', 'benchmark_type',
'helics_version_string', 'helics_version', 'zmq_version', 'path',
'compiler_info_string', 'generator', 'system', 'system_version',
'platform', 'core_type', 'cxx_compiler', 'cxx_compiler_version',
'build_flags_string', 'EvCount', 'host_processor_string', 'date',
'run_id', 'elapsed_time', 'time_unit', 'host_processor',
'feds_per_node'
]
for f, f_dict in dct.items():
for d, d_dict in f_dict.items():
c_list = []
for c in columns:
# If any of the identifier dictionaries don't have a
# column in our columns list, then set the value to np.nan.
v = d_dict.get(c, np.nan)
# Append to the list of np.nans.
c_list.append(v)
# Append to lists with our list of np.nans, and columns
# with values that belong to our columns list; in other words,
# if there IS a value in 'date', append that to lists, along
# with c_list.
lists.append(np.concatenate([[f], c_list]))
info_df = pd.DataFrame(
lists, columns=np.concatenate([['identifier_id'], columns]))
# Merging the info data frame into a single meta data frame.
logging.info('converting data into a data frame')
meta_bmk_df = reduce(lambda x, y: pd.merge(
x, y, on='identifier_id', how='outer'), [info_df])
# Converting 'elapsed_time' from nanoseconds to seconds.
logging.info('converting values to the correct units or format')
meta_bmk_df['elapsed_time'] = meta_bmk_df['elapsed_time'].apply(
lambda x: float(x)*10**(-9))
meta_bmk_df = meta_bmk_df.reset_index()
meta_bmk_df['date'] = pd.to_datetime(meta_bmk_df.date.astype(str))
meta_bmk_df['date'] = meta_bmk_df['date'].astype(str)
meta_bmk_df = meta_bmk_df.replace('nan', np.nan)
meta_bmk_df = meta_bmk_df.replace(
{'time_unit': {'ns': 's',
'nan': 's'}})
meta_bmk_df['EvCount'] = meta_bmk_df['EvCount'].astype(float)
meta_bmk_df['federate_count'] = meta_bmk_df['federate_count'].astype(
float)
meta_bmk_df['message_size'] = meta_bmk_df['message_size'].astype(float)
meta_bmk_df['message_count'] = meta_bmk_df['message_count'].astype(float)
meta_bmk_df['elapsed_time'] = meta_bmk_df['elapsed_time'].astype(float)
fill_cols = [
'number_of_leaves', 'federate_count',
'num_nodes', 'feds_per_node',
'mhz_per_cpu']
meta_bmk_df[fill_cols] = meta_bmk_df[fill_cols].fillna(
method='bfill')
meta_bmk_df = meta_bmk_df.set_index('date')
meta_bmk_df.loc[
(meta_bmk_df.index >= '2020-06-15 00:00:00') &
(meta_bmk_df.index < '2020-06-16 00:00:00'), 'mhz_per_cpu'] = 1500.0
meta_bmk_df = meta_bmk_df.reset_index()
my_list = []
# Creating a map from "summary.txt" files to the other
# multinode benchmark results files.
logging.info('filling in missing values')
for g, df in meta_bmk_df.groupby(['path', 'benchmark', 'core_type']):
a_df = df
a_df = a_df.set_index('filename')
try:
values = {
'message_size': float(
a_df.loc['summary.txt', 'message_size']),
'message_count': float(
a_df.loc['summary.txt', 'message_count']),
'cluster': str(a_df.loc['summary.txt', 'cluster']),
'topology': str(a_df.loc['summary.txt', 'topology']),
'helics_version_string':
a_df.loc['summary.txt', 'helics_version_string'],
'helics_version': a_df.loc['summary.txt', 'helics_version'],
'benchmark': a_df.loc['summary.txt', 'benchmark'],
'date': a_df.loc['summary.txt', 'date'],
'zmq_version_string':
a_df.loc['summary.txt', 'zmq_version_string'],
'zmq_version': a_df.loc['summary.txt', 'zmq_version'],
'compiler_info_string':
a_df.loc['summary.txt', 'compiler_info_string'],
'cxx_compiler': a_df.loc['summary.txt', 'cxx_compiler'],
'cxx_compiler_version':
a_df.loc['summary.txt', 'cxx_compiler_version'],
'system': a_df.loc['summary.txt', 'system'],
'system_version': a_df.loc['summary.txt', 'system_version'],
'generator': a_df.loc['summary.txt', 'generator'],
'platform': a_df.loc['summary.txt', 'platform'],
'build_flags_string':
a_df.loc['summary.txt', 'build_flags_string'],
'host_processor_string':
a_df.loc['summary.txt', 'host_processor_string'],
'host_processor': a_df.loc['summary.txt', 'host_processor'],
'core_type': a_df.loc['summary.txt', 'core_type'],
'run_id': a_df.loc['summary.txt', 'run_id']}
a_df = a_df.fillna(value=values)
except Exception as e:
print('{} does not exist for {} benchmark'.format(
e, a_df.benchmark.values[0]))
a_df = a_df.reset_index()
my_list.append(a_df)
main_df = pd.concat(my_list, axis=0, ignore_index=True)
logging.info('successfully turned multinode data into a dataframe')
return main_df
# Testing that my function works
if __name__ == '__main__':
fileHandle = logging.FileHandler("make_dataframe.log", mode='w')
fileHandle.setLevel(logging.DEBUG)
streamHandle = logging.StreamHandler(sys.stdout)
streamHandle.setLevel(logging.ERROR)
logging.basicConfig(level=logging.INFO,
handlers=[fileHandle, streamHandle])
# import os
# json_file1 = 'bm_results.json'
# final_meta_bmk_df = make_dataframe1(json_file1)
# final_meta_bmk_df.to_csv(r'{}/bmk_df_test.csv'.format(
# os.path.join(os.getcwd())))
# json_file2 = 'multinode_bm_results.json'
# multi_bmk_df = make_dataframe2(json_file2)
# multi_bmk_df.to_csv(r'{}/multi_bmk_df.csv'.format(
# os.path.join(os.getcwd())))
# json_file3 = 'multinode_bm_results_test.json'
# multi_bmk_df = make_dataframe2(json_file3)
# print('COLUMNS:', multi_bmk_df.columns.unique())
# print(final_meta_bmk_df.columns)
# print(final_meta_bmk_df.head())
# print(final_meta_bmk_df.shape)
|
py | 7dfcd6cb631dd4fb29319713a2c92c52dc412cd9 | # -*- coding: utf-8 -*-
'''
Manage users with the useradd command
'''
# Import python libs
try:
import grp
import pwd
except ImportError:
pass
import logging
from copy import deepcopy
# Import salt libs
import salt.utils
from salt._compat import string_types
log = logging.getLogger(__name__)
def __virtual__():
'''
Set the user module if the kernel is SunOS
'''
return 'user' if __grains__['kernel'] == 'SunOS' else False
def _get_gecos(name):
'''
Retrieve GECOS field info and return it in dictionary form
'''
gecos_field = pwd.getpwnam(name).pw_gecos.split(',', 3)
if not gecos_field:
return {}
else:
# Assign empty strings for any unspecified trailing GECOS fields
while len(gecos_field) < 4:
gecos_field.append('')
return {'fullname': str(gecos_field[0]),
'roomnumber': str(gecos_field[1]),
'workphone': str(gecos_field[2]),
'homephone': str(gecos_field[3])}
def _build_gecos(gecos_dict):
'''
Accepts a dictionary entry containing GECOS field names and their values,
and returns a full GECOS comment string, to be used with usermod.
'''
return '{0},{1},{2},{3}'.format(gecos_dict.get('fullname', ''),
gecos_dict.get('roomnumber', ''),
gecos_dict.get('workphone', ''),
gecos_dict.get('homephone', ''))
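# Worked example for the two GECOS helpers above (illustrative comment, not
# part of the original module):
#   _build_gecos({'fullname': 'Jane Doe', 'roomnumber': '101',
#                 'workphone': '555-0100', 'homephone': '555-0199'})
#   == 'Jane Doe,101,555-0100,555-0199'
# and _get_gecos() parses such a comment string back into the same dictionary
# for an existing user.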
def add(name,
uid=None,
gid=None,
groups=None,
home=None,
shell=None,
unique=True,
fullname='',
roomnumber='',
workphone='',
homephone='',
createhome=True,
**kwargs):
'''
Add a user to the minion
CLI Example:
.. code-block:: bash
salt '*' user.add name <uid> <gid> <groups> <home> <shell>
'''
if salt.utils.is_true(kwargs.pop('system', False)):
log.warning('solaris_user module does not support the \'system\' '
'argument')
if kwargs:
log.warning('Invalid kwargs passed to user.add')
if isinstance(groups, string_types):
groups = groups.split(',')
cmd = 'useradd '
if shell:
cmd += '-s {0} '.format(shell)
if uid:
cmd += '-u {0} '.format(uid)
if gid:
cmd += '-g {0} '.format(gid)
if groups:
cmd += '-G {0} '.format(','.join(groups))
if home is None:
if createhome:
cmd += '-m '
else:
if createhome:
cmd += '-m -d {0} '.format(home)
else:
cmd += '-d {0} '.format(home)
if not unique:
cmd += '-o '
cmd += name
ret = __salt__['cmd.retcode'](cmd)
if ret != 0:
return False
else:
# At this point, the user was successfully created, so return true
# regardless of the outcome of the below functions. If there is a
# problem with changing any of the user's info below, it will be raised
# in a future highstate call. If anyone has a better idea on how to do
# this, feel free to change it, but I didn't think it was a good idea
# to return False when the user was successfully created since A) the
# user does exist, and B) running useradd again would result in a
# nonzero exit status and be interpreted as a False result.
if fullname:
chfullname(name, fullname)
if roomnumber:
chroomnumber(name, roomnumber)
if workphone:
chworkphone(name, workphone)
if homephone:
chhomephone(name, homephone)
return True
def delete(name, remove=False, force=False):
'''
Remove a user from the minion
CLI Example:
.. code-block:: bash
salt '*' user.delete name remove=True force=True
'''
if salt.utils.is_true(force):
log.error('userdel does not support force-deleting user while '
'user is logged in')
cmd = 'userdel '
if remove:
cmd += '-r '
cmd += name
ret = __salt__['cmd.run_all'](cmd)
return not ret['retcode']
def getent():
'''
Return the list of all info for all users
CLI Example:
.. code-block:: bash
salt '*' user.getent
'''
if 'user.getent' in __context__:
return __context__['user.getent']
ret = []
for data in pwd.getpwall():
ret.append(info(data.pw_name))
__context__['user.getent'] = ret
return ret
def chuid(name, uid):
'''
Change the uid for a named user
CLI Example:
.. code-block:: bash
salt '*' user.chuid foo 4376
'''
pre_info = info(name)
if uid == pre_info['uid']:
return True
cmd = 'usermod -u {0} {1}'.format(uid, name)
__salt__['cmd.run'](cmd)
post_info = info(name)
if post_info['uid'] != pre_info['uid']:
return post_info['uid'] == uid
return False
def chgid(name, gid):
'''
Change the default group of the user
CLI Example:
.. code-block:: bash
salt '*' user.chgid foo 4376
'''
pre_info = info(name)
if gid == pre_info['gid']:
return True
cmd = 'usermod -g {0} {1}'.format(gid, name)
__salt__['cmd.run'](cmd)
post_info = info(name)
if post_info['gid'] != pre_info['gid']:
return post_info['gid'] == gid
return False
def chshell(name, shell):
'''
Change the default shell of the user
CLI Example:
.. code-block:: bash
salt '*' user.chshell foo /bin/zsh
'''
pre_info = info(name)
if shell == pre_info['shell']:
return True
cmd = 'usermod -s {0} {1}'.format(shell, name)
__salt__['cmd.run'](cmd)
post_info = info(name)
if post_info['shell'] != pre_info['shell']:
return post_info['shell'] == shell
return False
def chhome(name, home, persist=False):
'''
Change the home directory of the user, pass true for persist to copy files
to the new home dir
CLI Example:
.. code-block:: bash
salt '*' user.chhome foo /home/users/foo True
'''
pre_info = info(name)
if home == pre_info['home']:
return True
cmd = 'usermod -d {0} '.format(home)
if persist:
cmd += ' -m '
cmd += name
__salt__['cmd.run'](cmd)
post_info = info(name)
if post_info['home'] != pre_info['home']:
return post_info['home'] == home
return False
def chgroups(name, groups, append=False):
'''
Change the groups this user belongs to, add append to append the specified
groups
CLI Example:
.. code-block:: bash
salt '*' user.chgroups foo wheel,root True
'''
if isinstance(groups, string_types):
groups = groups.split(',')
ugrps = set(list_groups(name))
if ugrps == set(groups):
return True
if append:
groups += ugrps
cmd = 'usermod -G {0} {1} '.format(','.join(groups), name)
return not __salt__['cmd.retcode'](cmd)
def chfullname(name, fullname):
'''
Change the user's Full Name
CLI Example:
.. code-block:: bash
salt '*' user.chfullname foo "Foo Bar"
'''
fullname = str(fullname)
pre_info = _get_gecos(name)
if not pre_info:
return False
if fullname == pre_info['fullname']:
return True
gecos_field = deepcopy(pre_info)
gecos_field['fullname'] = fullname
cmd = 'usermod -c "{0}" {1}'.format(_build_gecos(gecos_field), name)
__salt__['cmd.run'](cmd)
post_info = info(name)
if post_info['fullname'] != pre_info['fullname']:
return post_info['fullname'] == fullname
return False
def chroomnumber(name, roomnumber):
'''
Change the user's Room Number
CLI Example:
.. code-block:: bash
salt '*' user.chroomnumber foo 123
'''
roomnumber = str(roomnumber)
pre_info = _get_gecos(name)
if not pre_info:
return False
if roomnumber == pre_info['roomnumber']:
return True
gecos_field = deepcopy(pre_info)
gecos_field['roomnumber'] = roomnumber
cmd = 'usermod -c "{0}" {1}'.format(_build_gecos(gecos_field), name)
__salt__['cmd.run'](cmd)
post_info = info(name)
if post_info['roomnumber'] != pre_info['roomnumber']:
return post_info['roomnumber'] == roomnumber
return False
def chworkphone(name, workphone):
'''
Change the user's Work Phone
CLI Example:
.. code-block:: bash
salt '*' user.chworkphone foo "7735550123"
'''
workphone = str(workphone)
pre_info = _get_gecos(name)
if not pre_info:
return False
if workphone == pre_info['workphone']:
return True
gecos_field = deepcopy(pre_info)
gecos_field['workphone'] = workphone
cmd = 'usermod -c "{0}" {1}'.format(_build_gecos(gecos_field), name)
__salt__['cmd.run'](cmd)
post_info = info(name)
if post_info['workphone'] != pre_info['workphone']:
return post_info['workphone'] == workphone
return False
def chhomephone(name, homephone):
'''
Change the user's Home Phone
CLI Example:
.. code-block:: bash
salt '*' user.chhomephone foo "7735551234"
'''
homephone = str(homephone)
pre_info = _get_gecos(name)
if not pre_info:
return False
if homephone == pre_info['homephone']:
return True
gecos_field = deepcopy(pre_info)
gecos_field['homephone'] = homephone
cmd = 'usermod -c "{0}" {1}'.format(_build_gecos(gecos_field), name)
__salt__['cmd.run'](cmd)
post_info = info(name)
if post_info['homephone'] != pre_info['homephone']:
return post_info['homephone'] == homephone
return False
def info(name):
'''
Return user information
CLI Example:
.. code-block:: bash
salt '*' user.info root
'''
ret = {}
try:
data = pwd.getpwnam(name)
ret['gid'] = data.pw_gid
ret['groups'] = list_groups(name)
ret['home'] = data.pw_dir
ret['name'] = data.pw_name
ret['passwd'] = data.pw_passwd
ret['shell'] = data.pw_shell
ret['uid'] = data.pw_uid
# Put GECOS info into a list
gecos_field = data.pw_gecos.split(',', 3)
# Assign empty strings for any unspecified GECOS fields
while len(gecos_field) < 4:
gecos_field.append('')
ret['fullname'] = gecos_field[0]
ret['roomnumber'] = gecos_field[1]
ret['workphone'] = gecos_field[2]
ret['homephone'] = gecos_field[3]
except KeyError:
return {}
return ret
def list_groups(name):
'''
Return a list of groups the named user belongs to
CLI Example:
.. code-block:: bash
salt '*' user.list_groups foo
'''
ugrp = set()
# Add the primary user's group
ugrp.add(grp.getgrgid(pwd.getpwnam(name).pw_gid).gr_name)
# If we already grabbed the group list, it's overkill to grab it again
if 'user.getgrall' in __context__:
groups = __context__['user.getgrall']
else:
groups = grp.getgrall()
__context__['user.getgrall'] = groups
# Now, all other groups the user belongs to
for group in groups:
if name in group.gr_mem:
ugrp.add(group.gr_name)
return sorted(list(ugrp))
|
py | 7dfcd6d837099ecaea28fd3c952589465a6b269c | #!/usr/bin/env python
"""Metric implementations to collect statistics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import abc
from grr_response_core.lib.rdfvalues import stats as rdf_stats
from grr_response_core.stats import stats_collector_instance
from grr_response_core.stats import stats_utils
class AbstractMetric(metaclass=abc.ABCMeta):
"""An abstract metric with a name, fields, and values.
Refer to default_stats_collector._Metric and DefaultStatsCollector to
see how StatsCollector handles the field definitions and values.
Attributes:
name: string containing the global metric name.
"""
def __init__(self, metadata):
"""Initializes a new metric and registers it with the StatsCollector."""
self.name = metadata.varname
stats_collector_instance.RegisterMetric(metadata)
def GetValue(self, fields=None):
"""Returns the value of a given metric for given field values."""
return stats_collector_instance.Get().GetMetricValue(
self.name, fields=fields)
def GetFields(self):
"""Returns all field values for the given metric."""
return stats_collector_instance.Get().GetMetricFields(self.name)
class Counter(AbstractMetric):
"""A Counter metric that can be incremented.
Refer to default_stats_collector._CounterMetric and DefaultStatsCollector to
see how StatsCollector handles the field definitions and values.
"""
def __init__(self, name, fields=(), docstring=None, units=None):
"""Initializes a Counter metric and registers it with the StatsCollector."""
super().__init__(
rdf_stats.MetricMetadata(
varname=name,
metric_type=rdf_stats.MetricMetadata.MetricType.COUNTER,
value_type=rdf_stats.MetricMetadata.ValueType.INT,
fields_defs=stats_utils.FieldDefinitionProtosFromTuples(fields),
docstring=docstring,
units=units))
def Increment(self, delta=1, fields=None):
"""Increments a counter metric by a given delta."""
stats_collector_instance.Get().IncrementCounter(
self.name, delta, fields=fields)
def Counted(self, fields=None):
"""Returns a decorator that counts function calls."""
return stats_utils.Counted(self, fields=fields)
def SuccessesCounted(self, fields=None):
"""Returns a decorator that counts calls that don't raise an exception."""
return stats_utils.SuccessesCounted(self, fields=fields)
def ErrorsCounted(self, fields=None):
"""Returns a decorator that counts calls that raise an exception."""
return stats_utils.ErrorsCounted(self, fields=fields)
class Gauge(AbstractMetric):
"""A Gauge metric that can be set to a value.
Refer to default_stats_collector._GaugeMetric and DefaultStatsCollector to
see how StatsCollector handles the field definitions and values.
"""
def __init__(self, name, value_type, fields=(), docstring=None, units=None):
"""Initializes a Gauge metric and registers it with the StatsCollector."""
super().__init__(
rdf_stats.MetricMetadata(
varname=name,
metric_type=rdf_stats.MetricMetadata.MetricType.GAUGE,
value_type=stats_utils.MetricValueTypeFromPythonType(value_type),
fields_defs=stats_utils.FieldDefinitionProtosFromTuples(fields),
docstring=docstring,
units=units))
def SetValue(self, value, fields=None):
"""Sets value of a given gauge metric."""
stats_collector_instance.Get().SetGaugeValue(
self.name, value, fields=fields)
def SetCallback(self, callback, fields=None):
"""Attaches a callback to the given gauge metric."""
stats_collector_instance.Get().SetGaugeCallback(
self.name, callback, fields=fields)
class Event(AbstractMetric):
"""An Event metric that records timings of events.
Refer to default_stats_collector._EventMetric and DefaultStatsCollector to
see how StatsCollector handles the field definitions and values.
"""
def __init__(self, name, bins=(), fields=(), docstring=None, units=None):
"""Initializes an Event metric and registers it with the StatsCollector."""
super().__init__(
rdf_stats.MetricMetadata(
varname=name,
bins=bins,
metric_type=rdf_stats.MetricMetadata.MetricType.EVENT,
value_type=rdf_stats.MetricMetadata.ValueType.DISTRIBUTION,
fields_defs=stats_utils.FieldDefinitionProtosFromTuples(fields),
docstring=docstring,
units=units))
def RecordEvent(self, value, fields=None):
"""Records value corresponding to the given event metric."""
stats_collector_instance.Get().RecordEvent(self.name, value, fields=fields)
def Timed(self, fields=None):
"""Returns a decorator that records timing metrics for function calls."""
return stats_utils.Timed(self, fields=fields)
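def _example_metrics():
  """Illustrative sketch only (not part of the GRR codebase): defining and
  updating metrics with the classes above.  The metric names, field
  definitions and bins are invented for the example, and a stats collector
  must already have been set up via stats_collector_instance."""
  requests = Counter("example_requests", fields=[("status", str)])
  latency = Event("example_latency", bins=[0.1, 0.5, 1.0])
  requests.Increment(fields=["ok"])
  latency.RecordEvent(0.25)
  return requests.GetValue(fields=["ok"])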
|
py | 7dfcd7ed471f9c89538f93a63e393d4c9930d8ff | # Copyright 2004-2018 Davide Alberani <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides the Movie class, used to store information about
a given movie.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from copy import deepcopy
from imdb import linguistics
from imdb.utils import _Container
from imdb.utils import analyze_title, build_title, canonicalTitle, cmpMovies, flatten
class Movie(_Container):
"""A Movie.
Every information about a movie can be accessed as::
movieObject['information']
to get a list of the kind of information stored in a
Movie object, use the keys() method; some useful aliases
are defined (as "casting" for the "casting director" key); see
the keys_alias dictionary.
"""
# The default sets of information retrieved.
default_info = ('main', 'plot')
# Aliases for some not-so-intuitive keys.
keys_alias = {
'tv schedule': 'airing',
'user rating': 'rating',
'plot summary': 'plot',
'plot summaries': 'plot',
'directed by': 'director',
'created by': 'creator',
'writing credits': 'writer',
'produced by': 'producer',
'original music by': 'original music',
'non-original music by': 'non-original music',
'music': 'original music',
'cinematography by': 'cinematographer',
'cinematography': 'cinematographer',
'film editing by': 'editor',
'film editing': 'editor',
'editing': 'editor',
'actors': 'cast',
'actresses': 'cast',
'casting by': 'casting director',
'casting': 'casting director',
'art direction by': 'art direction',
'set decoration by': 'set decoration',
'costume design by': 'costume designer',
'costume design': 'costume designer',
'makeup department': 'make up',
'makeup': 'make up',
'make-up': 'make up',
'production management': 'production manager',
'production company': 'production companies',
'second unit director or assistant director': 'assistant director',
'second unit director': 'assistant director',
'sound department': 'sound crew',
'costume and wardrobe department': 'costume department',
'special effects by': 'special effects',
'visual effects by': 'visual effects',
'special effects company': 'special effects companies',
'stunts': 'stunt performer',
'other crew': 'miscellaneous crew',
'misc crew': 'miscellaneous crew',
'miscellaneouscrew': 'miscellaneous crew',
'crewmembers': 'miscellaneous crew',
'crew members': 'miscellaneous crew',
'other companies': 'miscellaneous companies',
'misc companies': 'miscellaneous companies',
'miscellaneous company': 'miscellaneous companies',
'misc company': 'miscellaneous companies',
'other company': 'miscellaneous companies',
'aka': 'akas',
'also known as': 'akas',
'country': 'countries',
'production country': 'countries',
'production countries': 'countries',
'genre': 'genres',
'runtime': 'runtimes',
'lang': 'languages',
'color': 'color info',
'cover': 'cover url',
'full-size cover': 'full-size cover url',
'seasons': 'number of seasons',
'language': 'languages',
'certificate': 'certificates',
'certifications': 'certificates',
'certification': 'certificates',
'miscellaneous links': 'misc links',
'miscellaneous': 'misc links',
'soundclips': 'sound clips',
'videoclips': 'video clips',
'photographs': 'photo sites',
'distributor': 'distributors',
'distribution': 'distributors',
'distribution companies': 'distributors',
'distribution company': 'distributors',
'guest': 'guests',
'guest appearances': 'guests',
'tv guests': 'guests',
'notable tv guest appearances': 'guests',
'episodes cast': 'guests',
'episodes number': 'number of episodes',
'amazon review': 'amazon reviews',
'merchandising': 'merchandising links',
'merchandise': 'merchandising links',
'sales': 'merchandising links',
'faq': 'faqs',
'parental guide': 'parents guide',
'frequently asked questions': 'faqs'
}
keys_tomodify_list = (
'plot', 'trivia', 'alternate versions', 'goofs',
'quotes', 'dvd', 'laserdisc', 'news', 'soundtrack',
'crazy credits', 'business', 'supplements',
'video review', 'faqs'
)
_image_key = 'cover url'
cmpFunct = cmpMovies
def _init(self, **kwds):
"""Initialize a Movie object.
*movieID* -- the unique identifier for the movie.
*title* -- the title of the Movie, if not in the data dictionary.
*myTitle* -- your personal title for the movie.
*myID* -- your personal identifier for the movie.
*data* -- a dictionary used to initialize the object.
*currentRole* -- a Character instance representing the current role
or duty of a person in this movie, or a Person
object representing the actor/actress who played
a given character in a Movie. If a string is
passed, an object is automatically built.
*roleID* -- if available, the characterID/personID of the currentRole
object.
*roleIsPerson* -- when False (default) the currentRole is assumed
to be a Character object, otherwise a Person.
*notes* -- notes for the person referred in the currentRole
attribute; e.g.: '(voice)'.
*accessSystem* -- a string representing the data access system used.
*titlesRefs* -- a dictionary with references to movies.
*namesRefs* -- a dictionary with references to persons.
*charactersRefs* -- a dictionary with references to characters.
*modFunct* -- function called returning text fields.
"""
title = kwds.get('title')
if title and 'title' not in self.data:
self.set_title(title)
self.movieID = kwds.get('movieID', None)
self.myTitle = kwds.get('myTitle', '')
def _reset(self):
"""Reset the Movie object."""
self.movieID = None
self.myTitle = ''
def set_title(self, title):
"""Set the title of the movie."""
d_title = analyze_title(title)
self.data.update(d_title)
def _additional_keys(self):
"""Valid keys to append to the data.keys() list."""
addkeys = []
if 'title' in self.data:
addkeys += ['canonical title', 'long imdb title',
'long imdb canonical title',
'smart canonical title',
'smart long imdb canonical title']
if 'episode of' in self.data:
addkeys += ['long imdb episode title', 'series title',
'canonical series title', 'episode title',
'canonical episode title',
'smart canonical series title',
'smart canonical episode title']
if 'cover url' in self.data:
addkeys += ['full-size cover url']
return addkeys
def guessLanguage(self):
"""Guess the language of the title of this movie; returns None
if there are no hints."""
lang = self.get('languages')
if lang:
lang = lang[0]
else:
country = self.get('countries')
if country:
lang = linguistics.COUNTRY_LANG.get(country[0])
return lang
def smartCanonicalTitle(self, title=None, lang=None):
"""Return the canonical title, guessing its language.
The title can be forced with the 'title' argument (internally
used) and the language can be forced with the 'lang' argument,
otherwise it's auto-detected."""
if title is None:
title = self.data.get('title', '')
if lang is None:
lang = self.guessLanguage()
return canonicalTitle(title, lang=lang)
def _getitem(self, key):
"""Handle special keys."""
if 'episode of' in self.data:
if key == 'long imdb episode title':
return build_title(self.data)
elif key == 'series title':
return self.data['episode of']['title']
elif key == 'canonical series title':
ser_title = self.data['episode of']['title']
return canonicalTitle(ser_title)
elif key == 'smart canonical series title':
ser_title = self.data['episode of']['title']
return self.smartCanonicalTitle(ser_title)
elif key == 'episode title':
return self.data.get('title', '')
elif key == 'canonical episode title':
return canonicalTitle(self.data.get('title', ''))
elif key == 'smart canonical episode title':
return self.smartCanonicalTitle(self.data.get('title', ''))
if 'title' in self.data:
if key == 'title':
return self.data['title']
elif key == 'long imdb title':
return build_title(self.data)
elif key == 'canonical title':
return canonicalTitle(self.data['title'])
elif key == 'smart canonical title':
return self.smartCanonicalTitle(self.data['title'])
elif key == 'long imdb canonical title':
return build_title(self.data, canonical=True)
elif key == 'smart long imdb canonical title':
return build_title(self.data, canonical=True, lang=self.guessLanguage())
if key == 'full-size cover url':
return self.get_fullsizeURL()
return None
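# The keys handled above are synthesized on access rather than stored in
# self.data; a hypothetical episode example (titles invented):
#
# ep = Movie(title='Pilot')
# ep.data['episode of'] = Movie(title='Some Series (2020)')
# ep['series title']             -> 'Some Series'
# ep['long imdb episode title']  -> the combined series/episode string
#                                   produced by build_title()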
def getID(self):
"""Return the movieID."""
return self.movieID
def __bool__(self):
"""The Movie is "false" if the self.data does not contain a title."""
# XXX: check the title and the movieID?
return 'title' in self.data
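# Truth-value sketch: a Movie is falsy until it knows its title.
#
# bool(Movie())                     -> False
# bool(Movie(title='Heat (1995)'))  -> True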
def isSameTitle(self, other):
"""Return true if this and the compared object have the same
long imdb title and/or movieID.
"""
# XXX: obsolete?
if not isinstance(other, self.__class__):
return False
if 'title' in self.data and 'title' in other.data and \
build_title(self.data, canonical=False) == build_title(other.data, canonical=False):
return True
if self.accessSystem == other.accessSystem and \
self.movieID is not None and self.movieID == other.movieID:
return True
return False
isSameMovie = isSameTitle # XXX: just for backward compatibility.
def __contains__(self, item):
"""Return true if the given Person object is listed in this Movie,
or if the given Character is represented in this Movie."""
from .Person import Person
from .Character import Character
from .Company import Company
if isinstance(item, Person):
for p in flatten(self.data, yieldDictKeys=True, scalar=Person,
toDescend=(list, dict, tuple, Movie)):
if item.isSame(p):
return True
elif isinstance(item, Character):
for p in flatten(self.data, yieldDictKeys=True, scalar=Person,
toDescend=(list, dict, tuple, Movie)):
if item.isSame(p.currentRole):
return True
elif isinstance(item, Company):
for c in flatten(self.data, yieldDictKeys=True, scalar=Company,
toDescend=(list, dict, tuple, Movie)):
if item.isSame(c):
return True
elif isinstance(item, str):
return item in self.data
return False
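# Membership sketch (hypothetical objects; matching relies on isSame() and
# on flatten() walking the nested data structures):
#
# person = Person(name='Keanu Reeves', personID='0000206')
# movie = Movie(title='The Matrix (1999)')
# movie.data['cast'] = [person]
# person in movie   -> True   (compared via Person.isSame)
# 'cast' in movie   -> True   (plain strings just check the data keys)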
def __deepcopy__(self, memo):
"""Return a deep copy of a Movie instance."""
m = Movie(title='', movieID=self.movieID, myTitle=self.myTitle,
myID=self.myID, data=deepcopy(self.data, memo),
currentRole=deepcopy(self.currentRole, memo),
roleIsPerson=self._roleIsPerson,
notes=self.notes, accessSystem=self.accessSystem,
titlesRefs=deepcopy(self.titlesRefs, memo),
namesRefs=deepcopy(self.namesRefs, memo),
charactersRefs=deepcopy(self.charactersRefs, memo))
m.current_info = list(self.current_info)
m.set_mod_funct(self.modFunct)
return m
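# copy.deepcopy(movie) routes through the method above, so the clone owns
# independent copies of the data and reference dictionaries:
#
# import copy
# clone = copy.deepcopy(movie)
# clone.data is movie.data        -> False
# clone.movieID == movie.movieID  -> True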
def __repr__(self):
"""String representation of a Movie object."""
# XXX: add also currentRole and notes, if present?
if 'long imdb episode title' in self:
title = self.get('long imdb episode title')
else:
title = self.get('long imdb title')
return '<Movie id:%s[%s] title:_%s_>' % (self.movieID, self.accessSystem, title)
def __str__(self):
"""Simply print the short title."""
return self.get('title', '')
def summary(self):
"""Return a string with a pretty-printed summary for the movie."""
if not self:
return ''
def _nameAndRole(personList, joiner=', '):
"""Build a pretty string with name and role."""
nl = []
for person in personList:
n = person.get('name', '')
if person.currentRole:
n += ' (%s)' % person.currentRole
nl.append(n)
return joiner.join(nl)
s = 'Movie\n=====\nTitle: %s\n' % self.get('long imdb canonical title', '')
genres = self.get('genres')
if genres:
s += 'Genres: %s.\n' % ', '.join(genres)
director = self.get('director')
if director:
s += 'Director: %s.\n' % _nameAndRole(director)
writer = self.get('writer')
if writer:
s += 'Writer: %s.\n' % _nameAndRole(writer)
cast = self.get('cast')
if cast:
cast = cast[:5]
s += 'Cast: %s.\n' % _nameAndRole(cast)
runtime = self.get('runtimes')
if runtime:
s += 'Runtime: %s.\n' % ', '.join(runtime)
countries = self.get('countries')
if countries:
s += 'Country: %s.\n' % ', '.join(countries)
lang = self.get('languages')
if lang:
s += 'Language: %s.\n' % ', '.join(lang)
rating = self.get('rating')
if rating:
s += 'Rating: %s' % rating
nr_votes = self.get('votes')
if nr_votes:
s += ' (%s votes)' % nr_votes
s += '.\n'
plot = self.get('plot')
if not plot:
plot = self.get('plot summary')
if plot:
plot = [plot]
if plot:
plot = plot[0]
i = plot.find('::')
if i != -1:
plot = plot[:i]
s += 'Plot: %s' % plot
return s
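# Typical use of summary() (output trimmed; which lines appear depends on
# the information already fetched for this movie):
#
# print(movie.summary())
# Movie
# =====
# Title: Matrix, The (1999)
# Genres: Action, Sci-Fi.
# ...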