graph = get_image()
return graph
def get_plot_girls_secondary(**kwargs):
plt.switch_backend('AGG')
data = kwargs.get('data')
for _ in data.shape:
ger = (data['enrollment'] / data['age_12_to_16_years']) * 100
academic_year = data.academic_year
sns.set(font_scale=1)
sns.set_style("white")
ax = ger.plot.bar(figsize=(15, 6), color='green')
sns.despine(left=True, bottom=True)
# label and title
ax.set_xticklabels(np.arange(len(academic_year)))
ax.set_title('Gross Enrollment Ratio (%) for Girls in Secondary School in St. Lucia', size=18)
ax.set_xticklabels(academic_year)
for tick in ax.get_xticklabels():
tick.set_rotation(-30)
ax.set(xlabel='Academic Year', ylabel='Gross enrollment rate (%)')
# annotations
for p in ax.patches:
ax.annotate(format(p.get_height(), '.2f'),
(p.get_x() + p.get_width() / 2., p.get_height()),
ha='center', va='center',
xytext=(0, 9),
textcoords='offset points')
# adjust legend
plt.tight_layout()
graph = get_image()
return graph
def get_plot_secondary(**kwargs):
plt.switch_backend('AGG')
data = kwargs.get('data')
data_boys = kwargs.get('data_boys')
data_girls = kwargs.get('data_girls')
for _ in data.shape:
ger_boys = (data_boys['enrollment'] / data_boys['age_12_to_16_years']) * 100
ger_girls = (data_girls['enrollment'] / data_girls['age_12_to_16_years']) * 100
academic_year = data_girls.academic_year
title = 'Trend of GER for Secondary Schools in St. Lucia'
plt.figure(figsize=(10, 8))
plt.title(title)
plt.plot(academic_year, ger_boys, 'b-', label='boys')
plt.plot(academic_year, ger_girls, 'g-', label='girls')
plt.xticks(rotation=60)
# plt.ylim(0, max(y) + 100)
plt.ylabel("Gross Enrollment Ratio for boys and girls in Secondary School")
plt.xlabel("Academic Year")
plt.legend()
plt.grid()
plt.tight_layout()
graph = get_image()
return graph
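# Illustrative call (hypothetical DataFrames; the function only assumes the
# 'enrollment', 'age_12_to_16_years' and 'academic_year' columns used above):
#   graph = get_plot_secondary(data=df_all, data_boys=df_boys, data_girls=df_girls)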
def clean_secondary_name(name):
name = re.sub("[^a-zA-Z]+", "", name)
name = name.lower().replace('secondary', "")
name = name.replace('school', "")
return ' '.join(name.split())
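# Illustrative example (hypothetical school name, not from the original data):
#   clean_secondary_name("St. Mary's Secondary School")  -> "stmarys"
# Non-letters (including spaces) are stripped first, so the final split()/join() is a no-op.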
def match_name(name, schools, district_dict):
for school in schools:
if clean_secondary_name(name) == clean_secondary_name(getattr(school, 'school_name')):
district_code = getattr(school, 'district_name_id')
district_dict[name] = district_code
return district_code
return None
def get_district(school_code, schools, district_dict):
for school in schools:
if int(getattr(school, 'school_code')) == school_code:
district = getattr(school, 'district_name_id')
district_dict[school_code] = district
return district
return None
def csec_performance_plot(data, district_1, district_2):
left_out = set()
df = pd.DataFrame(data.values())
plt.switch_backend('AGG')
years = [int(y) for y in df['year'].drop_duplicates()]
years.sort()
min_year = min(years)
schools = School.objects.all()
# schools = School.objects.filter(category_of_school='public secondary')
N_DISTRICTS = District.objects.count()
scores = np.zeros((len(years), N_DISTRICTS))
n_tests = np.zeros((len(years), N_DISTRICTS))
passing_scores = np.zeros((len(years), N_DISTRICTS))
# cache school to district matches
district_dict = {}
for index, row in df.iterrows():
school_code = int(row['school_id'])
if school_code in district_dict:
district = district_dict[school_code]
else:
district = get_district(school_code, schools, district_dict)
if not district:
left_out.add(row['school_id'])
continue
year = int(row['year']) - min_year
n_tests[year][district - 1] += 1
score = row['overall_grade']
if score == 'I' or score == 'II' or score == 'III':
scores[year][district - 1] += 1
passing_scores = 100 * scores / n_tests
passing_scores = pd.DataFrame(passing_scores)
labels = ['District ' + str(d + 1) for d in range(N_DISTRICTS)]
if not (district_1 and district_2):
for d in range(N_DISTRICTS):
plt.plot(years, passing_scores[d])
else:
plt.plot(years, passing_scores[district_1 - 1])
plt.plot(years, passing_scores[district_2 - 1])
labels = ['District ' + str(district_1), 'District ' + str(district_2)]
plt.xticks([min(years), max(years)])
plt.legend(labels, loc='upper left', bbox_to_anchor=(1, 1.05))
plt.title("Percentage of Passing Scores (CSEC)")
plt.tight_layout()
graph = get_image()
plt.clf()
passing_scores = passing_scores.T
passing_scores.columns = years
passing_scores.index = ['District ' + str(d + 1) for d in range(N_DISTRICTS)]
ax = sns.heatmap(passing_scores, annot=True)
plt.tight_layout()
heatmap = get_image()
return [graph, heatmap, left_out]
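# Illustrative call (hypothetical queryset of CSEC results with the 'year',
# 'school_id' and 'overall_grade' fields used above):
#   graph, heatmap, unmatched = csec_performance_plot(results, district_1=2, district_2=5)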
# ===================================================================
# Outlier detection at district level
# ===================================================================
def get_outlier_district_plot(**kwargs):
plt.switch_backend('AGG')
school_enrollment = kwargs.get('x')
school_name = kwargs.get('y')
datamean = kwargs.get('data_mean')
input_school_type = kwargs.get('input_school_type')
academic_year = kwargs.get('academic_year')
district_input = kwargs.get('input_district')
fig, ax1 = plt.subplots(figsize=(10, 8))
ax1.set_title('Enrollment for District')
ax1.set_xlabel('School_Name')
ax1.set_ylabel('School_Scores')
ax1.bar(school_name, school_enrollment, color='b')
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(8)
tick.label.set_rotation('15')
plt.plot(school_name, datamean, linewidth=5, ls='solid', color='r')
plt.xlabel("School Name")
plt.ylabel("Enrollment")
plt.title(
"Enrollment for " + input_school_type + " schools for district " + district_input + " and " + academic_year + " academic year ")
plt.tight_layout()
graph = get_image()
return graph
# ==========================================================================
# Outlier detection at national level
# ==========================================================================
def get_outlier_national_plot(**kwargs):
plt.switch_backend('AGG')
school_enrollment = kwargs.get('x')
school_name = kwargs.get('y')
datamean = kwargs.get('data_mean')
input_school_type = kwargs.get('input_school_type')
academic_year = kwargs.get('academic_year')
fig, ax1 = plt.subplots(figsize=(12, 10))
ax1.set_title('Enrollment for Selected Year')
ax1.set_xlabel('School_Name')
ax1.bar(school_name, school_enrollment, width=0.1, color='b')
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(10)
tick.label.set_rotation('vertical')
plt.plot(school_name, datamean, linewidth=3, ls='solid', color='r')
plt.xlabel("School Name")
plt.ylabel("Enrollment")
plt.title("Enrollment for " + input_school_type + " schools for year " + academic_year)
plt.tight_layout()
graph = get_image()
return graph
def get_plot_regression(**kwargs):
plt.switch_backend('AGG')
plt.figure(figsize=(10, 8))
data = kwargs.get('data')
sns.set_theme(color_codes=True)
sns.regplot(x=data.enrollment, y=data.gdp_millions, data=data, x_estimator=np.mean, label='GDP');
sns.regplot(x=data.enrollment, y=data.educational_expenditure, data=data, x_estimator=np.mean,
label='Educational Expenditure');
sns.regplot(x=data.enrollment, y=data.government_expenditure, data=data, x_estimator=np.mean,
label='Government Expenditure');
plt.xlabel("Enrollment")
plt.ylabel("Expenditure")
plt.title("Linear Regression - Enrollment / GDP / Education / government expenditure")
plt.legend()
plt.tight_layout()
graph = get_image()
return graph
def get_plot_gdp_regress(**kwargs):
plt.switch_backend('AGG')
plt.figure(figsize=(10, 8))
data = kwargs.get('data')
sns.set_theme(color_codes=True)
sns.jointplot(x=data.enrollment, y=data.gdp_millions, data=data, x_estimator=np.mean,
label='GDP', kind="reg");
plt.xlabel("Enrollment")
plt.ylabel("Expenditure")
plt.tight_layout()
graph = get_image()
return graph
def get_enrollment_joint_pearsons(**kwargs):
plt.switch_backend('AGG')
plt.figure(figsize=(10, 8))
data = kwargs.get('data')
sns.set_theme(color_codes=True)
import scipy.stats as stats
graph = sns.jointplot(data=data, x=data.enrollment, y=data.gdp_millions)
r, p = stats.pearsonr(x=data.enrollment, y=data.gdp_millions)
phantom, = graph.ax_joint.plot([], [], linestyle="", alpha=0)
# here graph is not an Axes but a JointGrid, so we access the axes through the ax_joint attribute
graph.ax_joint.legend([phantom], ['r={:f}, p={:f}'.format(r, p)])
plt.tight_layout()
graph = get_image()
return graph
def get_enrollment_joint_spearman(**kwargs):
plt.switch_backend('AGG')
plt.figure(figsize=(10, 8))
data = kwargs.get('data')
sns.set_theme(color_codes=True)
graph = sns.jointplot(data=data, x=data.enrollment, y=data.gdp_millions)
import scipy.stats as stats  # local import, mirroring get_enrollment_joint_pearsons above
r, p = stats.spearmanr(data.enrollment, data.gdp_millions)
phantom, = graph.ax_joint.plot([], [], linestyle="", alpha=0)
# here graph is not an Axes but a JointGrid, so we access the axes through the ax_joint attribute
graph.ax_joint.legend([phantom], ['r={:f}, p={:f}'.format(r, p)])
plt.tight_layout()
graph = get_image()
return graph
def get_enrollment_multicollinearity(**kwargs):
plt.figure(figsize=(10, 8))
plt.switch_backend('AGG')
data = kwargs.get('data')
data = data[["educational_expenditure", "gdp_millions", "government_expenditure", "primary_school_expenditure",
"secondary_school_expenditure", "enrollment", "age_5_to_11_years", "age_12_to_16_years"]]
sns.set(style='white')
corr = data.corr()
mask = np.zeros_like(corr, dtype=bool)  # np.bool is removed in newer NumPy; plain bool behaves the same
mask[np.triu_indices_from(mask)] = True
f, ax = plt.subplots(figsize=(12, 10))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.9, center=0, square=True, linewidths=.5, annot=True,
cbar_kws={'shrink': .5});
plt.tight_layout()
graph = get_image()
return graph
def get_kernel_density(**kwargs):
plt.figure(figsize=(10, 8))
plt.switch_backend('AGG')
data = kwargs.get('data')
sns.kdeplot(data=data.enrollment)
sns.despine()
plt.tight_layout()
graph = get_image()
return graph
def plot_national_gender_enrollment(**kwargs):
plt.switch_backend('AGG')
data_boys_primary = kwargs.get('data_boys_primary')
data_boys_secondary = kwargs.get('data_boys_secondary')
data_girls_primary = kwargs.get('data_girls_primary')
data_girls_secondary = kwargs.get('data_girls_secondary')
title = 'Trend in enrollments over time'
plt.figure(figsize=(10, 8))
plt.title(title)
plt.plot(data_boys_primary['academic_year'], data_boys_primary['enrollment'], 'b-',
label='Boys enrolled in Primary School')
plt.plot(data_boys_secondary['academic_year'], data_boys_secondary['enrollment'], 'bo',
label='Boys enrolled in Secondary School')
plt.plot(data_girls_primary['academic_year'], data_girls_primary['enrollment'], 'r-',
label='Girls enrolled in Primary School')
plt.plot(data_girls_secondary['academic_year'], data_girls_secondary['enrollment'], 'ro',
label='Girls enrolled in Secondary School')
plt.xticks(rotation=60)
# plt.ylim(0, max(y) + 100)
plt.ylabel("National Enrollment Trends")
plt.xlabel("Academic Year")
plt.legend()
plt.grid()
plt.tight_layout()
graph = get_image()
return graph
def national_gender_enrollment_hist(**kwargs):
data_boys_primary = kwargs.get('data_boys_primary')
data_boys_secondary = kwargs.get('data_boys_secondary')
data_girls_primary = kwargs.get('data_girls_primary')
data_girls_secondary = kwargs.get('data_girls_secondary')
# boys primary mean of distribution
mu_boys_primary = mean(data_boys_primary.enrollment)
mu_girls_primary = mean(data_girls_primary.enrollment)
mu_boys_secondary = mean(data_boys_secondary.enrollment)
mu_girls_secondary = mean(data_girls_secondary.enrollment)
sigma_boys_primary = std(data_boys_primary.enrollment)
sigma_girls_primary = std(data_girls_primary.enrollment)
sigma_boys_secondary = std(data_boys_secondary.enrollment)
sigma_girls_secondary = std(data_girls_secondary.enrollment)
x_mu_boys_primary = mu_boys_primary + sigma_boys_primary * np.random.randn(437)
x_mu_girls_primary = mu_girls_primary + sigma_girls_primary * np.random.randn(437)
x_mu_boys_secondary = mu_boys_secondary + sigma_boys_secondary * np.random.randn(437)
x_mu_girls_secondary = mu_girls_secondary + sigma_girls_secondary * np.random.randn(437)
num_bins = 50
# fig, ax = plt.subplots()
fig, axs = plt.subplots(2, 2, figsize=(15, 15))
# the histogram of the data
n_boys_primary, bins_boys_primary, patches_boys_primary = axs[0, 0].hist(x_mu_boys_primary, num_bins, density=True)
n_boys_secondary, bins_boys_secondary, patches_boys_secondary = axs[0, 1].hist(x_mu_boys_secondary, num_bins,
density=True)
n_girls_primary, bins_girls_primary, patches_girls_primary = axs[1, 0].hist(x_mu_girls_primary, num_bins,
density=True)
n_girls_secondary, bins_girls_secondary, patches_girls_secondary = axs[1, 1].hist(x_mu_girls_secondary, num_bins,
density=True)
# add a 'best fit' line
y_boys_primary = ((1 / (np.sqrt(2 * np.pi) * sigma_boys_primary)) *
np.exp(-0.5 * (1 / sigma_boys_primary * (bins_boys_primary - mu_boys_primary)) ** 2))
y_boys_secondary = ((1 / (np.sqrt(2 * np.pi) * sigma_boys_secondary)) *
np.exp(-0.5 * (1 / sigma_boys_secondary * (bins_boys_secondary - mu_boys_secondary)) ** 2))
y_girls_primary = ((1 / (np.sqrt(2 * np.pi) * sigma_girls_primary)) *
np.exp(-0.5 * (1 / sigma_girls_primary * (bins_girls_primary - mu_girls_primary)) ** 2))
y_girls_secondary = ((1 / (np.sqrt(2 * np.pi) * sigma_girls_secondary)) *
np.exp(-0.5 * (1 / sigma_girls_secondary * (bins_girls_secondary - mu_girls_secondary)) ** 2))
for ax in axs.flat:
ax.set(xlabel='Enrollment', ylabel='Probability Density')
# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
ax.label_outer()
axs[0, 0].plot(bins_boys_primary, y_boys_primary, '--')
axs[0, 0].set_title('Primary-Boys')
axs[0, 1].plot(bins_boys_secondary, y_boys_secondary, '--')
axs[0, 1].set_title('Secondary-Boys')
axs[1, 0].plot(bins_girls_primary, y_girls_primary, '--')
axs[1, 0].set_title('Primary-Girls')
axs[1, 1].plot(bins_girls_secondary, y_girls_secondary, '--')
axs[1, 1].set_title('Secondary-Girls')
plt.tight_layout()
graph = get_image()
return graph
def plot_national_education_census(**kwargs):
plt.switch_backend('AGG')
data = kwargs.get('data')
title = 'Education Census Over time'
plt.figure(figsize=(10, 8))
plt.title(title)
plt.plot(data['academic_year'], data['age_3_to_4_years'], 'b-',
label='Population of Age Group 3-4')
plt.plot(data['academic_year'], data['age_5_to_11_years'], 'y-',
label='Population of Age Group 5-11')
plt.plot(data['academic_year'], data['age_12_to_16_years'], 'r-',
label='Population of Age Group 12-16')
plt.xticks(rotation=60)
# plt.ylim(0, max(y) + 100)
plt.ylabel("Education Census Trends")
plt.xlabel("Academic Year")
plt.legend()
plt.grid()
plt.tight_layout()
graph = get_image()
return graph
def national_education_census_hist(**kwargs):
data = kwargs.get('data')
# mean of distribution for each age group
mu_data_3_4 =
# === next file: appears to be a Django cx_Oracle database backend (truncated at both ends) ===
and x.code == 2091 and 'ORA-02291' in x.message:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def schema_editor(self, *args, **kwargs):
"Returns a new instance of this backend's SchemaEditor"
return DatabaseSchemaEditor(self, *args, **kwargs)
# Oracle doesn't support savepoint commits. Ignore them.
def _savepoint_commit(self, sid):
pass
def _set_autocommit(self, autocommit):
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
To check constraints, we set constraints to immediate. Then, when we're done, we must
ensure they are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
if hasattr(self.connection, 'ping'): # Oracle 10g R2 and higher
self.connection.ping()
else:
# Use a cx_Oracle cursor directly, bypassing Django's utilities.
self.connection.cursor().execute("SELECT 1 FROM DUAL")
except DatabaseError:
return False
else:
return True
@cached_property
def oracle_version(self):
with self.temporary_connection():
version = self.connection.version
try:
return int(version.split('.')[0])
except ValueError:
return None
class OracleParam(object):
"""
Wrapper object for formatting parameters for Oracle. If the string
representation of the value is large enough (greater than 4000 characters)
the input size needs to be set as CLOB. Alternatively, if the parameter
has an `input_size` attribute, then the value of the `input_size` attribute
will be used instead. Otherwise, no input size will be set for the
parameter when executing the query.
"""
def __init__(self, param, cursor, strings_only=False):
# With raw SQL queries, datetimes can reach this function
# without being converted by DateTimeField.get_db_prep_value.
if settings.USE_TZ and isinstance(param, datetime.datetime):
if timezone.is_naive(param):
warnings.warn("Oracle received a naive datetime (%s)"
" while time zone support is active." % param,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
param = timezone.make_aware(param, default_timezone)
param = param.astimezone(timezone.utc).replace(tzinfo=None)
# Oracle doesn't recognize True and False correctly in Python 3.
# The conversion done below works both in 2 and 3.
if param is True:
param = "1"
elif param is False:
param = "0"
if hasattr(param, 'bind_parameter'):
self.force_bytes = param.bind_parameter(cursor)
elif isinstance(param, six.memoryview):
self.force_bytes = param
else:
self.force_bytes = convert_unicode(param, cursor.charset,
strings_only)
if hasattr(param, 'input_size'):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif isinstance(param, six.string_types) and len(param) > 4000:
# Mark any string param greater than 4000 characters as a CLOB.
self.input_size = Database.CLOB
else:
self.input_size = None
class VariableWrapper(object):
"""
An adapter class for cursor variables that prevents the wrapped object
from being converted into a string when used to instantiate an OracleParam.
This can be used generally for any other object that should be passed into
Cursor.execute as-is.
"""
def __init__(self, var):
self.var = var
def bind_parameter(self, cursor):
return self.var
def __getattr__(self, key):
return getattr(self.var, key)
def __setattr__(self, key, value):
if key == 'var':
self.__dict__[key] = value
else:
setattr(self.var, key, value)
class InsertIdVar(object):
"""
A late-binding cursor variable that can be passed to Cursor.execute
as a parameter, in order to receive the id of the row created by an
insert statement.
"""
def bind_parameter(self, cursor):
param = cursor.cursor.var(Database.NUMBER)
cursor._insert_id_var = param
return param
class FormatStylePlaceholderCursor(object):
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
We also do automatic conversion between Unicode on the Python side and
UTF-8 -- for talking to Oracle -- in here.
"""
charset = 'utf-8'
def __init__(self, connection):
self.cursor = connection.cursor()
# Necessary to retrieve decimal values without rounding error.
self.cursor.numbersAsStrings = True
# Default arraysize of 1 is highly sub-optimal.
self.cursor.arraysize = 100
def _format_params(self, params):
try:
return dict((k, OracleParam(v, self, True)) for k, v in params.items())
except AttributeError:
return tuple(OracleParam(p, self, True) for p in params)
def _guess_input_sizes(self, params_list):
# Try dict handling; if that fails, treat as sequence
if hasattr(params_list[0], 'keys'):
sizes = {}
for params in params_list:
for k, value in params.items():
if value.input_size:
sizes[k] = value.input_size
self.setinputsizes(**sizes)
else:
# It's not a list of dicts; it's a list of sequences
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
self.setinputsizes(*sizes)
def _param_generator(self, params):
# Try dict handling; if that fails, treat as sequence
if hasattr(params, 'items'):
return dict((k, v.force_bytes) for k, v in params.items())
else:
return [p.force_bytes for p in params]
def _fix_for_params(self, query, params):
# cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
# does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
if params is None:
params = []
query = convert_unicode(query, self.charset)
elif hasattr(params, 'keys'):
# Handle params as dict
args = dict((k, ":%s" % k) for k in params.keys())
query = convert_unicode(query % args, self.charset)
else:
# Handle params as sequence
args = [(':arg%d' % i) for i in range(len(params))]
query = convert_unicode(query % tuple(args), self.charset)
return query, self._format_params(params)
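# Illustrative example (hypothetical query) of the placeholder rewriting done above:
#   "SELECT * FROM app_school WHERE id = %s"      ->  "SELECT * FROM app_school WHERE id = :arg0"
#   "SELECT * FROM app_school WHERE id = %(pk)s"  ->  "SELECT * FROM app_school WHERE id = :pk"   (dict params)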
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params)
self._guess_input_sizes([params])
try:
return self.cursor.execute(query, self._param_generator(params))
except Database.DatabaseError as e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def executemany(self, query, params=None):
if not params:
# No params given, nothing to do
return None
# uniform treatment for sequences and iterables
params_iter = iter(params)
query, firstparams = self._fix_for_params(query, next(params_iter))
# we build a list of formatted params; as we're going to traverse it
# more than once, we can't make it lazy by using a generator
formatted = [firstparams] + [self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
try:
return self.cursor.executemany(query,
[self._param_generator(p) for p in formatted])
except Database.DatabaseError as e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def fetchone(self):
row = self.cursor.fetchone()
if row is None:
return row
return _rowfactory(row, self.cursor)
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
return tuple(_rowfactory(r, self.cursor)
for r in self.cursor.fetchmany(size))
def fetchall(self):
return tuple(_rowfactory(r, self.cursor)
for r in self.cursor.fetchall())
def var(self, *args):
return VariableWrapper(self.cursor.var(*args))
def arrayvar(self, *args):
return VariableWrapper(self.cursor.arrayvar(*args))
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return CursorIterator(self.cursor)
class CursorIterator(six.Iterator):
"""Cursor iterator wrapper that invokes our custom row factory."""
def __init__(self, cursor):
self.cursor = cursor
self.iter = iter(cursor)
def __iter__(self):
return self
def __next__(self):
return _rowfactory(next(self.iter), self.cursor)
def _rowfactory(row, cursor):
# Cast numeric values as the appropriate Python type based upon the
# cursor description, and convert strings to unicode.
casted = []
for value, desc in zip(row, cursor.description):
if value is not None and desc[1] is Database.NUMBER:
precision, scale = desc[4:6]
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point
# This will normally be an integer from a sequence,
# but it could be a decimal value.
if '.' in value:
value = decimal.Decimal(value)
else:
value = int(value)
else:
# FLOAT column: binary-precision floating point.
# This comes from FloatField columns.
value = float(value)
elif precision > 0:
# NUMBER(p,s) column: decimal-precision fixed point.
# This comes from IntField and DecimalField columns.
if scale == 0:
value = int(value)
else:
value = decimal.Decimal(value)
elif '.' in value:
# No type information. This normally comes from a
# mathematical expression in the SELECT list. Guess int
# or Decimal based on whether it has a decimal point.
value = decimal.Decimal(value)
else:
value = int(value)
# datetimes are returned as TIMESTAMP, except the results
# of "dates" queries, which are returned as DATETIME.
elif desc[1] in (Database.TIMESTAMP, Database.DATETIME):
# Confirm that dt is naive before overwriting its tzinfo.
if settings.USE_TZ and value is not None and timezone.is_naive(value):
value = value.replace(tzinfo=timezone.utc)
elif desc[1] in (Database.STRING, Database.FIXED_CHAR,
Database.LONG_STRING):
value = to_unicode(value)
casted.append(value)
return tuple(casted)
def to_unicode(s):
"""
Convert strings to Unicode objects (and return all other data types
unchanged).
"""
if isinstance(s, six.string_types):
return force_text(s)
return s
# === next file: Prelude.py from the sleibrock/chain.py repository ===
#!/usr/bin/env python
"""
Prelude.py
Aimed to recreate some basic functions from
GHC's "Prelude" collection
Included in this package:
* Typeclass definition dictionary
* Typechecking functions
* Basic Prelude collection for Unit calculations
Some rules:
* Don't use keyword argument functions
*
"""
# Typeclass stuff
# Use these to enforce rules amongst Unit functions
# Int - units that represent whole numbers (int, bool)
# Num - numbers used in math (ints, floats, comp)
# Real - numbers that are non-imaginary (ints, floats)
# Ord - types that can be ordered based on their value(s)
# Enum - types that have positions or storage of some kind
# Fold - values that can gain or lose shape
# String - supports only the string-type (strings != lists)
# Func - only supports callable types aka functions/methods
# Any - supports any type, literally
Int, Num, Real, Ord, Enum, Fold, String, Func, Any = range(9)
# TODO: does this system allow modularity/extendability non-builtins?
typeclasses = {
Int : (int, bool),
Num : (int, float, complex),
Real : (int, float),
Ord : (int, float, complex, bool, str, list, bytes),
Enum : (list, tuple, set, frozenset, dict, str),
Fold : (int, float, complex, bool, list, tuple, str, bytes),
String : (str,),
Func : (type(lambda:None),),
Any : (object,),
}
def typestr(tc):
"""
typestr :: Int -> String
Return a string of the Typeclass' name to be used in reporting
"""
return ["Int","Num","Real","Ord","Enum","Fold","String","Func","Any"][tc]
def type_check(*types):
"""
type_check :: [Int] -> Function -> [a] -> Function
A wrapper used to enforce type-checking at the wrapper instead of
inside a function definition. Will raise TypeError when an argument does
not match the declared Typeclasses given.
"""
def decorator(func):
def type_checker(*args):
for t, v in zip(types, args):
if isnt_type(t, v):
raise TypeError("{0} is not of type {1}".format(v, typestr(t)))
return func(*args)
return type_checker
return decorator
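# Illustrative (hypothetical) use of the type_check decorator defined above:
#   @type_check(Real, Real)
#   def hypot(a, b):
#       return (a * a + b * b) ** 0.5
#   hypot(3, 4)    # -> 5.0
#   hypot("3", 4)  # raises TypeError ("3 is not of type Real")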
def get_types(cls):
"""
get_types :: Int -> [a]
Return the types belonging to a typeclass
"""
if cls not in typeclasses:
raise Exception("get_types() - Type doesn't exist")
return typeclasses[cls]
# Typeclass check functions
def is_type(cls, *value):
"""
is_type :: Int -> a -> Bool
Check if value(s) belongs in a typeclass
"""
return all(any(isinstance(v, c) for c in typeclasses[cls]) for v in value)  # every value must belong to the typeclass
def isnt_type(cls, *value):
"""
isnt_type :: Int -> a -> Bool
Wrapper for is_type so you can avoid writing "not is_type"
"""
return not is_type(cls, *value)
# A curried version of is_type so you can pass it to Unit values
def type_of(cls):
"""
type_of :: Int -> a -> Bool
"""
def itype(data):
return is_type(cls, data)
return itype
def type_not(cls):
"""
type_not :: Int -> a -> Bool
Inverse of type_of for Unit operations
"""
def itype_not(data):
return not is_type(cls, data)
return itype_not
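# Illustrative examples (not part of the original module):
#   type_of(Num)(3.14)      # -> True
#   type_not(String)(3.14)  # -> True (3.14 is not a str)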
# This essentially returns the entire Unit container
def id(*data):
"""
id :: a -> a
A mathematical "id" function to return what was passed
"""
if len(data) > 1:
return data
else:
return data[0]
# Equivalent to putStrLn from Haskell.GHC
def puts(data):
"""
puts :: String -> IO ()
Will always return None
"""
print(data)
# Head and Tail from Haskell.GHC
def head(data):
"""
head :: [a] -> [a]
Return the first item in an Enumerable type
If data is not a list type, return it
"""
if isnt_type(Enum, data):
return data
return data[0]
# Tail will be undefined (None) if not a list
def tail(data):
"""
tail :: [a] -> [a]
Return the tail (everything after the first)
If data is not a list, return None
"""
if isnt_type(Enum, data):
return None
return data[1:]
# Take a number of elements from a list
def take(amount):
"""
take :: Int -> [a] -> [a]
Take a number of elements from an Enumerable
If the unit data is not a list, return None
"""
if isnt_type(Num, amount):
raise Exception("take() - value given not an Integer")
def itake(data):
if not isinstance(data, list):
return None
return data[:amount]
return itake
# Drop a number of elements from a list
def drop(amount):
"""
drop :: Int -> [a] -> [a]
Drop values and return the remainder
If the unit data is not a list, return None
"""
if not isinstance(amount, int):
raise Exception("drop() - value given not an Integer")
def idrop(data):
if not isinstance(data, list):
return None
return data[amount:]
return idrop
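# Illustrative examples of the curried helpers above (hypothetical inputs):
#   take(2)([1, 2, 3, 4])  # -> [1, 2]
#   drop(2)([1, 2, 3, 4])  # -> [3, 4]
#   take(2)(42)            # -> None (not a list)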
# Successor of a value (increment on Int)
def succ(value):
"""
succ :: Num a => a -> a
Return the successor of the given value
"""
if isnt_type(Num, value):
raise Exception("succ() - value not Ord class")
return value + 1
# Predecessor of a value (decrement on Int)
def pred(value):
"""
pred :: Num a => a -> a
Return the predecessor of the given value
"""
if isnt_type(Num, value):
raise Exception("pred() - value not Ord class")
return value - 1
# Redefine common math ops so we can enforce types
def add(left_value, right_value):
"""
add :: Fold a => a -> a -> a
Add together two values (can be non-numerical)
"""
if isnt_type(Fold, left_value, right_value):
raise Exception("add() - Non-foldable types given")
return left_value + right_value
def sub(left_value, right_value):
"""
sub :: Num a => a -> a -> a
Subtract two values and return the difference
"""
if isnt_type(Num, left_value, right_value):
raise Exception("sub() - Non-numeric types given")
return left_value - right_value
def mul(left_value, right_value):
"""
mul :: Num a => a -> a -> a
Multiply two values together and return the product
"""
if isnt_type(Num, left_value, right_value):
raise Exception("mul() - Non-numeric types given")
return left_value * right_value
def div(left_value, right_value):
"""
div :: Num a => a -> a -> a
Divide one number by another, except when right_value == 0
"""
if isnt_type(Num, left_value, right_value):
raise Exception("div() - Non-numeric types given")
if right_value == 0:
raise ZeroDivisionError
return left_value / right_value
# Negate a value (Unit(5) | negate => -5)
def neg(value):
"""
neg :: Num a => a -> a
Negate a numerical value (-x)
"""
if isnt_type(Num, value):
raise Exception("neg() - Non-numeric type given")
return (-value)
# Even and odd, only works for Real numbers
def odd(value):
"""
odd :: Real a => a -> Bool
Determine if a number is odd or not
"""
if isnt_type(Real, value):
raise Exception("odd() - non-real type given")
return bool(value & 1)
def even(value):
"""
even :: Real a => a -> Bool
Determine if a number is even or not
"""
if isnt_type(Real, value):
raise Exception("even() - non-real type given")
return bool(not value & 1)
# Exponentiate a number by a number
# Curries pow(x,y)
def expo(value):
"""
expo :: Num a => a -> a -> a
Exponentiate a number by an exponent
"""
def iexp(base):
if isnt_type(Num, value, base):
raise Exception("expo() - invalid input")
return pow(base, value)
return iexp
# Square a number (wraps pow)
def square(value):
"""
square :: Num a => a -> a
Square a number (wraps around expo())
"""
if isnt_type(Num, value):
raise Exception("square() - invalid input")
return pow(value, 2)
# Cubes a number (wraps pow)
def cube(value):
"""
cube :: Num a => a -> a
Cube a number (wraps around expo())
"""
if isnt_type(Num, value):
raise Exception("cube() - invalid input")
return pow(value, 3)
# Scale a list of numbers by a scalar
# If we run into a non-numeric type, raise exception
def scale(value):
"""
scale :: Num a => a -> [a] -> [a]
"""
def iscale(data):
res = list()
try:
for x in data:
res.append(x*value)
return res
except Exception as e:
raise Exception("scale() - non-numeric type encountered")
return iscale
# Take a function with no arguments and
# collects the results a number of times
def collect(amount):
"""
collect :: Int -> (() -> a) -> [a]
Call a non-argument function N times and
return the results (ie. random.random())
"""
def icoll(fun):
res = list()
for x in range(amount):
res.append(fun())
return res
return icoll
# Span a list from 0 to x
# Usage: Unit(10) | span => [0..10]
def span(value):
"""
span :: Int -> [Int]
Create a span of numbers from 0 to N
"""
if isnt_type(Int, value):
raise Exception("span() - invalid range type")
return list(range(value))
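# Illustrative: span(5) -> [0, 1, 2, 3, 4]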
# Create a list from beginning to end
# Desired use: Unit(0) | to(10) => [0..10]
# === next file: module_build_service/web/mmd_resolver.py ===
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import collections
import itertools
import solv
from module_build_service.common import log, conf, models
class MMDResolver(object):
"""
Resolves dependencies between Module metadata objects.
"""
def module_dep(self, name, stream=None, version=None, version_op=None):
"""Create a libsolv Dependency
Dependency could be in following forms:
module(name)
module(name:stream)
module(name:stream) op version
:param str name: module name.
:param str stream: optional module stream. If specified, dependency
will be the 2nd form above.
:param str version: optional module version.
:param version_op: optional libsolv relational flag constant. If
specified, dependency will be the 3rd form above. Defaults to
``solv.REL_EQ``.
:return: a libsolv Dependency object
"""
if name and stream:
dep = self.pool.Dep("module({}:{})".format(name, stream))
else:
dep = self.pool.Dep("module({})".format(name))
if version:
dep = dep.Rel(version_op or solv.REL_EQ, self.pool.Dep(version))
return dep
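# Illustrative examples of the dependency strings built above (stream/version values are hypothetical):
#   module_dep("gtk")                       -> module(gtk)
#   module_dep("gtk", "1")                  -> module(gtk:1)
#   module_dep("platform", "el8", "80100")  -> module(platform:el8) = 80100  (solv.REL_EQ by default)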
def solvable_provides(self, solvable, name, stream=None, version=None, version_op=None):
"""Add a Provides: dependency to a solvable
This is parallel to RPM-world ``Provides: perl(foo)`` or ``Requires: perl(foo)``.
Please refer to :meth:`module_dep` for detailed information of
arguments name, stream, version and version_op.
:param solvable: a solvable object the Provides dependency will be
added to.
"""
dep = self.module_dep(name, stream, version, version_op)
solvable.add_deparray(solv.SOLVABLE_PROVIDES, dep)
def __init__(self):
self.pool = solv.Pool()
self.pool.setarch("x86_64")
self.build_repo = self.pool.add_repo("build")
self.available_repo = self.pool.add_repo("available")
# Solvable objects representing modules stored in a list grouped by
# the name:stream.
self.solvables = {}
def _deps2reqs(self, deps, base_module_stream_overrides=None, exact_versions=True):
"""
Helper method converting dependencies from MMD to solv.Dep instance expressing
the dependencies in a way libsolv accepts as input.
So for example for following input:
deps = [{'gtk': ['1'], 'foo': ['1']}]
The resulting solv.Dep expression will be:
((module(gtk) with module(gtk:1)) and (module(foo) with module(foo:1)))
Base modules are handled in a special way in case when the stream of base module
contains version in the "x.y.z" format. For example "el8.0.0" or "el7.6.0".
In this case, the resulting solv.Dep expression for such base module will contain version
string computed using ModuleBuild.get_stream_version() method:
For example:
module(platform) with module(platform:el8) = 080200
The stream used to compute the version can be also overridden using the
`base_module_stream_overrides` dict which has base module name as a key and
the stream which will be used to compute the version as a value.
This is needed for cases when a module requires just "platform:el8" but was
in fact built against a particular platform stream, for example platform:el8.1.0.
In this case, such module should still require platform:el8, but in particular
version which is passed to this method using the `base_module_stream_overrides`.
When `exact_versions` is set to False, the base module dependency will contain
">=" operator instead of "=".
The "with" syntax is here to allow depending on "module(gtk)" meaning "any gtk".
This can happen in case {'gtk': []} is used as an input.
See the inline comments for more information.
:param list deps: List of dicts with dependency name as key and list of
streams as value.
:param dict base_module_stream_overrides: The key is base module name, value
is the stream string which will be used to compute `version` part of the
base module solv.Dep expression.
:param bool exact_versions: When set to False, the base module dependency
will contain ">=" operator instead of "=".
:rtype: solv.Dep
:return: solv.Dep instance with dependencies in form libsolv accepts.
"""
# There are relations between modules in `deps`. For example:
# deps = [{'gtk': ['1'], 'foo': ['1']}]" means "gtk:1 and foo:1" are both required.
# deps = [{'gtk': ['1', '2']}"] means "gtk:1 or gtk:2" are required.
# This method helps creating such relations using following syntax:
# rel_or_dep(solv.Dep, solve.REL_OR, stream_dep(name, stream))
# rel_or_dep(solv.Dep, solve.REL_AND, stream_dep(name, stream))
# rel_or_dep(solv.Dep, solve.REL_WITH, stream_dep(name, stream))
# rel_or_dep(solv.Dep, solve.REL_WITHOUT, stream_dep(name, stream))
rel_or_dep = lambda dep, op, rel: dep.Rel(op, rel) if dep is not None else rel
# Check each dependency dict in `deps` list and generate the solv requirements.
reqs = None
for dep_dicts in deps:
# Contains the solv.Dep requirements for current dict.
require = None
for name, streams in dep_dicts.items():
is_base_module = name in conf.base_module_names
# The req_pos will store solv.Dep expression for "positive" requirements.
# That is the case of 'gtk': ['1', '2'].
req_pos = None
# For each stream in `streams` for this dependency, generate the
# module(name:stream) solv.Dep and add REL_OR relations between them.
for stream in streams:
if is_base_module:
# Override the stream which is used to compute the stream version in case
# `base_module_stream_overrides` is set.
if base_module_stream_overrides and name in base_module_stream_overrides:
stream_for_version = base_module_stream_overrides[name]
else:
stream_for_version = stream
# In case x.y.z versioning is not used for this base module, do not
# use versions solv.Dep.
stream_version_str = str(
models.ModuleBuild.get_stream_version(
stream_for_version, right_pad=False))
if len(stream_version_str) < 5:
req_pos = rel_or_dep(
req_pos, solv.REL_OR, self.module_dep(name, stream))
else:
# The main reason why to use `exact_versions` is the case when
# adding deps for the input module we want to resolve. This module
# buildrequires exact stream version of base module against which it
# needs for building and we should never pull in different one.
# But for modules which are buildrequires of this input module, we
# want to use "base_module >= stream_version" relation, because they
# can be cherry-picked even when a newer base module stream_version is
# requested, for example:
# - foo buildrequires bar and also buildrequires platform:el8 = 080100.
# - bar:1 is built against platform:el8.0.0.
# - bar:2 is built against platform:el8.2.0.
# We need libsolv to allow cherry-picking "bar:1" and ignore "bar:2",
# because the latter is built against a newer platform stream version than
# requested, and such a newer version can be incompatible with the old one.
# So we express bar's dependencies on platform like this:
# - bar:1 buildrequires platform:el8 >= 080000.
# - bar:2 buildrequires platform:el8 >= 080200.
# Because the "foo" limits the solving to platform:el8 = 080100,
# the bar:2 won't be returned by libsolv, because 080100 < 080200.
# But bar:1 will be returned by libsolv, because it buildrequires
# platform 080000, which is lower than 080100.
op = solv.REL_EQ
if not exact_versions:
op |= solv.REL_GT
version = models.ModuleBuild.get_stream_version(
stream_for_version, right_pad=False
)
req_pos = rel_or_dep(
req_pos,
solv.REL_OR,
self.module_dep(name, stream, str(version), op)
)
else:
req_pos = rel_or_dep(req_pos, solv.REL_OR, self.module_dep(name, stream))
# Generate the module(name) solv.Dep.
req = self.module_dep(name)
if req_pos is not None:
req = req.Rel(solv.REL_WITH, req_pos)
# And in the end use AND between the last name:[streams] and the current one.
require = rel_or_dep(require, solv.REL_AND, req)
# There might be multiple dicts in `deps` list, so use OR relation between them.
reqs = rel_or_dep(reqs, solv.REL_OR, require)
return reqs
def _add_base_module_provides(self, solvable, mmd):
"""
Adds the "stream version" and the "virtual_streams" from XMD section of `mmd` to `solvable`.
Base modules like "platform" can contain virtual streams which need to be considered
when resolving dependencies. For example module "platform:el8.1.0" can provide virtual
stream "el8". In this case the solvable will have following additional Provides:
- module(platform:el8.1.0) = 80100 - Modules can require specific platform stream.
- module(platform:el8) = 80100 - Module can also require just platform:el8.
:return: A boolean that is True if a provides for the stream version was added to the input
solvable.
"""
base_stream_ver = False
if mmd.get_module_name() not in conf.base_module_names:
return base_stream_ver
# When depsolving, we will need to follow specific rules to choose the right base
# module, like sorting the base modules sharing the same virtual streams based on
# their "stream version" - For example stream "el8.1" is lower than stream "el8.2"
# and so on. We therefore need to convert the stream and version of base module to
# integer representation and add "module($name:$stream) = $stream_based_version"
# to Provides.
stream_version = models.ModuleBuild.get_stream_version(
mmd.get_stream_name(), right_pad=False)
if stream_version:
base_stream_ver = True
self.solvable_provides(
solvable, mmd.get_module_name(), mmd.get_stream_name(), str(stream_version))
xmd = mmd.get_xmd()
# Return in case virtual_streams are not set for this mmd.
if not xmd.get("mbs", {}).get("virtual_streams"):
return base_stream_ver
version = stream_version or mmd.get_version()
# For each virtual
# === next file: Kaggle notebook for Airbus ship detection (Unet34 evaluation and submission) ===
#!/usr/bin/env python
# coding: utf-8
# ## Overview
# It is a follow-up notebook to "Fine-tuning ResNet34 on ship detection" (https://www.kaggle.com/iafoss/fine-tuning-resnet34-on-ship-detection/notebook) and "Unet34 (dice 0.87+)" (https://www.kaggle.com/iafoss/unet34-dice-0-87/notebook) that shows how to evaluate the solution and submit predictions. Please check these notebooks for additional details.
# In[ ]:
from fastai.conv_learner import *
from fastai.dataset import *
import pandas as pd
import numpy as np
import os
from PIL import Image
from sklearn.model_selection import train_test_split
from tqdm import tnrange, tqdm_notebook
from scipy import ndimage
# In[ ]:
PATH = './'
TRAIN = '../input/airbus-ship-detection/train_v2/'
TEST = '../input/airbus-ship-detection/test_v2/'
SEGMENTATION = '../input/airbus-ship-detection/train_ship_segmentations_v2.csv'
PRETRAINED_DETECTION_PATH = '../input/fine-tuning-resnet34-on-ship-detection/models/'
PRETRAINED_SEGMENTATION_PATH = '../input/unet34-dice-0-87/models/'
DETECTION_TEST_PRED = '../input/fine-tuning-resnet34-on-ship-detection-new-data/ship_detection.csv'
# In[ ]:
nw = 2 #number of workers for data loader
arch = resnet34 #specify target architecture
# ### Data
# In[ ]:
train_names = [f for f in os.listdir(TRAIN)]
test_names = [f for f in os.listdir(TEST)]
#5% of data in the validation set is sufficient for model evaluation
tr_n, val_n = train_test_split(train_names, test_size=0.05, random_state=42)
segmentation_df = pd.read_csv(os.path.join(PATH, SEGMENTATION)).set_index('ImageId')
# As explained in https://www.kaggle.com/iafoss/unet34-dice-0-87/notebook, I drop all images without ships. The model responsible for ship detection will take care of them.
# In[ ]:
def cut_empty(names):
return [name for name in names
if(type(segmentation_df.loc[name]['EncodedPixels']) != float)]
tr_n_cut = cut_empty(tr_n)
val_n_cut = cut_empty(val_n)
# In[ ]:
def get_mask(img_id, df):
shape = (768,768)
img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
masks = df.loc[img_id]['EncodedPixels']
if(type(masks) == float): return img.reshape(shape)
if(type(masks) == str): masks = [masks]
for mask in masks:
s = mask.split()
for i in range(len(s)//2):
start = int(s[2*i]) - 1
length = int(s[2*i+1])
img[start:start+length] = 1
return img.reshape(shape).T
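# Illustrative note on the run-length encoding decoded above: a mask string like "1 3"
# marks flattened pixels 0..2; pixels are numbered top-to-bottom, then left-to-right,
# hence the final reshape(...).T back to image orientation.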
# In[ ]:
class pdFilesDataset(FilesDataset):
def __init__(self, fnames, path, transform):
self.segmentation_df = pd.read_csv(SEGMENTATION).set_index('ImageId')
super().__init__(fnames, transform, path)
def get_x(self, i):
img = open_image(os.path.join(self.path, self.fnames[i]))
if self.sz == 768: return img
else: return cv2.resize(img, (self.sz, self.sz))
def get_y(self, i):
mask = np.zeros((768,768), dtype=np.uint8) if (self.path == TEST) else get_mask(self.fnames[i], self.segmentation_df)
img = Image.fromarray(mask).resize((self.sz, self.sz)).convert('RGB')
return np.array(img).astype(np.float32)
def get_c(self): return 0
# In[ ]:
def get_data(sz,bs):
tfms = tfms_from_model(arch, sz, crop_type=CropType.NO, tfm_y=TfmType.CLASS)
tr_names = tr_n_cut if (len(tr_n_cut)%bs == 0) else tr_n_cut[:-(len(tr_n_cut)%bs)] #cut incomplete batch (use the ship-only list defined above)
ds = ImageData.get_ds(pdFilesDataset, (tr_names,TRAIN),
(val_n_cut,TRAIN), tfms, test=(test_names,TEST))
md = ImageData(PATH, ds, bs, num_workers=nw, classes=None)
return md
# ### Model
# In[ ]:
cut,lr_cut = model_meta[arch]
# In[ ]:
def get_base(pre=True): #load ResNet34 model
layers = cut_model(arch(pre), cut)
return nn.Sequential(*layers)
# In[ ]:
class UnetBlock(nn.Module):
def __init__(self, up_in, x_in, n_out):
super().__init__()
up_out = x_out = n_out//2
self.x_conv = nn.Conv2d(x_in, x_out, 1)
self.tr_conv = nn.ConvTranspose2d(up_in, up_out, 2, stride=2)
self.bn = nn.BatchNorm2d(n_out)
def forward(self, up_p, x_p):
up_p = self.tr_conv(up_p)
x_p = self.x_conv(x_p)
cat_p = torch.cat([up_p,x_p], dim=1)
return self.bn(F.relu(cat_p))
class SaveFeatures():
features=None
def __init__(self, m): self.hook = m.register_forward_hook(self.hook_fn)
def hook_fn(self, module, input, output): self.features = output
def remove(self): self.hook.remove()
class Unet34(nn.Module):
def __init__(self, rn):
super().__init__()
self.rn = rn
self.sfs = [SaveFeatures(rn[i]) for i in [2,4,5,6]]
self.up1 = UnetBlock(512,256,256)
self.up2 = UnetBlock(256,128,256)
self.up3 = UnetBlock(256,64,256)
self.up4 = UnetBlock(256,64,256)
self.up5 = nn.ConvTranspose2d(256, 1, 2, stride=2)
def forward(self,x):
x = F.relu(self.rn(x))
x = self.up1(x, self.sfs[3].features)
x = self.up2(x, self.sfs[2].features)
x = self.up3(x, self.sfs[1].features)
x = self.up4(x, self.sfs[0].features)
x = self.up5(x)
return x[:,0]
def close(self):
for sf in self.sfs: sf.remove()
class UnetModel():
def __init__(self,model,name='Unet'):
self.model,self.name = model,name
def get_layer_groups(self, precompute):
lgs = list(split_by_idxs(children(self.model.rn), [lr_cut]))
return lgs + [children(self.model)[1:]]
# ### Score evaluation
# In[ ]:
def IoU(pred, targs):
pred = (pred > 0.5).astype(float)
intersection = (pred*targs).sum()
return intersection / ((pred+targs).sum() - intersection + 1.0)
# In[ ]:
def get_score(pred, true):
n_th = 10
b = 4
thresholds = [0.5 + 0.05*i for i in range(n_th)]
n_masks = len(true)
n_pred = len(pred)
ious = []
score = 0
for mask in true:
buf = []
for p in pred: buf.append(IoU(p,mask))
ious.append(buf)
for t in thresholds:
tp, fp, fn = 0, 0, 0
for i in range(n_masks):
match = False
for j in range(n_pred):
if ious[i][j] > t: match = True
if not match: fn += 1
for j in range(n_pred):
match = False
for i in range(n_masks):
if ious[i][j] > t: match = True
if match: tp += 1
else: fp += 1
score += ((b+1)*tp)/((b+1)*tp + b*fn + fp)
return score/n_th
# In this competition we should submit an individual mask for each identified ship. The simplest way to do it is splitting the total mask into individual ones based on the connectivity of detected objects.
# In[ ]:
def split_mask(mask):
threshold = 0.5
threshold_obj = 30 #ignore predictions composed of "threshold_obj" pixels or fewer
labled,n_objs = ndimage.label(mask > threshold)
result = []
for i in range(n_objs):
obj = (labled == i + 1).astype(int)
if(obj.sum() > threshold_obj): result.append(obj)
return result
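# Illustrative sketch (hypothetical mask, not from the original notebook):
#   mask = np.zeros((768, 768)); mask[10:60, 10:60] = 1.0; mask[200:260, 300:360] = 1.0
#   len(split_mask(mask))  # -> 2: two connected components, each above threshold_obj pixels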
# In[ ]:
def get_mask_ind(img_id, df, shape = (768,768)): #return mask for each ship
masks = df.loc[img_id]['EncodedPixels']
if(type(masks) == float): return []
if(type(masks) == str): masks = [masks]
result = []
for mask in masks:
img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
s = mask.split()
for i in range(len(s)//2):
start = int(s[2*i]) - 1
length = int(s[2*i+1])
img[start:start+length] = 1
result.append(img.reshape(shape).T)
return result
# In[ ]:
class Score_eval():
def __init__(self):
self.segmentation_df = pd.read_csv(SEGMENTATION).set_index('ImageId')
self.score, self.count = 0.0, 0
def put(self,pred,name):
true = get_mask_ind(name, self.segmentation_df)
self.score += get_score(pred,true)
self.count += 1
def evaluate(self):
return self.score/self.count
# ### TTA
# Define transformations for data augmentation and TTA function (default fast.ai functions do not transform a predicted mask back):
# In[ ]:
def aug_unit(x,fwd=True,mask=False):
return x
def aug_flipV(x,fwd=True,mask=False):
return x.flip(2) if mask else x.flip(3)
def aug_flipH(x,fwd=True,mask=False):
return x.flip(1) if mask else x.flip(2)
def aug_T(x,fwd=True,mask=False):
return torch.transpose(x,1,2) if mask else torch.transpose(x,2,3)
def aug_rot_2(x,fwd=True,mask=False): #rotate pi/2
return aug_flipV(aug_flipH(x,fwd,mask),fwd,mask)
def aug_rot_4cr(x,fwd=True,mask=False): #rotate pi/4 counterclockwise
return aug_flipV(aug_T(x,fwd,mask),fwd,mask) if fwd else aug_T(aug_flipV(x,fwd,mask),fwd,mask)
def aug_rot_4cw(x,fwd=True,mask=False): #rotate pi/4 clockwise
return aug_flipH(aug_T(x,fwd,mask),fwd,mask) if fwd else aug_T(aug_flipH(x,fwd,mask),fwd,mask)
def aug_rot_2T(x,fwd=True,mask=False): #transpose and rotate pi/2
return aug_rot_2(aug_T(x,fwd,mask),fwd,mask)
trms_side_on = [aug_unit,aug_flipH]
trms_top_down = [aug_unit,aug_flipV]
trms_dihedral = [aug_unit,aug_flipH,aug_flipV,aug_T,aug_rot_2,aug_rot_2T,
aug_rot_4cw,aug_rot_4cr]
# In[ ]:
def enc_img(img):
return torch.transpose(torch.tensor(img),0,2).unsqueeze(0)
def dec_img(img):
return to_np(torch.transpose(img.squeeze(0),0,2))
def display_augs(x,augs=aug_unit):
columns = 4
n = len(augs)
rows = n//4 + 1
fig=plt.figure(figsize=(columns*4, rows*4))
img = enc_img(x)
for i in range(rows):
for j in range(columns):
idx = j+i*columns
if idx >= n: break
fig.add_subplot(rows, columns, idx+1)
plt.axis('off')
plt.imshow(dec_img(augs[idx](img)))
plt.show()
img = np.array(Image.open(os.path.join(TRAIN,'ce69faa4b.jpg')))
display_augs(img,trms_dihedral)
# Since the model predicts pixel masks, which are quite large, running the standard prediction functions will fail due to memory issues, especially for the test set, where about 100k 768x768 pixel masks would be created. Therefore, I wrote a function that does prediction batch by batch and applies the F_save function to each generated mask.
# In[ ]:
def model_pred(learner, dl, F_save): #if use train dl, disable shuffling
learner.model.eval();
name_list = dl.dataset.fnames
num_batchs = len(dl)
t = tqdm(iter(dl), leave=False, total=num_batchs)
count = 0
for x,y in t:
py = to_np(torch.sigmoid(learn.model(V(x))))
batch_size = len(py)
for i in range(batch_size):
F_save(py[i],to_np(y[i]),name_list[count])
count += 1
def pred_aug(x,aug=[aug_unit]):
pred = []
for aug_cur in aug:
py = to_np(aug_cur(torch.sigmoid(learn.model(V(aug_cur(x)))),
fwd=False, mask=True))
pred.append(py)
pred = np.stack(pred, axis=0).mean(axis=0)
return pred
#if use train dl, disable shuffling
def model_pred_aug(learner, dl, F_save, aug=[aug_unit]):
learner.model.eval();
name_list = dl.dataset.fnames
num_batchs = len(dl)
t = tqdm(iter(dl), leave=False, total=num_batchs)
count = 0
for x,y in t:
pred = pred_aug(x,aug)
batch_size = len(pred)
for i in range(batch_size):
F_save(pred[i],to_np(y[i]),name_list[count])
count += 1
# ### Prediction
# In[ ]:
m = to_gpu(Unet34(get_base(False)))
models = UnetModel(m)
# In[ ]:
sz = 768 #image size
bs = 8 #batch size
md = get_data(sz,bs)
# In[ ]:
learn = ConvLearner(md, models)
learn.models_path = PRETRAINED_SEGMENTATION_PATH
learn.load('Unet34_768_1')
learn.models_path = PATH
# Running the model evaluation on the validation set.
# In[ ]:
score = Score_eval()
process_pred = lambda yp, y, name : score.put(split_mask(yp),name)
model_pred_aug(learn, md.val_dl, process_pred, trms_dihedral)
print('\n',score.evaluate())
# This is the **score based only on images with ships**; a model responsible for ship detection (accuracy ~98%) takes care of images without ships. Since the fraction of empty images in the test set is 0.52, the expected score of this model stacked with the ship detection one (https://www.kaggle.com/iafoss/fine-tuning-resnet34-on-ship-detection/notebook) is approximately 0.52 + 0.37 x 0.48 = 0.70 (if the new test set is similar to the old training data). However, keep in mind that the evaluated model has been trained for only one epoch on full-resolution images (the dice is only ~0.80 for 768x768 images). Continuing to train the model and postprocessing the masks can boost it further.
# ### Submission
# Load the prediction of ship detection model (https://www.kaggle.com/iafoss/fine-tuning-resnet34-on-ship-detection/notebook) for the test set.
# In[ ]:
ship_detection = pd.read_csv(DETECTION_TEST_PRED)
ship_detection.head()
# Identify images with ships and run Unet34 model only for
# === next file: appears to be TensorFlow Transform's tf_utils.py ===
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF utils for computing information over given data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# GOOGLE-INITIALIZATION
import tensorflow as tf
from tensorflow.contrib.proto.python.ops import encode_proto_op
_FLOATING_NAN = float('nan')
class VocabOrderingType(object):
FREQUENCY = 1
WEIGHTED_FREQUENCY = 2
WEIGHTED_MUTUAL_INFORMATION = 3
def reduce_batch_vocabulary(x, vocab_ordering_type,
weights=None, labels=None):
"""Performs batch-wise reduction of vocabulary.
Args:
x: Input `Tensor` to compute a vocabulary over.
vocab_ordering_type: VocabOrderingType enum.
weights: (Optional) Weights input `Tensor`.
labels: (Optional) Binary labels input `Tensor`.
Returns:
A tuple of 4 `Tensor`s:
* unique values
* summed weights for the unique values when labels and/or weights are
provided, otherwise None.
* sum of positive weights for the unique values when labels are provided,
otherwise None.
* counts of the unique values when labels are provided, otherwise None.
"""
if vocab_ordering_type == VocabOrderingType.FREQUENCY:
# TODO(b/112916494): Always do batch wise reduction once possible.
x = tf.reshape(x, [-1])
return (x, None, None, None)
if vocab_ordering_type == VocabOrderingType.WEIGHTED_MUTUAL_INFORMATION:
tf.assert_type(labels, tf.int64)
x = assert_same_shape(x, labels)
if weights is None:
weights = tf.ones_like(labels)
labels = tf.reshape(labels, [-1])
x = assert_same_shape(x, weights)
weights = tf.reshape(weights, [-1])
x = tf.reshape(x, [-1])
return _reduce_vocabulary_inputs(x, weights, labels)
def _reduce_vocabulary_inputs(x, weights, labels=None):
"""Reduces vocabulary inputs.
Args:
x: Input `Tensor` for vocabulary analyzer.
weights: Weights `Tensor` for vocabulary analyzer.
labels: (optional) Binary Labels `Tensor` for vocabulary analyzer.
Returns:
A tuple of 4 `Tensor`s:
* unique values
* summed weights for the unique values
* sum of positive weights for the unique values when labels are provided,
otherwise None.
* counts of the unique values when labels are provided, otherwise None.
"""
unique = tf.unique_with_counts(x, out_idx=tf.int64)
summed_weights = tf.unsorted_segment_sum(weights, unique.idx,
tf.size(unique.y))
if labels is None:
summed_positive_weights = None
counts = None
else:
less_assert = tf.Assert(tf.less_equal(tf.reduce_max(labels), 1), [labels])
greater_assert = tf.Assert(tf.greater_equal(
tf.reduce_min(labels), 0), [labels])
with tf.control_dependencies([less_assert, greater_assert]):
labels = tf.identity(labels)
positive_weights = (
tf.cast(labels, tf.float32) * tf.cast(weights, tf.float32))
summed_positive_weights = tf.unsorted_segment_sum(
positive_weights, unique.idx, tf.size(unique.y))
counts = unique.count
return (unique.y, summed_weights, summed_positive_weights, counts)
def assert_same_shape(x, y):
"""Asserts two tensors have the same dynamic and static shape.
Args:
x: A `Tensor`.
y: A `Tensor`
Returns:
    The element `x`; the result must be used in order to ensure that the dynamic
    check is executed.
"""
x.shape.assert_is_compatible_with(y.shape)
assert_eq = tf.assert_equal(tf.shape(x), tf.shape(y))
with tf.control_dependencies([assert_eq]):
return tf.identity(x)
def reduce_batch_count(x, reduce_instance_dims):
"""Counts elements in the given tensor.
Args:
x: A `Tensor` or `SparseTensor`.
reduce_instance_dims: A bool, if True - collapses the batch and instance
dimensions to arrive at a single scalar output. Otherwise, only
collapses the batch dimension and outputs a `Tensor` of the same shape
as the input.
Returns:
The element count of `x`. The result is either a scalar if
reduce_instance_dims is True, otherwise a `Tensor` of the same shape as `x`.
"""
if isinstance(x, tf.SparseTensor):
if reduce_instance_dims:
x = x.values
else:
ones_like = tf.SparseTensor(
indices=x.indices,
values=tf.ones_like(x.values, tf.int64),
dense_shape=x.dense_shape)
return tf.sparse_reduce_sum(ones_like, axis=0)
if reduce_instance_dims:
return tf.size(x)
  # Count the non-NaN values along the batch axis; the result has the shape of a
  # single instance (the batch dimension is collapsed).
  return tf.reduce_sum(tf.where(tf.is_nan(x), tf.zeros_like(x), tf.ones_like(x)), axis=0)
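# Hedged usage sketch (TF 1.x graph mode, matching the rest of this module): what
# reduce_batch_count returns for a small dense batch containing a NaN.
# x = tf.constant([[1.0, float('nan')], [3.0, 4.0]])
# per_column = reduce_batch_count(x, reduce_instance_dims=False)  # -> [2., 1.] (NaNs excluded)
# total = reduce_batch_count(x, reduce_instance_dims=True)        # -> 4 (tf.size also counts NaNs)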
def reduce_batch_count_mean_and_var(x, reduce_instance_dims):
"""Computes element count, mean and var for the given tensor.
Args:
x: A `Tensor` or `SparseTensor`.
reduce_instance_dims: A bool, if True - collapses the batch and instance
dimensions to arrive at a single scalar output. Otherwise, only
collapses the batch dimension and outputs a `Tensor` of the same shape
as the input.
Returns:
A 3-tuple containing the `Tensor`s (count, mean, var).
"""
if isinstance(x, tf.SparseTensor) and reduce_instance_dims:
x = x.values
x_count = tf.cast(reduce_batch_count(x, reduce_instance_dims), x.dtype)
reduce_sum_fn = (
tf.sparse_reduce_sum if isinstance(x, tf.SparseTensor) else tf.reduce_sum)
axis = None if reduce_instance_dims else 0
# If we have a dense tensor with nans, do not include in the sum
if not isinstance(x, tf.SparseTensor):
x = tf.where(tf.is_nan(x), tf.zeros_like(x), x)
x_mean = reduce_sum_fn(x, axis=axis) / x_count
if isinstance(x, tf.SparseTensor):
# This means reduce_instance_dims=False.
# TODO(b/112656428): Support SparseTensors with rank other than 2.
if x.get_shape().ndims != 2:
raise NotImplementedError(
'Mean and var only support SparseTensors with rank 2')
mean_values = tf.gather(x_mean, x.indices[:, 1])
x_minus_mean = x.values - mean_values
else:
x_minus_mean = x - x_mean
x_variance = tf.reduce_sum(tf.square(x_minus_mean), axis=axis) / x_count
return (x_count, x_mean, x_variance)
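# Hedged usage sketch: per-column count/mean/variance for a small dense batch
# (graph-mode TF 1.x, as in the rest of this file).
# x = tf.constant([[1.0, 2.0], [3.0, 6.0]])
# count, mean, var = reduce_batch_count_mean_and_var(x, reduce_instance_dims=False)
# count -> [2., 2.], mean -> [2., 4.], var -> [1., 4.]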
# Code for serializing an Example proto
_DEFAULT_VALUE_BY_DTYPE = {
tf.string: '',
tf.float32: 0,
tf.int64: 0
}
def _encode_proto(values_dict, message_type):
"""A wrapper around encode_proto_op.encode_proto."""
field_names = []
sizes = []
values = []
for field_name, value in sorted(values_dict.items(), key=lambda x: x[0]):
if isinstance(value, tf.SparseTensor):
size = tf.sparse_reduce_sum(
tf.SparseTensor(
value.indices,
tf.ones_like(value.values, dtype=tf.int32),
value.dense_shape),
axis=1)
value = tf.sparse_tensor_to_dense(
value, _DEFAULT_VALUE_BY_DTYPE[value.dtype])
else:
value = tf.reshape(value, [tf.shape(value)[0], -1])
size = tf.fill((tf.shape(value)[0],), tf.shape(value)[1])
field_names.append(field_name)
values.append(value)
sizes.append(size)
sizes = tf.stack(sizes, axis=1)
return encode_proto_op.encode_proto(sizes, values, field_names, message_type)
def _serialize_feature(values):
"""Serialize a Tensor or SparseTensor as `Feature` protos.
`values` should be a Tensor of rank >=1 or SparseTensor of rank 2. We will
refer to the size of the first dimension as batch_size.
This function encodes each row of the `Tensor` as a list of values (flattening
the other dimensions) and each row of the `SparseTensor` as a list of values,
where the indices within each row are ignored and assumed to be 0, 1, ....
Args:
values: A `Tensor` or `SparseTensor`.
Returns:
A tensor of shape (batch_size,) and type `tf.string` where each element is
a serialized `Feature` proto.
Raises:
ValueError: If the dtype is of `values` is not `tf.string`, `tf.float32`
or `tf.int64`.
"""
values = tf.convert_to_tensor_or_sparse_tensor(values)
if values.dtype == tf.string:
values_dict = {
'bytes_list': _encode_proto({'value': values}, 'tensorflow.BytesList')
}
elif values.dtype == tf.float32:
values_dict = {
'float_list': _encode_proto({'value': values}, 'tensorflow.FloatList')
}
elif values.dtype == tf.int64:
values_dict = {
'int64_list': _encode_proto({'value': values}, 'tensorflow.Int64List')
}
else:
raise ValueError('Cannot encode values of dtype {}'.format(values.dtype))
return _encode_proto(values_dict, 'tensorflow.Feature')
def serialize_example(features):
"""Serialized a dict of `Tensor` or `SparseTensor`s as example protos.
`features` should be a dict where each value is a Tensor of rank >=1 or
SparseTensor of rank 2. The sizes of the first dimension of each value should
be the same, and we refer to this size as batch_size.
Args:
features: A dictionary whose values are `Tensor`s or `SparseTensor`s.
Returns:
A tensor of shape (batch_size,) and type `tf.string` where each element is
a serialized `Example` proto.
"""
features_dict = []
for key, value in sorted(features.items(), key=lambda x: x[0]):
serialized_value = _serialize_feature(value)
features_dict.append(_encode_proto(
{
'key': tf.fill((tf.shape(serialized_value)[0],), key),
'value': serialized_value,
},
'tensorflow.Features.FeatureEntry'))
features_dict = tf.stack(features_dict, axis=1)
features = _encode_proto({'feature': features_dict}, 'tensorflow.Features')
return _encode_proto({'features': features}, 'tensorflow.Example')
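# Hedged usage sketch of serialize_example; the feature names are illustrative.
# features = {
#     'age': tf.constant([[29], [41]], dtype=tf.int64),
#     'name': tf.constant([['alice'], ['bob']]),
# }
# serialized = serialize_example(features)  # shape (2,) tf.string, one Example per row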
def _sparse_minus_reduce_min_and_reduce_max(x):
"""Computes the -min and max of a SparseTensor x.
It differs from sparse_reduce_max in that sparse_reduce_max returns 0 when all
elements are missing along axis 0.
We replace the 0 with NaN when x's dtype is float and dtype.min+1 when it's
int.
Args:
x: A `SparseTensor`.
Returns:
    Two `Tensor`s which are the -min and max.
Raises:
TypeError: If the type of `x` is not supported.
"""
if not isinstance(x, tf.SparseTensor):
raise TypeError('Expected a SparseTensor, but got %r' % x)
minus_x = tf.SparseTensor(
indices=x.indices, values=0 - x.values, dense_shape=x.dense_shape)
x_count = reduce_batch_count(x, reduce_instance_dims=False)
batch_has_no_values = tf.equal(x_count, tf.constant(0, dtype=tf.int64))
x_batch_max = tf.sparse_reduce_max(x, axis=0)
x_batch_minus_min = tf.sparse_reduce_max(minus_x, axis=0)
if x.dtype.is_floating:
missing_value = tf.constant(_FLOATING_NAN, x.dtype)
else:
missing_value = tf.constant(x.dtype.min + 1, x.dtype)
x_batch_max = tf.where(batch_has_no_values,
tf.fill(tf.shape(x_batch_max), missing_value),
x_batch_max)
x_batch_minus_min = tf.where(
batch_has_no_values, tf.fill(tf.shape(x_batch_minus_min), missing_value),
x_batch_minus_min)
return x_batch_minus_min, x_batch_max
def _inf_to_nan(tensor, output_dtype):
if tensor.dtype.is_floating:
nan = tf.constant(_FLOATING_NAN, output_dtype)
return tf.where(tf.is_inf(tensor), tensor + nan, tensor)
return tensor
def reduce_batch_minus_min_and_max(x, reduce_instance_dims):
"""Computes the -min and max of a tensor x.
Args:
x: A `tf.Tensor`.
reduce_instance_dims: A bool indicating whether this should collapse the
batch and instance dimensions to arrive at a single scalar output, or only
collapse the batch dimension and outputs a vector of the same shape as the
input.
Returns:
    The computed `tf.Tensor`s of -min and max.
def solvationEntry(request, section, subsection, index):
"""
A view for showing an entry in a solvation database.
"""
# Load the solvation database
database.load('solvation', section)
# Determine the entry we wish to view
try:
db = database.get_solvation_database(section, subsection)
except ValueError:
raise Http404
index = int(index)
for entry in db.entries.values():
if entry.index == index:
break
else:
raise Http404
# Get the structures of the item we are viewing
structures = []
if type(entry.item) is list: # the case for solvents
for structure in entry.item:
structures.append(getStructureInfo(structure))
else: # single values for solutes
structures.append(getStructureInfo(entry.item))
# Prepare the solvation data for passing to the template. This includes all string formatting,
# since we can't do that in the template.
    # Case 1. A solvation group uses the values of another group. In this case, get the href link of that group.
if isinstance(entry.data, str):
lib_index = db.entries[entry.data].index
href = reverse('database:solvation-entry',
kwargs={'section': section, 'subsection': subsection, 'index': lib_index})
solvation = ['Link', href, entry.data]
# Case 2. A solvation group has empty data because it is a general group that doesn't need the group value.
# Returns None for this
elif entry.data is None:
solvation = None
# Case 3. The entry has an actual solute or solvent data. Returns the entry.
else:
solvation = entry
reference_type = ''
reference = entry.reference
return render(request, 'solvationEntry.html',
{'section': section, 'subsection': subsection,
'databaseName': db.name, 'entry': entry,
'structures': structures, 'reference': reference,
'referenceType': reference_type, 'solvation': solvation})
def get_solvation_from_DirectML(pair_smiles, error_msg, dGsolv_required, dHsolv_required, calc_dSsolv, energy_unit):
"""
Calculate solvation free energy, enthalpy, and entropy using the DirectML model. Corresponding
epistemic uncertainties and error message are also returned. All values are returned in the given energy unit.
"""
dGsolv298 = None
dGsolv298_epi_unc = None
dHsolv298 = None
dHsolv298_epi_unc = None
dSsolv298 = None
if dGsolv_required:
try:
avg_pre, epi_unc, valid_indices = dGsolv_estimator(pair_smiles) # default is in kcal/mol
dGsolv298, dGsolv298_epi_unc = avg_pre[0], epi_unc[0]
except:
error_msg = update_error_msg(error_msg, 'Unable to parse the SMILES', overwrite=True)
if dHsolv_required:
try:
avg_pre, epi_unc, valid_indices = dHsolv_estimator(pair_smiles) # default is in kcal/mol
dHsolv298, dHsolv298_epi_unc = avg_pre[0], epi_unc[0]
except:
error_msg = update_error_msg(error_msg, 'Unable to parse the SMILES', overwrite=True)
if calc_dSsolv and dGsolv298 is not None and dHsolv298 is not None:
dSsolv298 = (dHsolv298 - dGsolv298) / 298 # default is in kcal/mol/K
solvation_val_list = []
for val in [dGsolv298, dGsolv298_epi_unc, dHsolv298, dHsolv298_epi_unc, dSsolv298]:
if val is not None:
val = convert_energy_unit(val, 'kcal/mol', energy_unit)
solvation_val_list.append(val)
return solvation_val_list, error_msg
def get_solvation_data_ML(solvent_solute_smiles, calc_dGsolv, calc_dHsolv, calc_dSsolv,
calc_logK, calc_logP, energy_unit):
"""
Returns a dictionary with solvation property data for a given list of solvent-solute SMILES.
"""
solvent_solute_smiles_list = solvent_solute_smiles.split()
dGsolv_required = any([calc_dGsolv, calc_dSsolv, calc_logK, calc_logP]) # whether or not dGsolv calculation is needed
dHsolv_required = any([calc_dHsolv, calc_dSsolv]) # whether or not dHsolv calculation is needed
# Prepare an empty result dictionary
solvation_data_results = {}
results_col_name_list = ['Input', 'solvent SMILES', 'solute SMILES', 'Error', f'dGsolv298({energy_unit})',
f'dHsolv298({energy_unit})', f'dSsolv298({energy_unit}/K)', 'logK', 'logP',
f'dGsolv298 epi.unc.({energy_unit})', f'dHsolv298 epi.unc.({energy_unit})']
for col_name in results_col_name_list:
solvation_data_results[col_name] = []
# get predictions for each given solvent_solute SMILES
for solvent_solute in solvent_solute_smiles_list:
# initialization
solvent_smiles, solute_smiles, error_msg, logK, logP = None, None, None, None, None
dGsolv298, dGsolv298_epi_unc, dHsolv298, dHsolv298_epi_unc, dSsolv298 = None, None, None, None, None
pair_list = solvent_solute.split('_')
if not len(pair_list) == 2:
error_msg = update_error_msg(error_msg, 'Unable to process the input')
else:
solvent_smiles = pair_list[0]
solute_smiles = pair_list[1]
pair_smiles = [[solvent_smiles, solute_smiles]]
# get dGsolv, dHsolv, dSsolv calculation
[dGsolv298, dGsolv298_epi_unc, dHsolv298, dHsolv298_epi_unc, dSsolv298], error_msg = \
get_solvation_from_DirectML(pair_smiles, error_msg, dGsolv_required, dHsolv_required, calc_dSsolv, 'J/mol')
# get logK calculation
if calc_logK and dGsolv298 is not None:
logK = -dGsolv298 / (math.log(10) * constants.R * 298)
logK = clean_up_value(logK, deci_place=2, only_big=True)
# get logP calculation
if calc_logP and dGsolv298 is not None:
if solvent_smiles == 'O':
logP = 0
else:
try:
avg_pre, epi_unc, valid_indices = dGsolv_estimator([['O', solute_smiles]])
dGsolv298_water = convert_energy_unit(avg_pre[0], 'kcal/mol', 'J/mol')
logP = -(dGsolv298 - dGsolv298_water) / (math.log(10) * constants.R * 298)
logP = clean_up_value(logP, deci_place=2, only_big=True)
except:
# this error is very unlikely to happen, but it's added as a safety net
error_msg = update_error_msg(error_msg, 'Unable to parse the SMILES')
# append the results.
result_val_list = [solvent_solute, solvent_smiles, solute_smiles, error_msg, dGsolv298, dHsolv298, dSsolv298,
logK, logP, dGsolv298_epi_unc, dHsolv298_epi_unc]
for key, val in zip(results_col_name_list, result_val_list):
# Convert to the input energy unit and round to appropriate decimal places for solvation properties.
if energy_unit in key and val is not None:
val = convert_energy_unit(val, 'J/mol', energy_unit)
if 'dSsolv' in key:
val = clean_up_value(val, deci_place=2, sig_fig=2)
else:
val = clean_up_value(val, deci_place=2, sig_fig=2, only_big=True)
solvation_data_results[key].append(val)
# drop unnecessary dictionary keys.
remove_list = []
calc_val_key_tup_list = [(calc_dGsolv, [f'dGsolv298({energy_unit})', f'dGsolv298 epi.unc.({energy_unit})']),
(calc_dHsolv, [f'dHsolv298({energy_unit})', f'dHsolv298 epi.unc.({energy_unit})']),
(calc_dSsolv, [f'dSsolv298({energy_unit}/K)']), (calc_logK, ['logK']), (calc_logP, ['logP'])]
for calc_val, key in calc_val_key_tup_list:
if calc_val is False:
remove_list += key
[solvation_data_results.pop(key) for key in remove_list]
# add explanation about epistemic error if needed.
additional_info_list = []
if calc_dGsolv or calc_dHsolv:
additional_info_list += ['epi. unc.: epistemic uncertainty of the DirectML model.']
solvation_data_results = parse_none_results(solvation_data_results)
return solvation_data_results, additional_info_list
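# Hedged usage sketch (illustrative SMILES): dGsolv and logK for ethanol in water.
# The input format is 'solventSMILES_soluteSMILES', whitespace-separated for multiple pairs.
# results, notes = get_solvation_data_ML('O_CCO', calc_dGsolv=True, calc_dHsolv=False,
#                                        calc_dSsolv=False, calc_logK=True, calc_logP=False,
#                                        energy_unit='kcal/mol')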
def solvationDataML(request, solvent_solute_smiles, calc_dGsolv, calc_dHsolv, calc_dSsolv,
calc_logK, calc_logP, energy_unit):
"""
Returns a pandas html table with the given solvation data results.
It also provides a downloadable excel file with results if the excel export button is clicked.
"""
# Create a downloadable excel file from the html_table that is passed as a hidden input.
# By passing the html_table, the calculation does not get repeated.
if request.method == 'POST' and 'excel' in request.POST:
input_html_table = request.POST.get('html_table')
df_results = pd.read_html(input_html_table)
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
df_results[0].to_excel(writer, index=False)
writer.save()
        # Important: rewind the buffer; otherwise read() returns nothing and Excel
        # will complain about a zero-length file.
output.seek(0)
# set the mime type so that the browser knows what to do with the file
response = HttpResponse(output.read(),
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
# set the file name in the Content-Disposition header
response['Content-Disposition'] = 'attachment; filename=SolvationResults.xlsx'
return response
# Get the solvation data results. Make sure to convert the string booleans to actual booleans.
solvation_data_results, additional_info_list = get_solvation_data_ML(solvent_solute_smiles, calc_dGsolv=='True',
calc_dHsolv=='True', calc_dSsolv=='True', calc_logK=='True', calc_logP=='True', energy_unit)
# convert the results to pandas data frame and html table
df_results = pd.DataFrame(solvation_data_results)
html_table = df_results.to_html(index=False)
return render(request, 'solvationDataML.html',
{'html_table': html_table,
'additionalInfoList': additional_info_list},
)
def check_avail_temp_dep_solvent(solvent_smiles_input, allowed_solvent_dict, error_msg):
"""
Checks whether the given `solvent_smiles_input` is available in CoolProp for the temperature dependent calculation
and returns its CoolProp name. It also returns the solvent's SMILES generated from rdkit that will be used
as the unique solvent key and the corresponding error message.
"""
try:
solvent_smiles = Chem.CanonSmiles(solvent_smiles_input)
if solvent_smiles in allowed_solvent_dict:
solvent_name = allowed_solvent_dict[solvent_smiles]
return True, solvent_smiles, solvent_name, error_msg
else:
error_msg = update_error_msg(error_msg, 'Unsupported solvent')
return False, solvent_smiles, None, error_msg
except:
error_msg = update_error_msg(error_msg, 'Unable to parse the solvent SMILES')
return False, None, None, error_msg
def parse_solute_smiles_using_rdkit(solute_smiles_input, error_msg):
"""
Check whether the given `solute_smiles_input` can be parsed correctly using rdkit and returns its unique
SMILES key and corresponding error message.
"""
try:
solute_smiles = Chem.CanonSmiles(solute_smiles_input)
return True, solute_smiles, error_msg
except:
if isinstance(error_msg, str) and 'Unable to parse the solvent SMILES' in error_msg:
error_msg = update_error_msg(error_msg, 'Unable to parse the solvent SMILES and solute SMILES',
overwrite=True)
else:
error_msg = update_error_msg(error_msg, 'Unable to parse the solute SMILES')
return False, None, error_msg
def parse_temp_input(temp, temp_unit, error_msg):
"""
Check whether the given `temp` is float and return the temperature in Kelvin and corresponding error message.
"""
try:
temp = float(temp)
if temp_unit == 'K':
temp_SI = temp
else: # then it's in degree Celcius
temp_SI = temp + 273.15
return temp_SI, error_msg
except:
error_msg = update_error_msg(error_msg, 'Incorrect input type for temperature')
return None, error_msg
def check_avail_temp(temp_SI, solvent_supported, solvent_name, error_msg):
"""
Check whether the given temperature `temp_SI` is in the valid range for CoolProp calculation for the given solvent
`solvent_name`. It returns the solvent's vapor pressure `Pvap` in Pa and the corresponding error message.
"""
if temp_SI is not None and solvent_supported is True:
try:
            rho_g = PropsSI('Dmolar', 'T', temp_SI, 'Q', 1, solvent_name)  # saturated-vapor molar density (mol/m^3)
# nca47/agent/firewall_driver/fw_driver.py
from oslo_serialization import jsonutils as json
from oslo_config import cfg
from oslo_log import log as logging
from nca47.agent.firewall_driver import soap_client
from nca47.api.controllers.v1 import tools
from nca47.common.exception import DerviceError as derviceError
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
FW_DRIVER = None
class fw_driver(object):
def __init__(self):
self.ws_client = soap_client.fw_client.get_instance()
@classmethod
def get_instance(cls):
global FW_DRIVER
if not FW_DRIVER:
FW_DRIVER = cls()
return FW_DRIVER
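    # Hedged usage sketch: the driver is meant to be used as a process-wide singleton.
    # The dict keys mirror the ones read by create_vlan below; the values are illustrative.
    # driver = fw_driver.get_instance()
    # driver.create_vlan(context, {'vlan_id_o': 100, 'ipaddr': ['10.0.0.1'], 'ifnames': ['eth0']})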
def create_vlan(self, context, vlan_infos):
""" creat vlan to webservice """
vlanId = vlan_infos["vlan_id_o"]
ipAddr = tools.joinString(vlan_infos["ipaddr"])
ifNames = tools.joinString(vlan_infos["ifnames"])
url_dir = "/func/web_main/wsdl/vlan/vlan.wsdl"
LOG.info("creat vlan to webservice: " + url_dir)
service = self.ws_client.get_client(url_dir)
vlan_dic = {}
vlan_dic['vlanId'] = vlanId
vlan_dic['ipAddr'] = ipAddr
vlan_dic['ifNames'] = ifNames
try:
response = service.addVlan(**vlan_dic)
except Exception as e:
raise derviceError
# TODO zhuxy return , print only for test
return response
def del_vlan(self, context, view, dic):
""" del vlan to webservice """
vlanId = dic["vlan_id_o"]
ifNames = dic["ifnames"]
ws_ip = view['agent_nat_ip']
other_ip = "/func/web_main/wsdl/vlan/vlan.wsdl"
url = "%s%s" % (ws_ip, other_ip)
LOG.info("del vlan to webservice: " + url)
client = self.ws_client.get_client(url)
vlan_dic = {}
vlan_dic['vlanId'] = vlanId
vlan_dic['ifNames'] = ifNames
response = client.service.delVlan(vlanId, ifNames)
# TODO zhuxy return , print only for test
print json.loads(response)
def get_dev_vlan(self, context, view, dic):
""" get a vlan to webservice """
vlanId = dic["vlan_id"]
ws_ip = view['agent_nat_ip']
other_ip = "/func/web_main/wsdl/vlan/vlan.wsdl"
url = "%s%s" % (ws_ip, other_ip)
LOG.info("get a vlan to webservice: " + url)
client = self.ws_client.get_client(url)
response = client.service.getVlan(vlanId)
# TODO zhuxy return , print only for test
print json.loads(response)
def get_dev_vlans(self, context, view, dic):
""" get vlans to webservice """
ws_ip = view['agent_nat_ip']
other_ip = "/func/web_main/wsdl/vlan/vlan.wsdl"
url = "%s%s" % (ws_ip, other_ip)
LOG.info("get vlans to webservice: " + url)
client = self.ws_client.get_client(url)
response = client.service.getVlanAll()
# TODO zhuxy return , print only for test
print json.loads(response)
# this is a netservice operation
def creat_netservice(self, context, view, dic):
""" creat netservice to webservice """
ws_ip = view['agent_nat_ip']
name = dic["name"]
proto = dic["proto"]
port = dic["port"]
vfwName = dic["vfwname"]
other_ip = "/func/web_main/wsdl/netservice/netservice.wsdl"
url = "%s%s" % (ws_ip, other_ip)
LOG.info("creat netservice to webservice: " + url)
client = self.ws_client.get_client(url, name, proto, port, vfwName)
response = client.service.getVlanAll()
# TODO zhuxy return , print only for test
print json.loads(response)
def del_netservice(self, context, view, dic):
""" delete netservice to webservice """
ws_ip = view['agent_nat_ip']
name = dic["name"]
vfwName = dic["vfwname"]
other_ip = "/func/web_main/wsdl/netservice/netservice.wsdl"
url = "%s%s" % (ws_ip, other_ip)
LOG.info("delete netservice to webservice: " + url)
client = self.ws_client.get_client(url)
response = client.service.delService(name, vfwName)
# TODO zhuxy return , print only for test
print json.loads(response)
def get_dev_netservice(self, context, view, dic):
""" get a netservice to webservice """
ws_ip = view['agent_nat_ip']
name = dic["name"]
vfwName = dic["vfwname"]
other_ip = "/func/web_main/wsdl/netservice/netservice.wsdl"
url = "%s%s" % (ws_ip, other_ip)
LOG.info("get a netservice to webservice: " + url)
client = self.ws_client.get_client(url)
response = client.service.getService(name, vfwName)
# TODO zhuxy return , print only for test
print json.loads(response)
def get_dev_netservices(self, context, view, dic):
""" get all netservices to webservice """
ws_ip = view['agent_nat_ip']
vfwName = dic["vfwname"]
other_ip = "/func/web_main/wsdl/netservice/netservice.wsdl"
url = "%s%s" % (ws_ip, other_ip)
LOG.info("get all netservices to webservice: " + url)
client = self.ws_client.get_client(url)
response = client.service.getServiceAll(vfwName)
# TODO zhuxy return , print only for test
print json.loads(response)
# this is a addrobj operation
def add_addrobj(self, context, view, dic):
""" create addrobj to webservice """
ws_ip = view['agent_nat_ip']
name = dic["name"]
ip = dic["ip"]
vfwName = dic["vfwname"]
expIp = dic["ip"]
other_ip = "/func/web_main/wsdl/netaddr/netaddr.wsdl"
url = "%s%s" % (ws_ip, other_ip)
LOG.info("create addrobj to webservice: " + url)
client = self.ws_client.get_client(url)
response = client.service.addAddrObj(name, ip, expIp, vfwName)
# TODO return , print only for test
print json.loads(response)
def del_addrobj(self, context, view, dic):
""" delete addrobj to webservice """
ws_ip = view['agent_nat_ip']
name = dic["name"]
vfwName = dic["vfwname"]
other_ip = "/func/web_main/wsdl/netaddr/netaddr.wsdl"
url = "%s%s" % (ws_ip, other_ip)
LOG.info("delete addrobj to webservice: " + url)
client = self.ws_client.get_client(url)
response = client.service.delAddrObj(name, vfwName)
# TODO return , print only for test
print json.loads(response)
def get_dev_addrobj(self, context, view, dic):
""" get a addrobj to webservice """
ws_ip = view['agent_nat_ip']
name = dic["name"]
vfwName = dic["vfwname"]
other_ip = "/func/web_main/wsdl/netaddr/netaddr.wsdl"
url = "%s%s" % (ws_ip, other_ip)
LOG.info("get a addrobj to webservice: " + url)
client = self.ws_client.get_client(url)
response = client.service.getAddrObj(name, vfwName)
# TODO return , print only for test
print json.loads(response)
def get_dev_addrobjs(self, context, view, dic):
""" get a addrobj to webservice """
ws_ip = view['agent_nat_ip']
vfwName = dic["vfwname"]
other_ip = "/func/web_main/wsdl/netaddr/netaddr.wsdl"
url = "%s%s" % (ws_ip, other_ip)
LOG.info("get a addrobj to webservice: " + url)
client = self.ws_client.get_client(url)
response = client.service.getAddrObjAll(vfwName)
# TODO return , print only for test
print json.loads(response)
def create_packetfilter(self, context, packet_info_dict, agent_info_dict):
"""create packetfilter"""
url = agent_info_dict['agent_ip']
url += '/func/web_main/wsdl/pf_policy/pf_policys/pf_policys.wsdl'
trans_info_dict = {
'name': '',
'srcZoneName': '',
'dstZoneName': "",
"srcIpObjNames": '',
'dstIpObjNames': "",
'serviceNames': '',
'action': '',
'log': '',
'vfwName': ''
}
for key in trans_info_dict.keys():
if key.lower() in packet_info_dict.keys():
trans_info_dict[key] = str(packet_info_dict[key.lower()])
client = self.ws_client.get_client(url)
LOG.info("create fw_packetfilter:" + url)
ret = client.addPacketFilter(**packet_info_dict)
if ret == 0:
return 0
else:
return 'soap fault'
def delete_packetfilter(self, context, packet_info_dict, agent_info_dict):
"""delete packetfilter"""
url = agent_info_dict['agent_ip']
url += '/func/web_main/webservice/pf_policy/pf_policy/pf_policy'
client = self.ws_client.get_client(url)
LOG.info("delete fw_packetfilter:" + url)
trans_info_dict = {
'name': '',
'vfwName': ''
}
for key in trans_info_dict.keys():
if key.lower() in packet_info_dict.keys():
trans_info_dict[key] = str(packet_info_dict[key.lower()])
ret = client.delPacketFilter(**packet_info_dict)
if ret == 0:
return 0
else:
return 'soap fault'
def get_dev_packetfilter(self, context, packet_info_dict, agent_info_dict):
"""get packetfilter"""
url = agent_info_dict['agent_ip']
url += '/func/web_main/webservice/security_zone/security_zone'
LOG.info("get fw_SecurityZone:" + url)
trans_info_dict = {
'name': '',
'vfwName': ''
}
for key in trans_info_dict.keys():
if key.lower() in packet_info_dict.keys():
trans_info_dict[key] = packet_info_dict[key.lower()]
client = self.ws_client.get_client(url)
ret = client.getZone(**packet_info_dict)
if ret == 0:
return 0
else:
return 'soap fault'
def getall_dev_packetfilter(self, context,
packet_info_dict, agent_info_dict):
"""GetAll packetfilter"""
url = agent_info_dict['agent_ip']
url += '/func/web_main/webservice/security_zone/security_zone'
LOG.info("getall fw_SecurityZone:" + url)
client = self.ws_client.get_client(url)
trans_info_dict = {
'name': '',
}
for key in trans_info_dict.keys():
if key.lower() in packet_info_dict.keys():
trans_info_dict[key] = packet_info_dict[key.lower()]
ret = client.getZoneAll(**packet_info_dict)
if ret == 0:
return 0
else:
return 'soap fault'
def create_securityzone(self, context, zone_info_dict, agent_info_dict):
"""create securityZone"""
url = agent_info_dict['agent_ip']
url += '/func/web_main/webservice/security_zone/security_zone'
LOG.info("create fw_SecurityZone:" + url)
client = self.ws_client.get_client(url)
trans_info_dict = {
'name': '',
'ifNames': '',
'priority': '',
'vfwName': '',
}
for key in trans_info_dict.keys():
if key.lower() in zone_info_dict.keys():
trans_info_dict[key] = str(zone_info_dict[key.lower()])
ret = client.addZone(**zone_info_dict)
if ret == 0:
return 0
else:
return 'soap fault'
def delete_securityzone(self, context, zone_info_dict, agent_info_dict):
"""delete SecurityZone"""
url = agent_info_dict['agent_ip']
url += '/func/web_main/webservice/security_zone/security_zone'
LOG.info("delete fw_SecurityZone:" + url)
client = self.ws_client.get_client(url)
trans_info_dict = {
'id': '',
'tenant_id': '',
'dc_name': '',
'network_zone': '',
}
for key in trans_info_dict.keys():
if key.lower() in zone_info_dict.keys():
trans_info_dict[key] = str(zone_info_dict[key.lower()])
ret = client.delZone(**zone_info_dict)
if ret == 0:
return 0
else:
return 'soap fault'
def get_dev_securityzone(self, context, zone_info_dict, agent_info_dict):
"""get SecurityZone if """
url = agent_info_dict['agent_ip']
url += '/func/web_main/webservice/security_zone/security_zone'
LOG.info("get fw_SecurityZone:" + url)
trans_info_dict = {
'name': '',
'vfwName': ''
}
for key in trans_info_dict.keys():
if key.lower() in zone_info_dict.keys():
trans_info_dict[key] = zone_info_dict[key.lower()]
client = self.ws_client.get_client(url)
ret = client.addZoneIf(**zone_info_dict)
if ret == 0:
return 0
else:
return 'soap fault'
def getall_dev_securityzone(
self,
context,
zone_info_dict,
agent_info_dict):
"""GetAll SecurityZone"""
url = agent_info_dict['agent_ip']
url += '/func/web_main/webservice/security_zone/security_zone'
LOG.info("getall fw_SecurityZone:" + url)
client = self.ws_client.get_client(url)
trans_info_dict = {
'name': '',
}
for key in trans_info_dict.keys():
if key.lower() in zone_info_dict.keys():
trans_info_dict[key] = zone_info_dict[key.lower()]
ret = client.getZoneAll(**zone_info_dict)
if ret == 0:
return 0
else:
return 'soap fault'
def securityzone_addif(self, context, zone_info_dict, agent_info_dict):
"""GetAll SecurityZone"""
url = agent_info_dict['agent_ip']
url += '/func/web_main/webservice/security_zone/security_zone'
LOG.info("addif fw_SecurityZone:" + url)
client = self.ws_client.get_client(url)
trans_info_dict = {
'ifName': '',
'zoneName': '',
'vfwName': ''
}
for key in trans_info_dict.keys():
if key.lower() in zone_info_dict.keys():
trans_info_dict[key] = zone_info_dict[key.lower()]
ret = client.addZoneIf(**zone_info_dict)
if ret == 0:
return 0
else:
return 'soap fault'
def securityzone_delif(self, context, zone_info_dict, agent_info_dict):
"""GetAll SecurityZone"""
url = agent_info_dict['agent_ip']
url += '/func/web_main/webservice/security_zone/security_zone'
LOG.info("delif fw_SecurityZone:" + url)
client = self.ws_client.get_client(url)
trans_info_dict = {
'ifName': '',
}
for key in trans_info_dict.keys():
if key.lower() in zone_info_dict.keys():
trans_info_dict[key] = zone_info_dict[key.lower()]
ret = client.delZoneIf(**zone_info_dict)
if ret == 0:
return 0
else:
            return 'soap fault'
"""
Module containing the grid definition and everything needed for it.
"""
from __future__ import annotations
from typing import Union, List, Tuple
import pygame
from pyggui.gui.item import StaticItem
class Cell(StaticItem):
"""
    Class representing a single rectangle in the grid, placed at position (i, j), with the i-th row's height
    and the j-th column's width. Items can be added to it, aligned and padded.
"""
def __init__(
self,
grid: Grid,
position_in_grid: Tuple,
position: List[int] = [0, 0],
size: Tuple[int, int] = (1, 1),
):
"""
Args:
position (List[int] = [0, 0]): Position to place item on screen (or on page).
size (Tuple[int, int] = (1, 1)): Size of item.
visible (bool): If item is currently visible.
selected (bool): If item is currently selected.
"""
super().__init__(position, size, False, False)
self.grid = grid
self.position_in_grid = position_in_grid
# Possible alignments
self.alignments = {
"left": self._left,
"right": self._right,
"top": self._top,
"bottom": self._bottom,
"centre": self._centre,
None: self._centre
}
# Possible paddings
self._padding = {
"top": 0,
"bottom": 0,
"left": 0,
"right": 0
}
@property
def padding(self):
return self._padding
@padding.setter
def padding(self, padding):
# TODO: Padding for whole cell, also to add is alignment for whole cell
pass
def _left(self, item: any) -> None:
"""
Method aligns item to the left side of cell.
"""
item.position = (self.position[0], item.position[1])
def _right(self, item: any) -> None:
"""
Method aligns item to the right side of cell.
"""
# Set right cell border to match item right side
diff = (self.width - item.width) if self.width > item.width else 0
# Set new x position
item.position = (self.position[0] + diff, item.position[1])
def _top(self, item: any) -> None:
"""
Method aligns item to the top side of cell.
"""
# Set top borders to match
item.position = (item.position[0], self.position[1])
def _bottom(self, item: any) -> None:
"""
Method aligns item to the bottom side of cell.
"""
# Set bottom cell border to match item bottom
diff = (self.height - item.height) if self.height > item.height else 0
item.position = (item.position[0], self.position[1] + diff)
def _centre(self, item: any) -> None:
"""
Method aligns item so its centre matches the cells centre.
"""
# Item centre is at cell centre
centered_x = self.position[0] + ((self.width - item.width) // 2)
centered_y = self.position[1] + ((self.height - item.height) // 2)
item.position = (centered_x, centered_y)
def __pad(self, item: any, padding: str, value: int) -> None:
"""
Method adds padding to item based on cell position and size.
Args:
item (any): Item to pad.
padding (str): Padding type (top, bottom, left, right).
value (int): Number of px to pad.
"""
# TODO: Make padding not move items if there is already enough space
if padding in self.padding.keys():
if padding == "top":
item.y += value
elif padding == "bottom":
item.y -= value
elif padding == "left":
item.x += value
elif padding == "right":
item.x -= value
def add_item(self, item: any, align: str = None, padding: str = None) -> None:
"""
Method adds item to cell, aligns and pads it base on passed values.
Args:
item (any): Item to add.
align (str): String defining alignment type. Multiple alignments are separated by a space character.
Example: alignment = "centre top" # Centre should always be first.
padding (str): String defining padding of item. Multiple alignments are separated by a comma. Value is
passed next to the alignment position as an integer value.
Example: padding = "top 5, left 3" # 5px from top 3px from bottom
"""
self.items.append(item) # Add item to item list
self.alignments["centre"](item) # Align item into centre initially so it moves it into cell
# Handle alignment
if align:
            for align in align.split(" "):
if align in self.alignments:
self.alignments[align](item) # Align item in set way
else:
self.alignments[align](item) # Default alignment for None is centre
# Handle padding
if padding:
for pad in padding.split(","): # Go over each padding
_pad = pad.strip() # Remove whitespace around
_pad = _pad.split(" ")
key, value = _pad[0], int(_pad[1]) # Todo add exception handling
self.__pad(item, padding=key, value=value)
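    # Hedged usage sketch of the align/padding string format documented above;
    # `button` stands for any item exposing position, width and height.
    # cell.add_item(button, align="centre top", padding="top 5, left 3")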
def update(self):
for item in self.items:
item.update()
def draw(self, visible: bool = False):
if visible: # Only draw if grid is visible
pygame.draw.rect(
self.display,
color=(0, 0, 0),
rect=self.rect,
width=0 # Fill this one
)
pygame.draw.rect(
self.display,
color=(255, 255, 255),
rect=self.rect,
width=2
)
for item in self.items:
item.draw()
class Row:
"""
Single row in Grid, is only used for grabbing items using indexing with []. Row contains cells that are in that
row in the grid.
"""
def __init__(self, grid: Grid, data: List = None):
self.grid = grid
if data:
self._list = list(data)
else:
self._list = list()
def __len__(self):
""" List length """
return len(self._list)
def __getitem__(self, i):
""" Get a list item """
return self._list[i]
def __delitem__(self, i):
""" Delete an item """
del self._list[i]
def __setitem__(self, i, val):
""" Set item """
# optional: self._acl_check(val)
self._list[i] = val
def __repr__(self):
return "<{0} {1}>".format(self.__class__.__name__, self._list)
def __str__(self):
return str(self._list)
def insert(self, i, val):
""" Insert value at index """
# optional: self._acl_check(val)
self._list.insert(i, val)
def append(self, val):
""" Append value at end of list """
self.insert(len(self._list), val)
def make_grid_line(line: List[Union[float, int]], total_size: int, number_of_items: int) -> List[int]:
""" Used internally by Grid for constructing cell sizes for each column, row.
Function creates a list representing sizes of cells (in px) in that line (either row or column).
Line can be passed as a list of decimals (representing percentage of total size) or integers (representing sizes).
    Line can also include fewer elements than there are rows/columns; elements then get added/removed accordingly.
Args:
line (List[Union[float, int]]): List of either integers or floats representing different size format (px or %).
total_size (int): Total size (height of all rows or width of all columns), can be either 1 (if %) or an integer
representing size in px.
number_of_items (int): Expected number of items in line.
"""
# Check number of elements matches, add/remove otherwise
element_number_difference = number_of_items - len(line)
    if element_number_difference < 0:  # If more were passed, remove the extra trailing items
        line = line[:number_of_items]
    elif element_number_difference > 0:  # If fewer were passed, append equal parts
if isinstance(line[0], float): # If float, parts added must be equal to 1/total_num_parts
one_part = 1 / number_of_items
else: # Else add equal parts of total_size
one_part = int(total_size / number_of_items)
line += [one_part for _ in range(element_number_difference)]
# Create list
if isinstance(line[0], float): # If decimal -> percentage
line_sum = sum(line)
# factor = line_sum / 1
line = [part / line_sum for part in line]
size_percentages = line # [part * factor for part in line]
return [int(total_size * part) for part in size_percentages]
else: # If not -> assume int -> sizes in px
factor = total_size / sum(line)
return [int(size * factor) for size in line]
class Grid(StaticItem):
def __init__(
self,
position: List[int] = [0, 0],
rows: int = 1,
columns: int = 1,
row_sizes: Union[List[int], List[float]] = None,
column_sizes: Union[List[int], List[float]] = None,
size: Tuple[int, int] = None,
visible: bool = False,
selected: bool = False
):
"""
Args:
position (List[int] = [0, 0]): Position to place item on screen (or on page).
rows (int): An integer representing number of rows.
columns (int): An integer representing number of columns.
row_sizes (Union[List[int], List[float]]): List of heights for each row, heights can either (all together)
be integer values (representing height of each row in px) or float numbers (representing height of each
row by percentage relative to grid size)
column_sizes (Union[List[int], List[float]]): List of widths for each column, widths can either
(all together) be integer values (representing width of each row in px) or float numbers
(representing width of each row by percentage relative to grid size)
size (Tuple[int, int] = (1, 1)): Size of item.
visible (bool): If item is currently visible.
selected (bool): If item is currently selected.
Note:
        Adding
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mxnet as mx
import numpy as np
from config import config
ACT_BIT = 1
bn_mom = 0.9
workspace = 256
memonger = False
def Conv(**kwargs):
body = mx.sym.Convolution(**kwargs)
return body
def Act(data, act_type, name):
if act_type == 'prelu':
body = mx.sym.LeakyReLU(data=data, act_type='prelu', name=name)
else:
body = mx.symbol.Activation(data=data, act_type=act_type, name=name)
return body
#def lin(data, num_filter, workspace, name, binarize, dcn):
# bit = 1
# if not binarize:
# if not dcn:
# conv1 = Conv(data=data, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
# no_bias=True, workspace=workspace, name=name + '_conv')
# bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# return act1
# else:
# bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# conv1_offset = mx.symbol.Convolution(name=name+'_conv_offset', data = act1,
# num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
# conv1 = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=act1, offset=conv1_offset,
# num_filter=num_filter, pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=False)
# #conv1 = Conv(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
# # no_bias=False, workspace=workspace, name=name + '_conv')
# return conv1
# else:
# bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# conv1 = mx.sym.QConvolution_v1(data=act1, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
# no_bias=True, workspace=workspace, name=name + '_conv', act_bit=ACT_BIT, weight_bit=bit)
# conv1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
# return conv1
def lin3(data, num_filter, workspace, name, k, g=1, d=1):
if k != 3:
conv1 = Conv(data=data,
num_filter=num_filter,
kernel=(k, k),
stride=(1, 1),
pad=((k - 1) // 2, (k - 1) // 2),
num_group=g,
no_bias=True,
workspace=workspace,
name=name + '_conv')
else:
conv1 = Conv(data=data,
num_filter=num_filter,
kernel=(k, k),
stride=(1, 1),
pad=(d, d),
num_group=g,
dilate=(d, d),
no_bias=True,
workspace=workspace,
name=name + '_conv')
bn1 = mx.sym.BatchNorm(data=conv1,
fix_gamma=False,
momentum=bn_mom,
eps=2e-5,
name=name + '_bn')
act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
ret = act1
return ret
def ConvFactory(data,
num_filter,
kernel,
stride=(1, 1),
pad=(0, 0),
act_type="relu",
mirror_attr={},
with_act=True,
dcn=False,
name=''):
if not dcn:
conv = mx.symbol.Convolution(data=data,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
no_bias=True,
workspace=workspace,
name=name + '_conv')
else:
conv_offset = mx.symbol.Convolution(name=name + '_conv_offset',
data=data,
num_filter=18,
pad=(1, 1),
kernel=(3, 3),
stride=(1, 1))
conv = mx.contrib.symbol.DeformableConvolution(name=name + "_conv",
data=data,
offset=conv_offset,
num_filter=num_filter,
pad=(1, 1),
kernel=(3, 3),
num_deformable_group=1,
stride=stride,
dilate=(1, 1),
no_bias=False)
bn = mx.symbol.BatchNorm(data=conv,
fix_gamma=False,
momentum=bn_mom,
eps=2e-5,
name=name + '_bn')
if with_act:
act = Act(bn, act_type, name=name + '_relu')
#act = mx.symbol.Activation(
# data=bn, act_type=act_type, attr=mirror_attr, name=name+'_relu')
return act
else:
return bn
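# Hedged usage sketch: a 3x3 conv-bn-relu block built with ConvFactory (names illustrative).
# body = ConvFactory(data, num_filter=64, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name='conv1')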
class CAB:
def __init__(self, data, nFilters, nModules, n, workspace, name, dilate,
group):
self.data = data
self.nFilters = nFilters
self.nModules = nModules
self.n = n
self.workspace = workspace
self.name = name
self.dilate = dilate
self.group = group
self.sym_map = {}
def get_output(self, w, h):
key = (w, h)
if key in self.sym_map:
return self.sym_map[key]
ret = None
if h == self.n:
if w == self.n:
ret = (self.data, self.nFilters)
else:
x = self.get_output(w + 1, h)
f = int(x[1] * 0.5)
if w != self.n - 1:
body = lin3(x[0], f, self.workspace,
"%s_w%d_h%d_1" % (self.name, w, h), 3,
self.group, 1)
else:
body = lin3(x[0], f, self.workspace,
"%s_w%d_h%d_1" % (self.name, w, h), 3,
self.group, self.dilate)
ret = (body, f)
else:
x = self.get_output(w + 1, h + 1)
y = self.get_output(w, h + 1)
if h % 2 == 1 and h != w:
xbody = lin3(x[0], x[1], self.workspace,
"%s_w%d_h%d_2" % (self.name, w, h), 3, x[1])
#xbody = xbody+x[0]
else:
xbody = x[0]
#xbody = x[0]
#xbody = lin3(x[0], x[1], self.workspace, "%s_w%d_h%d_2"%(self.name, w, h), 3, x[1])
if w == 0:
ybody = lin3(y[0], y[1], self.workspace,
"%s_w%d_h%d_3" % (self.name, w, h), 3, self.group)
else:
ybody = y[0]
ybody = mx.sym.concat(y[0], ybody, dim=1)
body = mx.sym.add_n(xbody,
ybody,
name="%s_w%d_h%d_add" % (self.name, w, h))
body = body / 2
ret = (body, x[1])
self.sym_map[key] = ret
return ret
def get(self):
return self.get_output(1, 1)[0]
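# Hedged usage sketch: a CAB block is built over an existing symbol and collapsed
# with get(); the argument values are illustrative.
# cab = CAB(data=body, nFilters=64, nModules=1, n=4, workspace=256,
#           name='stage1_cab', dilate=1, group=1)
# body = cab.get()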
def conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn,
dilate, **kwargs):
bit = 1
#print('in unit2')
    # the same as https://github.com/facebook/fb.resnet.torch#notes, a bit different from the original paper
bn1 = mx.sym.BatchNorm(data=data,
fix_gamma=False,
eps=2e-5,
momentum=bn_mom,
name=name + '_bn1')
if not binarize:
act1 = Act(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = Conv(data=act1,
num_filter=int(num_filter * 0.5),
kernel=(1, 1),
stride=(1, 1),
pad=(0, 0),
no_bias=True,
workspace=workspace,
name=name + '_conv1')
else:
act1 = mx.sym.QActivation(data=bn1,
act_bit=ACT_BIT,
name=name + '_relu1',
backward_only=True)
conv1 = mx.sym.QConvolution(data=act1,
num_filter=int(num_filter * 0.5),
kernel=(1, 1),
stride=(1, 1),
pad=(0, 0),
no_bias=True,
workspace=workspace,
name=name + '_conv1',
act_bit=ACT_BIT,
weight_bit=bit)
bn2 = mx.sym.BatchNorm(data=conv1,
fix_gamma=False,
eps=2e-5,
momentum=bn_mom,
name=name + '_bn2')
if not binarize:
act2 = Act(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = Conv(data=act2,
num_filter=int(num_filter * 0.5),
kernel=(3, 3),
stride=(1, 1),
pad=(1, 1),
no_bias=True,
workspace=workspace,
name=name + '_conv2')
else:
act2 = mx.sym.QActivation(data=bn2,
act_bit=ACT_BIT,
name=name + '_relu2',
backward_only=True)
conv2 = mx.sym.QConvolution(data=act2,
num_filter=int(num_filter * 0.5),
kernel=(3, 3),
stride=(1, 1),
pad=(1, 1),
no_bias=True,
workspace=workspace,
name=name + '_conv2',
act_bit=ACT_BIT,
weight_bit=bit)
bn3 = mx.sym.BatchNorm(data=conv2,
fix_gamma=False,
eps=2e-5,
momentum=bn_mom,
name=name + '_bn3')
if not binarize:
act3 = Act(data=bn3, act_type='relu', name=name + '_relu3')
conv3 = Conv(data=act3,
num_filter=num_filter,
kernel=(1, 1),
stride=(1, 1),
pad=(0, 0),
no_bias=True,
workspace=workspace,
name=name + '_conv3')
else:
act3 = mx.sym.QActivation(data=bn3,
act_bit=ACT_BIT,
name=name + '_relu3',
backward_only=True)
conv3 = mx.sym.QConvolution(data=act3,
num_filter=num_filter,
kernel=(1, 1),
stride=(1, 1),
pad=(0, 0),
no_bias=True,
workspace=workspace,
name=name + '_conv3',
act_bit=ACT_BIT,
weight_bit=bit)
#if binarize:
# conv3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4')
if dim_match:
shortcut = data
else:
if not binarize:
shortcut = Conv(data=act1,
num_filter=num_filter,
kernel=(1, 1),
stride=stride,
no_bias=True,
workspace=workspace,
name=name + '_sc')
else:
shortcut = mx.sym.QConvolution(data=act1,
num_filter=num_filter,
kernel=(1, 1),
stride=stride,
pad=(0, 0),
no_bias=True,
workspace=workspace,
name=name + '_sc',
act_bit=ACT_BIT,
weight_bit=bit)
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv3 + shortcut
def conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn,
dilation, **kwargs):
bit = 1
#print('in unit2')
    # the same as https://github.com/facebook/fb.resnet.torch#notes, a bit different from the original paper
bn1 = mx.sym.BatchNorm(data=data,
fix_gamma=False,
eps=2e-5,
momentum=bn_mom,
name=name + '_bn1')
if not binarize:
act1 = Act(data=bn1, act_type='relu', name=name + '_relu1')
if not dcn:
conv1 = Conv(data=act1,
num_filter=int(num_filter * 0.5),
kernel=(3, 3),
stride=(1, 1),
pad=(dilation, dilation),
dilate=(dilation, dilation),
no_bias=True,
workspace=workspace,
name=name + '_conv1')
else:
conv1_offset = mx.symbol.Convolution(name=name + '_conv1_offset',
data=act1,
num_filter=18,
pad=(1, 1),
kernel=(3, 3),
stride=(1, 1))
conv1 = mx.contrib.symbol.DeformableConvolution(
name=name + '_conv1',
data=act1,
offset=conv1_offset,
num_filter=int(num_filter * 0.5),
pad=(1, 1),
kernel=(3, 3),
num_deformable_group=1,
stride=(1, 1),
dilate=(1, 1),
no_bias=True)
else:
act1 = mx.sym.QActivation(data=bn1,
act_bit=ACT_BIT,
name=name + '_relu1',
backward_only=True)
conv1 = mx.sym.QConvolution_v1(data=act1,
num_filter=int(num_filter * 0.5),
kernel=(3, 3),
stride=(1, 1),
pad=(1, 1),
no_bias=True,
workspace=workspace,
name=name + '_conv1',
act_bit=ACT_BIT,
weight_bit=bit)
bn2 = mx.sym.BatchNorm(data=conv1,
fix_gamma=False,
eps=2e-5,
momentum=bn_mom,
name=name + '_bn2')
if not binarize:
act2 = Act(data=bn2, act_type='relu', name=name + '_relu2')
if not dcn:
conv2 = Conv(data=act2,
num_filter=int(num_filter * 0.25),
kernel=(3, 3),
stride=(1, 1),
pad=(dilation, dilation),
dilate=(dilation, dilation),
no_bias=True,
workspace=workspace,
name=name + '_conv2')
else:
conv2_offset = mx.symbol.Convolution(name=name + '_conv2_offset',
data=act2,
num_filter=18,
pad=(1, 1),
kernel=(3, 3),
stride=(1, 1))
conv2 = mx.contrib.symbol.DeformableConvolution(
name=name + '_conv2',
data=act2,
offset=conv2_offset,
num_filter=int(num_filter * 0.25),
pad=(1, 1),
kernel=(3, 3),
num_deformable_group=1,
stride=(1, 1),
dilate=(1, 1),
no_bias=True)
else:
act2 = mx.sym.QActivation(data=bn2,
act_bit=ACT_BIT,
name=name + '_relu2',
backward_only=True)
conv2 = mx.sym.QConvolution_v1(data=act2,
num_filter=int(num_filter * 0.25),
kernel=(3, 3),
stride=(1, 1),
pad=(1, 1),
no_bias=True,
workspace=workspace,
name=name + '_conv2',
act_bit=ACT_BIT,
weight_bit=bit)
bn3 = mx.sym.BatchNorm(data=conv2,
fix_gamma=False,
eps=2e-5,
momentum=bn_mom,
name=name + '_bn3')
if not binarize:
act3 = Act(data=bn3, act_type='relu', name=name + '_relu3')
if not dcn:
conv3 = Conv(data=act3,
num_filter=int(num_filter * 0.25),
kernel=(3, 3),
stride=(1, 1),
pad=(dilation, dilation),
dilate=(dilation, dilation),
no_bias=True,
workspace=workspace,
name=name + '_conv3')
else:
conv3_offset = mx.symbol.Convolution(name=name + '_conv3_offset',
data=act3,
num_filter=18,
pad=(1, 1),
kernel=(3, 3),
stride=(1, 1))
conv3 = mx.contrib.symbol.DeformableConvolution(
name=name + '_conv3',
data=act3,
offset=conv3_offset,
num_filter=int(num_filter * 0.25),
pad=(1, 1),
kernel=(3, 3),
num_deformable_group=1,
stride=(1, 1),
dilate=(1, 1),
no_bias=True)
else:
act3 = mx.sym.QActivation(data=bn3,
act_bit=ACT_BIT,
name=name + '_relu3',
backward_only=True)
conv3 = mx.sym.QConvolution_v1(data=act3,
num_filter=int(num_filter * 0.25),
kernel=(3, 3),
stride=(1, 1),
pad=(1, 1),
no_bias=True,
workspace=workspace,
name=name + '_conv3',
act_bit=ACT_BIT,
weight_bit=bit)
conv4 = mx.symbol.Concat(*[conv1, conv2, conv3])
if binarize:
conv4 = mx.sym.BatchNorm(data=conv4,
fix_gamma=False,
eps=2e-5,
momentum=bn_mom,
name=name + '_bn4')
if dim_match:
shortcut = data
else:
if not binarize:
shortcut = Conv(data=act1,
num_filter=num_filter,
kernel=(1, 1),
stride=stride,
no_bias=True,
workspace=workspace,
name=name + '_sc')
else:
#assert(False)
shortcut = mx.sym.QConvolution_v1(data=act1,
num_filter=num_filter,
kernel=(1, 1),
stride=stride,
pad=(0, 0),
no_bias=True,
workspace=workspace,
name=name + '_sc',
act_bit=ACT_BIT,
weight_bit=bit)
shortcut = mx.sym.BatchNorm(data=shortcut,
fix_gamma=False,
eps=2e-5,
momentum=bn_mom,
name=name + '_sc_bn')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv4 + shortcut
#return bn4 + shortcut
#return act4 + shortcut
def block17(net,
input_num_channels,
scale=1.0,
with_act=True,
(len(res) > 2):
# More than one plot
pl.legend(loc='lower right')
pl.grid()
pl.show()
pl.close()
if plotReturnData or returnData:
return res
################## WAVETABLE COMMANDS #################
'''
@loadWavetable@
loadWavetable(list,second=False)
Load one wavetable on the hardware board
Loading a primary wavetable erases the secondary if present
Required parameters:
list : List of values of the wavetable
If empty [] the wavetable will be erased
Optional parameters:
second : Load secondary wavetable for DAC2
(Defaults to false)
Included in slab.py
Returns nothing
'''
def loadWavetable(list,second=False):
global w_idle,w_idle2,w_points,w_points2
# Get list size
size = len(list)
# Checks
if not opened:
raise SlabEx("Not connected to board")
if size > buff_size :
raise SlabEx("Wavetable too big")
# Additional check for secondary wavetable
if second:
if size > buff_size - w_points:
raise SlabEx("Not enough space for secondary wavetable")
# Send data
if not second:
w_points = size # Size of main wavetable
        if size > 0:  # Idle value (Volt)
w_idle = list[0]
else:
w_idle = -1
w_idle2 = -1 # Eliminate secondary wavetable
w_points2 = 0
startCommand('W') # Start
else:
w_points2 = size # Size of secondary wavetable
if size > 0:
            w_idle2 = list[0]  # Idle value (Volt)
else:
w_idle2 = -1
startCommand('w') # Start
sendU16(size)
if size > 0:
for value in list:
ratio = value/vref
if not second:
rCal = dc_cal(ratio,dacx,dac1y)
else:
rCal = dc_cal(ratio,dacx,dac2y)
counts = ratio2counts(rCal)
sendU16(counts)
sendCRC()
checkACK()
checkCRC()
if not second and size > 0:
# Inform on frequency only on main wave
fmax = 1.0/(w_points * min_sample)
fmin = 1.0/(w_points * max_sample)
message(1,str(w_points) + " point wave loaded")
message(1,"Wave frequency must be between " + "{:.6f}".format(fmin) + " and " + "{:.2f}".format(fmax) + " Hz")
frequency = 1.0 /(sampleTime * w_points)
message(1,"Current frequency is " + str(frequency) + " Hz")
# Inform on space
space = buff_size - w_points - w_points2
message(1,"Remaining buffer space is " + str(space) + " samples")
'''
@waveSquare@
waveSquare(v1,v2,np,returnList,second)
Loads square wavetable omn the hardware board
Required parameters:
v1 : Start value
v2 : End value
np : Number of points for a full wave
Optional parameters:
returnList : Request a return list (Default False)
second : Load on secondary table
(Defaults to false)
If returnList is True, returns the table of loaded values
Included in slab.py
'''
def waveSquare(v1,v2,np,returnList=False,second=False):
# Check
if not opened:
raise SlabEx("Not connected to board")
if np < 4:
raise SlabEx("Not enough points for wave")
# Create wave
list = []
for point in range(0,np):
if point < np/2.0:
list.append(v1)
else:
list.append(v2)
# Program wave
loadWavetable(list,second)
# Return list
if returnList:
return list
'''
@wavePulse@
wavePulse(v1,v2,np,n1,returnList,second)
Loads a pulse wavetable on the hardware board
Parameters:
v1 : Start value
v2 : End value
np : Number of points for a full wave
n1 : Number of points at v1
Optional parameters:
returnList : Request a return list (Default False)
second : Load on secondary table
(Defaults to false)
If returnList is True, returns the table of loaded values
Included in slab.py
'''
def wavePulse(v1,v2,np,n1,returnList=False,second=False):
# Check
if not opened:
raise SlabEx("Not connected to board")
if np < 4:
raise SlabEx("Not enough points for wave")
# Create wave
list = []
for point in range(0,np):
if point < n1:
list.append(v1)
else:
list.append(v2)
# Program wave
loadWavetable(list,second)
# Return list
if returnList:
return list
'''
@waveTriangle@
waveTriangle(v1,v2,np,returnList,second)
Loads a triangle wavetable on the hardware board
Parameters:
v1 : Minimum value
v2 : Maximum value
np : Number of points for a full wave
Optional parameters:
returnList : Request a return list (Default False)
second : Load on secondary table
(Defaults to false)
If returnList is True, returns the table of loaded values
Included in slab.py
'''
def waveTriangle(v1,v2,np,returnList=False,second=False):
# Check
if not opened:
raise SlabEx("Not connected to board")
if np < 4:
raise SlabEx("Not enough points for wave")
# Create wave
list = []
for point in range(0,np):
point = (point + np//4) % np
if point < np/2.0:
value = v1 + 2.0*(v2-v1)*point/np
else:
value = v1 + 2.0*(v2-v1)*(np - point)/np
list.append(value)
# Program wave
loadWavetable(list,second)
# Return list
if returnList:
return list
'''
@waveSawtooth@
waveSawtooth(v1,v2,np,returnList,second)
Loads a sawtooth wavetable on the hardware board
Parameters:
v1 : Start value
v2 : End value
np : Number of points for a full wave
Optional parameters:
returnList : Request a return list (Default False)
second : Load on secondary table
(Defaults to false)
If returnList is True, returns the table of loaded values
Included in slab.py
'''
def waveSawtooth(v1,v2,np,returnList=False,second=False):
# Check
if not opened:
raise SlabEx("Not connected to board")
if np < 4:
raise SlabEx("Not enough points for wave")
# Create wave
list = []
for point in range(0,np):
value = v1*1.0 + (v2*1.0-v1*1.0)*point/np
list.append(value)
# Program wave
loadWavetable(list,second)
# Return list
if returnList:
return list
'''
@waveSine@
waveSine(v1,v2,np,phase,returnList,second)
Generates a sine wavetable
Parameters:
v1 : Minimum value
v2 : Maximum value
np : Number of points for a full wave
Optional parameters:
phase : Phase of the signal (deg) (Defaults to 0)
returnList : Request a return list (Default False)
second : Load on secondary table
(Defaults to false)
If returnList is True, returns the table of loaded values
Included in slab.py
'''
def waveSine(v1,v2,np,phase=0.0,returnList=False,second=False):
# Check
if not opened:
raise SlabEx("Not connected to board")
if np < 4:
raise SlabEx("Not enough points for wave")
# Create wave
phase = phase*math.pi/180.0
list = []
mean = (v1 + v2)/2.0
amplitude = (v2 - v1)/2.0
for point in range(0,np):
value = mean + amplitude*math.sin(2.0*math.pi*point/np+phase)
list.append(value)
# Program wave
loadWavetable(list,second)
# Return list
if returnList:
return list
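# Illustrative sketch: a sine on the primary table and a 90-degree shifted copy
# on the secondary table (assumes a board connection is already open).
#
#   waveSine(1.0, 2.0, 100)                            # primary table
#   waveSine(1.0, 2.0, 100, phase=90.0, second=True)   # secondary table, 90 deg ahead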
'''
@waveCosine@
waveCosine(v1,v2,np,phase,returnList,second)
Generates a cosine wavetable
Parameters:
v1 : Minimum value
v2 : Maximum value
np : Number of points for a full wave
Optional parameters:
phase : Phase of the signal (deg) (Defaults to 0)
returnList : Request a return list (Default False)
second : Load on secondary table
(Defaults to false)
If returnList is True, returns the table of loaded values
Included in slab.py
'''
def waveCosine(v1,v2,np,phase=0.0,returnList=False,second=False):
# Check
if not opened:
raise SlabEx("Not connected to board")
if np < 4:
raise SlabEx("Not enough points for wave")
# Create wave
phase = phase*math.pi/180.0
list = []
mean = (v1 + v2)/2.0
amplitude = (v2 - v1)/2.0
for point in range(0,np):
value = mean + amplitude*math.cos(2.0*math.pi*point/np+phase)
list.append(value)
# Program wave
loadWavetable(list,second)
# Return list
if returnList:
return list
'''
@waveNoise@
waveNoise(vm,vstd,n,returnList,second)
Generates a noise wavetable
Based on a normal distribution
Samples are truncated between 0 and Vref
Parameters:
vm : Mean value
vstd : Standard deviation
n : Number of points
Optional parameters:
returnList : Request a return list (Default False)
second : Load on secondary table
(Defaults to false)
If returnList is True, returns the table of loaded values
Included in slab.py
'''
def waveNoise(vm,vstd,n,returnList=False,second=False):
# Check
if not opened:
raise SlabEx("Not connected to board")
if n < 4:
raise SlabEx("Not enough points for wave")
# Create wave
list = np.random.normal(loc=vm,scale=vstd,size=n)
for i in range(0,n):
if list[i] > vref:
list[i] = vref
if list[i] < 0.0:
list[i] = 0
# Program wave
loadWavetable(list,second)
# Return list
if returnList:
return list
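# Illustrative sketch: a 256-point noise table centred on 1.5 V with a 0.1 V
# standard deviation; samples outside the 0..vref range are clipped by the loop above.
#
#   waveNoise(1.5, 0.1, 256)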
'''
@waveRandom@
waveRandom(v1,v2,n,returnList,second)
Generates a random wavetable
Based on a uniform distribution
Samples will be random values between v1 and v2
Parameters:
v1 : Minimum voltage
v2 : Maximum voltage
n : Number of points
Optional parameters:
returnList : Request a return list (Default False)
second : Load on secondary table
(Defaults to false)
If returnList is True, returns the table of loaded values
Included in slab.py
'''
def waveRandom(v1,v2,n,returnList=False,second=False):
# Check
if not opened:
raise SlabEx("Not connected to board")
if n < 4:
raise SlabEx("Not enough points for wave")
if v1 >= v2 :
raise SlabEx("v1 must be lower than v2")
# Create wave
list = np.random.uniform(low=v1,high=v2,size=n)
# Program wave
loadWavetable(list,second)
# Return list
if returnList:
return list
"""
# Arrange
source_markdown = """a[foo][baβr]a
[baβr]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[link(1,2):full:/url:title:::baβr:foo:::::]",
"[text(1,3):foo:]",
"[end-link::]",
"[text(1,18):a:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::baβr:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<a href="/url" title="title">foo</a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
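# Notation used in the serialized expected_tokens throughout these tests (as far
# as these cases show): "\a<original>\a<replacement>\a" records a character
# reference replacement (for example &beta; rendered as β), and "\b" marks a
# consumed backslash escape.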
@pytest.mark.gfm
def test_paragraph_extra_80a():
"""
Test case extra 80a: 80 with newline before special characters
"""
# Arrange
source_markdown = """a[foo][ba
&beta;r]a
[ba
&beta;r]: /url 'title'"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):a:]",
"[link(1,2):full:/url:title:::ba\nβr:foo:::::]",
"[text(1,3):foo:]",
"[end-link::]",
"[text(2,9):a:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::ba βr:ba\nβr: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<a href="/url" title="title">foo</a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_81():
"""
Test case extra 81: Paragraph with full link with backspace in reference
"""
# Arrange
source_markdown = """a[foo][ba\\]r]a
[ba\\]r]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[link(1,2):full:/url:title:::ba\\]r:foo:::::]",
"[text(1,3):foo:]",
"[end-link::]",
"[text(1,14):a:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ba\\]r:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<a href="/url" title="title">foo</a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_81a():
"""
Test case extra 81a: 81 with newline before special characters
"""
# Arrange
source_markdown = """a[foo][ba
\\]r]a
[ba
\\]r]: /url 'title'"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):a:]",
"[link(1,2):full:/url:title:::ba\n\\]r:foo:::::]",
"[text(1,3):foo:]",
"[end-link::]",
"[text(2,5):a:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::ba \\]r:ba\n\\]r: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<a href="/url" title="title">foo</a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_82():
"""
Test case extra 82: Paragraph with shortcut link with replacement in label
"""
# Arrange
source_markdown = """a[baβr]a
[baβr]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[link(1,2):shortcut:/url:title::::baβr:::::]",
"[text(1,3):ba\aβ\aβ\ar:]",
"[end-link::]",
"[text(1,13):a:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::baβr:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<a href="/url" title="title">baβr</a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_82a():
"""
Test case extra 82a: 82 with newline before special characters
"""
# Arrange
source_markdown = """a[ba
&beta;r]a
[ba
&beta;r]: /url 'title'"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):a:]",
"[link(1,2):shortcut:/url:title::::ba\nβr:::::]",
"[text(1,3):ba\n\aβ\aβ\ar::\n]",
"[end-link::]",
"[text(2,9):a:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::ba βr:ba\nβr: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<a href="/url" title="title">ba\nβr</a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_83():
"""
Test case extra 83: Paragraph with shortcut link with backslash in label
"""
# Arrange
source_markdown = """a[ba\\]r]a
[ba\\]r]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[link(1,2):shortcut:/url:title::::ba\\]r:::::]",
"[text(1,3):ba\\\b]r:]",
"[end-link::]",
"[text(1,9):a:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ba\\]r:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<a href="/url" title="title">ba]r</a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_83a():
"""
Test case extra 83a: 83 with newline before special characters
"""
# Arrange
source_markdown = """a[ba
\\]r]a
[ba
\\]r]: /url 'title'"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):a:]",
"[link(1,2):shortcut:/url:title::::ba\n\\]r:::::]",
"[text(1,3):ba\n\\\b]r::\n]",
"[end-link::]",
"[text(2,5):a:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::ba \\]r:ba\n\\]r: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<a href="/url" title="title">ba\n]r</a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_84x():
"""
Test case extra 84: Paragraph with collapsed link with replacement in label
"""
# Arrange
source_markdown = """a[baβr][]a
[baβr]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[link(1,2):collapsed:/url:title::::baβr:::::]",
"[text(1,3):ba\aβ\aβ\ar:]",
"[end-link::]",
"[text(1,15):a:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::baβr:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<a href="/url" title="title">baβr</a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_84a():
"""
Test case extra 84a: 84 with newline before special characters
"""
# Arrange
source_markdown = """a[ba
&beta;r][]a
[ba
&beta;r]: /url 'title'"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):a:]",
"[link(1,2):collapsed:/url:title::::ba\nβr:::::]",
"[text(1,3):ba\n\aβ\aβ\ar::\n]",
"[end-link::]",
"[text(2,11):a:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::ba βr:ba\nβr: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<a href="/url" title="title">ba\nβr</a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_85():
"""
Test case extra 85: Paragraph with collapsed link with backslash in label
"""
# Arrange
source_markdown = """a[ba\\]r][]a
[ba\\]r]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[link(1,2):collapsed:/url:title::::ba\\]r:::::]",
"[text(1,3):ba\\\b]r:]",
"[end-link::]",
"[text(1,11):a:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ba\\]r:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<a href="/url" title="title">ba]r</a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_85a():
"""
Test case extra 85a: 85 with newline before special characters
"""
# Arrange
source_markdown = """a[ba
\\]r][]a
[ba
\\]r]: /url 'title'"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):a:]",
"[link(1,2):collapsed:/url:title::::ba\n\\]r:::::]",
"[text(1,3):ba\n\\\b]r::\n]",
"[end-link::]",
"[text(2,7):a:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::ba \\]r:ba\n\\]r: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<a href="/url" title="title">ba\n]r</a>a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_90x():
"""
Test case extra 90: Paragraph with full image with backslash in label
"""
# Arrange
source_markdown = """a![foo\\#bar][bar]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[image(1,2):full:/url:title:foo#bar:::bar:foo\\#bar:::::]",
"[text(1,18):a:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="foo#bar" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_90a():
"""
Test case extra 90a: 90 with newline before special chars
"""
# Arrange
source_markdown = """a![foo
\\#bar][bar]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):a:]",
"[image(1,2):full:/url:title:foo\n#bar:::bar:foo\n\\#bar:::::]",
"[text(2,12):a:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="foo\n#bar" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_91x():
"""
Test case extra 91: Paragraph with full image with replacement in label
"""
# Arrange
source_markdown = """a![fooβbar][bar]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[image(1,2):full:/url:title:fooβbar:::bar:fooβbar:::::]",
"[text(1,22):a:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="fooβbar" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_91a():
"""
Test case extra 91a: 91 with newline before special characters
"""
# Arrange
source_markdown = """a![foo
&beta;bar][bar]a
[bar]: /url 'title'"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):a:]",
"[image(1,2):full:/url:title:foo\nβbar:::bar:foo\nβbar:::::]",
"[text(2,16):a:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::bar:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="foo\nβbar" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_92x():
"""
Test case extra 92: Paragraph with full image with replacement in reference
"""
# Arrange
source_markdown = """a![foo][baβr]a
[baβr]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[image(1,2):full:/url:title:foo:::baβr:foo:::::]",
"[text(1,19):a:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::baβr:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="foo" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_92a():
"""
Test case extra 92a: 92 with newline before special characters
"""
# Arrange
source_markdown = """a![foo][ba
&beta;r]a
[ba
&beta;r]: /url 'title'"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):a:]",
"[image(1,2):full:/url:title:foo:::ba\nβr:foo:::::]",
"[text(2,9):a:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::ba βr:ba\nβr: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="foo" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_93x():
"""
Test case extra 93: Paragraph with full image with backspace in reference
"""
# Arrange
source_markdown = """a![foo][ba\\]r]a
[ba\\]r]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[image(1,2):full:/url:title:foo:::ba\\]r:foo:::::]",
"[text(1,15):a:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ba\\]r:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="foo" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_93a():
"""
Test case extra 93a: 93 with newline before special characters
"""
# Arrange
source_markdown = """a![foo][ba
\\]r]a
[ba
\\]r]: /url 'title'"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):a:]",
"[image(1,2):full:/url:title:foo:::ba\n\\]r:foo:::::]",
"[text(2,5):a:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::ba \\]r:ba\n\\]r: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="foo" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_94x():
"""
Test case extra 94: Paragraph with shortcut image with replacement in label
"""
# Arrange
source_markdown = """a![baβr]a
[baβr]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[image(1,2):shortcut:/url:title:baβr::::baβr:::::]",
"[text(1,14):a:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::baβr:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="baβr" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_94a():
"""
Test case extra 94a: 94 with newline before special characters
"""
# Arrange
source_markdown = """a![ba
&beta;r]a
[ba
&beta;r]: /url 'title'"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):a:]",
"[image(1,2):shortcut:/url:title:ba\nβr::::ba\nβr:::::]",
"[text(2,9):a:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::ba βr:ba\nβr: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="ba\nβr" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_95x():
"""
Test case extra 95: Paragraph with shortcut image with backslash in label
"""
# Arrange
source_markdown = """a![ba\\]r]a
[ba\\]r]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[image(1,2):shortcut:/url:title:ba]r::::ba\\]r:::::]",
"[text(1,10):a:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ba\\]r:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="ba]r" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_95a():
"""
Test case extra 95a: 95 with newline before special characters
"""
# Arrange
source_markdown = """a![ba
\\]r]a
[ba
\\]r]: /url 'title'"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):a:]",
"[image(1,2):shortcut:/url:title:ba\n]r::::ba\n\\]r:::::]",
"[text(2,5):a:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::ba \\]r:ba\n\\]r: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="ba\n]r" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_96x():
"""
Test case extra 96: Paragraph with collapsed image with replacement in label
"""
# Arrange
source_markdown = """a![baβr][]a
[baβr]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[image(1,2):collapsed:/url:title:baβr::::baβr:::::]",
"[text(1,16):a:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::baβr:: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="baβr" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_96a():
"""
Test case extra 96a: 96 with newline before special characters
"""
# Arrange
source_markdown = """a![ba
&beta;r][]a
[ba
&beta;r]: /url 'title'"""
expected_tokens = [
"[para(1,1):\n]",
"[text(1,1):a:]",
"[image(1,2):collapsed:/url:title:ba\nβr::::ba\nβr:::::]",
"[text(2,11):a:]",
"[end-para:::True]",
"[BLANK(3,1):]",
"[link-ref-def(4,1):True::ba βr:ba\nβr: :/url:: :title:'title':]",
]
expected_gfm = """<p>a<img src="/url" alt="ba\nβr" title="title" />a</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_extra_97x():
"""
Test case extra 97: Paragraph with collapsed image with backslash in label
"""
# Arrange
source_markdown = """a![ba\\]r][]a
[ba\\]r]: /url 'title'"""
expected_tokens = [
"[para(1,1):]",
"[text(1,1):a:]",
"[image(1,2):collapsed:/url:title:ba]r::::ba\\]r:::::]",
"[text(1,12):a:]",
"[end-para:::True]",
"[BLANK(2,1):]",
"[link-ref-def(3,1):True::ba\\]r:: :/url:: :title:'title':]",
]
self.Resources["ItemsPanelTemplate"] = items_panel_template
item_container_template = kwargs.get('item_container_template', None)
if item_container_template:
self.Resources["ItemContainerTemplate"] = item_container_template
item_template = kwargs.get('item_template', None)
if item_template:
self.Resources["ItemTemplate"] = \
item_template
# nicely wrap and prepare context for presentation, then present
self._prepare_context()
# list options now
self._list_options()
# setup search and filter fields
self.hide_element(self.clrsearch_b)
self.clear_search(None, None)
def _prepare_context_items(self, ctx_items):
new_ctx = []
# filter context if necessary
if self.filter_func:
ctx_items = filter(self.filter_func, ctx_items)
for item in ctx_items:
if TemplateListItem.is_checkbox(item):
item.checkable = self.multiselect
new_ctx.append(item)
else:
new_ctx.append(
TemplateListItem(item,
checkable=self.multiselect,
name_attr=self._nameattr)
)
return new_ctx
def _prepare_context(self):
if isinstance(self._context, dict) and self._context.keys():
self._update_ctx_groups(self._context.keys())
new_ctx = {}
for ctx_grp, ctx_items in self._context.items():
new_ctx[ctx_grp] = self._prepare_context_items(ctx_items)
else:
new_ctx = self._prepare_context_items(self._context)
self._context = new_ctx
def _update_ctx_groups(self, ctx_group_names):
self.show_element(self.ctx_groups_dock)
self.ctx_groups_selector_cb.ItemsSource = ctx_group_names
if self.ctx_groups_active in ctx_group_names:
self.ctx_groups_selector_cb.SelectedIndex = \
ctx_group_names.index(self.ctx_groups_active)
else:
self.ctx_groups_selector_cb.SelectedIndex = 0
def _get_active_ctx_group(self):
return self.ctx_groups_selector_cb.SelectedItem
def _get_active_ctx(self):
if isinstance(self._context, dict):
return self._context[self._get_active_ctx_group()]
else:
return self._context
def _list_options(self, option_filter=None):
if option_filter:
self.checkall_b.Content = 'Check'
self.uncheckall_b.Content = 'Uncheck'
self.toggleall_b.Content = 'Toggle'
# get a match score for every item and sort high to low
fuzzy_matches = sorted(
[(x, coreutils.fuzzy_search_ratio(x.name, option_filter))
for x in self._get_active_ctx()],
key=lambda x: x[1],
reverse=True
)
# filter out any match with score less than 80
self.list_lb.ItemsSource = \
[x[0] for x in fuzzy_matches if x[1] >= 80]
else:
self.checkall_b.Content = 'Check All'
self.uncheckall_b.Content = 'Uncheck All'
self.toggleall_b.Content = 'Toggle All'
self.list_lb.ItemsSource = [x for x in self._get_active_ctx()]
@staticmethod
def _unwrap_options(options):
unwrapped = []
for optn in options:
if isinstance(optn, TemplateListItem):
unwrapped.append(optn.unwrap())
else:
unwrapped.append(optn)
return unwrapped
def _get_options(self):
if self.multiselect:
if self.return_all:
return [x for x in self._get_active_ctx()]
else:
return self._unwrap_options(
[x for x in self._get_active_ctx()
if x.state or x in self.list_lb.SelectedItems]
)
else:
return self._unwrap_options([self.list_lb.SelectedItem])[0]
def _set_states(self, state=True, flip=False, selected=False):
all_items = self.list_lb.ItemsSource
if selected:
current_list = self.list_lb.SelectedItems
else:
current_list = self.list_lb.ItemsSource
for checkbox in current_list:
if flip:
checkbox.state = not checkbox.state
else:
checkbox.state = state
# push list view to redraw
self.list_lb.ItemsSource = None
self.list_lb.ItemsSource = all_items
def toggle_all(self, sender, args): #pylint: disable=W0613
"""Handle toggle all button to toggle state of all check boxes."""
self._set_states(flip=True)
def check_all(self, sender, args): #pylint: disable=W0613
"""Handle check all button to mark all check boxes as checked."""
self._set_states(state=True)
def uncheck_all(self, sender, args): #pylint: disable=W0613
"""Handle uncheck all button to mark all check boxes as un-checked."""
self._set_states(state=False)
def check_selected(self, sender, args): #pylint: disable=W0613
"""Mark selected checkboxes as checked."""
self._set_states(state=True, selected=True)
def uncheck_selected(self, sender, args): #pylint: disable=W0613
"""Mark selected checkboxes as unchecked."""
self._set_states(state=False, selected=True)
def button_select(self, sender, args): #pylint: disable=W0613
"""Handle select button click."""
self.response = self._get_options()
self.Close()
def search_txt_changed(self, sender, args): #pylint: disable=W0613
"""Handle text change in search box."""
if self.search_tb.Text == '':
self.hide_element(self.clrsearch_b)
else:
self.show_element(self.clrsearch_b)
self._list_options(option_filter=self.search_tb.Text)
def clear_search(self, sender, args): #pylint: disable=W0613
"""Clear search box."""
self.search_tb.Text = ' '
self.search_tb.Clear()
self.search_tb.Focus()
class CommandSwitchWindow(TemplateUserInputWindow):
"""Standard form to select from a list of command options.
Args:
context (list[str]): list of command options to choose from
switches (list[str]): list of on/off switches
message (str): window title message
config (dict): dictionary of config dicts for options or switches
Returns:
str: name of selected option
Returns:
tuple(str, dict): if ``switches`` option is used, returns a tuple
of selection option name and dict of switches
Example:
This is an example with series of command options:
>>> from pyrevit import forms
>>> ops = ['option1', 'option2', 'option3', 'option4']
>>> forms.CommandSwitchWindow.show(ops, message='Select Option')
'option2'
A more advanced example of combining command options, on/off switches,
and option or switch configuration options:
>>> from pyrevit import forms
>>> ops = ['option1', 'option2', 'option3', 'option4']
>>> switches = ['switch1', 'switch2']
>>> cfgs = {'option1': { 'background': '0xFF55FF'}}
>>> rops, rswitches = forms.CommandSwitchWindow.show(
... ops,
... switches=switches,
... message='Select Option',
... config=cfgs
... )
>>> rops
'option2'
>>> rswitches
{'switch1': False, 'switch2': True}
"""
xaml_source = 'CommandSwitchWindow.xaml'
def _setup(self, **kwargs):
self.selected_switch = ''
self.Width = DEFAULT_CMDSWITCHWND_WIDTH
self.Title = 'Command Options'
message = kwargs.get('message', None)
self._switches = kwargs.get('switches', [])
if not isinstance(self._switches, dict):
self._switches = dict.fromkeys(self._switches)
configs = kwargs.get('config', None)
self.message_label.Content = \
message if message else 'Pick a command option:'
# creates the switches first
for switch, state in self._switches.items():
my_togglebutton = framework.Controls.Primitives.ToggleButton()
my_togglebutton.Content = switch
my_togglebutton.IsChecked = state if state else False
if configs and switch in configs:
self._set_config(my_togglebutton, configs[switch])
self.button_list.Children.Add(my_togglebutton)
for option in self._context:
my_button = framework.Controls.Button()
my_button.Content = option
my_button.Click += self.process_option
if configs and option in configs:
self._set_config(my_button, configs[option])
self.button_list.Children.Add(my_button)
self._setup_response()
self.search_tb.Focus()
self._filter_options()
@staticmethod
def _set_config(item, config_dict):
bg = config_dict.get('background', None)
if bg:
bg = bg.replace('0x', '#')
item.Background = Media.BrushConverter().ConvertFrom(bg)
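# For example, a config entry of {'option1': {'background': '0xFF55FF'}} is
# converted to the '#FF55FF' form that WPF's BrushConverter expects before
# being applied to that option's button.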
def _setup_response(self, response=None):
if self._switches:
switches = [x for x in self.button_list.Children
if hasattr(x, 'IsChecked')]
self.response = response, {x.Content: x.IsChecked
for x in switches}
else:
self.response = response
def _filter_options(self, option_filter=None):
if option_filter:
self.search_tb.Tag = ''
option_filter = option_filter.lower()
for button in self.button_list.Children:
if option_filter not in button.Content.lower():
button.Visibility = WPF_COLLAPSED
else:
button.Visibility = WPF_VISIBLE
else:
self.search_tb.Tag = \
'Type to Filter / Tab to Select / Enter or Click to Run'
for button in self.button_list.Children:
button.Visibility = WPF_VISIBLE
def _get_active_button(self):
buttons = []
for button in self.button_list.Children:
if button.Visibility == WPF_VISIBLE:
buttons.append(button)
if len(buttons) == 1:
return buttons[0]
else:
for x in buttons:
if x.IsFocused:
return x
def handle_click(self, sender, args): #pylint: disable=W0613
"""Handle mouse click."""
self.Close()
def handle_input_key(self, sender, args):
"""Handle keyboard inputs."""
if args.Key == Input.Key.Escape:
if self.search_tb.Text:
self.search_tb.Text = ''
else:
self.Close()
elif args.Key == Input.Key.Enter:
self.process_option(self._get_active_button(), None)
elif args.Key != Input.Key.Tab \
and args.Key != Input.Key.Space\
and args.Key != Input.Key.LeftShift\
and args.Key != Input.Key.RightShift:
self.search_tb.Focus()
def search_txt_changed(self, sender, args): #pylint: disable=W0613
"""Handle text change in search box."""
self._filter_options(option_filter=self.search_tb.Text)
def process_option(self, sender, args): #pylint: disable=W0613
"""Handle click on command option button."""
self.Close()
if sender:
self._setup_response(response=sender.Content)
class GetValueWindow(TemplateUserInputWindow):
"""Standard form to get simple values from user.
Args:
Example:
>>> from pyrevit import forms
>>> items = ['item1', 'item2', 'item3']
>>> forms.SelectFromList.show(items, button_name='Select Item')
>>> ['item1']
"""
xaml_source = 'GetValueWindow.xaml'
def _setup(self, **kwargs):
self.Width = 400
# determine value type
self.value_type = kwargs.get('value_type', 'string')
value_prompt = kwargs.get('prompt', None)
value_default = kwargs.get('default', None)
self.reserved_values = kwargs.get('reserved_values', [])
# customize window based on type
if self.value_type == 'string':
self.show_element(self.stringPanel_dp)
self.stringValue_tb.Text = value_default if value_default else ''
self.stringValue_tb.Focus()
self.stringValue_tb.SelectAll()
self.stringPrompt.Text = \
value_prompt if value_prompt else 'Enter string:'
if self.reserved_values:
self.string_value_changed(None, None)
elif self.value_type == 'dropdown':
self.show_element(self.dropdownPanel_db)
self.dropdownPrompt.Text = \
value_prompt if value_prompt else 'Pick one value:'
self.dropdown_cb.ItemsSource = self._context
if value_default:
self.dropdown_cb.SelectedItem = value_default
elif self.value_type == 'date':
self.show_element(self.datePanel_dp)
self.datePrompt.Text = \
value_prompt if value_prompt else 'Pick date:'
def string_value_changed(self, sender, args):
filtered_rvalues = \
sorted([x for x in self.reserved_values
if self.stringValue_tb.Text in str(x)],
reverse=True)
if filtered_rvalues:
self.reservedValuesList.ItemsSource = filtered_rvalues
self.show_element(self.reservedValuesListPanel)
self.okayButton.IsEnabled = \
self.stringValue_tb.Text not in filtered_rvalues
else:
self.reservedValuesList.ItemsSource = []
self.hide_element(self.reservedValuesListPanel)
self.okayButton.IsEnabled = True
def select(self, sender, args): #pylint: disable=W0613
self.Close()
if self.value_type == 'string':
self.response = self.stringValue_tb.Text
elif self.value_type == 'dropdown':
self.response = self.dropdown_cb.SelectedItem
elif self.value_type == 'date':
if self.datePicker.SelectedDate:
datestr = self.datePicker.SelectedDate.ToString("MM/dd/yyyy")
self.response = datetime.datetime.strptime(datestr, r'%m/%d/%Y')
else:
self.response = None
class TemplatePromptBar(WPFWindow):
"""Template context-manager class for creating prompt bars.
Prompt bars are show at the top of the active Revit window and are
designed for better prompt visibility.
Args:
height (int): window height
**kwargs: other arguments to be passed to :func:`_setup`
"""
xaml_source = 'TemplatePromptBar.xaml'
def __init__(self, height=32, **kwargs):
"""Initialize user prompt window."""
WPFWindow.__init__(self,
op.join(op.dirname(__file__), self.xaml_source))
self.user_height = height
self.update_window()
self._setup(**kwargs)
def update_window(self):
"""Update the prompt bar to match Revit window."""
screen_area = HOST_APP.proc_screen_workarea
scale_factor = 1.0 / HOST_APP.proc_screen_scalefactor
top = left = width = height = 0
window_rect = revit.get_window_rectangle()
# set width and height
width = window_rect.Right - window_rect.Left
height = self.user_height
top = window_rect.Top
# in maximized window, the top might be off the active screen
# due to windows thicker window frames
# lets cut the height and re-adjust the top
top_diff = abs(screen_area.Top - top)
if 10 > top_diff > 0 and top_diff < height:
height -= top_diff
top = screen_area.Top
left = window_rect.Left
# in maximized window, | |
0xA8C3, # 中文阴圈码卅(⏺ + 卅)
0xA8C4: 0x4E00, # 注音符号— = 一
0xA8EA: 0xA8EA, # 中文阴框码一(⏹ + 一)
0xA8EB: 0xA8EB, # 中文阴框码二(⏹ + 二)
0xA8EC: 0xA8EC, # 中文阴框码三(⏹ + 三)
0xA8ED: 0xA8ED, # 中文阴框码四(⏹ + 四)
0xA8EE: 0xA8EE, # 中文阴框码五(⏹ + 五)
0xA8EF: 0xA8EF, # 中文阴框码六(⏹ + 六)
0xA8F0: 0xA8F0, # 中文阴框码七(⏹ + 七)
0xA8F1: 0xA8F1, # 中文阴框码八(⏹ + 八)
0xA8F2: 0xA8F2, # 中文阴框码九(⏹ + 九)
0xA8F3: 0xA8F3, # 中文阴框码十(⏹ + 十)
0xA8F4: 0xA8F4, # 中文阴框码廿(⏹ + 廿)
0xA8F5: 0xA8F5, # 中文阴框码卅(⏹ + 卅)
0xA8F6: 0xA8F6, # 中文阴圈码一(⏺ + 一)
0xA8F7: 0xA8F7, # 中文阴圈码二(⏺ + 二)
0xA8F8: 0xA8F8, # 中文阴圈码三(⏺ + 三)
0xA8F9: 0xA8F9, # 中文阴圈码四(⏺ + 四)
0xA8FA: 0xA8FA, # 中文阴圈码五(⏺ + 五)
0xA8FB: 0xA8FB, # 中文阴圈码六(⏺ + 六)
0xA8FC: 0xA8FC, # 中文阴圈码七(⏺ + 七)
0xA8FD: 0xA8FD, # 中文阴圈码八(⏺ + 八)
0xA8FE: 0xA8FE # 中文阴圈码九(⏺ + 九)
})
# Area A9
_update({
0xA9A1: 0xA9A1, # (╪)
0xA9A2: 0xA9A2, # (╡)
0xA9F0: 0x21E8, # 空心向右箭头 = ⇨
0xA9F1: 0x21E6, # 空心向左箭头 = ⇦
0xA9F2: 0x2B06, # 实心向上箭头 = ⬆
0xA9F3: 0x2B07, # 实心向下箭头 = ⬇
0xA9F4: 0x27A1, # 实心向右箭头 = ➡
0xA9F5: 0x2B05, # 实心向左箭头 = ⬅
0xA9F6: 0x2B62, # 箭头-无翅向右 = ⭢
0xA9F7: 0x2B60, # 箭头-无翅向左 = ⭠
0xA9F8: 0x2B61, # 箭头-无翅向上 = ⭡
0xA9F9: 0x2B63, # 箭头-无翅向下 = ⭣
0xA9FA: 0x21C1, # 箭头-下单翅向右 = ⇁
0xA9FB: 0x21BD, # 箭头-下单翅向左 = ↽
0xA9FC: 0xA9FC, # 箭头-双向向内(ꜜ͎)
0xA9FD: 0x2195, # 箭头-双向向外 = ↕
0xA9FE: 0x2B65, # 箭头-无翅双向向外 = ⭥
})
# Area AA
_update({
0xAAA1: 0xAAA1, # BD语言注解:盘外符开弧(⸨)
0xAAA2: 0xAAA2, # BD语言注解:盘外符标记()→)
0xAAA3: 0xAAA3, # BD语言注解:盘外符闭弧(⸩)
0xAAA4: 0xAAA4, # BD语言注解:换行符(⇙)
0xAAA5: 0xAAA5, # BD语言注解:换段符(↙)
0xAAA6: 0xAAA6, # BD语言注解:小样文件结束(Ω)
0xAAA7: 0xAAA7, # BD语言注解:数学态标记(◯ + ﹩)
0xAAA8: 0xAAA8, # BD语言注解:自定义参数(◯ + ﹠)
0xAAA9: 0xAAA9, # BD语言注解:盒子开弧(⦃)
0xAAAA: 0xAAAA, # BD语言注解:盒子闭弧(⦄)
0xAAAB: 0xAAAB, # BD语言注解:转字体标记(ⓩ)
0xAAAC: 0xAAAC, # BD语言注解:上标(⤊)
0xAAAD: 0xAAAD, # BD语言注解:下标(⤋)
0xAAB0: 0x002C, # 千分撇 = ,
0xAAB1: 0x002E, # 小数点 = .
0xAAB2: 0x2010, # 半字线 = ‒
0xAAB3: 0x002A, # 六角星号、呼应号 = *
0xAAB4: 0x0021, # 阶乘 = !
0xAAB5: 0x2202, # 偏导数 = ∂
0xAAB6: 0x2211, # 和 = ∑
0xAAB7: 0x220F, # 积 = ∏
0xAAB8: 0x2AEE, # 非因子号 = ⫮
0xAAB9: 0x2031, # 万分号 = ‱
0xAABA: 0x227B, # 前继 = ≻
0xAABB: 0x227A, # 后继 = ≺
0xAABC: 0x2282, # 包含于 = ⊂
0xAABD: 0x2283, # 包含 = ⊃
0xAABE: 0x225C, # Delta等于 = ≜
0xAABF: 0x00AC, # 否定 = ¬
0xAAC0: 0x22CD, # ⋍
0xAAC1: 0x2286, # 包含于 = ⊆
0xAAC2: 0x2287, # 包含 = ⊇
0xAAC3: 0x225C, # ≜
0xAAC4: 0x2243, # 近似符号 = ⋍
0xAAC5: 0x2265, # 大于等于 = ≥
0xAAC6: 0x2264, # 小于等于 = ≤
0xAAC7: 0x2214, # 穆勒连分符号、集合合 = ∔
0xAAC8: 0x2238, # 算术差 = ∸
0xAAC9: 0x2A30, # 直积号 = ⨰
0xAACA: 0x2271, # 不大于等于 = ≱
0xAACB: 0x2270, # 不小于等于 = ≰
0xAACC: 0x2AB0, # ⪰
0xAACD: 0x2AAF, # ⪯
0xAACE: 0x5350, # 卐
0xAACF: 0x212A, # 绝对温度单位 = K
0xAAD0: 0x2200, # 全称量词 = ∀
0xAAD1: 0x21D1, # ⇑
0xAAD2: 0x21E7, # ⇧
0xAAD3: 0x21BE, # ↾
0xAAD4: 0x21D3, # ⇓
0xAAD5: 0x21E9, # ⇩
0xAAD6: 0x21C3, # ⇃
0xAAD7: 0x2935, # ⤵
0xAAD8: 0x21E5, # ⇥
0xAAD9: 0x22F0, # 对角三连点 = ⋰
0xAADA: 0x21D4, # 等价 = ⇔
0xAADB: 0x21C6, # ⇆
0xAADC: 0x2194, # ↔
0xAADD: 0x21D2, # 推断 = ⇒
0xAADE: 0x21E8, # ⇨
0xAADF: 0x21C0, # ⇀
0xAAE0: 0x27F6, # ⟶
0xAAE1: 0x21D0, # ⇐
0xAAE2: 0x21E6, # ⇦
0xAAE3: 0x21BC, # ↼
0xAAE4: 0x27F5, # ⟵
0xAAE5: 0x2196, # ↖️
0xAAE6: 0x2199, # ↙️
0xAAE7: 0x2198, # ↘️
0xAAE8: 0x2197, # ↗️
0xAAE9: 0x22D5, # 平行等于 = ⋕
0xAAEA: 0x2AC5, # 包含于 = ⫅
0xAAEB: 0x2AC6, # 包含 = ⫆
0xAAEC: 0x29CB, # 相当于 = ⧋
0xAAED: 0x226B, # 远大于 = ≫
0xAAEE: 0x226A, # 远小于 = ≪
0xAAEF: 0x2A72, # 加或等于 = ⩲
0xAAF0: 0x22BB, # ⊻
0xAAF1: 0x2AE8, # 垂直等于 = ⫨
0xAAF2: 0x2277, # 大于或小于 = ≷
0xAAF3: 0x227D, # ≽
0xAAF4: 0x227C, # ≼
0xAAF5: 0x2109, # 华氏度 = ℉
0xAAF6: 0x2203, # 存在量词 = ∃
0xAAF7: 0x22F1, # 对角三连点 = ⋱
0xAAF9: 0x2241, # ≁
0xAAFA: 0x2244, # ≄
0xAAFB: 0x2276, # ≶
0xAAFC: 0x2209, # 不属于 = ∉
0xAAFD: 0x2267, # ≧
0xAAFE: 0x2266 # ≦
})
# Area AB
_update({
0xABA1: 0x224B, # ≋
0xABA2: 0x2262, # 不恒等于 = ≢
0xABA3: 0x2251, # 近似值号 = ≑
0xABA4: 0x2284, # 不包含于 = ⊄
0xABA5: 0x2285, # 不包含 = ⊅
0xABA6: 0x2259, # 相当于、等角的、估算 = ≙
0xABA7: 0x2205, # 空集 = ∅
0xABA8: 0x2207, # 微分算符 = ∇
0xABA9: 0x2A01, # 直和 = ⨁
0xABAA: 0x2A02, # 重积 = ⨂
0xABAB: 0x03F9, # 组合 = Ϲ
0xABAC: 0xABAC, # 对角六连点(⋰ + ⋰)
0xABAD: 0x263C, # ☼
0xABAE: 0xABAE, # (⚬ + ↑)
0xABAF: 0x2247, # 不近似等于 = ≇
0xABB0: 0x2249, # 不近似等于 = ≉
0xABB1: 0x2278, # 不小于大于 = ≸
0xABB2: 0x22F6, # 不属于 = ⋶
0xABB3: 0x2AFA, # 大于等于 = ⫺
0xABB4: 0x2AF9, # 小于等于 = ⫹
0xABB5: 0x2245, # 近似等于、接近 = ≅
0xABB6: 0x2267, # 大于等于 = ≧
0xABB7: 0x2250, # 近似等于 = ≐
0xABB8: 0x2266, # 小于等于 = ≦
0xABB9: 0x2A26, # 加或差 = ⨦
0xABBA: 0x2213, # 负或正、减或加 = ∓
0xABBB: 0x233F, # ⌿
0xABBC: 0x30FC, # 日文符号 = ー
0xABBD: 0xABBD, # 近似值号(· + ≈)
0xABBE: 0x2288, # 不包含于 = ⊈
0xABBF: 0x2289, # 不包含 = ⊉
0xABC0: 0x225A, # 角相等 = ≚
0xABC1: 0x2205, # 空集 = ∅
0xABC2: 0x2205, # (diagonal 卐)
0xABC3: 0x0024, # $
0xABC4: 0x2709, # ✉
0xABC5: 0x272E, # ✮
0xABC6: 0x272F, # ✯
0xABC7: 0x2744, # ❄
0xABC8: 0x211E, # 处方符号 = ℞
0xABC9: 0x1D110, # 𝄐
0xABCA: 0x2034, # 三次微分 = ‴
0xABCB: 0xABCB, # 对角六连点(⋱ + ⋱)
0xABCC: 0x2ACB, # 真包含于 = ⫋
0xABCD: 0x2ACC, # 真包含 = ⫌
0xABCE: 0x2A63, # ⩣
0xABCF: 0xABCF, # 约数0(0 + \)
0xABD0: 0xABD0, # 约数1(1 + \)
0xABD1: 0xABD1, # 约数2(2 + \)
0xABD2: 0xABD2, # 约数3(3 + \)
0xABD3: 0xABD3, # 约数4(4 + \)
0xABD4: 0xABD4, # 约数5(5 + \)
0xABD5: 0xABD5, # 约数6(6 + \)
0xABD6: 0xABD6, # 约数7(7 + \)
0xABD7: 0xABD7, # 约数8(8 + \)
0xABD8: 0xABD8, # 约数9(9 + \)
0xABD9: 0x216C, # 罗马数字50 = Ⅼ
0xABDA: 0x216D, # 罗马数字100 = Ⅽ
0xABDB: 0x216E, # 罗马数字500 = Ⅾ
0xABDC: 0x216F, # 罗马数字1000 = Ⅿ
0xABDD: 0x2295, # 圈加 = ⊕
0xABDE: 0xABDE, # 圈加减(◯ + ±)
0xABDF: 0x2296, # 圈减 = ⊖
0xABE0: 0xABE0, # 圈点减(◯ + ∸)
0xABE1: 0x2297, # 圈乘 = ⊗
0xABE2: 0x2A38, # 圈除 = ⨸
0xABE3: 0x229C, # 圈等于 = ⊜
0xABE4: 0xABE4, # 交流电机(◯ + ∼)
0xABE5: 0xABE5, # 圈大于等于(◯ + ≥)
0xABE6: 0xABE6, # 圈小于等于(◯ + ≤)
0xABE7: 0x224A, # 近似等于 = ≊
0xABE8: 0xABE8, # (> + >)
0xABE9: 0xABE9, # (< + <)
0xABEA: 0x22DB, # 大于等于小于 = ⋛
0xABEB: 0x22DA, # 小于等于大于 = ⋚
0xABEC: 0x2A8C, # 大于等于小于 = ⪌
0xABED: 0x2A8B, # 小于等于大于 = ⪋
0xABEE: 0x2273, # ≳
0xABEF: 0x2272, # ≲
0xABF0: 0x29A5, # ⦥
0xABF1: 0x29A4, # ⦤
0xABF2: 0x2660, # 黑桃 = ♠
0xABF3: 0x2394, # 正六边形 = ⎔
0xABF4: 0x2B20, # 正五边形 = ⬠
0xABF5: 0x23E2, # 梯形 = ⏢
0xABF6: 0x2663, # 梅花 = ♣
0xABF7: 0x25B1, # 平行四边形 = ▱
0xABF8: 0x25AD, # 矩形 = ▭
0xABF9: 0x25AF, # 矩形 = ▯
0xABFA: 0x2665, # 红桃 = ♥
0xABFB: 0x2666, # 方块 = ♦
0xABFC: 0x25C1, # 三角形(向左) = ◁
0xABFD: 0x25BD, # 三角形(向下) = ▽
0xABFE: 0x25B7 # 三角形(向右) = ▷
})
# Area AC
_update({
0xACA1: 0x25C0, # 实三角形(向左) = ◀
0xACA2: 0x25BC, # 实三角形(向下) = ▼
0xACA3: 0x25B6, # 实三角形(向右) = ▶
0xACA4: 0x25FA, # 直角三角形 = ◺
0xACA5: 0x22BF, # 直角三角形 = ⊿
0xACA6: 0x25B3, # △
0xACA7: 0x27C1, # ⟁
0xACA8: 0x2BCE, # ⯎
0xACA9: 0x2B2F, # ⬯
0xACAA: 0xACAA,
# aioboto3lite/aioboto3lite.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
aioboto3 compatibility wrapper for aiohttp and aiosonic.
N.B. This is currently very much "proof of concept"/"prototype" software YMMV.
The premise of this library is that whilst botocore/boto3/aiobotocore/aioboto3
are incredibly useful and powerful SDKs a key design goal of those is to provide
a very general SDK that can be dynamically generated from a JSON model, and so
can support new AWS services almost as soon as they are available. The down-side
of that design goal is that in order to be general there are often many layers
of indirection and delegation between the SDK API methods like s3.get_object to
the underlying HTTP REST API call.
The aim of this library is to illustrate the performance difference that taking
a more "direct" route from the SDK method to API invocation might make. It also
aims to illustrate how different HTTP client libraries might make a difference.
With aiobotocore we only have the option of using aiohttp so with this library
we also include support for aiosonic.
In general the performance difference can be profound. Using aiosonic, put_object
appears to have more than 2.5x the throughput of aioboto3 and get_object appears
to have more than 4.5x the throughput.
In addition to increased performance, this library has a tiny footprint when
compared with aioboto3 (and all of its dependencies).
The *significant* down-side of this approach, however, is "sustainability". That
is to say this proof of concept only supports a fairly limited set of S3 CRUD
operations at the moment and although it is relatively straightforward to add
new methods, and it would be possible to make things more generic and even
JSON model driven there's a danger that that could make things evolve into
a slightly "dodgy" boto clone.
Whilst the underlying premise of improving performance by optimising the path
from SDK method to API call is sound ultimately perhaps only a handful of
methods actually benefit from such an optimisation. Many SDK methods are used
for setup/teardown and relatively few (like s3.get_object/s3.put_object/
sfn.start_execution/lambda.invoke etc.) are actually used on application
"critical path". So perhaps a better (more sustainable) approach might be to
start with aioboto3/aiobotocore and "inject" optimised SDK methods to replace
the handful that would really benefit from optimisation.
"""
import sys
assert sys.version_info >= (3, 8) # Bomb out if not running Python3.8
import asyncio, io, os, time, uuid
import aiohttp, aiosonic
import hashlib, hmac, re, urllib.parse
from datetime import datetime, timezone, timedelta
"""
Attempt to use ujson if available https://pypi.org/project/ujson/
"""
try:
import ujson as json
except: # Fall back to standard library json
import json
"""
Attempt to use pybase64 libbase64 based codec if available
pip3 install pybase64
https://github.com/mayeut/pybase64
https://github.com/aklomp/base64
"""
try:
import pybase64 as base64
except: # Fall back to standard library base64
import base64
to_pascal_case_pattern = re.compile(r"(?:^|_|-)(.)")
def to_pascal_case(string):
# Convert from snake (or hyphen) case to upper camel (e.g. Pascal) case
return to_pascal_case_pattern.sub(lambda m: m.group(1).upper(), string)
to_hyphen_case_pattern1 = re.compile(r"([A-Z]+)([A-Z][a-z])")
to_hyphen_case_pattern2 = re.compile(r"([a-z\d])([A-Z])")
def to_hyphen_case(string):
# Convert from camel/PascalCase to hyphen-case
string = to_hyphen_case_pattern1.sub(r'\1-\2', string)
string = to_hyphen_case_pattern2.sub(r'\1-\2', string)
return string.lower()
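# Quick illustration of the two helpers above:
#   to_pascal_case("get_object")    -> "GetObject"
#   to_hyphen_case("ListObjectsV2") -> "list-objects-v2"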
def parse_rfc3339_datetime(rfc3339):
"""
Parse an RFC3339 (https://www.ietf.org/rfc/rfc3339.txt) format string into
a datetime object which is essentially the inverse operation to
datetime.now(timezone.utc).astimezone().isoformat()
We primarily need this in the Wait state so we can compute timeouts etc.
"""
rfc3339 = rfc3339.strip() # Remove any leading/trailing whitespace
if rfc3339[-1] == "Z":
date = rfc3339[:-1]
offset = "+00:00"
else:
date = rfc3339[:-6]
offset = rfc3339[-6:]
if "." not in date:
date = date + ".0"
raw_datetime = datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%f")
delta = timedelta(hours=int(offset[-5:-3]), minutes=int(offset[-2:]))
if offset[0] == "-":
delta = -delta
return raw_datetime.replace(tzinfo=timezone(delta))
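# Example:
#   parse_rfc3339_datetime("2023-05-01T12:30:00Z")
#   -> datetime.datetime(2023, 5, 1, 12, 30, tzinfo=datetime.timezone.utc)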
def XML_iterator(buffer):
"""
This is a fast streaming XML parser that operates on binary sequence types.
https://docs.python.org/3/library/stdtypes.html#binary-sequence-types-bytes-bytearray-memoryview
it iterates over a memoryview, implementing a simple XML state machine
triggered on the <!?/ > characters and yields a tuple of the form:
tag, attributes, value, end
The first three are the tag, attributes and value of the XML element and
end is a boolean that is True if the tuple signifies the tag end else False.
Using a memoryview avoids unnecessary copying and the tag, attributes, value
string are constructed from memoryview slices, again to minimise copying.
"""
INIT = 1
SEEN_LT = 2 # <
SEEN_START_TAG = 3 # <tag>
SEEN_END_TAG = 4 # </tag>
SEEN_POSSIBLE_EMPTY_TAG = 5 # <tag/>
SEEN_GT = 6 # >
COLLECTING_VALUE = 7
state = INIT
tag_start_idx = tag_end_idx = 0 # Indices to start & end of tag name.
attributes_start_idx = attributes_end_idx = 0 # start & end of attribute.
value_start_idx = value_end_idx = 0 # Indices to start & end of tag value.
tag_stack = [] # Used to get tag names for end tags
"""
Use memoryview to efficiently iterate over bytes in memory.
"""
memview = memoryview(buffer)
for i, ch in enumerate(memview):
if state == INIT:
if ch == ord("<"):
state = SEEN_LT
elif state == COLLECTING_VALUE:
if ch == ord("<"):
state = SEEN_LT;
tag = bytes(memview[tag_start_idx:tag_end_idx + 1]).decode("utf-8")
tag_stack.append(tag)
attributes = bytes(memview[attributes_start_idx:attributes_end_idx + 1]).decode("utf-8")
value = bytes(memview[value_start_idx:value_end_idx + 1]).decode("utf-8")
yield tag, value, attributes, False
else:
value_end_idx = i
elif state == SEEN_LT:
if ch == ord("!") or ch == ord("?"):
state = INIT
elif ch == ord("/"):
state = SEEN_END_TAG
else:
state = SEEN_START_TAG
tag_start_idx = i
attributes_start_idx = tag_start_idx
elif state == SEEN_START_TAG:
if ch == ord(">"):
state = COLLECTING_VALUE
value_start_idx = i + 1
elif ch == ord("/"):
state = SEEN_POSSIBLE_EMPTY_TAG
elif ch == ord(" "):
if attributes_start_idx > tag_start_idx:
attributes_end_idx = i
else:
attributes_start_idx = i + 1
else:
if attributes_start_idx > tag_start_idx:
attributes_end_idx = i
else:
tag_end_idx = i
elif state == SEEN_END_TAG:
if ch == ord(">"):
state = INIT;
yield tag_stack.pop(), "", "", True
elif state == SEEN_POSSIBLE_EMPTY_TAG:
if ch == ord(">"): # It actually is an empty tag
state = INIT
tag = bytes(memview[tag_start_idx:tag_end_idx + 1]).decode("utf-8")
attributes = bytes(memview[attributes_start_idx:attributes_end_idx + 1]).decode("utf-8")
yield tag, "", attributes, True
else: # It's not an empty tag, just a / character in an attribute
state = SEEN_START_TAG
if attributes_start_idx > tag_start_idx:
attributes_end_idx = i
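# Minimal illustration of the iterator output for a well-formed fragment:
#   list(XML_iterator(b"<Name>test</Name>"))
#   -> [("Name", "test", "", False), ("Name", "", "", True)]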
def deserialise_XML(buffer, template):
"""
Used to transform raw XML API responses of the form described in the AWS docs
https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
into response dicts of the form described in the boto3 docs.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.list_objects_v2
This function operates on a buffer of binary sequence types.
https://docs.python.org/3/library/stdtypes.html#binary-sequence-types-bytes-bytearray-memoryview
it uses the XML_iterator to iterate over a memoryview, yielding a sequence
of tag, value, attribute, end tuples, where end == True signifies an
end/closing tag. The structure of the source XML document is used to create
a dict, with literal values defaulting to strings and nested structures
defaulting to nested dicts.
To fully deserialise, a "type overrides" template is used of the form:
template = {
"int": {"/Contents/Size", "/KeyCount", "/MaxKeys"},
"bool": {"/IsTruncated"},
"object_list": {"/Contents", "/CommonPrefixes"},
"datetime": {"/Contents/LastModified"},
}
This allows default str/dict for specified paths to be overridden. The int,
float, bool, datetime overrides are fairly obvious whilst object_list may be
used to transform repeating tags like <Contents>...</Contents><Contents>...</Contents>
into e.g. "Contents": [{...}, {...}]
"""
numeric_int = template.get("int", {})
numeric_float = template.get("float", {})
boolean = template.get("bool", {})
object_list = template.get("object_list", {})
time_datetime = template.get("datetime", {})
tag_stack = [("root", "", "", False)]
for tags in XML_iterator(buffer):
tag, value, attribute, end = tags
if end: # Is the tag an end/closing tag?
# Get actual tag/value off the stack. Slice gets first two items in tuple
tag, value = tag_stack.pop()[:2]
# After getting current tag the top | |
self, namespaces ):
return '-'.join( namespaces )
def _EditNamespaceSort( self, namespaces ):
# users might want to add a namespace with a hyphen in it, so in lieu of a nice list to edit we'll just escape for now mate
correct_char = '-'
escaped_char = '\\-'
escaped_namespaces = [ namespace.replace( correct_char, escaped_char ) for namespace in namespaces ]
edit_string = '-'.join( escaped_namespaces )
message = 'Write the namespaces you would like to sort by here, separated by hyphens. Any namespace in any of your sort definitions will be added to the collect-by menu.'
message += os.linesep * 2
message += 'If the namespace you want to add has a hyphen, like \'creator-id\', instead type it with a backslash escape, like \'creator\\-id-page\'.'
with ClientGUIDialogs.DialogTextEntry( self, message, allow_blank = False, default = edit_string ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
edited_string = dlg.GetValue()
edited_escaped_namespaces = re.split( r'(?<!\\)\-', edited_string )
edited_namespaces = [ namespace.replace( escaped_char, correct_char ) for namespace in edited_escaped_namespaces ]
edited_namespaces = [ HydrusTags.CleanTag( namespace ) for namespace in edited_namespaces if HydrusTags.TagOK( namespace ) ]
if len( edited_namespaces ) > 0:
return tuple( edited_namespaces )
raise HydrusExceptions.VetoException()
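# For example, an entry of 'creator\-id-page' splits on the unescaped hyphen
# into 'creator\-id' and 'page', which become the namespaces 'creator-id' and
# 'page' once the escape is undone.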
def UpdateOptions( self ):
self._new_options.SetDefaultSort( self._default_media_sort.GetSort() )
self._new_options.SetFallbackSort( self._fallback_media_sort.GetSort() )
self._new_options.SetBoolean( 'save_page_sort_on_change', self._save_page_sort_on_change.isChecked() )
self._new_options.SetDefaultCollect( self._default_media_collect.GetValue() )
sort_by_choices = [ ( 'namespaces', list( data ) ) for data in self._namespace_sort_by.GetData() ]
HC.options[ 'sort_by' ] = sort_by_choices
class _SpeedAndMemoryPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
disk_panel = ClientGUICommon.StaticBox( self, 'disk cache' )
disk_cache_help_button = ClientGUICommon.BetterBitmapButton( disk_panel, CC.global_pixmaps().help, self._ShowDiskCacheHelp )
disk_cache_help_button.setToolTip( 'Show help regarding the disk cache.' )
help_hbox = ClientGUICommon.WrapInText( disk_cache_help_button, disk_panel, 'help for this panel -->', QG.QColor( 0, 0, 255 ) )
self._disk_cache_init_period = ClientGUICommon.NoneableSpinCtrl( disk_panel, unit = 's', none_phrase = 'do not run', min = 1, max = 120 )
self._disk_cache_init_period.setToolTip( 'When the client boots, it can speed up operation (particularly loading your session pages) by reading the front of its database into memory. This sets the max number of seconds it can spend doing that.' )
self._disk_cache_maintenance = ClientGUIControls.NoneableBytesControl( disk_panel, initial_value = 256 * 1024 * 1024, none_label = 'do not keep db cached' )
self._disk_cache_maintenance.setToolTip( 'The client can regularly ensure the front of its database is cached in your OS\'s disk cache. This represents how many megabytes it will ensure are cached in memory.' )
#
media_panel = ClientGUICommon.StaticBox( self, 'thumbnail size and media cache' )
self._thumbnail_cache_size = QP.MakeQSpinBox( media_panel, min=5, max=3000 )
self._thumbnail_cache_size.valueChanged.connect( self.EventThumbnailsUpdate )
self._estimated_number_thumbnails = QW.QLabel( '', media_panel )
self._fullscreen_cache_size = QP.MakeQSpinBox( media_panel, min=25, max=8192 )
self._fullscreen_cache_size.valueChanged.connect( self.EventFullscreensUpdate )
self._estimated_number_fullscreens = QW.QLabel( '', media_panel )
self._thumbnail_cache_timeout = ClientGUITime.TimeDeltaButton( media_panel, min = 300, days = True, hours = True, minutes = True )
self._thumbnail_cache_timeout.setToolTip( 'The amount of time after which a thumbnail in the cache will naturally be removed, if it is not shunted out due to a new member exceeding the size limit. Requires restart to kick in.' )
self._image_cache_timeout = ClientGUITime.TimeDeltaButton( media_panel, min = 300, days = True, hours = True, minutes = True )
self._image_cache_timeout.setToolTip( 'The amount of time after which a rendered image in the cache will naturally be removed, if it is not shunted out due to a new member exceeding the size limit. Requires restart to kick in.' )
#
buffer_panel = ClientGUICommon.StaticBox( self, 'video buffer' )
self._video_buffer_size_mb = QP.MakeQSpinBox( buffer_panel, min=48, max=16*1024 )
self._video_buffer_size_mb.valueChanged.connect( self.EventVideoBufferUpdate )
self._estimated_number_video_frames = QW.QLabel( '', buffer_panel )
#
ac_panel = ClientGUICommon.StaticBox( self, 'tag autocomplete' )
self._autocomplete_results_fetch_automatically = QW.QCheckBox( ac_panel )
self._autocomplete_exact_match_threshold = ClientGUICommon.NoneableSpinCtrl( ac_panel, none_phrase = 'always do full search', min = 1, max = 1024 )
self._autocomplete_exact_match_threshold.setToolTip( 'If the search input has this many characters or fewer, it will fetch exact results rather than full autocomplete results.' )
#
misc_panel = ClientGUICommon.StaticBox( self, 'misc' )
self._forced_search_limit = ClientGUICommon.NoneableSpinCtrl( misc_panel, '', min = 1, max = 100000 )
#
self._disk_cache_init_period.SetValue( self._new_options.GetNoneableInteger( 'disk_cache_init_period' ) )
disk_cache_maintenance_mb = self._new_options.GetNoneableInteger( 'disk_cache_maintenance_mb' )
if disk_cache_maintenance_mb is None:
disk_cache_maintenance = disk_cache_maintenance_mb
else:
disk_cache_maintenance = disk_cache_maintenance_mb * 1024 * 1024
self._disk_cache_maintenance.SetValue( disk_cache_maintenance )
self._thumbnail_cache_size.setValue( int( HC.options['thumbnail_cache_size'] // 1048576 ) )
self._fullscreen_cache_size.setValue( int( HC.options['fullscreen_cache_size'] // 1048576 ) )
self._thumbnail_cache_timeout.SetValue( self._new_options.GetInteger( 'thumbnail_cache_timeout' ) )
self._image_cache_timeout.SetValue( self._new_options.GetInteger( 'image_cache_timeout' ) )
self._video_buffer_size_mb.setValue( self._new_options.GetInteger( 'video_buffer_size_mb' ) )
self._autocomplete_results_fetch_automatically.setChecked( self._new_options.GetBoolean( 'autocomplete_results_fetch_automatically' ) )
self._autocomplete_exact_match_threshold.SetValue( self._new_options.GetNoneableInteger( 'autocomplete_exact_match_threshold' ) )
self._forced_search_limit.SetValue( self._new_options.GetNoneableInteger( 'forced_search_limit' ) )
#
rows = []
rows.append( ( 'run disk cache on boot for this long: ', self._disk_cache_init_period ) )
rows.append( ( 'regularly ensure this much of the db is in OS\'s disk cache: ', self._disk_cache_maintenance ) )
gridbox = ClientGUICommon.WrapInGrid( disk_panel, rows )
vbox = QP.VBoxLayout()
disk_panel.Add( help_hbox, CC.FLAGS_BUTTON_SIZER )
disk_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, disk_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
#
thumbnails_sizer = QP.HBoxLayout()
QP.AddToLayout( thumbnails_sizer, self._thumbnail_cache_size, CC.FLAGS_VCENTER )
QP.AddToLayout( thumbnails_sizer, self._estimated_number_thumbnails, CC.FLAGS_VCENTER )
fullscreens_sizer = QP.HBoxLayout()
QP.AddToLayout( fullscreens_sizer, self._fullscreen_cache_size, CC.FLAGS_VCENTER )
QP.AddToLayout( fullscreens_sizer, self._estimated_number_fullscreens, CC.FLAGS_VCENTER )
video_buffer_sizer = QP.HBoxLayout()
QP.AddToLayout( video_buffer_sizer, self._video_buffer_size_mb, CC.FLAGS_VCENTER )
QP.AddToLayout( video_buffer_sizer, self._estimated_number_video_frames, CC.FLAGS_VCENTER )
rows = []
rows.append( ( 'MB memory reserved for thumbnail cache: ', thumbnails_sizer ) )
rows.append( ( 'MB memory reserved for image cache: ', fullscreens_sizer ) )
rows.append( ( 'Thumbnail cache timeout: ', self._thumbnail_cache_timeout ) )
rows.append( ( 'Image cache timeout: ', self._image_cache_timeout ) )
gridbox = ClientGUICommon.WrapInGrid( media_panel, rows )
media_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, media_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
#
text = 'Hydrus video rendering is CPU intensive.'
text += os.linesep
text += 'If you have a lot of memory, you can set a generous potential video buffer to compensate.'
text += os.linesep
text += 'If the video buffer can hold an entire video, it only needs to be rendered once and will play and loop very smoothly.'
text += os.linesep
text += 'PROTIP: Do not go crazy here.'
buffer_panel.Add( QW.QLabel( text, buffer_panel ), CC.FLAGS_VCENTER )
rows = []
rows.append( ( 'MB memory for video buffer: ', video_buffer_sizer ) )
gridbox = ClientGUICommon.WrapInGrid( buffer_panel, rows )
buffer_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, buffer_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
#
text = 'If you disable automatic autocomplete results fetching, use Ctrl+Space to fetch results manually.'
ac_panel.Add( QW.QLabel( text, ac_panel ), CC.FLAGS_EXPAND_PERPENDICULAR )
rows = []
rows.append( ( 'Automatically fetch autocomplete results: ', self._autocomplete_results_fetch_automatically ) )
rows.append( ( 'Fetch exact match results if input has <= this many characters: ', self._autocomplete_exact_match_threshold ) )
gridbox = ClientGUICommon.WrapInGrid( ac_panel, rows )
ac_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, ac_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
#
rows = []
rows.append( ( 'Forced system:limit for all searches: ', self._forced_search_limit ) )
gridbox = ClientGUICommon.WrapInGrid( misc_panel, rows )
misc_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, misc_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
#
QP.AddToLayout( vbox, QW.QWidget( self ), CC.FLAGS_EXPAND_BOTH_WAYS )
self.setLayout( vbox )
#
self.EventFullscreensUpdate( self._fullscreen_cache_size.value() )
self.EventThumbnailsUpdate( self._thumbnail_cache_size.value() )
self.EventVideoBufferUpdate( self._video_buffer_size_mb.value() )
def _ShowDiskCacheHelp( self ):
message = 'The hydrus database runs best on a drive with fast random access latency. Certain important operations can function up to 100 times faster when started raw from an SSD rather than an HDD.'
message += os.linesep * 2
message += 'To get around this, the client populates a pre-boot and ongoing disk cache. By contiguously frontloading the database into memory, the most important functions do not need to wait on your disk.'
m.c1354 = Constraint(expr= - m.b133 + m.x594 <= 0)
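# Generated coupling block: each constraint below enforces m.x{k} <= m.b{k-461}
# (e.g. x594 <= b133, x595 <= b134, ...).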
m.c1355 = Constraint(expr= - m.b134 + m.x595 <= 0)
m.c1356 = Constraint(expr= - m.b135 + m.x596 <= 0)
m.c1357 = Constraint(expr= - m.b136 + m.x597 <= 0)
m.c1358 = Constraint(expr= - m.b137 + m.x598 <= 0)
m.c1359 = Constraint(expr= - m.b138 + m.x599 <= 0)
m.c1360 = Constraint(expr= - m.b139 + m.x600 <= 0)
m.c1361 = Constraint(expr= - m.b140 + m.x601 <= 0)
m.c1362 = Constraint(expr= - m.b141 + m.x602 <= 0)
m.c1363 = Constraint(expr= - m.b142 + m.x603 <= 0)
m.c1364 = Constraint(expr= - m.b143 + m.x604 <= 0)
m.c1365 = Constraint(expr= - m.b144 + m.x605 <= 0)
m.c1366 = Constraint(expr= - m.b145 + m.x606 <= 0)
m.c1367 = Constraint(expr= - m.b146 + m.x607 <= 0)
m.c1368 = Constraint(expr= - m.b147 + m.x608 <= 0)
m.c1369 = Constraint(expr= - m.b148 + m.x609 <= 0)
m.c1370 = Constraint(expr= - m.b149 + m.x610 <= 0)
m.c1371 = Constraint(expr= - m.b150 + m.x611 <= 0)
m.c1372 = Constraint(expr= - m.b151 + m.x612 <= 0)
m.c1373 = Constraint(expr= - m.b152 + m.x613 <= 0)
m.c1374 = Constraint(expr= - m.b153 + m.x614 <= 0)
m.c1375 = Constraint(expr= - m.b154 + m.x615 <= 0)
m.c1376 = Constraint(expr= - m.b155 + m.x616 <= 0)
m.c1377 = Constraint(expr= - m.b156 + m.x617 <= 0)
m.c1378 = Constraint(expr= - m.b157 + m.x618 <= 0)
m.c1379 = Constraint(expr= - m.b158 + m.x619 <= 0)
m.c1380 = Constraint(expr= - m.b159 + m.x620 <= 0)
m.c1381 = Constraint(expr= - m.b160 + m.x621 <= 0)
m.c1382 = Constraint(expr= - m.b161 + m.x622 <= 0)
m.c1383 = Constraint(expr= - m.b162 + m.x623 <= 0)
m.c1384 = Constraint(expr= - m.b163 + m.x624 <= 0)
m.c1385 = Constraint(expr= - m.b164 + m.x625 <= 0)
m.c1386 = Constraint(expr= - m.b165 + m.x626 <= 0)
m.c1387 = Constraint(expr= - m.b166 + m.x627 <= 0)
m.c1388 = Constraint(expr= - m.b167 + m.x628 <= 0)
m.c1389 = Constraint(expr= - m.b168 + m.x629 <= 0)
m.c1390 = Constraint(expr= - m.b169 + m.x630 <= 0)
m.c1391 = Constraint(expr= - m.b170 + m.x631 <= 0)
m.c1392 = Constraint(expr= - m.b171 + m.x632 <= 0)
m.c1393 = Constraint(expr= - m.b172 + m.x633 <= 0)
m.c1394 = Constraint(expr= - m.b173 + m.x634 <= 0)
m.c1395 = Constraint(expr= - m.b174 + m.x635 <= 0)
m.c1396 = Constraint(expr= - m.b175 + m.x636 <= 0)
m.c1397 = Constraint(expr= - m.b176 + m.x637 <= 0)
m.c1398 = Constraint(expr= - m.b177 + m.x638 <= 0)
m.c1399 = Constraint(expr= - m.b178 + m.x639 <= 0)
m.c1400 = Constraint(expr= - m.b179 + m.x640 <= 0)
m.c1401 = Constraint(expr= - m.b180 + m.x641 <= 0)
m.c1402 = Constraint(expr= - m.b181 + m.x642 <= 0)
m.c1403 = Constraint(expr= - m.b182 + m.x643 <= 0)
m.c1404 = Constraint(expr= - m.b183 + m.x644 <= 0)
m.c1405 = Constraint(expr= - m.b184 + m.x645 <= 0)
m.c1406 = Constraint(expr= - m.b185 + m.x646 <= 0)
m.c1407 = Constraint(expr= - m.b186 + m.x647 <= 0)
m.c1408 = Constraint(expr= - m.b187 + m.x648 <= 0)
m.c1409 = Constraint(expr= - m.b188 + m.x649 <= 0)
m.c1410 = Constraint(expr= - m.b189 + m.x650 <= 0)
m.c1411 = Constraint(expr= - m.b190 + m.x651 <= 0)
m.c1412 = Constraint(expr= - m.b191 + m.x652 <= 0)
m.c1413 = Constraint(expr= - m.b192 + m.x653 <= 0)
m.c1414 = Constraint(expr= - m.b193 + m.x654 <= 0)
m.c1415 = Constraint(expr= - m.b194 + m.x655 <= 0)
m.c1416 = Constraint(expr= - m.b195 + m.x656 <= 0)
m.c1417 = Constraint(expr= - m.b196 + m.x657 <= 0)
m.c1418 = Constraint(expr= - m.b197 + m.x658 <= 0)
m.c1419 = Constraint(expr= - m.b198 + m.x659 <= 0)
m.c1420 = Constraint(expr= - m.b199 + m.x660 <= 0)
m.c1421 = Constraint(expr= - m.b200 + m.x661 <= 0)
m.c1422 = Constraint(expr= - m.b201 + m.x662 <= 0)
m.c1423 = Constraint(expr= - m.b202 + m.x663 <= 0)
m.c1424 = Constraint(expr= - m.b203 + m.x664 <= 0)
m.c1425 = Constraint(expr= - m.b204 + m.x665 <= 0)
m.c1426 = Constraint(expr= - m.b205 + m.x666 <= 0)
m.c1427 = Constraint(expr= - m.b206 + m.x667 <= 0)
m.c1428 = Constraint(expr= - m.b207 + m.x668 <= 0)
m.c1429 = Constraint(expr= - m.b208 + m.x669 <= 0)
m.c1430 = Constraint(expr= - m.b209 + m.x670 <= 0)
m.c1431 = Constraint(expr= - m.b210 + m.x671 <= 0)
m.c1432 = Constraint(expr= - m.b211 + m.x672 <= 0)
m.c1433 = Constraint(expr= - m.b212 + m.x673 <= 0)
m.c1434 = Constraint(expr= - m.b213 + m.x674 <= 0)
m.c1435 = Constraint(expr= - m.b214 + m.x675 <= 0)
m.c1436 = Constraint(expr= - m.b215 + m.x676 <= 0)
m.c1437 = Constraint(expr= - m.b216 + m.x677 <= 0)
m.c1438 = Constraint(expr= - m.b217 + m.x678 <= 0)
m.c1439 = Constraint(expr= - m.b218 + m.x679 <= 0)
m.c1440 = Constraint(expr= - m.b219 + m.x680 <= 0)
m.c1441 = Constraint(expr= - m.b220 + m.x681 <= 0)
m.c1442 = Constraint(expr= - m.b221 + m.x682 <= 0)
m.c1443 = Constraint(expr= - m.b222 + m.x683 <= 0)
m.c1444 = Constraint(expr= - m.b223 + m.x684 <= 0)
m.c1445 = Constraint(expr= - m.b224 + m.x685 <= 0)
m.c1446 = Constraint(expr= - m.b225 + m.x686 <= 0)
m.c1447 = Constraint(expr= - m.b226 + m.x687 <= 0)
m.c1448 = Constraint(expr= - m.b227 + m.x688 <= 0)
m.c1449 = Constraint(expr= - m.b228 + m.x689 <= 0)
m.c1450 = Constraint(expr= - m.b229 + m.x690 <= 0)
m.c1451 = Constraint(expr= - m.b230 + m.x691 <= 0)
m.c1452 = Constraint(expr= - m.b231 + m.x692 <= 0)
m.c1453 = Constraint(expr= - m.b232 + m.x693 <= 0)
m.c1454 = Constraint(expr= - m.b233 + m.x694 <= 0)
m.c1455 = Constraint(expr= - m.b234 + m.x695 <= 0)
m.c1456 = Constraint(expr= - m.b235 + m.x696 <= 0)
m.c1457 = Constraint(expr= - m.b236 + m.x697 <= 0)
m.c1458 = Constraint(expr= - m.b237 + m.x698 <= 0)
m.c1459 = Constraint(expr= - m.b238 + m.x699 <= 0)
m.c1460 = Constraint(expr= - m.b239 + m.x700 <= 0)
m.c1461 = Constraint(expr= - m.b240 + m.x701 <= 0)
m.c1462 = Constraint(expr= - m.b241 + m.x702 <= 0)
m.c1463 = Constraint(expr= - m.b242 + m.x703 <= 0)
m.c1464 = Constraint(expr= - m.b243 + m.x704 <= 0)
m.c1465 = Constraint(expr= - m.b244 + m.x705 <= 0)
m.c1466 = Constraint(expr= - m.b245 + m.x706 <= 0)
m.c1467 = Constraint(expr= - m.b246 + m.x707 <= 0)
m.c1468 = Constraint(expr= - m.b247 + m.x708 <= 0)
m.c1469 = Constraint(expr= - m.b248 + m.x709 <= 0)
m.c1470 = Constraint(expr= - m.b249 + m.x710 <= 0)
m.c1471 = Constraint(expr= - m.b250 + m.x711 <= 0)
m.c1472 = Constraint(expr= - m.b251 + m.x712 <= 0)
m.c1473 = Constraint(expr= - m.b252 + m.x713 <= 0)
m.c1474 = Constraint(expr= - m.b253 + m.x714 <= 0)
m.c1475 = Constraint(expr= - m.b254 + m.x715 <= 0)
m.c1476 = Constraint(expr= - m.b255 + m.x716 <= 0)
m.c1477 = Constraint(expr= - m.b256 + m.x717 <= 0)
m.c1478 = Constraint(expr= - m.b257 + m.x718 <= 0)
m.c1479 = Constraint(expr= - m.b258 + m.x719 <= 0)
m.c1480 = Constraint(expr= - m.b259 + m.x720 <= 0)
m.c1481 = Constraint(expr= - m.b260 + m.x721 <= 0)
m.c1482 = Constraint(expr= - m.b261 + m.x722 <= 0)
m.c1483 = Constraint(expr= - m.b262 + m.x723 <= 0)
m.c1484 = Constraint(expr= - m.b263 + m.x724 <= 0)
m.c1485 = Constraint(expr= - m.b264 + m.x725 <= 0)
m.c1486 = Constraint(expr= - m.b265 + m.x726 <= 0)
m.c1487 = Constraint(expr= - m.b266 + m.x727 <= 0)
m.c1488 = Constraint(expr= - m.b267 + m.x728 <= 0)
m.c1489 = Constraint(expr= - m.b268 + m.x729 <= 0)
m.c1490 = Constraint(expr= - m.b269 + m.x730 <= 0)
m.c1491 = Constraint(expr= - m.b270 + m.x731 <= 0)
m.c1492 = Constraint(expr= - m.b271 + m.x732 <= 0)
m.c1493 = Constraint(expr= - m.b272 + m.x733 <= 0)
m.c1494 = Constraint(expr= - m.b273 + m.x734 <= 0)
m.c1495 = Constraint(expr= - m.b274 + m.x735 <= 0)
m.c1496 = Constraint(expr= - m.b275 + m.x736 <= 0)
m.c1497 = Constraint(expr= - m.b276 + m.x737 <= 0)
m.c1498 = Constraint(expr= - m.b277 + m.x738 <= 0)
m.c1499 = Constraint(expr= - m.b278 + m.x739 <= 0)
m.c1500 = Constraint(expr= - m.b279 + m.x740 <= 0)
m.c1501 = Constraint(expr= - m.b280 + m.x741 <= 0)
m.c1502 = Constraint(expr= - m.b281 + m.x742 <= 0)
m.c1503 = Constraint(expr= - m.b282 + m.x743 <= 0)
m.c1504 = Constraint(expr= - m.b283 + m.x744 <= 0)
m.c1505 = Constraint(expr= - m.b284 + m.x745 <= 0)
m.c1506 = Constraint(expr= - m.b285 + m.x746 <= 0)
m.c1507 = Constraint(expr= - m.b286 + m.x747 <= 0)
m.c1508 = Constraint(expr= - m.b287 + m.x748 <= 0)
m.c1509 = Constraint(expr= - m.b288 + m.x749 <= 0)
m.c1510 = Constraint(expr= - m.b289 + m.x750 <= 0)
m.c1511 = Constraint(expr= - m.b290 + m.x751 <= 0)
m.c1512 = Constraint(expr= - m.b291 + m.x752 <= 0)
m.c1513 = Constraint(expr= - m.b292 + m.x753 <= 0)
m.c1514 = Constraint(expr= - m.b293 + m.x754 <= 0)
self.walls[(Num_layer ) * 6 + 4]) / 3
self.Vec_12[Num_layer].pos = self.walls[(Num_layer) * 6 + 0] + self.walls[(Num_layer) * 6 + 1]
self.Vec_12[Num_layer].axis = vector.norm(self.walls[(Num_layer) * 6 + 2] + self.walls[(Num_layer) * 6 + 3] - (
self.walls[(Num_layer) * 6 + 0] + self.walls[(Num_layer) * 6 + 1])) * self.spr_distance[Num_layer]
self.Vec_23[Num_layer].pos = self.walls[(Num_layer) * 6 + 2] + self.walls[(Num_layer) * 6 + 3]
self.Vec_23[Num_layer].axis = vector.norm(self.walls[(Num_layer) * 6 + 4] + self.walls[(Num_layer) * 6 + 5] - (
self.walls[(Num_layer) * 6 + 2] + self.walls[(Num_layer) * 6 + 3])) * self.spr_distance[Num_layer]
self.Vec_31[Num_layer].pos = self.walls[(Num_layer) * 6 + 4] + self.walls[(Num_layer) * 6 + 5]
self.Vec_31[Num_layer].axis = vector.norm(self.walls[(Num_layer) * 6 + 0] + self.walls[(Num_layer) * 6 + 1] - (
self.walls[(Num_layer) * 6 + 4] + self.walls[(Num_layer) * 6 + 5])) * self.spr_distance[Num_layer]
self.top_cycle[Num_layer].pos = (self.Vec_12[Num_layer].pos + self.Vec_23[Num_layer].pos + self.Vec_31[
Num_layer].pos) / 3
self.top_cycle[Num_layer].axis = vector.cross(self.Vec_23[Num_layer].axis, self.Vec_12[Num_layer].axis)
def add_one_layer_on_top(self):
Num_layer = int(self.walls.__len__() / 6)
spring1_pos = self.walls[(Num_layer - 1) * 6 + 0]
spring1_axis = self.walls[(Num_layer - 1) * 6 + 1]
spring1_end = spring1_pos + spring1_axis
spring2_pos = self.walls[(Num_layer - 1) * 6 + 2]
spring2_axis = self.walls[(Num_layer - 1) * 6 + 3]
spring2_end = spring2_pos + spring2_axis
spring3_pos = self.walls[(Num_layer - 1) * 6 + 4]
spring3_axis = self.walls[(Num_layer - 1) * 6 + 5]
spring3_end = spring3_pos + spring3_axis
initial_vec = vector.cross((spring3_end - spring1_end), (spring2_end - spring1_end)).hat * self.ini_len # Problem
Num_layer_new = int(self.walls.__len__() / 6)+1
if self.spr_distance[Num_layer] == self.spr_distance[Num_layer - 1]:
if self.shift_angle[Num_layer] == 0:
self.walls.append(spring1_end)
self.walls.append(initial_vec)
self.walls.append(spring2_end)
self.walls.append(initial_vec)
self.walls.append(spring3_end)
self.walls.append(initial_vec)
self.spring_len.append([self.ini_len, self.ini_len, self.ini_len])
else:
Rotation_angle = self.shift_angle[Num_layer]
N1, N2, N3 = swift_three_points(spring1_end, spring2_end, spring3_end, Rotation_angle, initial_vec.hat)
self.walls.append(N1)
self.walls.append(initial_vec)
self.walls.append(N2)
self.walls.append(initial_vec)
self.walls.append(N3)
self.walls.append(initial_vec)
self.spring_len.append([self.ini_len, self.ini_len, self.ini_len])
else:
top_center_point = self.get_top_center_positions()
new_start_point1 = (spring1_end - top_center_point).hat * self.spr_distance[Num_layer] / np.sqrt(3) + top_center_point
new_start_point2 = (spring2_end - top_center_point).hat * self.spr_distance[Num_layer] / np.sqrt(3) + top_center_point
new_start_point3 = (spring3_end - top_center_point).hat * self.spr_distance[Num_layer] / np.sqrt(3) + top_center_point
if self.shift_angle[Num_layer] == 0:
self.walls.append(new_start_point1)
self.walls.append(initial_vec)
self.walls.append(new_start_point2)
self.walls.append(initial_vec)
self.walls.append(new_start_point3)
self.walls.append(initial_vec)
self.spring_len.append([self.ini_len, self.ini_len, self.ini_len])
else:
Rotation_angle = self.shift_angle[Num_layer]
N1, N2, N3 = swift_three_points(new_start_point1, new_start_point2, new_start_point3, Rotation_angle,
initial_vec.hat)
self.walls.append(N1)
self.walls.append(initial_vec)
self.walls.append(N2)
self.walls.append(initial_vec)
self.walls.append(N3)
self.walls.append(initial_vec)
self.spring_len.append([self.ini_len, self.ini_len, self.ini_len])
self.spring1[len(self.spring1):] = [helix(pos=self.walls[(Num_layer_new - 1) * 6 + 0], # (x1, y1, 0),
axis=self.walls[(Num_layer_new - 1) * 6 + 1],
# vector(0, 0, self.spr_len),
thickness=self.spr_len_thickness, coils=self.spr_coils,
radius=self.spr_radius, color=self.spr_color_vec,
opacity=self.lateral_opacity)]
self.spring2[len(self.spring2):] = [helix(pos=self.walls[(Num_layer_new - 1) * 6 + 2], # vector(x2, y2, 0),
axis=self.walls[(Num_layer_new - 1) * 6 + 3],
# vector(0, 0, self.spr_len),
thickness=self.spr_len_thickness, coils=self.spr_coils,
radius=self.spr_radius, color=self.spr_color_vec,
opacity=self.lateral_opacity)]
self.spring3[len(self.spring3):] = [helix(pos=self.walls[(Num_layer_new - 1) * 6 + 4], # vector(x3, y3, 0),
axis=self.walls[(Num_layer_new - 1) * 6 + 5],
# vector(0, 0, self.spr_len),
thickness=self.spr_len_thickness, coils=self.spr_coils,
radius=self.spr_radius, color=self.spr_color_vec,
opacity=self.lateral_opacity)]
# self.springC[len(self.springC):] = [helix(pos = ((self.walls[(Num_layer_new - 1)*6 + 0] +
# self.walls[(Num_layer_new - 1)*6 + 2] +
# self.walls[(Num_layer_new - 1)*6 + 4] )/3),
# axis = ((self.walls[(Num_layer_new - 1)*6 + 1] +
# self.walls[(Num_layer_new - 1)*6 + 3] +
# self.walls[(Num_layer_new - 1)*6 + 5] )/3),
# thickness = self.spr_len_thickness, coils = self.spr_coils,
# radius = self.spr_radius, color = vec(19/255, 153/255, 36/255) )]
self.Vec_12[len(self.Vec_12):] = [
cylinder(pos=self.walls[(Num_layer_new - 1) * 6 + 0] + self.walls[(Num_layer_new - 1) * 6 + 1],
axis=vector.norm(
self.walls[(Num_layer_new - 1) * 6 + 2] + self.walls[(Num_layer_new - 1) * 6 + 3] - (
self.walls[(Num_layer_new - 1) * 6 + 0] + self.walls[
(Num_layer_new - 1) * 6 + 1])) * self.spr_distance[Num_layer_new - 1],
radius=self.spr_len_thickness, color=self.spr_color_vec, opacity=self.opacity - 0.55)]
self.Vec_23[len(self.Vec_23):] = [
cylinder(pos=self.walls[(Num_layer_new - 1) * 6 + 2] + self.walls[(Num_layer_new - 1) * 6 + 3],
axis=vector.norm(
self.walls[(Num_layer_new - 1) * 6 + 4] + self.walls[(Num_layer_new - 1) * 6 + 5] - (
self.walls[(Num_layer_new - 1) * 6 + 2] + self.walls[
(Num_layer_new - 1) * 6 + 3])) * self.spr_distance[Num_layer_new - 1],
radius=self.spr_len_thickness, color=self.spr_color_vec, opacity=self.opacity - 0.55)]
self.Vec_31[len(self.Vec_31):] = [
cylinder(pos=self.walls[(Num_layer_new - 1) * 6 + 4] + self.walls[(Num_layer_new - 1) * 6 + 5],
axis=vector.norm(
self.walls[(Num_layer_new - 1) * 6 + 0] + self.walls[(Num_layer_new - 1) * 6 + 1] - (
self.walls[(Num_layer_new - 1) * 6 + 4] + self.walls[
(Num_layer_new - 1) * 6 + 5])) * self.spr_distance[Num_layer_new - 1],
radius=self.spr_len_thickness, color=self.spr_color_vec, opacity=self.opacity - 0.55)]
ring_rad = self.spr_distance[Num_layer_new - 1] / np.sqrt(3)
self.top_cycle[len(self.top_cycle):] = [ring(pos=(self.Vec_12[len(self.Vec_12) - 1].pos + self.Vec_23[
len(self.Vec_23) - 1].pos + self.Vec_31[len(self.Vec_31) - 1].pos) / 3,
axis=vector.cross(self.Vec_23[len(self.Vec_23) - 1].axis,
self.Vec_12[len(self.Vec_12) - 1].axis),
radius=ring_rad, thickness=self.spr_len_thickness,
color=self.spr_color_vec, opacity=self.lateral_opacity)]
#if (len(self.Vec_31) - 1) % self.module_num == 0:
# self.top_cycle[len(self.top_cycle) - 1].color = vec(0 / 255, 102 / 255, 0 / 255)
# self.update_top_segment()
def add_one_module_on_top(self, initial_len):
dt = initial_len / self.module_num
for i in range(self.module_num):
self.add_one_layer_on_top()
self.increase_all_sides(dt)
def increase_one_side(self, stepLength, SprNum):
# only increase the top layer
print("start to increase spring{0}".format(SprNum))
Num_layer = int(self.walls.__len__() / 6)
spring1_pos = self.walls[(Num_layer - 1) * 6 + 0]
spring1_axis = self.walls[(Num_layer - 1) * 6 + 1]
spring1_len = spring1_axis.mag
spring1_end = spring1_pos + spring1_axis
spring2_pos = self.walls[(Num_layer - 1) * 6 + 2]
spring2_axis = self.walls[(Num_layer - 1) * 6 + 3]
spring2_len = spring2_axis.mag
spring2_end = spring2_pos + spring2_axis
spring3_pos = self.walls[(Num_layer - 1) * 6 + 4]
spring3_axis = self.walls[(Num_layer - 1) * 6 + 5]
spring3_len = spring3_axis.mag
spring3_end = spring3_pos + spring3_axis
spring1_len = self.spring_len[Num_layer - 1][0]
spring2_len = self.spring_len[Num_layer - 1][1]
spring3_len = self.spring_len[Num_layer - 1][2]
'''
vec1_to_2 = spring2_end - spring1_end
vec2_to_3 = spring3_end - spring2_end
vec3_to_1 = spring1_end - spring3_end
center_top = (spring1_end + spring2_end + spring3_end)/3
vec_bottom_unit = vector.cross((spring3_end - spring1_end), (spring2_end - spring1_end)).norm()
vec_top_unit = vector.cross((spring3_end - spring1_end), (spring2_end - spring1_end)).norm()
'''
# rotation_angle = math.asin(stepLength/2/self.spr_distance)*2
if (SprNum == 1):
spring1_len = spring1_len + stepLength
# spring1_end_tran = spring1_end - spring2_end
# rotate(spring1_end_tran, angle = rotation_angle, axis = vec2_to_3 )
elif (SprNum == 2):
spring2_len = spring2_len + stepLength
else: # SprNum == 3
spring3_len = spring3_len + stepLength
max_len = max(spring1_len, spring2_len, spring3_len)
min_len = min(spring1_len, spring2_len, spring3_len)
dif_len = max(spring1_len - min_len, spring2_len - min_len, spring3_len - min_len)
if self.max_len < max_len and self.max_dif_len < dif_len:
print("spring length out of limitation in Func: increase_one_side")
return 0 # indicate no grow
switch_num = switch_for_cal_vec(spring1_len, spring2_len, spring3_len)
# print("switch_num = {}".format(switch_num))
spring_len_array = [spring1_len, spring2_len, spring3_len]
spring_pos_array = [spring1_pos, spring2_pos, spring3_pos]
spring_len_after_switch = np.roll(spring_len_array, switch_num)
spring_pos_after_switch = np.roll(spring_pos_array, switch_num)
list_spring_unswitch_axix = calculation_spring_vector(spring_pos_after_switch[0], spring_pos_after_switch[1],
spring_pos_after_switch[2], spring_len_after_switch[0],
spring_len_after_switch[1], spring_len_after_switch[2],
self.spr_distance[Num_layer])
list_spring_axix = np.roll(list_spring_unswitch_axix, switch_num * -1)
self.walls[(Num_layer - 1) * 6 + 1] = list_spring_axix[0]
self.walls[(Num_layer - 1) * 6 + 3] = list_spring_axix[1]
self.walls[(Num_layer - 1) * 6 + 5] = list_spring_axix[2]
self.update_top_segment()
if (SprNum == 1):
self.spring_len[Num_layer - 1][0] = spring1_len # spring1_len already includes stepLength added above
elif (SprNum == 2):
self.spring_len[Num_layer - 1][1] = spring2_len
else: # SprNum == 3
self.spring_len[Num_layer - 1][2] = spring3_len
return 1
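# Note on increase_one_side: the spring length/position arrays are rolled by switch_num
# before calling calculation_spring_vector and rolled back by -switch_num afterwards,
# so the helper always sees the three springs in the canonical order chosen by switch_for_cal_vec.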
def increase_all_sides(self, stepLength):
# increase only top layer
Num_layer = int(self.walls.__len__() / 6)
spring1_pos = self.walls[(Num_layer - 1) * 6 + 0]
spring1_axis = self.walls[(Num_layer - 1) * 6 + 1]
spring1_len = spring1_axis.mag
spring1_end = spring1_pos + spring1_axis
spring2_pos = self.walls[(Num_layer - 1) * 6 + 2]
spring2_axis = self.walls[(Num_layer - 1) * 6 + 3]
spring2_len = spring2_axis.mag
spring2_end = spring2_pos + spring2_axis
spring3_pos = self.walls[(Num_layer - 1) * 6 + 4]
spring3_axis = self.walls[(Num_layer - 1) * 6 + 5]
spring3_len = spring3_axis.mag
spring3_end = spring3_pos + spring3_axis
[spring1_len, spring2_len, spring3_len] = self.get_spring_len(Num_layer - 1)
vec = spring1_axis.hat
max_len = max(spring1_len + stepLength, spring2_len + stepLength, spring3_len + stepLength)
if self.max_len < max_len:
return 0 # indicate no grow
spring1_axis_new = vec * (spring1_len + stepLength)
spring2_axis_new = vec * (spring2_len + stepLength)
spring3_axis_new = vec * (spring3_len + stepLength)
self.walls[(Num_layer - 1) * 6 + 1] = spring1_axis_new
self.walls[(Num_layer - 1) * 6 + 3] = spring2_axis_new
self.walls[(Num_layer - 1) * 6 + 5] = spring3_axis_new
self.spring_len[(Num_layer - 1)] = [spring1_len + stepLength, spring2_len + stepLength, spring3_len + stepLength]
import os, datetime, mimetypes, re, inspect, time, logging
try:
from PIL import Image
except ImportError:
import Image
from base64 import b64encode, b64decode
from string import maketrans
from tarfile import TarFile
from django.core.cache import cache
from django.utils.translation import ugettext as _
from elfinder.exceptions import ElfinderErrorMessages, FileNotFoundError, DirNotFoundError, PermissionDeniedError, NamedError, NotAnImageError
from elfinder.utils.archivers import ZipFileArchiver
class ElfinderVolumeDriver(object):
"""
The base volume driver. Every elfinder volume driver should subclass
this volume.
"""
#The driver id.
#Must start with a letter and contain only [a-z0-9]
#Used as part of volume id
_driver_id = 'a'
#Directory separator - required by the client
_separator = os.sep
#*********************************************************************#
#* INITIALIZATION *#
#*********************************************************************#
def __init__(self):
"""
Default constructor
"""
# files is in type key file value type
self._files = {}
# key label
self._key_label = ''
#logger
self.logger = logging.getLogger(__name__)
#Volume id - used as prefix for files hashes
self._id = ''
#Flag - volume "mounted" and available
self._mounted = False
#Root directory path
self._root = ''
#Root basename | alias
self._root_name = ''
#Default directory to open
self._start_path = ''
#Store moved or overwrited files info
self._removed = []
#Is thumbnails dir writable
self._tmb_path_writable = False
#Today 24:00 timestamp
self._today = 0
#Yesterday 24:00 timestamp
self._yesterday = 0
#list of attributes
self._attributes = []
#Default permissions
self._defaults = {}
#Archivers config
self._archivers = {
'create' : {},
'extract' : {}
}
#Object configuration
self._options = {
'id' : '',
#root directory path
'path' : '/',
#alias to replace root dir_ name
'alias' : '',
#root url, not set to disable sending URL to client (replacement for old "fileURL" option)
'URL' : '',
#open this path on initial request instead of root path
'startPath' : '',
#how many subdirs levels return per request
'treeDeep' : 1,
#directory separator. required by client to show paths correctly
'separator' : os.sep,
#directory for thumbnails
'tmbPath' : '.tmb',
#thumbnails dir URL. Set it if store thumbnails outside root directory
'tmbURL' : '',
#thumbnails size (px)
'tmbSize' : 48,
#thumbnails crop (True - crop, False - scale image to fit thumbnail size)
'tmbCrop' : True,
#thumbnails background color (hex #rrggbb or 'transparent')
'tmbBgColor' : '#ffffff',
#on paste file - if True - old file will be replaced with new one, if False new file get name - original_name-number.ext
'copyOverwrite' : True,
#if True - join new and old directories content on paste
'copyJoin' : True,
#on upload - if True - old file will be replaced with new one, if False new file get name - original_name-number.ext
'uploadOverwrite' : True,
#filter mime types to allow
'onlyMimes' : [],
#mimetypes allowed to upload
'uploadAllow' : [],
#mimetypes not allowed to upload
'uploadDeny' : [],
#order to proccess uploadAllow and uploadDeny options
'uploadOrder' : ['deny', 'allow'],
#maximum upload file size. Set as number or string with unit - "10M", "500K", "1G". NOTE - applies to each uploaded file individually
'uploadMaxSize' : 0,
#files dates format. CURRENTLY NOT IMPLEMENTED
'dateFormat' : 'j M Y H:i',
#files time format. CURRENTLY NOT IMPLEMENTED
'timeFormat' : 'H:i',
#if True - every folder will be check for children folders, otherwise all folders will be marked as having subfolders
'checkSubfolders' : True,
#allow to copy from this volume to other ones?
'copyFrom' : True,
#allow to copy from other volumes to this one?
'copyTo' : True,
#list of commands disabled on this root
'disabled' : [],
#regexp or function name to validate new file name
'acceptedName' : r'^[^\.].*', #<-- DONT touch this! Use constructor options to overwrite it!
#callable to control file permissions
'accessControl' : None,
# allow rmDir
'rmDir' : None,
#default permissions. Do not set hidden/locked here - take no effect
'defaults' : {
'read' : True,
'write' : True
},
#a list of dictionaries, each defining a 'pattern' and values for at
#least one of the 'hidden', 'locked', 'read' and 'write' attributes for this pattern
'attributes' : [],
#quarantine folder name - required to check archive (must be hidden)
'quarantine' : '.quarantine',
#Allowed archive's mimetypes to create. Leave empty for all available types.
'archiveMimes' : [],
#Manual config for archivers.
'archivers' : {},
#max allowed archive files size (0 - no limit)
'archiveMaxSize' : 0,
#seconds to cache the file and dir data used by the driver
'cache' : 600
}
#*********************************************************************#
#* PUBLIC API *#
#*********************************************************************#
def name(self):
"""
Return the driver name.
"""
return self.__class__.__name__[len('elfindervolume'):].lower()
def driver_id(self):
"""
Return the driver id. Used as a part of volume id.
"""
return self._driver_id
def id(self):
"""
Return volume id.
"""
return self._id
def debug(self):
"""
Return debug info for client. The returned dictionary contains
the following keys:
:id: the volume id
:name: the volume name
"""
return {
'id' : self.id(),
'name' : self.name(),
}
def mount(self, opts):
"""
"Mount" volume. Return ``True`` if volume available for read
or write, ``False`` otherwise.
It is common for drivers to override this method.
"""
self._options.update(opts)
if self._options['id']:
self._id = '%s%s_' % (self._driver_id, self._options['id'])
else:
raise Exception(_('No volume id found'))
self._root = self._normpath(unicode(self._options['path']))
self._separator = self._options['separator'] if 'separator' in self._options else os.sep
#default file attribute
self._defaults = {
'read' : self._options['defaults']['read'] if 'read' in self._options['defaults'] else True,
'write' : self._options['defaults']['write'] if 'write' in self._options['defaults'] else True,
'locked' : False,
'hidden' : False
}
#root attributes
self._attributes.insert(0, {
'pattern' : '^%s$' % re.escape(self._separator),
'locked' : True,
'hidden' : False
})
#set files attributes
for a in self._options['attributes']:
#attributes must contain pattern and at least one rule
if 'pattern' in a and len(a) > 1:
self._attributes.append(a)
#assign some options to private members
self._today = time.mktime(datetime.date.today().timetuple())
self._yesterday = self._today-86400
#set uploadMaxSize, archiveMaxSize
units = {
'k' : 1024,
'm' : 1048576,
'g' : 1073741824,
'b' : 1
}
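# The loop below converts 'uploadMaxSize'/'archiveMaxSize' strings such as '10M' into bytes
# (10 * 1048576 = 10485760); values with an unknown unit suffix or a non-numeric type fall back to 0 (no limit).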
for opt in ['uploadMaxSize', 'archiveMaxSize']:
if not isinstance(self._options[opt], int):
try:
self._options[opt] = int(self._options[opt][:-1]) * units[self._options[opt][-1].lower()]
except (TypeError, KeyError):
self._options[opt] = 0
self._root_name = self._basename(self._root) if not self._options['alias'] else self._options['alias']
try:
root = self.stat(self._root)
except os.error:
raise DirNotFoundError
if (not 'read' in root or not root['read']) and (not 'write' in root or not root['write']):
raise PermissionDeniedError
if 'read' in root and root['read']:
#check startPath - path to open by default instead of root
if self._options['startPath']:
try:
startpath = self._join_path(self._root, self._options['startPath'])
start = self.stat(startpath)
if start['mime'] == 'directory' and start['read'] and not self._is_hidden(start):
self._start_path = self._normpath(startpath)
except os.error:
#Fail silently if startPath does not exist
pass
else:
self._options['URL'] = ''
self._options['tmbURL'] = ''
self._options['tmbPath'] = ''
#read only volume
self._attributes.insert(0, {
'pattern' : r'.*',
'read' : False
})
self._options['URL'] = self._urlize(self._options['URL'])
self._options['tmbURL'] = self._urlize(self._options['tmbURL'])
self._checkArchivers()
#add quarantine folder to locked and hidden patterns
self._attributes.append({
'pattern' : '^%s$' % re.escape('%s%s' % (self._separator, self._options['quarantine'])),
'read' : False,
'write' : False,
'locked' : True,
'hidden': True
})
#check quarantine dir
if self._options['quarantine']:
self._quarantine = self._join_path(self._root, self._options['quarantine'])
isdir = os.path.isdir(self._quarantine)
if not self._options['quarantine'] or (isdir and not os.access(self._quarantine, os.W_OK)):
self._archivers['extract'] = {}
self._options['disabled'].append('extract')
elif self._options['quarantine'] and not isdir:
try:
os.mkdir(self._quarantine)
except os.error:
self._archivers['extract'] = {}
self._options['disabled'].append('extract')
self._configure()
self._mounted = True
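# Usage sketch (illustrative values only, not defaults from any particular project):
# a concrete driver subclass is mounted with an options dict, e.g.
#   volume.mount({'id': '1', 'path': '/srv/files', 'URL': '/media/files/'})
# after which self._root, self._attributes and the archiver configuration are populated.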
def _configure(self):
"""
Configure after successful mount. The default implementation
sets the thumbnail path.
It is common for drivers to override this method.
"""
#set thumbnails path
if self._options['tmbPath']:
path = self._join_path(self._root, self._options['tmbPath'])
self._attributes.append({
'pattern' : '^%s$' % re.escape('%s%s' % (self._separator, self._relpath(path))),
'locked' : True,
'hidden' : True
})
try:
stat = self.stat(path)
except os.error:
try:
self._mkdir(path=path)
stat = self.stat(path)
except os.error:
stat = None
if stat and stat['mime'] == 'directory' and stat['read']:
self._options['tmbPath'] = path
self._tmb_path_writable = stat['write']
else:
self._options['tmbPath'] = ''
def unmount(self):
"""
The unmunt method is currently not used, but in the future it
might be useful to some drivers.
"""
pass
def default_path(self):
"""
Return volume root or startPath hash.
"""
return self.encode(self._start_path if self._start_path else self._root)
r"""Downloads and converts Market1501 data to TFRecords of TF-Example protos.
This module downloads the Market1501 data, uncompresses it, reads the files
that make up the Market1501 data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import tensorflow as tf
try:
import dataset_utils
except:
from datasets import dataset_utils
import numpy as np
import pickle
import pdb
import glob
# The URL where the Market1501 data can be downloaded.
# _DATA_URL = 'xxxxx'
# The number of images in the validation set.
# _NUM_VALIDATION = 350
# Seed for repeatability.
_RANDOM_SEED = 0
random.seed(_RANDOM_SEED)
# The number of shards per dataset split.
_NUM_SHARDS = 1
_IMG_PATTERN = '.jpg'
class ImageReader(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def read_image_dims(self, sess, image_data):
image = self.decode_jpeg(sess, image_data)
return image.shape[0], image.shape[1]
def decode_jpeg(self, sess, image_data):
image = sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
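# Usage sketch (illustrative only, not part of the original script):
#   with tf.Session() as sess:
#       reader = ImageReader()
#       image_data = tf.gfile.FastGFile('/path/to/image.jpg', 'rb').read()
#       height, width = reader.read_image_dims(sess, image_data)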
def _get_folder_path(dataset_dir, split_name):
if split_name == 'train':
folder_path = os.path.join(dataset_dir, 'bounding_box_train')
elif split_name == 'train_flip':
folder_path = os.path.join(dataset_dir, 'bounding_box_train_flip')
elif split_name == 'test':
folder_path = os.path.join(dataset_dir, 'bounding_box_test')
elif split_name == 'test_samples':
folder_path = os.path.join(dataset_dir, 'bounding_box_test_samples')
elif split_name == 'all':
folder_path = os.path.join(dataset_dir, 'bounding_box_all')
elif split_name == 'query':
folder_path = os.path.join(dataset_dir, 'query')
assert os.path.isdir(folder_path)
return folder_path
def _get_image_file_list(dataset_dir, split_name):
folder_path = _get_folder_path(dataset_dir, split_name)
if split_name == 'train' or split_name == 'train_flip' or split_name == 'test_samples' or split_name == 'query' or split_name == 'all':
filelist = sorted(os.listdir(folder_path))
# filelist = glob.glob(os.path.join(folder_path, _IMG_PATTERN)) # glob will return full path
# pdb.set_trace()
filelist = sorted(filelist)
elif split_name == 'test':
filelist = sorted(os.listdir(folder_path))[6617:] # before 6617 are junk detections
# filelist = glob.glob(os.path.join(folder_path, _IMG_PATTERN))
# filelist = sorted(filelist)[6617:]
elif split_name == 'test_clean':
filelist = sorted(os.listdir(folder_path)) # before 6617 are junk detections
# Remove non-jpg files
valid_filelist = []
for i in xrange(0, len(filelist)):
if filelist[i].endswith('.jpg') or filelist[i].endswith('.png'):
valid_filelist.append(filelist[i])
return valid_filelist
def _get_dataset_filename(dataset_dir, out_dir, split_name, shard_id):
output_filename = 'Market1501_%s_%05d-of-%05d.tfrecord' % (
split_name.split('_')[0], shard_id, _NUM_SHARDS)
return os.path.join(out_dir, output_filename)
def _get_train_all_pn_pairs(dataset_dir, out_dir, split_name='train', augment_ratio=1, mode='diff_cam',add_switch_pair=True):
"""Returns a list of pair image filenames.
Args:
dataset_dir: A directory containing person images.
Returns:
p_pairs: A list of positive pairs.
n_pairs: A list of negative pairs.
"""
assert split_name in {'train', 'train_flip', 'test', 'test_samples', 'all'}
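# The id/camera slicing below assumes the Market-1501 naming convention,
# e.g. '0002_c1s1_000451_03.jpg' -> person id = chars [0:4] ('0002'), camera = char [6] ('1').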
if split_name=='train_flip':
p_pairs_path = os.path.join(out_dir, 'p_pairs_train_flip.p')
n_pairs_path = os.path.join(out_dir, 'n_pairs_train_flip.p')
else:
p_pairs_path = os.path.join(out_dir, 'p_pairs_'+split_name.split('_')[0]+'.p')
n_pairs_path = os.path.join(out_dir, 'n_pairs_'+split_name.split('_')[0]+'.p')
if os.path.exists(p_pairs_path):
with open(p_pairs_path,'r') as f:
p_pairs = pickle.load(f)
with open(n_pairs_path,'r') as f:
n_pairs = pickle.load(f)
else:
filelist = _get_image_file_list(dataset_dir, split_name)
filenames = []
p_pairs = []
n_pairs = []
if 'diff_cam'==mode:
for i in xrange(0, len(filelist)):
id_i = filelist[i][0:4]
cam_i = filelist[i][6]
for j in xrange(i+1, len(filelist)):
id_j = filelist[j][0:4]
cam_j = filelist[j][6]
if id_j == id_i and cam_j != cam_i:
p_pairs.append([filelist[i],filelist[j]])
# p_pairs.append([filelist[j],filelist[i]]) # two streams share the same weights, no need switch
if len(p_pairs)%100000==0:
print(len(p_pairs))
elif j%10==0 and id_j != id_i and cam_j != cam_i: # limit the neg pairs to 1/10, otherwise it cost too much time
n_pairs.append([filelist[i],filelist[j]])
# n_pairs.append([filelist[j],filelist[i]]) # two streams share the same weights, no need switch
if len(n_pairs)%100000==0:
print(len(n_pairs))
elif 'same_cam'==mode:
for i in xrange(0, len(filelist)):
id_i = filelist[i][0:4]
cam_i = filelist[i][6]
for j in xrange(i+1, len(filelist)):
id_j = filelist[j][0:4]
cam_j = filelist[j][6]
if id_j == id_i and cam_j == cam_i:
p_pairs.append([filelist[i],filelist[j]])
# p_pairs.append([filelist[j],filelist[i]]) # two streams share the same weights, no need switch
if len(p_pairs)%100000==0:
print(len(p_pairs))
elif j%10==0 and id_j != id_i and cam_j == cam_i: # limit the neg pairs to 1/10, otherwise it cost too much time
n_pairs.append([filelist[i],filelist[j]])
# n_pairs.append([filelist[j],filelist[i]]) # two streams share the same weights, no need switch
if len(n_pairs)%100000==0:
print(len(n_pairs))
elif 'same_diff_cam'==mode:
for i in xrange(0, len(filelist)):
id_i = filelist[i][0:4]
cam_i = filelist[i][6]
for j in xrange(i+1, len(filelist)):
id_j = filelist[j][0:4]
cam_j = filelist[j][6]
if id_j == id_i:
p_pairs.append([filelist[i],filelist[j]])
if add_switch_pair:
p_pairs.append([filelist[j],filelist[i]]) # if two streams share the same weights, no need switch
if len(p_pairs)%100000==0:
print(len(p_pairs))
elif j%2000==0 and id_j != id_i: # limit the neg pairs (keep roughly 1/2000), otherwise it costs too much time
n_pairs.append([filelist[i],filelist[j]])
# n_pairs.append([filelist[j],filelist[i]]) # two streams share the same weights, no need switch
if len(n_pairs)%100000==0:
print(len(n_pairs))
print('repeat positive pairs augment_ratio times and cut down negative pairs to balance data ......')
p_pairs = p_pairs * augment_ratio
random.shuffle(n_pairs)
n_pairs = n_pairs[:len(p_pairs)]
print('p_pairs length:%d' % len(p_pairs))
print('n_pairs length:%d' % len(n_pairs))
print('save p_pairs and n_pairs ......')
with open(p_pairs_path,'w') as f:
pickle.dump(p_pairs,f)
with open(n_pairs_path,'w') as f:
pickle.dump(n_pairs,f)
print('_get_train_all_pn_pairs finish ......')
print('p_pairs length:%d' % len(p_pairs))
print('n_pairs length:%d' % len(n_pairs))
print('save pn_pairs_num ......')
pn_pairs_num = len(p_pairs) + len(n_pairs)
if split_name=='train_flip':
fpath = os.path.join(out_dir, 'pn_pairs_num_train_flip.p')
else:
fpath = os.path.join(out_dir, 'pn_pairs_num_'+split_name.split('_')[0]+'.p')
with open(fpath,'w') as f:
pickle.dump(pn_pairs_num,f)
return p_pairs, n_pairs
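# Usage sketch (illustrative only):
#   p_pairs, n_pairs = _get_train_all_pn_pairs(dataset_dir, out_dir, split_name='train', mode='same_diff_cam')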
##################### one_pair_rec ###############
import scipy.io
import scipy.stats
import skimage.morphology
from skimage.morphology import square, dilation, erosion
from PIL import Image
def _getPoseMask(peaks, height, width, radius=4, var=4, mode='Solid'):
## MSCOCO Pose part_str = [nose, neck, Rsho, Relb, Rwri, Lsho, Lelb, Lwri, Rhip, Rkne, Rank, Lhip, Lkne, Lank, Leye, Reye, Lear, Rear, pt19]
# find connection in the specified sequence, center 29 is in the position 15
# limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], \
# [10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17], \
# [1,16], [16,18], [3,17], [6,18]]
# limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], \
# [10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17], \
# [1,16], [16,18]] # , [9,12]
# limbSeq = [[3,4], [4,5], [6,7], [7,8], [9,10], \
# [10,11], [12,13], [13,14], [2,1], [1,15], [15,17], \
# [1,16], [16,18]] #
limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], \
[10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17], \
[1,16], [16,18], [2,17], [2,18], [9,12], [12,6], [9,3], [17,18]] #
indices = []
values = []
for limb in limbSeq:
p0 = peaks[limb[0] -1]
p1 = peaks[limb[1] -1]
if 0!=len(p0) and 0!=len(p1):
r0 = p0[0][1]
c0 = p0[0][0]
r1 = p1[0][1]
c1 = p1[0][0]
ind, val = _getSparseKeypoint(r0, c0, 0, height, width, radius, var, mode)
indices.extend(ind)
values.extend(val)
ind, val = _getSparseKeypoint(r1, c1, 0, height, width, radius, var, mode)
indices.extend(ind)
values.extend(val)
distance = np.sqrt((r0-r1)**2 + (c0-c1)**2)
sampleN = int(distance/radius)
# sampleN = 0
if sampleN>1:
for i in xrange(1,sampleN):
r = r0 + (r1-r0)*i/sampleN
c = c0 + (c1-c0)*i/sampleN
ind, val = _getSparseKeypoint(r, c, 0, height, width, radius, var, mode)
indices.extend(ind)
values.extend(val)
shape = [height, width, 1]
## Fill body
dense = np.squeeze(_sparse2dense(indices, values, shape))
## TODO
# im = Image.fromarray((dense*255).astype(np.uint8))
# im.save('xxxxx.png')
# pdb.set_trace()
dense = dilation(dense, square(5))
dense = erosion(dense, square(5))
return dense
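# The result is a (height, width) float array: keypoints and points sampled along each limb
# are stamped as solid discs, then a 5x5 dilation followed by erosion (morphological closing)
# fills small holes in the body region.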
Ratio_0_4 = 1.0/scipy.stats.norm(0, 4).pdf(0)
Gaussian_0_4 = scipy.stats.norm(0, 4)
def _getSparseKeypoint(r, c, k, height, width, radius=4, var=4, mode='Solid'):
r = int(r)
c = int(c)
k = int(k)
indices = []
values = []
for i in range(-radius, radius+1):
for j in range(-radius, radius+1):
distance = np.sqrt(float(i**2+j**2))
if r+i>=0 and r+i<height and c+j>=0 and c+j<width:
if 'Solid'==mode and distance<=radius:
indices.append([r+i, c+j, k])
values.append(1)
elif 'Gaussian'==mode and distance<=radius:
indices.append([r+i, c+j, k])
if 4==var:
values.append( Gaussian_0_4.pdf(distance) * Ratio_0_4 )
else:
assert False, 'Only define Ratio_0_4 Gaussian_0_4 ...'
return indices, values
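# 'Solid' mode writes 1 for every pixel within `radius` of (r, c) (a disc of roughly
# pi * radius**2, ~50 pixels for radius=4); 'Gaussian' mode weights the same disc by a
# normal pdf scaled so the centre pixel equals 1 (only var=4 is implemented).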
def _getSparsePose(peaks, height, width, channel, radius=4, var=4, mode='Solid'):
indices = []
values = []
for k in range(len(peaks)):
p = peaks[k]
if 0!=len(p):
r = p[0][1]
c = p[0][0]
ind, val = _getSparseKeypoint(r, c, k, height, width, radius, var, mode)
indices.extend(ind)
values.extend(val)
shape = [height, width, channel]
return indices, values, shape
def _oneDimSparsePose(indices, shape):
ind_onedim = []
for ind in indices:
# idx = ind[2]*shape[0]*shape[1] + ind[1]*shape[0] + ind[0]
idx = ind[0]*shape[2]*shape[1] + ind[1]*shape[2] + ind[2]
ind_onedim.append(idx)
shape = np.prod(shape)
return ind_onedim, shape
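# Flattening example: with shape [height, width, channel] = [128, 64, 18], the index
# triple [r, c, k] maps to the flat index r*64*18 + c*18 + k (row-major over h, w, c).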
def _sparse2dense(indices, values, shape):
dense = np.zeros(shape)
for i in range(len(indices)):
r = indices[i][0]
c = indices[i][1]
k = indices[i][2]
dense[r,c,k] = values[i]
return dense
def _get_valid_peaks(all_peaks, subsets):
try:
subsets = subsets.tolist()
valid_idx = -1
valid_score = -1
for
[]
# l_list = Approval.objects.filter(
# submitter=request.user,
# status='current',
# )
# for l in l_list:
# lchild = l.child_obj
# # mooring text required?
# if type(lchild) == MooringLicence:
# if Mooring.objects.filter(mooring_licence=lchild):
# mooring = Mooring.objects.filter(mooring_licence=lchild)[0]
# existing_licences.append({
# "approval_id": lchild.id,
# "current_proposal_id": lchild.current_proposal.id,
# "lodgement_number": lchild.lodgement_number,
# #"mooring": mooring.name,
# "mooring_id": mooring.id,
# #"app_type_code": lchild.code,
# #"code": 'ml_{}'.format(lchild.id),
# "code": lchild.code,
# "description": lchild.description,
# #"new_application_text": "I want to add a vessel to Mooring Licence {} on mooring {}".format(lchild.lodgement_number, mooring.name)
# "new_application_text": "I want to amend or renew my current mooring licence {}".format(lchild.lodgement_number)
# })
# else:
# existing_licences.append({
# "approval_id": lchild.id,
# "lodgement_number": lchild.lodgement_number,
# "current_proposal_id": lchild.current_proposal.id,
# #"lodgement_number": ml.lodgement_number,
# "code": lchild.code,
# "description": lchild.description,
# "new_application_text": "I want to amend or renew my current {} {}".format(lchild.description.lower(), lchild.lodgement_number)
# })
# return Response(existing_licences)
@list_route(methods=['GET',])
@basic_exception_handler
def existing_licences(self, request, *args, **kwargs):
existing_licences = []
l_list = Approval.objects.filter(
submitter=request.user,
#status='current',
status__in=['current', 'fulfilled'],
)
for l in l_list:
lchild = l.child_obj
if type(lchild) == MooringLicence:
if Mooring.objects.filter(mooring_licence=lchild):
mooring = Mooring.objects.filter(mooring_licence=lchild)[0]
existing_licences.append({
"approval_id": lchild.id,
"current_proposal_id": lchild.current_proposal.id,
"lodgement_number": lchild.lodgement_number,
"mooring_id": mooring.id,
"code": lchild.code,
"description": lchild.description,
"new_application_text": "I want to amend or renew my current mooring licence {}".format(lchild.lodgement_number)
})
else:
if lchild.amend_or_renew:
existing_licences.append({
"approval_id": lchild.id,
"lodgement_number": lchild.lodgement_number,
"current_proposal_id": lchild.current_proposal.id,
"code": lchild.code,
"description": lchild.description,
"new_application_text": "I want to amend or renew my current {} {}".format(lchild.description.lower(), lchild.lodgement_number)
})
return Response(existing_licences)
@list_route(methods=['GET'])
def holder_list(self, request, *args, **kwargs):
holder_list = self.get_queryset().values_list('submitter__id', flat=True)
print(holder_list)
distinct_holder_list = list(dict.fromkeys(holder_list))
print(distinct_holder_list)
serializer = EmailUserSerializer(EmailUser.objects.filter(id__in=distinct_holder_list), many=True)
return Response(serializer.data)
#return Response()
# @list_route(methods=['GET',])
# def filter_list(self, request, *args, **kwargs):
# """ Used by the external dashboard filters """
# region_qs = self.get_queryset().filter(current_proposal__region__isnull=False).values_list('current_proposal__region__name', flat=True).distinct()
# activity_qs = self.get_queryset().filter(current_proposal__activity__isnull=False).values_list('current_proposal__activity', flat=True).distinct()
# application_types=ApplicationType.objects.all().values_list('name', flat=True)
# data = dict(
# regions=region_qs,
# activities=activity_qs,
# approval_status_choices = [i[1] for i in Approval.STATUS_CHOICES],
# application_types=application_types,
# )
# return Response(data)
@detail_route(methods=['GET'])
@renderer_classes((JSONRenderer,))
@basic_exception_handler
def get_moorings(self, request, *args, **kwargs):
instance = self.get_object()
#serializer = ApprovalMooringSerializer(instance.mooringonapproval_set.all(), many=True)
#return Response(serializer.data)
#moorings_on_approval = instance.mooringonapproval_set.all()
moorings = []
for moa in instance.mooringonapproval_set.all():
#mooring_name = moa.mooring.name
licence_holder_data = {}
if moa.mooring.mooring_licence:
licence_holder_data = UserSerializer(moa.mooring.mooring_licence.submitter).data
moorings.append({
"id": moa.id,
"mooring_name": moa.mooring.name,
#"licence_holder": licence_holder_data,
"licensee": licence_holder_data.get('full_name') if licence_holder_data else '',
"mobile": licence_holder_data.get('mobile_number') if licence_holder_data else '',
"email": licence_holder_data.get('email') if licence_holder_data else '',
})
return Response(moorings)
@detail_route(methods=['POST'])
@renderer_classes((JSONRenderer,))
@basic_exception_handler
def request_new_stickers(self, request, *args, **kwargs):
# external
approval = self.get_object()
details = request.data['details']
sticker_ids = [sticker['id'] for sticker in request.data['stickers']]
# TODO: Validation
sticker_action_details = []
stickers = Sticker.objects.filter(approval=approval, id__in=sticker_ids, status__in=(Sticker.STICKER_STATUS_CURRENT, Sticker.STICKER_STATUS_AWAITING_PRINTING,))
data = {}
for sticker in stickers:
data['action'] = 'Request new sticker'
data['user'] = request.user.id
data['reason'] = details['reason']
serializer = StickerActionDetailSerializer(data=data)
serializer.is_valid(raise_exception=True)
new_sticker_action_detail = serializer.save()
new_sticker_action_detail.sticker = sticker
new_sticker_action_detail.save()
sticker_action_details.append(new_sticker_action_detail.id)
return Response({'sticker_action_detail_ids': sticker_action_details})
@detail_route(methods=['GET'])
@renderer_classes((JSONRenderer,))
@basic_exception_handler
def stickers(self, request, *args, **kwargs):
instance = self.get_object()
stickers = instance.stickers.filter(status__in=[Sticker.STICKER_STATUS_CURRENT,])
serializer = StickerSerializer(stickers, many=True)
return Response({'stickers': serializer.data})
@detail_route(methods=['GET'])
@renderer_classes((JSONRenderer,))
@basic_exception_handler
def approval_history(self, request, *args, **kwargs):
instance = self.get_object()
serializer = ApprovalHistorySerializer(instance.approvalhistory_set.all(), many=True)
return Response(serializer.data)
@detail_route(methods=['GET'])
@renderer_classes((JSONRenderer,))
@basic_exception_handler
def lookup_approval(self, request, *args, **kwargs):
instance = self.get_object()
#serializer = ApprovalHistorySerializer(instance.approvalhistory_set.all(), many=True)
approval_details = {
"approvalType": instance.child_obj.description,
"approvalLodgementNumber": instance.lodgement_number,
#"history": serializer.data,
}
return Response(approval_details)
@detail_route(methods=['POST'])
@renderer_classes((JSONRenderer,))
@basic_exception_handler
def process_waiting_list_offer_document(self, request, *args, **kwargs):
instance = self.get_object()
returned_data = process_generic_document(request, instance, document_type='waiting_list_offer_document')
if returned_data:
return Response(returned_data)
else:
return Response()
@detail_route(methods=['POST'])
@renderer_classes((JSONRenderer,))
def process_document(self, request, *args, **kwargs):
instance = self.get_object()
action = request.POST.get('action')
section = request.POST.get('input_name')
if action == 'list' and 'input_name' in request.POST:
pass
elif action == 'delete' and 'document_id' in request.POST:
document_id = request.POST.get('document_id')
document = instance.qaofficer_documents.get(id=document_id)
document.visible = False
document.save()
instance.save(version_comment='Licence ({}): {}'.format(section, document.name)) # to allow revision to be added to reversion history
elif action == 'save' and 'input_name' in request.POST and 'filename' in request.POST:
proposal_id = request.POST.get('proposal_id')
filename = request.POST.get('filename')
_file = request.POST.get('_file')
if not _file:
_file = request.FILES.get('_file')
document = instance.qaofficer_documents.get_or_create(input_name=section, name=filename)[0]
path = default_storage.save('{}/proposals/{}/approvals/{}'.format(settings.MEDIA_APP_DIR, proposal_id, filename), ContentFile(_file.read()))
document._file = path
document.save()
instance.save(version_comment='Licence ({}): {}'.format(section, filename)) # to allow revision to be added to reversion history
#instance.current_proposal.save(version_comment='File Added: {}'.format(filename)) # to allow revision to be added to reversion history
return Response( [dict(input_name=d.input_name, name=d.name,file=d._file.url, id=d.id, can_delete=d.can_delete) for d in instance.qaofficer_documents.filter(input_name=section, visible=True) if d._file] )
#@detail_route(methods=['POST',])
#def approval_extend(self, request, *args, **kwargs):
# try:
# instance = self.get_object()
# serializer = ApprovalExtendSerializer(data=request.data)
# serializer.is_valid(raise_exception=True)
# instance.approval_extend(request,serializer.validated_data)
# serializer = ApprovalSerializer(instance,context={'request':request})
# return Response(serializer.data)
# except serializers.ValidationError:
# print(traceback.print_exc())
# raise
# except ValidationError as e:
# if hasattr(e,'error_dict'):
# raise serializers.ValidationError(repr(e.error_dict))
# else:
# if hasattr(e,'message'):
# raise serializers.ValidationError(e.message)
# except Exception as e:
# print(traceback.print_exc())
# raise serializers.ValidationError(str(e))
@detail_route(methods=['POST',])
def approval_cancellation(self, request, *args, **kwargs):
try:
instance = self.get_object()
serializer = ApprovalCancellationSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
instance.approval_cancellation(request,serializer.validated_data)
#serializer = ApprovalSerializer(instance,context={'request':request})
#return Response(serializer.data)
return Response()
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e,'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
if hasattr(e,'message'):
raise serializers.ValidationError(e.message)
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST',])
def approval_suspension(self, request, *args, **kwargs):
try:
instance = self.get_object()
serializer = ApprovalSuspensionSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
instance.approval_suspension(request,serializer.validated_data)
#serializer = ApprovalSerializer(instance,context={'request':request})
#return Response(serializer.data)
return Response()
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e,'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
if hasattr(e,'message'):
raise serializers.ValidationError(e.message)
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST',])
def approval_reinstate(self, request, *args, **kwargs):
try:
instance = self.get_object()
instance.reinstate_approval(request)
#serializer = self.get_serializer(instance)
#return Response(serializer.data)
return Response()
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e,'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
if hasattr(e,'message'):
raise serializers.ValidationError(e.message)
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST',])
def approval_surrender(self, request, *args, **kwargs):
try:
instance = self.get_object()
serializer = ApprovalSurrenderSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
instance.approval_surrender(request,serializer.validated_data)
#serializer = ApprovalSerializer(instance,context={'request':request})
#return Response(serializer.data)
return Response()
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
if hasattr(e,'error_dict'):
raise serializers.ValidationError(repr(e.error_dict))
else:
if hasattr(e,'message'):
raise serializers.ValidationError(e.message)
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['GET',])
def action_log(self, request, *args, **kwargs):
try:
instance = self.get_object()
qs = instance.action_logs.all()
serializer = ApprovalUserActionSerializer(qs,many=True)
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['GET',])
def comms_log(self, request, *args, **kwargs):
try:
instance = self.get_object()
qs = instance.comms_logs.all()
serializer = ApprovalLogEntrySerializer(qs,many=True)
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
@detail_route(methods=['POST',])
@renderer_classes((JSONRenderer,))
def add_comms_log(self, request, *args, **kwargs):
try:
with transaction.atomic():
instance = self.get_object()
mutable=request.data._mutable
request.data._mutable=True
request.data['approval'] = u'{}'.format(instance.id)
request.data['staff'] = u'{}'.format(request.user.id)
request.data._mutable=mutable
serializer = ApprovalLogEntrySerializer(data=request.data)
serializer.is_valid(raise_exception=True)
comms = serializer.save()
# Save the files
for f in request.FILES:
document = comms.documents.create()
document.name = str(request.FILES[f])
document._file = request.FILES[f]
document.save()
# End Save Documents
return Response(serializer.data)
except serializers.ValidationError:
print(traceback.print_exc())
raise
except ValidationError as e:
print(traceback.print_exc())
raise serializers.ValidationError(repr(e.error_dict))
except Exception as e:
print(traceback.print_exc())
raise serializers.ValidationError(str(e))
class DcvAdmissionViewSet(viewsets.ModelViewSet):
queryset = DcvAdmission.objects.all().order_by('id')
serializer_class = DcvAdmissionSerializer
@staticmethod
def _handle_dcv_vessel(dcv_vessel, org_id=None):
data = dcv_vessel
rego_no_requested = data.get('rego_no', '')
vessel_name_requested = data.get('vessel_name', '')
try:
dcv_vessel = DcvVessel.objects.get(rego_no=rego_no_requested)
except DcvVessel.DoesNotExist:
data['rego_no'] = rego_no_requested
data['vessel_name'] = vessel_name_requested
serializer = DcvVesselSerializer(data=data)
serializer.is_valid(raise_exception=True)
dcv_vessel = serializer.save()
except Exception as e:
logger.error(e)
raise
return dcv_vessel
def create(self, request, *args, **kwargs):
data = request.data
dcv_vessel = self._handle_dcv_vessel(request.data.get('dcv_vessel'), None)
if request.user.is_authenticated():
# Logged in user
# 1. DcvPermit exists
# 2. DcvPermit doesn't exist
submitter = request.user
if not dcv_vessel.dcv_permits.count():
# No DcvPermit found, create DcvOrganisation and link it to DcvVessel
my_data = {}
my_data['organisation'] = request.data.get('organisation_name')
my_data['abn_acn'] = request.data.get('organisation_abn')
dcv_organisation = DcvPermitViewSet.handle_dcv_organisation(my_data)
dcv_vessel.dcv_organisation = dcv_organisation
dcv_vessel.save()
else:
# Anonymous user
# 1. DcvPermit exists
# 2. DcvPermit doesn't exist
if dcv_vessel.dcv_permits.count():
# DcvPermit exists
submitter = dcv_vessel.dcv_permits.first().submitter
else:
# DcvPermit doesn't exist
email_address = request.data.get('email_address')
email_address_confirmation = request.data.get('email_address_confirmation')
skipper = request.data.get('skipper')
if email_address and email_address_confirmation:
if email_address == email_address_confirmation:
if skipper:
this_user = EmailUser.objects.filter(email=email_address)
if this_user:
new_user = this_user.first()
else:
new_user = EmailUser.objects.create(email=email_address, first_name=skipper)
submitter = new_user
else:
raise forms.ValidationError('Please fill the skipper field')
else:
raise forms.ValidationError('Email addresses do not match')
else:
raise forms.ValidationError('Please fill the email address fields')
# No DcvPermit found, create DcvOrganisation and link it to DcvVessel
my_data = {}
my_data['organisation'] = request.data.get('organisation_name')
my_data['abn_acn'] = request.data.get('organisation_abn')
dcv_organisation = DcvPermitViewSet.handle_dcv_organisation(my_data)
dcv_vessel.dcv_organisation = dcv_organisation
dcv_vessel.save()
data['submitter'] = submitter.id
data['dcv_vessel_id'] = dcv_vessel.id
# data['fee_sid'] = fee_season_requested.get('id')
serializer = | |
    async def fours(self, name, color, data):
try:
level = str(utils.comma(int(data['player']['achievements']['bedwars_level']))) + ' ⭐'
except:
level = 'N/A'
try:
games_played = data['player']['stats']['Bedwars']['four_four_games_played_bedwars']
except:
games_played = 'N/A'
try:
exp = data['player']['stats']['Bedwars']['Experience']
except:
exp = 'N/A'
try:
final_deaths = data['player']['stats']['Bedwars']['four_four_final_deaths_bedwars']
except:
final_deaths = 'N/A'
try:
normal_deaths = data['player']['stats']['Bedwars']['four_four_deaths_bedwars']
except:
normal_deaths = 'N/A'
try:
total_deaths = final_deaths+normal_deaths
if total_deaths == 'N/AN/A':
raise ValueError
except:
total_deaths = 'N/A'
try:
beds_lost = data['player']['stats']['Bedwars']['four_four_beds_lost_bedwars']
except:
beds_lost = 'N/A'
try:
beds_broken = data['player']['stats']['Bedwars']['four_four_beds_broken_bedwars']
except:
beds_broken = 'N/A'
try:
normal_kills = data['player']['stats']['Bedwars']['four_four_kills_bedwars']
except:
normal_kills = 'N/A'
try:
final_kills = data['player']['stats']['Bedwars']['four_four_final_kills_bedwars']
except:
final_kills = 'N/A'
try:
total_kills = final_kills+normal_kills
if total_kills == 'N/AN/A':
raise ValueError
except:
total_kills = 'N/A'
try:
wins = data['player']['stats']['Bedwars']['four_four_wins_bedwars']
except:
wins = 'N/A'
try:
losses = data['player']['stats']['Bedwars']['four_four_losses_bedwars']
except:
losses = 'N/A'
try:
winstreak = data['player']['stats']['Bedwars']['four_four_winstreak']
except:
winstreak = 'N/A'
embed = discord.Embed(title=name + "'s Bedwars Stats - 4v4v4v4", color=color)
embed.set_thumbnail(url=f"https://crafatar.com/renders/head/{data['player']['_id']}")
embed.add_field(name="Bedwars Level", value=str(level), inline=True)
try:
embed.add_field(name="Bedwars Experience", value=str(utils.comma(round(exp, 0))), inline=True)
except:
embed.add_field(name="Bedwars Experience", value=str('N/A'), inline=True)
embed.add_field(name="Games Played", value=str(utils.comma(games_played)), inline=True)
embed.add_field(name="Normal Deaths", value=str(utils.comma(normal_deaths)), inline=True)
embed.add_field(name="Final Deaths", value=str(utils.comma(final_deaths)), inline=True)
embed.add_field(name="Total Deaths", value=str(utils.comma(total_deaths)), inline=True)
embed.add_field(name="Normal Kills", value=str(utils.comma(normal_kills)), inline=True)
embed.add_field(name="Final Kills", value=str(utils.comma(final_kills)), inline=True)
embed.add_field(name="Total Kills", value=str(utils.comma(total_kills)), inline=True)
try:
embed.add_field(name="Normal K/D Ratio", value=str(utils.comma(round(normal_kills/normal_deaths, 2))), inline=True)
except:
embed.add_field(name="Normal K/D Ratio", value=str('N/A'), inline=True)
try:
embed.add_field(name="Final K/D Ratio", value=str(utils.comma(round(final_kills/final_deaths, 2))), inline=True)
except:
embed.add_field(name="Final K/D Ratio", value=str('N/A'), inline=True)
try:
embed.add_field(name="K/D Ratio", value=str(utils.comma(round(total_kills/total_deaths, 2))), inline=True)
except:
embed.add_field(name="K/D Ratio", value=str('N/A'), inline=True)
embed.add_field(name="Wins", value=str(utils.comma(wins)), inline=True)
embed.add_field(name="Losses", value=str(utils.comma(losses)), inline=True)
try:
embed.add_field(name="W/L Ratio", value=str(utils.comma(round(wins/losses, 2))), inline=True)
except:
embed.add_field(name="W/L Ratio", value=str('N/A'), inline=True)
embed.add_field(name="Beds Lost", value=str(utils.comma(beds_lost)), inline=True)
embed.add_field(name="Beds Broken", value=str(utils.comma(beds_broken)), inline=True)
embed.add_field(name="Winstreak", value=str(utils.comma(winstreak)), inline=True)
embed.set_footer(text='Unofficial Hypixel Discord Bot - Page 5/6')
return embed
async def fourfour(self, name, color, data):
try:
level = str(utils.comma(int(data['player']['achievements']['bedwars_level']))) + ' ⭐'
except:
level = 'N/A'
try:
games_played = data['player']['stats']['Bedwars']['two_four_games_played_bedwars']
except:
games_played = 'N/A'
try:
exp = data['player']['stats']['Bedwars']['Experience']
except:
exp = 'N/A'
try:
final_deaths = data['player']['stats']['Bedwars']['two_four_final_deaths_bedwars']
except:
final_deaths = 'N/A'
try:
normal_deaths = data['player']['stats']['Bedwars']['two_four_deaths_bedwars']
except:
normal_deaths = 'N/A'
try:
total_deaths = final_deaths+normal_deaths
if total_deaths == 'N/AN/A':
raise ValueError
except:
total_deaths = 'N/A'
try:
beds_lost = data['player']['stats']['Bedwars']['two_four_beds_lost_bedwars']
except:
beds_lost = 'N/A'
try:
beds_broken = data['player']['stats']['Bedwars']['two_four_beds_broken_bedwars']
except:
beds_broken = 'N/A'
try:
normal_kills = data['player']['stats']['Bedwars']['two_four_kills_bedwars']
except:
normal_kills = 'N/A'
try:
final_kills = data['player']['stats']['Bedwars']['two_four_final_kills_bedwars']
except:
final_kills = 'N/A'
try:
total_kills = final_kills+normal_kills
if total_kills == 'N/AN/A':
raise ValueError
except:
total_kills = 'N/A'
try:
wins = data['player']['stats']['Bedwars']['two_four_wins_bedwars']
except:
wins = 'N/A'
try:
losses = data['player']['stats']['Bedwars']['two_four_losses_bedwars']
except:
losses = 'N/A'
try:
winstreak = data['player']['stats']['Bedwars']['two_four_winstreak']
except:
winstreak = 'N/A'
embed = discord.Embed(title=name + "'s Bedwars Stats - 4v4", color=color)
embed.set_thumbnail(url=f"https://crafatar.com/renders/head/{data['player']['_id']}")
embed.add_field(name="Bedwars Level", value=str(level), inline=True)
try:
embed.add_field(name="Bedwars Experience", value=str(utils.comma(round(exp, 0))), inline=True)
except:
embed.add_field(name="Bedwars Experience", value=str('N/A'), inline=True)
embed.add_field(name="Games Played", value=str(utils.comma(games_played)), inline=True)
embed.add_field(name="Normal Deaths", value=str(utils.comma(normal_deaths)), inline=True)
embed.add_field(name="Final Deaths", value=str(utils.comma(final_deaths)), inline=True)
embed.add_field(name="Total Deaths", value=str(utils.comma(total_deaths)), inline=True)
embed.add_field(name="Normal Kills", value=str(utils.comma(normal_kills)), inline=True)
embed.add_field(name="Final Kills", value=str(utils.comma(final_kills)), inline=True)
embed.add_field(name="Total Kills", value=str(utils.comma(total_kills)), inline=True)
try:
embed.add_field(name="Normal K/D Ratio", value=str(utils.comma(round(normal_kills/normal_deaths, 2))), inline=True)
except:
embed.add_field(name="Normal K/D Ratio", value=str('N/A'), inline=True)
try:
embed.add_field(name="Final K/D Ratio", value=str(utils.comma(round(final_kills/final_deaths, 2))), inline=True)
except:
embed.add_field(name="Final K/D Ratio", value=str('N/A'), inline=True)
try:
embed.add_field(name="K/D Ratio", value=str(utils.comma(round(total_kills/total_deaths, 2))), inline=True)
except:
embed.add_field(name="K/D Ratio", value=str('N/A'), inline=True)
embed.add_field(name="Wins", value=str(utils.comma(wins)), inline=True)
embed.add_field(name="Losses", value=str(utils.comma(losses)), inline=True)
try:
embed.add_field(name="W/L Ratio", value=str(utils.comma(round(wins/losses, 2))), inline=True)
except:
embed.add_field(name="W/L Ratio", value=str('N/A'), inline=True)
embed.add_field(name="Beds Lost", value=str(utils.comma(beds_lost)), inline=True)
embed.add_field(name="Beds Broken", value=str(utils.comma(beds_broken)), inline=True)
embed.add_field(name="Winstreak", value=str(utils.comma(winstreak)), inline=True)
embed.set_footer(text='Unofficial Hypixel Discord Bot - Page 6/6')
return embed
async def generate(self, ctx, name, data, perms):
color=random.randint(1, 16777215)
main = await self.main(name, color, data)
solo = await self.solo(name, color, data)
doubles = await self.doubles(name, color, data)
threes = await self.threes(name, color, data)
fours = await self.fours(name, color, data)
fourfour = await self.fourfour(name, color, data)
paginator = DiscordUtils.Pagination.CustomEmbedPaginator(ctx, timeout=60, remove_reactions=False)
if perms is not None:
if perms.manage_messages:
paginator = DiscordUtils.Pagination.CustomEmbedPaginator(ctx, timeout=60, remove_reactions=True)
paginator.add_reaction('⏮️', "first")
paginator.add_reaction('⏪', "back")
paginator.add_reaction('⏹', "lock")
paginator.add_reaction('⏩', "next")
paginator.add_reaction('⏭️', "last")
embeds = [main, solo, doubles, threes, fours, fourfour]
return embeds, paginator
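    # Usage sketch (caller assumed, names hypothetical):
    #   embeds, paginator = await stats.generate(ctx, name, data, perms)
    #   await paginator.run(embeds)
    # run() is the usual entry point of DiscordUtils' CustomEmbedPaginator; verify against
    # the installed DiscordUtils version.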
class Skywars:
async def main(self, name, color, data):
try:
level = 'N/A'
xp = data['player']['stats']['SkyWars']['skywars_experience']
xps = [0, 20, 70, 150, 250, 500, 1000, 2000, 3500, 6000, 10000, 15000]
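            # Cumulative XP thresholds for the first Skywars levels (community-documented
            # values, not an official constant); beyond 15000 XP each further 10000 XP
            # adds one level, as handled in the branch below.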
if xp >= 15000:
level = (xp - 15000) / 10000. + 12
else:
for i in range(len(xps)):
if xp < xps[i]:
                        # keep the level numeric so the display code below can format it
                        level = 1 + i + float(xp - xps[i - 1]) / (xps[i] - xps[i - 1])
                        break
except:
level = 'N/A'
try:
games_played = data['player']['stats']['SkyWars']['games_played_skywars']
except:
games_played = 'N/A'
try:
winstreak = data['player']['stats']['SkyWars']['win_streak']
except:
winstreak = 'N/A'
try:
kills = data['player']['stats']['SkyWars']['kills']
except:
kills = 'N/A'
try:
deaths = data['player']['stats']['SkyWars']['deaths']
except:
deaths = 'N/A'
try:
top_winstreak = data['player']['stats']['SkyWars']['highestWinstreak']
except:
top_winstreak = 'N/A'
try:
coins = int(data['player']['stats']['SkyWars']['coins'])
except:
coins = 'N/A'
try:
souls = data['player']['stats']['SkyWars']['souls']
except:
souls = 'N/A'
try:
wins = data['player']['stats']['SkyWars']['wins']
except:
wins = 'N/A'
try:
losses = data['player']['stats']['SkyWars']['losses']
except:
losses = 'N/A'
embed = discord.Embed(title=name + "'s Skywars Stats - Overall", color=color)
embed.set_thumbnail(url=f"https://crafatar.com/renders/head/{data['player']['_id']}")
try:
embed.add_field(name="Skywars Level", value=str(int(level))+' ⭐', inline=True)
except:
embed.add_field(name="Skywars Level", value='N/A', inline=True)
embed.add_field(name="Skywars Coins", value=str(utils.comma(coins)), inline=True)
embed.add_field(name="Skywars Souls", value=str(utils.comma(souls)), inline=True)
embed.add_field(name="Skywars Games Played", value=str(utils.comma(round(games_played, 0))), inline=True)
embed.add_field(name="Skywars Winstreak", value=str(utils.comma(winstreak)), inline=True)
embed.add_field(name="Skywars Highest Winstreak", value=str(utils.comma(top_winstreak)), inline=True)
embed.add_field(name="Kills", value=str(utils.comma(kills)), inline=True)
embed.add_field(name="Deaths", value=str(utils.comma(deaths)), inline=True)
try:
embed.add_field(name="K/D Ratio", value=str(utils.comma(round(int(kills)/int(deaths), 2))), inline=True)
except:
embed.add_field(name="K/D Ratio", value='N/A', inline=True)
embed.add_field(name="Wins", value=str(utils.comma(wins)), inline=True)
embed.add_field(name="Losses", value=str(utils.comma(losses)), inline=True)
try:
embed.add_field(name="W/L Ratio", value=str(utils.comma(round(int(wins)/int(losses), 2))), inline=True)
except:
embed.add_field(name="W/L Ratio", value='N/A', inline=True)
embed.set_footer(text='Unofficial Hypixel Discord Bot - Page 1/6')
return embed
async def solon(self, name, color, data):
try:
level = 'N/A'
xp = data['player']['stats']['SkyWars']['skywars_experience']
xps = [0, 20, 70, 150, 250, 500, 1000, 2000, 3500, 6000, 10000, 15000]
if xp >= 15000:
level = (xp - 15000) / 10000. + 12
else:
for i in range(len(xps)):
if xp < xps[i]:
                        # keep the level numeric so the display code below can format it
                        level = 1 + i + float(xp - xps[i - 1]) / (xps[i] - xps[i - 1])
                        break
except:
level = 'N/A'
try:
games_played = data['player']['stats']['SkyWars']['games_played_skywars']
except:
games_played = 'N/A'
try:
winstreak = data['player']['stats']['SkyWars']['win_streak']
except:
winstreak = 'N/A'
try:
kills = data['player']['stats']['SkyWars']['kills_solo_normal']
except:
kills = 'N/A'
try:
deaths = data['player']['stats']['SkyWars']['deaths_solo_normal']
except:
deaths = 'N/A'
try:
top_winstreak = data['player']['stats']['SkyWars']['highestWinstreak']
except:
top_winstreak = 'N/A'
try:
coins = int(data['player']['stats']['SkyWars']['coins'])
except:
coins = 'N/A'
try:
souls = data['player']['stats']['SkyWars']['souls']
except:
souls = 'N/A'
try:
wins = data['player']['stats']['SkyWars']['wins_solo_normal']
except:
wins = 'N/A'
try:
losses = data['player']['stats']['SkyWars']['losses_solo_normal']
except:
losses = 'N/A'
embed = discord.Embed(title=name + "'s Skywars Stats - Solo Normal", color=color)
embed.set_thumbnail(url=f"https://crafatar.com/renders/head/{data['player']['_id']}")
try:
embed.add_field(name="Skywars Level", value=str(int(level))+' ⭐', inline=True)
except:
embed.add_field(name="Skywars Level", value='N/A', inline=True)
embed.add_field(name="Skywars Coins", value=str(utils.comma(coins)), inline=True)
embed.add_field(name="Skywars Souls", value=str(utils.comma(souls)), inline=True)
embed.add_field(name="Skywars Games Played", value=str(utils.comma(games_played)), inline=True)
embed.add_field(name="Skywars Winstreak", value=str(utils.comma(winstreak)), inline=True)
embed.add_field(name="Skywars Highest Winstreak", value=str(utils.comma(top_winstreak)), inline=True)
embed.add_field(name="Kills", value=str(utils.comma(kills)), inline=True)
embed.add_field(name="Deaths", value=str(utils.comma(deaths)), inline=True)
try:
embed.add_field(name="K/D Ratio", value=str(utils.comma(round(int(kills)/int(deaths), 2))), inline=True)
except:
embed.add_field(name="K/D Ratio", value='N/A', inline=True)
embed.add_field(name="Wins", value=str(utils.comma(wins)), inline=True)
embed.add_field(name="Losses", value=str(utils.comma(losses)), inline=True)
try:
embed.add_field(name="W/L Ratio", value=str(utils.comma(round(int(wins)/int(losses), 2))), inline=True)
except:
embed.add_field(name="W/L Ratio", value='N/A', inline=True)
embed.set_footer(text='Unofficial Hypixel Discord Bot - Page 2/6')
return embed
async def soloi(self, name, color, data):
try:
level = 'N/A'
xp = data['player']['stats']['SkyWars']['skywars_experience']
xps = [0, 20, 70, 150, 250, 500, 1000, 2000, 3500, 6000, 10000, 15000]
if xp >= 15000:
level = (xp - 15000) / 10000. + 12
else:
for i in range(len(xps)):
if xp < xps[i]:
                        # keep the level numeric so the display code below can format it
                        level = 1 + i + float(xp - xps[i - 1]) / (xps[i] - xps[i - 1])
                        break
except:
level = 'N/A'
try:
games_played = data['player']['stats']['SkyWars']['games_played_skywars']
except:
games_played = 'N/A'
try:
winstreak = data['player']['stats']['SkyWars']['win_streak']
except:
winstreak = 'N/A'
try:
kills = data['player']['stats']['SkyWars']['kills_solo_insane']
except:
kills = 'N/A'
try:
deaths = data['player']['stats']['SkyWars']['deaths_solo_insane']
except:
deaths = 'N/A'
try:
top_winstreak = data['player']['stats']['SkyWars']['highestWinstreak']
except:
top_winstreak = 'N/A'
try:
coins = int(data['player']['stats']['SkyWars']['coins'])
except:
coins = 'N/A'
try:
souls = data['player']['stats']['SkyWars']['souls']
except:
souls = 'N/A'
try:
wins = data['player']['stats']['SkyWars']['wins_solo_insane']
except:
wins = 'N/A'
try:
losses = data['player']['stats']['SkyWars']['losses_solo_insane']
except:
losses = 'N/A'
embed = discord.Embed(title=name + "'s Skywars Stats - Solo Insane", color=color)
embed.set_thumbnail(url=f"https://crafatar.com/renders/head/{data['player']['_id']}")
try:
embed.add_field(name="Skywars Level", value=str(int(level))+' ⭐', inline=True)
except:
embed.add_field(name="Skywars Level", value='N/A', inline=True)
embed.add_field(name="Skywars Coins", value=str(utils.comma(coins)), inline=True)
embed.add_field(name="Skywars Souls", value=str(utils.comma(souls)), inline=True)
embed.add_field(name="Skywars Games Played", value=str(utils.comma(games_played)), inline=True)
embed.add_field(name="Skywars Winstreak", value=str(utils.comma(winstreak)), inline=True)
embed.add_field(name="Skywars Highest Winstreak", value=str(utils.comma(top_winstreak)), inline=True)
embed.add_field(name="Kills", value=str(utils.comma(kills)), inline=True)
embed.add_field(name="Deaths", value=str(utils.comma(deaths)), inline=True)
try:
embed.add_field(name="K/D Ratio", value=str(utils.comma(round(int(kills)/int(deaths), 2))), inline=True)
except:
embed.add_field(name="K/D Ratio", value='N/A', inline=True)
embed.add_field(name="Wins", value=str(utils.comma(wins)), inline=True)
embed.add_field(name="Losses", value=str(utils.comma(losses)), inline=True)
try:
embed.add_field(name="W/L Ratio", value=str(utils.comma(round(int(wins)/int(losses), 2))), | |
# designs/kitaplarim.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'kitaplarim.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
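# Note: the ":/16x16/icons/..." resource paths used in the stylesheets below assume a
# compiled Qt resource module (generated with pyrcc5) is importable at runtime, typically
# via an "import files_rc"-style line near the bottom of the generated file.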
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1173, 620)
MainWindow.setMinimumSize(QtCore.QSize(0, 0))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(66, 73, 90))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(55, 61, 75))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(22, 24, 30))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 32, 40))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(210, 210, 210))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 170, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 170, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Link, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.LinkVisited, brush)
brush = QtGui.QBrush(QtGui.QColor(22, 24, 30))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 49, 60))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(210, 210, 210))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(66, 73, 90))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(55, 61, 75))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(22, 24, 30))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 32, 40))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(210, 210, 210))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 170, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 170, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Link, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.LinkVisited, brush)
brush = QtGui.QBrush(QtGui.QColor(22, 24, 30))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 49, 60))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(210, 210, 210))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(22, 24, 30))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(66, 73, 90))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(55, 61, 75))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(22, 24, 30))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 32, 40))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(22, 24, 30))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(22, 24, 30))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(51, 153, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 170, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Link, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.LinkVisited, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 49, 60))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 49, 60))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(210, 210, 210))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
MainWindow.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Segoe UI")
font.setPointSize(10)
MainWindow.setFont(font)
MainWindow.setStyleSheet("QMainWindow {background: transparent; }\n"
"QToolTip {\n"
" color: #ffffff;\n"
" background-color: rgba(27, 29, 35, 160);\n"
" border: 1px solid rgb(40, 40, 40);\n"
" border-radius: 2px;\n"
"}")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setStyleSheet("background: transparent;\n"
"color: rgb(210, 210, 210);")
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setContentsMargins(10, 10, 10, 10)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.frame_main1 = QtWidgets.QFrame(self.centralwidget)
self.frame_main1.setStyleSheet("/* LINE EDIT */\n"
"QLineEdit {\n"
" background-color: rgb(27, 29, 35);\n"
" border-radius: 5px;\n"
" border: 2px solid rgb(27, 29, 35);\n"
" padding-left: 10px;\n"
"}\n"
"QLineEdit:hover {\n"
" border: 2px solid rgb(64, 71, 88);\n"
"}\n"
"QLineEdit:focus {\n"
" border: 2px solid rgb(91, 101, 124);\n"
"}\n"
"\n"
"/* SCROLL BARS */\n"
"QScrollBar:horizontal {\n"
" border: none;\n"
" background: rgb(52, 59, 72);\n"
" height: 14px;\n"
" margin: 0px 21px 0 21px;\n"
" border-radius: 0px;\n"
"}\n"
"QScrollBar::handle:horizontal {\n"
" background: rgb(85, 170, 255);\n"
" min-width: 25px;\n"
" border-radius: 7px\n"
"}\n"
"QScrollBar::add-line:horizontal {\n"
" border: none;\n"
" background: rgb(55, 63, 77);\n"
" width: 20px;\n"
" border-top-right-radius: 7px;\n"
" border-bottom-right-radius: 7px;\n"
" subcontrol-position: right;\n"
" subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::sub-line:horizontal {\n"
" border: none;\n"
" background: rgb(55, 63, 77);\n"
" width: 20px;\n"
" border-top-left-radius: 7px;\n"
" border-bottom-left-radius: 7px;\n"
" subcontrol-position: left;\n"
" subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::up-arrow:horizontal, QScrollBar::down-arrow:horizontal\n"
"{\n"
" background: none;\n"
"}\n"
"QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal\n"
"{\n"
" background: none;\n"
"}\n"
" QScrollBar:vertical {\n"
" border: none;\n"
" background: rgb(52, 59, 72);\n"
" width: 14px;\n"
" margin: 21px 0 21px 0;\n"
" border-radius: 0px;\n"
" }\n"
" QScrollBar::handle:vertical { \n"
" background: rgb(85, 170, 255);\n"
" min-height: 25px;\n"
" border-radius: 7px\n"
" }\n"
" QScrollBar::add-line:vertical {\n"
" border: none;\n"
" background: rgb(55, 63, 77);\n"
" height: 20px;\n"
" border-bottom-left-radius: 7px;\n"
" border-bottom-right-radius: 7px;\n"
" subcontrol-position: bottom;\n"
" subcontrol-origin: margin;\n"
" }\n"
" QScrollBar::sub-line:vertical {\n"
" border: none;\n"
" background: rgb(55, 63, 77);\n"
" height: 20px;\n"
" border-top-left-radius: 7px;\n"
" border-top-right-radius: 7px;\n"
" subcontrol-position: top;\n"
" subcontrol-origin: margin;\n"
" }\n"
" QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical {\n"
" background: none;\n"
" }\n"
"\n"
" QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {\n"
" background: none;\n"
" }\n"
"\n"
"/* CHECKBOX */\n"
"QCheckBox::indicator {\n"
" border: 3px solid rgb(52, 59, 72);\n"
" width: 15px;\n"
" height: 15px;\n"
" border-radius: 10px;\n"
" background: rgb(44, 49, 60);\n"
"}\n"
"QCheckBox::indicator:hover {\n"
" border: 3px solid rgb(58, 66, 81);\n"
"}\n"
"QCheckBox::indicator:checked {\n"
" background: 3px solid rgb(52, 59, 72);\n"
" border: 3px solid rgb(52, 59, 72); \n"
" background-image: url(:/16x16/icons/16x16/cil-check-alt.png);\n"
"}\n"
"\n"
"/* RADIO BUTTON */\n"
"QRadioButton::indicator {\n"
" border: 3px solid rgb(52, 59, 72);\n"
" width: 15px;\n"
" height: 15px;\n"
" border-radius: 10px;\n"
" background: rgb(44, 49, 60);\n"
"}\n"
"QRadioButton::indicator:hover {\n"
" border: 3px solid rgb(58, 66, 81);\n"
"}\n"
"QRadioButton::indicator:checked {\n"
" background: 3px solid rgb(94, 106, 130);\n"
" border: 3px solid rgb(52, 59, 72); \n"
"}\n"
"\n"
"/* COMBOBOX */\n"
"QComboBox{\n"
" background-color: rgb(27, 29, 35);\n"
" border-radius: 5px;\n"
" border: 2px solid rgb(27, 29, 35);\n"
" padding: 5px;\n"
" padding-left: 10px;\n"
"}\n"
"QComboBox:hover{\n"
" border: 2px solid rgb(64, 71, 88);\n"
"}\n"
"QComboBox::drop-down {\n"
" subcontrol-origin: padding;\n"
" subcontrol-position: top right;\n"
" width: 25px; \n"
" border-left-width: 3px;\n"
" border-left-color: rgba(39, 44, 54, 150);\n"
" border-left-style: solid;\n"
" border-top-right-radius: 3px;\n"
" border-bottom-right-radius: 3px; \n"
" background-image: url(:/16x16/icons/16x16/cil-arrow-bottom.png);\n"
" background-position: center;\n"
" background-repeat: no-reperat;\n"
" }\n"
"QComboBox QAbstractItemView {\n"
" color: rgb(85, 170, 255); \n"
" background-color: rgb(27, 29, 35);\n"
" padding: 10px;\n"
" selection-background-color: rgb(39, 44, 54);\n"
"}\n"
"\n"
"/* SLIDERS */\n"
"QSlider::groove:horizontal {\n"
" border-radius: 9px;\n"
" height: 18px;\n"
" margin: 0px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QSlider::groove:horizontal:hover {\n"
" background-color: rgb(55, 62, 76);\n"
"}\n"
"QSlider::handle:horizontal {\n"
" background-color: rgb(85, 170, 255);\n"
" border: none;\n"
" height: 18px;\n"
" width: 18px;\n"
" margin: 0px;\n"
" border-radius: 9px;\n"
"}\n"
"QSlider::handle:horizontal:hover {\n"
" background-color: rgb(105, 180, 255);\n"
"}\n"
"QSlider::handle:horizontal:pressed {\n"
" background-color: rgb(65, 130, 195);\n"
"}\n"
"\n"
"QSlider::groove:vertical {\n"
" border-radius: 9px;\n"
" width: 18px;\n"
" margin: 0px;\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QSlider::groove:vertical:hover {\n"
" background-color: rgb(55, 62, 76);\n"
"}\n"
"QSlider::handle:vertical {\n"
" background-color: rgb(85, 170, 255);\n"
" border: none;\n"
" height: 18px;\n"
" width: 18px;\n"
" margin: 0px;\n"
" border-radius: 9px;\n"
"}\n"
"QSlider::handle:vertical:hover {\n"
" background-color: rgb(105, 180, 255);\n"
"}\n"
"QSlider::handle:vertical:pressed {\n"
" background-color: rgb(65, 130, 195);\n"
"}\n"
"\n"
"")
self.frame_main1.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_main1.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_main1.setObjectName("frame_main1")
self.verticalLayout = QtWidgets.QVBoxLayout(self.frame_main1)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.frame_top1 = QtWidgets.QFrame(self.frame_main1)
self.frame_top1.setMinimumSize(QtCore.QSize(0, 65))
self.frame_top1.setMaximumSize(QtCore.QSize(16777215, 65))
self.frame_top1.setStyleSheet("background-color: transparent;")
self.frame_top1.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_top1.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_top1.setObjectName("frame_top1")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.frame_top1)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setSpacing(0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.frame_toggle = QtWidgets.QFrame(self.frame_top1)
self.frame_toggle.setMaximumSize(QtCore.QSize(70, 16777215))
self.frame_toggle.setStyleSheet("background-color: rgb(27, 29, 35);")
self.frame_toggle.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_toggle.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_toggle.setObjectName("frame_toggle")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.frame_toggle)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_3.addWidget(self.frame_toggle)
self.frame_top_right = QtWidgets.QFrame(self.frame_top1)
self.frame_top_right.setStyleSheet("background: transparent;")
self.frame_top_right.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_top_right.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_top_right.setObjectName("frame_top_right")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.frame_top_right)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.frame_top_btns = QtWidgets.QFrame(self.frame_top_right)
self.frame_top_btns.setMaximumSize(QtCore.QSize(16777215, 42))
self.frame_top_btns.setStyleSheet("background-color: rgba(27, 29, 35, 200)")
self.frame_top_btns.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_top_btns.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_top_btns.setObjectName("frame_top_btns")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.frame_top_btns)
self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_4.setSpacing(0)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.frame_label_top_btns = QtWidgets.QFrame(self.frame_top_btns)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_label_top_btns.sizePolicy().hasHeightForWidth())
self.frame_label_top_btns.setSizePolicy(sizePolicy)
self.frame_label_top_btns.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_label_top_btns.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_label_top_btns.setObjectName("frame_label_top_btns")
self.horizontalLayout_10 = QtWidgets.QHBoxLayout(self.frame_label_top_btns)
self.horizontalLayout_10.setContentsMargins(5, 0, 10, 0)
self.horizontalLayout_10.setSpacing(0)
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.frame_icon_top_bar = QtWidgets.QFrame(self.frame_label_top_btns)
self.frame_icon_top_bar.setMaximumSize(QtCore.QSize(30, 30))
self.frame_icon_top_bar.setStyleSheet("background: transparent;\n"
"background-image: url(:/16x16/icons/16x16/cil-terminal.png);\n"
"background-position: center;\n"
"background-repeat: no-repeat;\n"
"")
self.frame_icon_top_bar.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_icon_top_bar.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_icon_top_bar.setObjectName("frame_icon_top_bar")
self.horizontalLayout_10.addWidget(self.frame_icon_top_bar)
self.label_title_bar_top = QtWidgets.QLabel(self.frame_label_top_btns)
font = QtGui.QFont()
font.setFamily("Segoe UI")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_title_bar_top.setFont(font)
self.label_title_bar_top.setStyleSheet("background: transparent;\n"
"")
self.label_title_bar_top.setObjectName("label_title_bar_top")
self.horizontalLayout_10.addWidget(self.label_title_bar_top)
self.horizontalLayout_4.addWidget(self.frame_label_top_btns)
self.frame_btns_right = QtWidgets.QFrame(self.frame_top_btns)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_btns_right.sizePolicy().hasHeightForWidth())
self.frame_btns_right.setSizePolicy(sizePolicy)
self.frame_btns_right.setMaximumSize(QtCore.QSize(120, 16777215))
self.frame_btns_right.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_btns_right.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_btns_right.setObjectName("frame_btns_right")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.frame_btns_right)
self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_5.setSpacing(0)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.btn_minimize = QtWidgets.QPushButton(self.frame_btns_right)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_minimize.sizePolicy().hasHeightForWidth())
self.btn_minimize.setSizePolicy(sizePolicy)
self.btn_minimize.setMinimumSize(QtCore.QSize(40, 0))
self.btn_minimize.setMaximumSize(QtCore.QSize(40, 16777215))
self.btn_minimize.setStyleSheet("QPushButton { \n"
" border: none;\n"
" background-color: transparent;\n"
"}\n"
"QPushButton:hover {\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QPushButton:pressed { \n"
" background-color: rgb(85, 170, 255);\n"
"}")
self.btn_minimize.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/16x16/icons/16x16/cil-window-minimize.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btn_minimize.setIcon(icon)
self.btn_minimize.setObjectName("btn_minimize")
self.horizontalLayout_5.addWidget(self.btn_minimize)
self.btn_maximize_restore = QtWidgets.QPushButton(self.frame_btns_right)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_maximize_restore.sizePolicy().hasHeightForWidth())
self.btn_maximize_restore.setSizePolicy(sizePolicy)
self.btn_maximize_restore.setMinimumSize(QtCore.QSize(40, 0))
self.btn_maximize_restore.setMaximumSize(QtCore.QSize(40, 16777215))
self.btn_maximize_restore.setStyleSheet("QPushButton { \n"
" border: none;\n"
" background-color: transparent;\n"
"}\n"
"QPushButton:hover {\n"
" background-color: rgb(52, 59, 72);\n"
"}\n"
"QPushButton:pressed { \n"
" background-color: rgb(85, 170, 255);\n"
"}")
self.btn_maximize_restore.setText("")
icon1 = | |
assert data.ExtraFileExtensionsHelpTexts2 == "string"
assert data.Failed == "string"
assert data.FailedDownloadHandling == "string"
assert data.FailedLoadingSearchResults == "string"
assert data.FailedToLoadMovieFromAPI == "string"
assert data.FailedToLoadQueue == "string"
assert data.FeatureRequests == "string"
assert data.FileDateHelpText == "string"
assert data.FileDetails == "string"
assert data.FileManagement == "string"
assert data.Filename == "string"
assert data.FileNames == "string"
assert data.FileNameTokens == "string"
assert data.Files == "string"
assert data.FilesTotal == "string"
assert data.FileWasDeletedByUpgrade == "string"
assert data.FileWasDeletedByViaUI == "string"
assert data.Filter == "string"
assert data.FilterAnalyticsEvents == "string"
assert data.FilterAuthor == "string"
assert data.FilterPlaceHolder == "string"
assert data.Filters == "string"
assert data.FilterSentryEventsHelpText == "string"
assert data.FirstBook == "string"
assert data.FirstDayOfWeek == "string"
assert data.Fixed == "string"
assert data.FocusSearchBox == "string"
assert data.Folder == "string"
assert data.FolderMoveRenameWarning == "string"
assert data.Folders == "string"
assert data.FollowPerson == "string"
assert data.Forecast == "string"
assert data.ForeignIdHelpText == "string"
assert data.Formats == "string"
assert data.ForMoreInformationOnTheIndividualDownloadClients == "string"
assert (
data.ForMoreInformationOnTheIndividualDownloadClientsClickOnTheInfoButtons
== "string"
)
assert (
data.ForMoreInformationOnTheIndividualImportListsClinkOnTheInfoButtons
== "string"
)
assert data.ForMoreInformationOnTheIndividualIndexers == "string"
assert (
data.ForMoreInformationOnTheIndividualIndexersClickOnTheInfoButtons == "string"
)
assert data.ForMoreInformationOnTheIndividualListsClickOnTheInfoButtons == "string"
assert data.FreeSpace == "string"
assert data.From == "string"
assert data.FutureBooks == "string"
assert data.FutureDays == "string"
assert data.FutureDaysHelpText == "string"
assert data.General == "string"
assert data.GeneralSettings == "string"
assert data.GeneralSettingsSummary == "string"
assert data.Genres == "string"
assert data.Global == "string"
assert data.GoToAuthorListing == "string"
assert data.GoToInterp == "string"
assert data.Grab == "string"
assert data.Grabbed == "string"
assert data.GrabID == "string"
assert data.GrabRelease == "string"
assert data.GrabReleaseMessageText == "string"
assert data.GrabSelected == "string"
assert data.Group == "string"
assert data.HardlinkCopyFiles == "string"
assert data.HasMonitoredBooksNoMonitoredBooksForThisAuthor == "string"
assert data.HasPendingChangesNoChanges == "string"
assert data.HasPendingChangesSaveChanges == "string"
assert data.HaveNotAddedMovies == "string"
assert data.Health == "string"
assert data.HealthNoIssues == "string"
assert data.HelpText == "string"
assert data.HiddenClickToShow == "string"
assert data.HideAdvanced == "string"
assert data.HideBooks == "string"
assert data.History == "string"
assert data.HomePage == "string"
assert data.Host == "string"
assert data.HostHelpText == "string"
assert data.Hostname == "string"
assert data.Hours == "string"
assert data.HttpHttps == "string"
assert data.ICalFeed == "string"
assert data.ICalHttpUrlHelpText == "string"
assert data.iCalLink == "string"
assert data.ICalLink == "string"
assert data.IconForCutoffUnmet == "string"
assert data.IconTooltip == "string"
assert (
data.IfYouDontAddAnImportListExclusionAndTheAuthorHasAMetadataProfileOtherThanNoneThenThisBookMayBeReaddedDuringTheNextAuthorRefresh
== "string"
)
assert data.Ignored == "string"
assert data.IgnoredAddresses == "string"
assert data.IgnoreDeletedBooks == "string"
assert data.IgnoreDeletedMovies == "string"
assert data.IgnoredHelpText == "string"
assert data.IgnoredMetaHelpText == "string"
assert data.IgnoredPlaceHolder == "string"
assert data.IllRestartLater == "string"
assert data.Images == "string"
assert data.IMDb == "string"
assert data.ImdbRating == "string"
assert data.ImdbVotes == "string"
assert data.Import == "string"
assert data.ImportCustomFormat == "string"
assert data.Imported == "string"
assert data.ImportedTo == "string"
assert data.ImportErrors == "string"
assert data.ImportExistingMovies == "string"
assert data.ImportExtraFiles == "string"
assert data.ImportExtraFilesHelpText == "string"
assert data.ImportFailed == "string"
assert data.ImportFailedInterp == "string"
assert data.ImportFailures == "string"
assert data.ImportHeader == "string"
assert data.ImportIncludeQuality == "string"
assert data.Importing == "string"
assert data.ImportLibrary == "string"
assert data.ImportListExclusions == "string"
assert data.ImportListMissingRoot == "string"
assert data.ImportListMultipleMissingRoots == "string"
assert data.ImportLists == "string"
assert data.ImportListSettings == "string"
assert data.ImportListSpecificSettings == "string"
assert data.ImportListStatusCheckAllClientMessage == "string"
assert data.ImportListStatusCheckSingleClientMessage == "string"
assert data.ImportListSyncIntervalHelpText == "string"
assert data.ImportMechanismHealthCheckMessage == "string"
assert data.ImportMovies == "string"
assert data.ImportNotForDownloads == "string"
assert data.ImportRootPath == "string"
assert data.ImportTipsMessage == "string"
assert data.InCinemas == "string"
assert data.InCinemasDate == "string"
assert data.InCinemasMsg == "string"
assert data.IncludeCustomFormatWhenRenaming == "string"
assert data.IncludeCustomFormatWhenRenamingHelpText == "string"
assert data.IncludeHealthWarningsHelpText == "string"
assert data.IncludePreferredWhenRenaming == "string"
assert data.IncludeRadarrRecommendations == "string"
assert data.IncludeRecommendationsHelpText == "string"
assert data.IncludeUnknownAuthorItemsHelpText == "string"
assert data.IncludeUnknownMovieItemsHelpText == "string"
assert data.IncludeUnmonitored == "string"
assert data.Indexer == "string"
assert data.IndexerDownloadClientHelpText == "string"
assert data.IndexerFlags == "string"
assert data.IndexerIdHelpText == "string"
assert data.IndexerIdHelpTextWarning == "string"
assert data.IndexerIdvalue0IncludeInPreferredWordsRenamingFormat == "string"
assert data.IndexerIdvalue0OnlySupportedWhenIndexerIsSetToAll == "string"
assert data.IndexerJackettAll == "string"
assert data.IndexerLongTermStatusCheckAllClientMessage == "string"
assert data.IndexerLongTermStatusCheckSingleClientMessage == "string"
assert data.IndexerPriority == "string"
assert data.IndexerPriorityHelpText == "string"
assert data.IndexerRssHealthCheckNoAvailableIndexers == "string"
assert data.IndexerRssHealthCheckNoIndexers == "string"
assert data.Indexers == "string"
assert data.IndexerSearchCheckNoAutomaticMessage == "string"
assert data.IndexerSearchCheckNoAvailableIndexersMessage == "string"
assert data.IndexerSearchCheckNoInteractiveMessage == "string"
assert data.IndexerSettings == "string"
assert data.IndexersSettingsSummary == "string"
assert data.IndexerStatusCheckAllClientMessage == "string"
assert data.IndexerStatusCheckSingleClientMessage == "string"
assert data.IndexerTagHelpText == "string"
assert data.Info == "string"
assert data.InstallLatest == "string"
assert data.InteractiveImport == "string"
assert data.InteractiveImportErrLanguage == "string"
assert data.InteractiveImportErrMovie == "string"
assert data.InteractiveImportErrQuality == "string"
assert data.InteractiveSearch == "string"
assert data.Interval == "string"
assert data.InvalidFormat == "string"
assert data.ISBN == "string"
assert data.IsCalibreLibraryHelpText == "string"
assert data.IsCutoffCutoff == "string"
assert data.IsCutoffUpgradeUntilThisQualityIsMetOrExceeded == "string"
assert data.IsExpandedHideBooks == "string"
assert data.IsExpandedHideFileInfo == "string"
assert data.IsExpandedShowBooks == "string"
assert data.IsExpandedShowFileInfo == "string"
assert (
data.IsInUseCantDeleteAMetadataProfileThatIsAttachedToAnAuthorOrImportList
== "string"
)
assert (
data.IsInUseCantDeleteAQualityProfileThatIsAttachedToAnAuthorOrImportList
== "string"
)
assert data.IsShowingMonitoredMonitorSelected == "string"
assert data.IsShowingMonitoredUnmonitorSelected == "string"
assert data.IsTagUsedCannotBeDeletedWhileInUse == "string"
assert data.KeepAndUnmonitorMovie == "string"
assert data.KeyboardShortcuts == "string"
assert data.Label == "string"
assert data.Language == "string"
assert data.LanguageHelpText == "string"
assert data.Languages == "string"
assert data.Large == "string"
assert data.LastDuration == "string"
assert data.LastExecution == "string"
assert data.LastUsed == "string"
assert data.LastWriteTime == "string"
assert data.LatestBook == "string"
assert data.LaunchBrowserHelpText == "string"
assert data.Letterboxd == "string"
assert data.Level == "string"
assert data.LibraryHelpText == "string"
assert data.LinkHere == "string"
assert data.Links == "string"
assert data.ListExclusions == "string"
assert data.Lists == "string"
assert data.ListSettings == "string"
assert data.ListsSettingsSummary == "string"
assert data.ListSyncLevelHelpText == "string"
assert data.ListSyncLevelHelpTextWarning == "string"
assert data.ListTagsHelpText == "string"
assert data.ListUpdateInterval == "string"
assert data.LoadingBookFilesFailed == "string"
assert data.LoadingBooksFailed == "string"
assert data.LoadingMovieCreditsFailed == "string"
assert data.LoadingMovieExtraFilesFailed == "string"
assert data.LoadingMovieFilesFailed == "string"
assert data.Local == "string"
assert data.LocalPath == "string"
assert data.LocalPathHelpText == "string"
assert data.Location == "string"
assert data.LogFiles == "string"
assert data.Logging == "string"
assert data.LogLevel == "string"
assert data.LogLevelTraceHelpTextWarning == "string"
assert data.LogLevelvalueTraceTraceLoggingShouldOnlyBeEnabledTemporarily == "string"
assert data.LogOnly == "string"
assert data.LogRotateHelpText == "string"
assert data.LogRotation == "string"
assert data.Logs == "string"
assert data.LogSQL == "string"
assert data.LogSqlHelpText == "string"
assert data.LongDateFormat == "string"
assert data.LookingForReleaseProfiles1 == "string"
assert data.LookingForReleaseProfiles2 == "string"
assert data.LowerCase == "string"
assert data.MaintenanceRelease == "string"
assert data.Manual == "string"
assert data.ManualDownload == "string"
assert data.ManualImport == "string"
assert data.ManualImportSelectLanguage == "string"
assert data.ManualImportSelectMovie == "string"
assert data.ManualImportSelectQuality == "string"
assert data.ManualImportSetReleaseGroup == "string"
assert data.MappedDrivesRunningAsService == "string"
assert data.MarkAsFailed == "string"
assert data.MarkAsFailedMessageText == "string"
assert data.MassBookSearch == "string"
assert data.MassBookSearchWarning == "string"
assert data.MassMovieSearch == "string"
assert data.Max == "string"
assert data.MaximumLimits == "string"
assert data.MaximumSize == "string"
assert data.MaximumSizeHelpText == "string"
assert data.Mechanism == "string"
assert data.MediaInfo == "string"
assert data.MediaManagement == "string"
assert data.MediaManagementSettings == "string"
assert data.MediaManagementSettingsSummary == "string"
assert data.Medium == "string"
assert data.MediumFormat == "string"
assert data.MegabytesPerMinute == "string"
assert data.Message == "string"
assert data.Metadata == "string"
assert data.MetadataConsumers == "string"
assert data.MetadataProfile == "string"
assert data.MetadataProfileIdHelpText == "string"
assert data.MetadataProfiles == "string"
assert data.MetadataProviderSource == "string"
assert data.MetadataSettings == "string"
assert data.MetadataSettingsSummary == "string"
assert data.MetadataSource == "string"
assert data.MetadataSourceHelpText == "string"
assert data.MIA == "string"
assert data.Min == "string"
assert data.MinAvailability == "string"
assert data.MinFormatScoreHelpText == "string"
assert data.MinimumAge == "string"
assert data.MinimumAgeHelpText == "string"
assert data.MinimumAvailability == "string"
assert data.MinimumCustomFormatScore == "string"
assert data.MinimumFreeSpace == "string"
assert data.MinimumFreeSpaceWhenImportingHelpText == "string"
assert data.MinimumLimits == "string"
assert data.MinimumPages == "string"
assert data.MinimumPopularity == "string"
assert data.MinPagesHelpText == "string"
assert data.MinPopularityHelpText == "string"
assert data.Minutes == "string"
assert data.MinutesHundredTwenty == "string"
assert data.MinutesNinety == "string"
assert data.MinutesSixty == "string"
assert data.Missing == "string"
assert data.MissingBooks == "string"
assert data.MissingBooksAuthorMonitored == "string"
assert data.MissingBooksAuthorNotMonitored == "string"
assert data.MissingFromDisk == "string"
assert data.MissingMonitoredAndConsideredAvailable == "string"
assert data.MissingNotMonitored == "string"
assert data.Mode == "string"
assert data.Monday == "string"
assert data.Monitor == "string"
assert data.MonitorAuthor == "string"
assert data.MonitorBook == "string"
assert data.MonitorBookExistingOnlyWarning == "string"
assert data.Monitored == "string"
assert data.MonitoredAuthorIsMonitored == "string"
assert data.MonitoredAuthorIsUnmonitored == "string"
assert data.MonitoredHelpText == "string"
assert data.MonitoredOnly == "string"
assert data.MonitoredStatus == "string"
assert data.Monitoring == "string"
assert data.MonitoringOptions == "string"
assert data.MonitoringOptionsHelpText == "string"
assert data.MonitorMovie == "string"
assert data.MonitorNewItems == "string"
assert data.MonitorNewItemsHelpText == "string"
assert data.MonoVersion == "string"
assert data.Month == "string"
assert data.Months == "string"
assert data.More == "string"
| |
# OCR-D/ocrd_segment
from __future__ import absolute_import
import sys
import os
import json
from itertools import chain
import click
import numpy as np
from skimage import draw
from PIL import Image
from shapely.geometry import Polygon
from ocrd import Processor
from ocrd_utils import (
getLogger,
initLogging,
assert_file_grp_cardinality,
xywh_from_polygon,
polygon_from_points,
coordinates_of_segment,
MIMETYPE_PAGE
)
from ocrd_modelfactory import page_from_file
from ocrd_models.ocrd_page import parse as parse_page
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools.mask import (
encode as encodeMask,
merge as mergeMasks,
area as maskArea
)
from .config import OCRD_TOOL
TOOL = 'ocrd-segment-evaluate'
class EvaluateSegmentation(Processor):
def __init__(self, *args, **kwargs):
kwargs['ocrd_tool'] = OCRD_TOOL['tools'][TOOL]
kwargs['version'] = OCRD_TOOL['version']
super(EvaluateSegmentation, self).__init__(*args, **kwargs)
def process(self):
"""Performs segmentation evaluation with pycocotools on the workspace.
Open and deserialize PAGE files from the first and second input file group
(the first as ground truth, the second as prediction).
Then iterate over the element hierarchy down to ``level-of-operation``.
Aggregate and convert all pages' segmentation (coordinates and classes)
to COCO:
- On the region level, unless ``ignore-subtype``, differentiate segment
classes by their `@type`, if applicable.
- On the region level, unless ``for-categories`` is empty, select only
segment classes in that (comma-separated) list.
- If ``only-fg``, then use the foreground mask from the binarized
image inside each segment for overlap calculations.
Next, configure and run COCOEval for comparison of all pages. Show the matching
pairs (GT segment ID, prediction segment ID, IoU) for every overlap on each page.
Also, calculate per-class precision and recall (at the point of maximum recall).
Finally, get the typical summary mean average precision / recall (but without
restriction on the number of segments).
Write a JSON report to the output file group.
"""
LOG = getLogger('processor.EvaluateSegmentation')
assert_file_grp_cardinality(self.output_file_grp, 1)
assert_file_grp_cardinality(self.input_file_grp, 2, 'GT and evaluation data')
# region or line level?
level = self.parameter['level-of-operation']
onlyfg = self.parameter['only-fg']
typed = not self.parameter['ignore-subtype']
selected = self.parameter['for-categories']
if selected:
selected = selected.split(',')
# get input file groups
ifgs = self.input_file_grp.split(",")
# get input file tuples
ifts = self.zip_input_files(mimetype=MIMETYPE_PAGE)
# convert to 2 COCO datasets from all page pairs
categories = ["bg"] # needed by cocoeval
images = []
annotations_gt = []
annotations_dt = []
for ift in ifts:
file_gt, file_dt = ift
if not file_gt:
LOG.warning("skipping page %s missing from GT", file_gt.pageId)
continue
if not file_dt:
LOG.warning("skipping page %s missing from prediction", file_dt.pageId)
continue
LOG.info("processing page %s", file_gt.pageId)
pcgts_gt = page_from_file(self.workspace.download_file(file_gt))
pcgts_dt = page_from_file(self.workspace.download_file(file_dt))
page_gt = pcgts_gt.get_Page()
page_dt = pcgts_dt.get_Page()
if onlyfg:
page_image, page_coords, _ = self.workspace.image_from_page(page_gt, file_gt.pageId,
feature_selector='binarized')
page_mask = ~ np.array(page_image)
imgid = len(images)
images.append({'file_name': file_gt.pageId,
'width': page_gt.get_imageWidth(),
'height': page_gt.get_imageHeight(),
})
# read annotations from each page recursively (all categories including subtypes)
# and merge GT and prediction categories
_add_annotations(annotations_gt, page_gt, imgid, categories,
level=level, typed=typed,
coords=page_coords if onlyfg else None,
mask=page_mask if onlyfg else None)
_add_annotations(annotations_dt, page_dt, imgid, categories,
level=level, typed=typed,
coords=page_coords if onlyfg else None,
mask=page_mask if onlyfg else None)
if level == 'line':
categories.append('textline')
elif selected:
selected = [categories.index(cat) for cat in selected if cat in categories]
_add_ids(categories)
_add_ids(images)
_add_ids(annotations_gt, 1) # cocoeval expects annotation IDs starting at 1
_add_ids(annotations_dt, 1) # cocoeval expects annotation IDs starting at 1
LOG.info(f"found {len(annotations_gt)} GT / {len(annotations_dt)} DT segments"
f" in {len(categories) - 1} categories for {len(images)} images")
coco_gt = _create_coco(categories, images, annotations_gt)
coco_dt = _create_coco(categories, images, annotations_dt)
stats = evaluate_coco(coco_gt, coco_dt, self.parameter, selected)
# write regions to custom JSON for this page
file_id = 'id' + self.output_file_grp + '_report'
self.workspace.add_file(
ID=file_id,
file_grp=self.output_file_grp,
pageId=None,
local_filename=os.path.join(self.output_file_grp, file_id + '.json'),
mimetype='application/json',
content=json.dumps(stats, indent=2))
# todo: also write report for each page
@click.command()
@click.option('-G', '--gt-page-filelst', type=click.File('r'),
help="list file of ground-truth page file paths")
@click.option('-D', '--dt-page-filelst', type=click.File('r'),
help="list file of detection page file paths")
@click.option('-I', '--bin-img-filelst', type=click.File('r'),
help="list file of binarized image file paths")
@click.option('-L', '--level-of-operation', type=click.Choice(['region', 'line']), default='region',
help="hierarchy level of segments to compare")
@click.option('-T', '--ignore-subtype', is_flag=True,
help="on region level, ignore @type distinction")
@click.option('-C', '--for-categories', default='', type=str,
help="on region level, comma-separated list of category names to evaluate (empty for all)")
@click.option('-R', '--report-file', type=click.File('w'), default="eval.log",
help="file name to write evaluation results to")
@click.argument('tabfile', type=click.File('r'), required=False)
def standalone_cli(gt_page_filelst,
dt_page_filelst,
bin_img_filelst,
level_of_operation,
ignore_subtype,
for_categories,
report_file,
tabfile):
"""Performs segmentation evaluation with pycocotools on the given PAGE-XML files.
\b
Open and deserialize PAGE files from the list files.
Then iterate over the element hierarchy down to ``level-of-operation``.
Aggregate and convert all pages' segmentation (coordinates and classes)
to COCO:
\b
- On the region level, unless ``ignore-subtype``, differentiate segment
classes by their `@type`, if applicable.
- On the region level, unless ``for-categories`` is empty, select only
segment classes in that (comma-separated) list.
- If image files are given (as separate file list or in the 3rd column
of the tab-separated list file), then for each PAGE file pair, use
the foreground mask from the binarized image inside all segments for
overlap calculations.
\b
Next, configure and run COCOEval for comparison of all pages. Show the
matching pairs (GT segment ID, prediction segment ID, IoU) for every
overlap on each page.
Also, calculate per-class precision and recall (at maximum recall).
Finally, get the typical summary mean average precision / recall
(but without restriction on the number of segments), and write all
statistics to ``report-file``.
\b
Write a JSON report to the output file group.
"""
assert (tabfile is None) == (gt_page_filelst is not None) == (dt_page_filelst is not None), \
"pass file lists either as tab-separated single file or as separate files"
if tabfile is None:
gt_page_files = [line.strip() for line in gt_page_filelst.readlines()]
dt_page_files = [line.strip() for line in dt_page_filelst.readlines()]
assert len(gt_page_files) == len(dt_page_files), \
"number of DT files must match number of GT files"
if bin_img_filelst is not None:
bin_img_files = [line.strip() for line in bin_img_filelst.readlines()]
assert len(bin_img_files) == len(gt_page_files), \
"number of image files must match number of GT files"
else:
bin_img_files = None
else:
files = [line.strip().split('\t') for line in tabfile.readlines()]
assert len(files), "list of files is empty"
len0 = len(files[0])
assert 2 <= len0 <= 3, "list of files must be tab-separated (GT, DT[, bin-img])"
assert all(map(lambda line: len(line) == len0, files)), \
"number of DT files must match number of GT files"
if len0 == 2:
gt_page_files, dt_page_files = zip(*files)
bin_img_files = None
else:
gt_page_files, dt_page_files, bin_img_files = zip(*files)
stats = evaluate_files(gt_page_files,
dt_page_files,
bin_img_files,
level_of_operation,
not ignore_subtype,
for_categories)
json.dump(stats, report_file, indent=2)
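# Hedged usage sketch for the click command defined above, driven through click's
# test runner so that no console-script name has to be assumed (the entry-point name
# this package registers is not shown in this file):
#
#   from click.testing import CliRunner
#   runner = CliRunner()
#   # separate list files, one PAGE-XML path per line in each
#   runner.invoke(standalone_cli, ['-G', 'gt_pages.lst', '-D', 'dt_pages.lst',
#                                  '-R', 'report.json'])
#   # or a single tab-separated file with "GT<TAB>DT[<TAB>binarized-image]" rows
#   runner.invoke(standalone_cli, ['-L', 'line', 'pairs.tsv'])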
# standalone entry point
def evaluate_files(gt_files, dt_files, img_files=None, level='region', typed=True, selected=None):
initLogging()
LOG = getLogger('processor.EvaluateSegmentation')
categories = ["bg"] # needed by cocoeval
images = []
annotations_gt = []
annotations_dt = []
for gt_file, dt_file, img_file in zip(gt_files, dt_files,
img_files or [None] * len(gt_files)):
pcgts_gt = parse_page(gt_file)
pcgts_dt = parse_page(dt_file)
page_id = pcgts_gt.pcGtsId or gt_file
LOG.info("processing page %s", page_id)
page_gt = pcgts_gt.get_Page()
page_dt = pcgts_dt.get_Page()
if img_file:
page_image = Image.open(img_file)
assert page_image.mode == '1', "input images must already be binarized"
assert page_image.width - 2 < page_gt.get_imageWidth() < page_image.width + 2, \
"mismatch between width of binary image and PAGE description"
assert page_image.height - 2 < page_gt.get_imageHeight() < page_image.height + 2, \
"mismatch between height of binary image and PAGE description"
page_mask = ~ np.array(page_image)
page_coords = {"transform": np.eye(3), "angle": 0, "features": "binarized"}
imgid = len(images)
images.append({'file_name': page_id,
'width': page_gt.get_imageWidth(),
'height': page_gt.get_imageHeight(),
})
# read annotations from each page recursively (all categories including subtypes)
# and merge GT and prediction categories
_add_annotations(annotations_gt, page_gt, imgid, categories,
level=level, typed=typed,
coords=page_coords if img_file else None,
mask=page_mask if img_file else None)
_add_annotations(annotations_dt, page_dt, imgid, categories,
level=level, typed=typed,
coords=page_coords if img_file else None,
mask=page_mask if img_file else None)
if level == 'line':
categories.append('textline')
elif selected:
selected = [categories.index(cat) for cat in selected if cat in categories]
_add_ids(categories)
_add_ids(images)
_add_ids(annotations_gt, 1) # cocoeval expects annotation IDs starting at 1
_add_ids(annotations_dt, 1) # cocoeval expects annotation IDs starting at 1
LOG.info(f"found {len(annotations_gt)} GT / {len(annotations_dt)} DT segments"
f" in {len(categories) - 1} categories for {len(images)} images")
coco_gt = _create_coco(categories, images, annotations_gt)
coco_dt = _create_coco(categories, images, annotations_dt)
parameters = {"level-of-operation": level,
"only-fg": bool(img_files),
"ignore-subtype": not typed,
"for-categories": selected}
stats = evaluate_coco(coco_gt, coco_dt, parameters, selected)
return stats
def evaluate_coco(coco_gt, coco_dt, parameters, catIds=None):
LOG = getLogger('processor.EvaluateSegmentation')
LOG.info("comparing segmentations")
stats = dict(parameters)
coco_eval
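# The sample above is truncated inside ``evaluate_coco``. The function below is a
# minimal, self-contained sketch of how such a comparison is typically driven with
# pycocotools; it mirrors the signature above but is NOT the project's actual
# implementation (the per-page and per-class reporting described in the docstrings
# is omitted here).
from pycocotools.cocoeval import COCOeval

def evaluate_coco_sketch(coco_gt, coco_dt, parameters, catIds=None):
    stats = dict(parameters)
    coco_eval = COCOeval(coco_gt, coco_dt, iouType='segm')
    if catIds:
        # restrict evaluation to the selected category ids, as ``for-categories`` suggests
        coco_eval.params.catIds = catIds
    coco_eval.evaluate()    # per-image, per-category IoU matching
    coco_eval.accumulate()  # precision/recall accumulation over all pages
    coco_eval.summarize()   # prints the usual AP/AR summary table
    stats['scores'] = coco_eval.stats.tolist()  # the 12 summary metrics
    return stats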
<reponame>IsaiahPressman/Kaggle_Hungry_Geese<gh_stars>0
from kaggle_environments import make as kaggle_make
from kaggle_environments.envs.hungry_geese.hungry_geese import Action, Configuration, row_col
from typing import *
import torch
from ..config import N_PLAYERS
from .goose_env import ObsType
from ..utils import STATE_TYPE, torch_terminal_value_func
ACTIONS_TUPLE = tuple(Action)
class TorchEnv:
"""
A PyTorch vectorized version of goose_env, able to be run on GPU
"""
def __init__(
self,
n_envs: int,
obs_type: ObsType,
config: Optional[Configuration] = None,
n_geese: int = N_PLAYERS,
device: torch.device = torch.device('cuda')
):
if config is None:
config = Configuration(kaggle_make('hungry_geese', debug=False).configuration)
self.config = config
self.n_rows = config.rows
self.n_cols = config.columns
self.max_len = int(config.max_length)
self.n_food = config.min_food
self.hunger_rate = config.hunger_rate
self.episode_steps = config.episode_steps
self.n_envs = n_envs
self.obs_type = obs_type
self.n_geese = n_geese
self.device = device
tensor_kwargs = dict(
dtype=torch.int64,
device=self.device
)
self.geese = torch.zeros((self.n_envs, self.n_geese, self.max_len, 2), **tensor_kwargs)
self.geese_tensor = torch.zeros((self.n_envs, self.n_geese, self.n_rows, self.n_cols), **tensor_kwargs)
self.head_ptrs = torch.zeros((self.n_envs, self.n_geese), **tensor_kwargs)
self.tail_ptrs = torch.zeros_like(self.head_ptrs)
self.last_actions = torch.zeros_like(self.head_ptrs)
self.lengths = torch.ones_like(self.head_ptrs)
self.rewards = torch.zeros_like(self.head_ptrs)
self.alive = torch.ones((self.n_envs, self.n_geese), dtype=torch.bool, device=self.device)
self.ate_last_turn = torch.zeros_like(self.alive)
self.food_tensor = torch.zeros((self.n_envs, self.n_rows, self.n_cols), **tensor_kwargs)
self.step_counters = torch.zeros((self.n_envs,), **tensor_kwargs)
self.dones = torch.ones((self.n_envs,), dtype=torch.bool, device=self.device)
self.obs = torch.zeros((self.n_envs, *obs_type.get_obs_spec(self.n_geese)[1:]),
dtype=torch.float32, device=self.device)
self.env_idxs = torch.arange(self.n_envs, device=self.device)
self.env_geese_idxs = self.env_idxs.repeat_interleave(self.n_geese)
self.geese_idxs = torch.arange(self.n_geese, device=self.device).repeat(self.n_envs)
self.env_food_idxs = self.env_idxs.repeat_interleave(self.n_food)
self.food_idxs = torch.arange(self.n_food, device=self.device).repeat(self.n_envs)
self.loc_to_row_col = torch.tensor(
[row_col(i, self.n_cols) for i in range(self.n_rows * self.n_cols)],
**tensor_kwargs
).view(self.n_rows * self.n_cols, 2)
self.row_col_to_loc = torch.arange(
self.n_rows * self.n_cols,
device=self.device
).view(self.n_rows, self.n_cols)
self.move_to_offset = torch.tensor(
[list(a.to_row_col()) for a in Action],
**tensor_kwargs
)
self.wrap_vals = torch.tensor([self.n_rows, self.n_cols], **tensor_kwargs)
self.goose_body_idxs = torch.arange(self.max_len, device=self.device)
self.obs_channel_idxs = {}
self.geese_channel_idxs = None
if self.obs_type == ObsType.COMBINED_GRADIENT_OBS_SMALL:
player_channel_list = [
'contains_head',
'contains_body',
]
for i, channel in enumerate(player_channel_list):
self.obs_channel_idxs[channel] = torch.arange(
i,
self.n_geese * len(player_channel_list),
len(player_channel_list),
device=self.device
)
self.obs_channel_idxs.update({
'contains_food': torch.tensor([-3]).to(device=self.device),
'steps_since_starvation': torch.tensor([-2]).to(device=self.device),
'current_step': torch.tensor([-1]).to(device=self.device),
})
self.geese_channel_idxs = torch.arange(
self.n_geese * len(player_channel_list),
device=self.device
).view(
1,
self.n_geese,
len(player_channel_list)
).expand(
self.n_envs,
self.n_geese,
len(player_channel_list)
).clone()
elif self.obs_type == ObsType.COMBINED_GRADIENT_OBS_LARGE:
player_channel_list = [
'contains_head',
'contains_tail',
'contains_body',
]
for i, channel in enumerate(player_channel_list):
self.obs_channel_idxs[channel] = torch.arange(
i,
self.n_geese * len(player_channel_list),
len(player_channel_list),
device=self.device
)
self.obs_channel_idxs.update({
'contains_food': torch.tensor([-3]).to(device=self.device),
'steps_since_starvation': torch.tensor([-2]).to(device=self.device),
'current_step': torch.tensor([-1]).to(device=self.device),
})
self.geese_channel_idxs = torch.arange(
self.n_geese * len(player_channel_list),
device=self.device
).view(
1,
self.n_geese,
len(player_channel_list)
).expand(
self.n_envs,
self.n_geese,
len(player_channel_list)
).clone()
elif self.obs_type == ObsType.COMBINED_GRADIENT_OBS_FULL:
player_channel_list = [
'contains_head',
'contains_tail',
'last_head_loc',
'contains_body',
]
for i, channel in enumerate(player_channel_list):
self.obs_channel_idxs[channel] = torch.arange(
i,
self.n_geese * len(player_channel_list),
len(player_channel_list),
device=self.device
)
self.obs_channel_idxs.update({
'contains_food': torch.tensor([-3]).to(device=self.device),
'steps_since_starvation': torch.tensor([-2]).to(device=self.device),
'current_step': torch.tensor([-1]).to(device=self.device),
})
self.geese_channel_idxs = torch.arange(
self.n_geese * len(player_channel_list),
device=self.device
).view(
1,
self.n_geese,
len(player_channel_list)
).expand(
self.n_envs,
self.n_geese,
len(player_channel_list)
).clone()
else:
raise NotImplementedError(f'Unsupported obs_type: {self.obs_type}')
self.reset()
def reset(self, get_reward_and_dead: bool = False):
self.geese[self.dones] = 0
self.geese_tensor[self.dones] = 0
self.head_ptrs[self.dones] = 0
self.tail_ptrs[self.dones] = 0
self.last_actions[self.dones] = 0
self.lengths[self.dones] = 1
self.rewards[self.dones] = 0
self.alive[self.dones] = True
self.ate_last_turn[self.dones] = False
self.food_tensor[self.dones] = 0
self.step_counters[self.dones] = 0
self.obs[self.dones] = 0.
head_locs = torch.multinomial(
torch.ones((self.dones.sum(), self.n_rows * self.n_cols), device=self.device),
self.n_geese
)
done_env_idxs = self.env_idxs[self.dones].repeat_interleave(self.n_geese)
done_geese_env_idxs = self.geese_idxs.view(self.n_envs, self.n_geese)[self.dones].view(-1)
done_geese_idxs = self.loc_to_row_col[head_locs]
self.geese[self.dones, :, 0] = done_geese_idxs
self.geese_tensor[
done_env_idxs,
done_geese_env_idxs,
done_geese_idxs[:, :, 0].view(-1),
done_geese_idxs[:, :, 1].view(-1)
] = 1
food_weights = 1. - (self.all_geese_tensor + self.food_tensor).view(self.n_envs, -1)[self.dones]
food_locs = torch.multinomial(
food_weights,
self.n_food
)
done_env_idxs = self.env_idxs[self.dones].repeat_interleave(self.n_food)
done_food_idxs = self.loc_to_row_col[food_locs]
self.food_tensor[done_env_idxs, done_food_idxs[:, :, 0].view(-1), done_food_idxs[:, :, 1].view(-1)] = 1
self._initialize_obs(self.dones)
self.dones[:] = False
if get_reward_and_dead:
agent_rankings = torch_terminal_value_func(self.rewards)
agents_not_done = self.alive & ~self.dones.unsqueeze(-1)
returned_rewards = torch.where(
agents_not_done,
torch.zeros_like(agent_rankings),
agent_rankings
)
return self.obs, returned_rewards, ~agents_not_done
else:
return self.obs
def force_reset(self, *args, **kwargs):
self.dones[:] = True
return self.reset(*args, **kwargs)
@property
def heads(self) -> torch.Tensor:
return self.geese[
self.env_geese_idxs,
self.geese_idxs,
self.head_ptrs.view(-1)
].view(self.n_envs, self.n_geese, 2)
@property
def head_locs(self) -> torch.Tensor:
heads = self.heads.view(-1, 2)
return self.row_col_to_loc[
heads[:, 0],
heads[:, 1]
].view(self.n_envs, self.n_geese)
@property
def tails(self) -> torch.Tensor:
return self.geese[
self.env_geese_idxs,
self.geese_idxs,
self.tail_ptrs.view(-1)
].view(self.n_envs, self.n_geese, 2)
@property
def all_geese_tensor(self) -> torch.Tensor:
return self.geese_tensor.sum(dim=1)
def get_available_action_masks(self, dtype: torch.dtype = torch.bool) -> torch.Tensor:
action_masks = torch.ones((self.n_envs, self.n_geese, 4), dtype=dtype, device=self.device)
action_masks.scatter_(
-1,
self.last_actions.unsqueeze(-1),
torch.zeros((self.n_envs, self.n_geese, 1), dtype=dtype, device=self.device)
)
action_masks[self.step_counters == 0] = True
return action_masks[:, :, [2, 3, 0, 1]]
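# Note on the two steps above: scatter_ zeroes each goose's own last-action column,
# and the final re-indexing with [2, 3, 0, 1] appears to move that zero into the slot
# of the opposite action (NORTH<->SOUTH, EAST<->WEST in kaggle's Action ordering), so
# the returned mask forbids an immediate reversal -- except on step 0, where every
# action is left available.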
def get_heads_tensor(self, dtype: Optional[torch.dtype] = None) -> torch.Tensor:
if dtype is None:
dtype = self.geese_tensor.dtype
heads_tensor = torch.zeros(self.geese_tensor.shape, dtype=dtype, device=self.device)
heads = self.heads[self.alive]
heads_tensor[
self.env_geese_idxs[self.alive.view(-1)],
self.geese_idxs[self.alive.view(-1)],
heads[:, 0],
heads[:, 1]
] = 1
return heads_tensor
def get_tails_tensor(self, dtype: Optional[torch.dtype] = None) -> torch.Tensor:
if dtype is None:
dtype = self.geese_tensor.dtype
tails_tensor = torch.zeros(self.geese_tensor.shape, dtype=dtype, device=self.device)
tails = self.tails[self.alive]
tails_tensor[
self.env_geese_idxs[self.alive.view(-1)],
self.geese_idxs[self.alive.view(-1)],
tails[:, 0],
tails[:, 1]
] = 1
return tails_tensor
def _wrap(self, position_tensor: torch.Tensor) -> torch.Tensor:
view_shape = [1] * (position_tensor.ndim - 1)
return position_tensor % self.wrap_vals.view(*view_shape, 2)
def _kill_geese(self, kill_goose_mask: torch.Tensor) -> NoReturn:
self.geese_tensor[kill_goose_mask] = 0
self.alive[kill_goose_mask] = False
self.ate_last_turn[kill_goose_mask] = False
def _move_geese(self, actions: torch.Tensor, update_mask: torch.Tensor) -> NoReturn:
update_geese = self.alive & update_mask.unsqueeze(dim=-1)
# Get new head positions
offsets = self.move_to_offset[actions]
new_heads = self._wrap(self.heads + offsets)
# Check for illegal actions
illegal_actions = (((self.last_actions - actions).abs() == 2) &
update_geese &
(self.step_counters.unsqueeze(-1) > 1))
# Update last action
self.last_actions[update_mask] = torch.where(
self.alive[update_mask],
actions[update_mask],
torch.zeros_like(self.last_actions[update_mask])
)
# Kill geese that took illegal actions
self._kill_geese(illegal_actions)
update_geese = self.alive & update_mask.unsqueeze(dim=-1)
# Update self.head_ptrs
self.head_ptrs[update_geese] = (self.head_ptrs[update_geese] + 1) % self.max_len
# Update self.geese
updated_new_heads = new_heads[update_geese].view(-1, 2)
updated_env_geese_idxs = self.env_geese_idxs[update_geese.view(-1)]
updated_geese_idxs = self.geese_idxs[update_geese.view(-1)]
self.geese[
updated_env_geese_idxs,
updated_geese_idxs,
self.head_ptrs.view(-1)[update_geese.view(-1)]
] = updated_new_heads
# Update self.geese_tensor by adding new heads
self.geese_tensor[
updated_env_geese_idxs,
updated_geese_idxs,
updated_new_heads[:, 0],
updated_new_heads[:, 1]
] += 1
# Check if any geese eat
goose_eat = (self.food_tensor[
self.env_geese_idxs,
new_heads.view(-1, 2)[:, 0],
new_heads.view(-1, 2)[:, 1]
] > 0) & update_geese.view(-1)
self.ate_last_turn[update_geese] = goose_eat[update_geese.view(-1)]
# Remove food where geese have eaten
self.food_tensor[
self.env_geese_idxs[goose_eat],
new_heads.view(-1, 2)[goose_eat, 0],
new_heads.view(-1, 2)[goose_eat, 1]
] = 0
# Update self.geese_tensor by removing tails
updated_tails = self.tails[update_geese].view(-1, 2)
self.geese_tensor[
updated_env_geese_idxs,
updated_geese_idxs,
updated_tails[:, 0],
updated_tails[:, 1]
] -= (1 - goose_eat.to(torch.int64))[update_geese.view(-1)]
# Update self.tail_ptrs
grow_goose = goose_eat & (self.lengths.view(-1) < self.max_len)
self.tail_ptrs[:] = torch.where(
grow_goose,
self.tail_ptrs.view(-1),
(self.tail_ptrs.view(-1) + 1) % self.max_len
).view(self.n_envs, self.n_geese)
# Update self.lengths
self.lengths[grow_goose.view(self.n_envs, self.n_geese)] += 1
# Check if any geese collide with themselves
self_collision = self.geese_tensor[
self.env_geese_idxs,
self.geese_idxs,
self.heads.view(-1, 2)[:, 0],
self.heads.view(-1, 2)[:, 1]
] > 1
self_collision = self_collision.view(self.n_envs, self.n_geese) & update_geese
# Kill geese that collided with themselves
self._kill_geese(self_collision)
update_geese = self.alive & update_mask.unsqueeze(dim=-1)
# Shrink geese every self.hunger_rate steps
shrink_goose = ((self.step_counters.repeat_interleave(self.n_geese) % self.hunger_rate == 0) &
update_geese.view(-1))
shrink_tails = self.tails.view(-1, 2)[shrink_goose]
self.geese_tensor[
self.env_geese_idxs[shrink_goose],
self.geese_idxs[shrink_goose],
shrink_tails[:, 0],
shrink_tails[:, 1]
] -= 1
self.tail_ptrs[:] = torch.where(
shrink_goose,
(self.tail_ptrs.view(-1) + 1) % self.max_len,
self.tail_ptrs.view(-1)
).view(self.n_envs, self.n_geese)
self.lengths[shrink_goose.view(self.n_envs, self.n_geese)] -= 1
self._kill_geese((self.lengths == 0) & update_geese)
update_geese = self.alive & update_mask.unsqueeze(dim=-1)
# Check for collisions between geese
collision = self.all_geese_tensor[
self.env_geese_idxs,
self.heads.view(-1, 2)[:, 0],
self.heads.view(-1, 2)[:, 1]
] > 1
collision = collision.view(self.n_envs, self.n_geese) & update_geese
self._kill_geese(collision)
def _replenish_food(self, update_mask: torch.Tensor) -> NoReturn:
all_geese_cached = self.all_geese_tensor
food_weights = 1. - (all_geese_cached + self.food_tensor).view(self.n_envs, -1)[update_mask]
# Ensure that if there are no available food locations, an error is not thrown
food_weights[food_weights.sum(dim=-1) == 0] = 1.
food_locs = torch.multinomial(
food_weights,
self.n_food
).view(-1)
# Make sure to only put food in environments according to the number of food missing from that env
n_food_needed = self.n_food - self.food_tensor.view(self.n_envs, -1)[update_mask].sum(dim=-1, keepdims=True)
spots_available = self.n_rows * self.n_cols - (all_geese_cached +
self.food_tensor
).view(self.n_envs, -1)[update_mask].sum(dim=-1, keepdims=True)
new_food_needed = torch.arange(self.n_food, device=self.device).unsqueeze(dim=0).expand(update_mask.sum(), -1)
new_food_needed = (new_food_needed < torch.minimum(n_food_needed, spots_available)).view(-1)
new_food_env_idxs = self.env_idxs[update_mask].repeat_interleave(self.n_food)[new_food_needed]
new_food_idxs = self.loc_to_row_col[food_locs[new_food_needed]]
self.food_tensor[new_food_env_idxs, new_food_idxs[:, 0], new_food_idxs[:, 1]] = 1
def _check_if_done(self) -> NoReturn:
self.dones[:] = (self.alive.sum(dim=-1) <= 1) | (self.step_counters >= self.episode_steps - 1)
def _initialize_obs(self, new_envs_mask: torch.Tensor) -> NoReturn:
if self.obs_type == ObsType.COMBINED_GRADIENT_OBS_SMALL:
updated_env_geese_idxs = self.env_geese_idxs[new_envs_mask.repeat_interleave(self.n_geese)]
updated_obs_channel_idxs = {
key: val.unsqueeze(0).expand(
self.n_envs,
-1
)[new_envs_mask].view(-1) for key, val in self.obs_channel_idxs.items()
}
self.obs[new_envs_mask] = 0.
self.obs[
updated_env_geese_idxs,
updated_obs_channel_idxs['contains_head']
] = self.get_heads_tensor(dtype=torch.float32)[new_envs_mask].view(-1, self.n_rows, self.n_cols)
updated_heads = self.heads[new_envs_mask].view(-1, 2)
self.obs[
updated_env_geese_idxs,
updated_obs_channel_idxs['contains_body'],
updated_heads[:, 0],
updated_heads[:, 1]
] = self.lengths.to(dtype=torch.float32)[new_envs_mask].view(-1) / self.max_len
self.obs[
self.env_idxs[new_envs_mask],
updated_obs_channel_idxs['contains_food']
] = self.food_tensor[new_envs_mask].to(dtype=torch.float32)
elif self.obs_type == ObsType.COMBINED_GRADIENT_OBS_LARGE:
updated_env_geese_idxs = self.env_geese_idxs[new_envs_mask.repeat_interleave(self.n_geese)]
updated_obs_channel_idxs = {
key: val.unsqueeze(0).expand(
self.n_envs,
-1
)[new_envs_mask].view(-1) for key, val in self.obs_channel_idxs.items()
}
self.obs[new_envs_mask] = 0.
self.obs[
updated_env_geese_idxs,
updated_obs_channel_idxs['contains_head']
] = self.get_heads_tensor(dtype=torch.float32)[new_envs_mask].view(-1, self.n_rows, self.n_cols)
self.obs[
updated_env_geese_idxs,
updated_obs_channel_idxs['contains_tail']
] = self.get_tails_tensor(dtype=torch.float32)[new_envs_mask].view(-1, self.n_rows, self.n_cols)
updated_heads
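# The sample above is cut off mid-statement, so the remainder of the class (including
# its step logic) is not visible here. Below is a minimal usage sketch of the API that
# is shown above -- constructor, reset() and get_available_action_masks() -- assuming
# it runs in the same module as the class definition (torch, ObsType and TorchEnv are
# already in scope there):
def _torch_env_usage_sketch(n_envs=32):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    env = TorchEnv(n_envs=n_envs,
                   obs_type=ObsType.COMBINED_GRADIENT_OBS_SMALL,
                   device=device)
    obs = env.reset()                         # float32 observation batch, one slice per env
    masks = env.get_available_action_masks()  # (n_envs, n_geese, 4) bool; False = forbidden reversal
    return obs, masks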
# Gauss-Seidel solver is used
import numpy as np
import math
import time
# Constants
rho = 1.19 # Density of air (kg/m^3)
Cp = 1005 # Specific heat of air (J/kg-K)
visc_k = 1.5462e-5 # Kinematic viscosity of air (m^2/s)
visc_d = 1.84e-5 # Dynamic viscosity of air (N-s/m^2)
k_air = 0.0261 # Thermal conductivity of air (W/m-K)
g = 9.81 # Gravitational acceleration (m/s^2)
beta = 0.00341 # Thermal expansion coefficient of air (1/K)
class PFM:
def __init__(self, design_inputs, solver_config):
# Input - 1 design variables
self.input_1 = design_inputs[0]
# Input - 2 design variables
self.input_2 = design_inputs[1]
# Heat source
self.q_total = design_inputs[2]
# Opening parameters
self.opening = design_inputs[3]
# Left wall parameters
self.left_wall = design_inputs[4]
# Right wall parameters
self.right_wall = design_inputs[5]
# Top wall parameters
self.top_wall = design_inputs[6]
# Bottom wall parameters
self.bottom_wall = design_inputs[7]
# Solver config
self.solver_config = solver_config
# Temperature parameters
self.temp = self.solver_config[2]
self.solve_temp = self.temp[0]
# Iteration parameters
self.PFM_parameter = self.solver_config[5]
# monitor point
self.monitor = self.solver_config[4]
self.monitor_x = self.monitor[0]
self.monitor_y = self.monitor[1]
self.monitor_U = []
self.monitor_V = []
self.monitor_P = []
self.monitor_T = []
# Other Variables
self.U = None
self.V = None
self.T = None
self.P = None
self.U_col = None
self.V_col = None
self.T_col = None
self.P_col = None
def initialize(self):
b = self.opening[0]
if b == 1:
f = 0
else:
f = (1 / b ** 2) * (1 + 0.5 * (1 - b) ** 0.75 + 1.414 * (1 - b) ** 0.375)
N = self.solver_config[0]
dx = 1 / N
V_1 = self.input_1[0]
V_2 = self.input_2[0]
V_left = self.left_wall[0]
V_right = self.right_wall[0]
U_top = self.top_wall[0]
U_bottom = self.bottom_wall[0]
V_scale = max(
abs(V_1), abs(V_2), abs(V_left), abs(V_right), abs(U_top), abs(U_bottom), 1
)
P_scale = abs(rho * V_scale ** 2) / 2
T_1 = self.input_1[1]
T_2 = self.input_2[1]
T_ref = self.temp[1]
DT_scale = max(abs(T_1 - T_2), abs(T_1 - T_ref), abs(T_2 - T_ref), 1)
q = self.q_total / N
self.U = np.zeros((N + 1, N + 2))
self.V = np.zeros((N + 2, N + 1))
self.P = np.zeros((N + 2, N + 2))
self.T = T_ref * np.ones((N + 2, N + 2))
self.phi = np.zeros((N + 2, N + 2))
self.U_col = np.zeros((N, N))
self.V_col = np.zeros((N, N))
self.T_col = np.zeros((N, N))
self.P_col = np.zeros((N, N))
self.monitor_U.append(0)
self.monitor_V.append(0)
self.monitor_P.append(0)
self.monitor_T.append(0)
return f, N, dx, V_scale, P_scale, DT_scale, q
def set_bocos(self, N):
# N = self.solver_config[0]
V_1 = self.input_1[0]
theta_1 = self.input_1[2]
T_1 = self.input_1[1]
V_2 = self.input_2[0]
theta_2 = self.input_2[2]
T_2 = self.input_2[1]
V_left = self.left_wall[0]
T_left = self.left_wall[1]
V_right = self.right_wall[0]
T_right = self.right_wall[1]
U_top = self.top_wall[0]
T_top = self.top_wall[1]
U_bottom = self.bottom_wall[0]
T_bottom = self.bottom_wall[1]
V_out = (V_1 + V_2) / 2
# left wall velocity and temperature
self.U[0, :] = 0
self.T[0, :] = T_left
self.V[0, :] = V_left
# right wall velocity and temperature
self.U[N, :] = 0
self.T[N + 1, :] = T_right
self.V[N + 1, :] = V_right
# top wall velocity and temperature
self.V[:, N] = 0
self.T[:, N + 1] = T_top
if V_out != 0:
for i in range(int((3 / 8) * N + 1), int((5 / 8) * N) + 1):
self.V[i, N] = V_out
self.U[:, N + 1] = U_top
# bottom wall velocity and temperature
self.V[:, 0] = 0
self.T[:, 0] = T_bottom
if V_1 != 0:
for i in range(int(N / 8) + 1, int(N / 4) + 1):
self.V[i, 0] = V_1
self.T[i, 0] = T_1
if V_2 != 0:
for i in range(int(3 / 4 * N) + 1, int(7 / 8 * N) + 1):
self.V[i, 0] = V_2
self.T[i, 0] = T_2
self.U[:, 0] = U_bottom
if V_1 != 0:
for i in range(int(N / 8), int(N / 4) + 1):
self.U[i, 0] = V_1 / (math.tan(theta_1 * 0.017453293))
if V_2 != 0:
for i in range(int(3 / 4 * N), int(7 / 8 * N) + 1):
self.U[i, 0] = V_2 / (math.tan(theta_2 * 0.017453293))
def checkMassBalanceBottom(self, d):
f = self.opening[0]
L = self.opening[1]
N = self.solver_config[0]
V_1 = self.input_1[0]
V_2 = self.input_2[0]
sum = 0
for i in range(1, N + 1):
sum = (
sum
+ np.sign(self.P[i, int(N * L)] - self.P[i, int(N * L) + 1] + d)
* (
(2 * abs(self.P[i, int(N * L)] - self.P[i, int(N * L) + 1] + d))
/ (f * rho)
)
** 0.5
)
massBalanceBottom = (N / 8) * (V_1 + V_2) - sum
return massBalanceBottom
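# checkMassBalanceBottom() models the interior opening as an orifice: for every column
# it evaluates V_i = sign(dP_i) * sqrt(2*|dP_i| / (f*rho)), where dP_i is the pressure
# difference across the opening plus the trial correction d, sums those velocities and
# returns the inlet flow (N/8)*(V_1 + V_2) minus that sum. A zero of this residual
# defines the pressure correction sought below.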
def presCorrectionBottom(self):
low = -5000
high = 5000
lowVal = self.checkMassBalanceBottom(low)
highVal = self.checkMassBalanceBottom(high)
for ii in range(1, 50):
Middle = (low + high) / 2
midVal = self.checkMassBalanceBottom(Middle)
if np.sign(midVal) == np.sign(lowVal):
low = Middle
lowVal = midVal
else:
high = Middle
highVal = midVal
pressureCorrectionBottom = Middle
return pressureCorrectionBottom
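# presCorrectionBottom() finds that zero by plain bisection: the correction is
# bracketed in [-5000, 5000], the interval is halved for 49 iterations while the sign
# change is kept between the bracket ends, and the final midpoint is returned.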
def Solve_PFM(self):
ref = time.time()
f, N, dx, V_scale, P_scale, DT_scale, q = self.initialize()
self.set_bocos(N)
PFMOuter_Iter = self.PFM_parameter[0]
T_iter = self.PFM_parameter[2]
for t in range(1, PFMOuter_Iter + 1):
if f == 0:
self.velPotAll(N, dx)
self.BernoulliPres(N)
else:
self.BernoulliPres(N)
self.scalePresBottom(N)
self.resetMidV(N)
self.velPotBottom(N, dx)
self.velPotTop(N, dx)
self.monitor_UVP(V_scale, P_scale, N)
if self.solve_temp == "Y":
self.temperature(DT_scale, N)
self.output(N)
timestamp = round(time.time() - ref, 2)
monitor_data = []
monitor_data.append(np.arange(PFMOuter_Iter + 1))
monitor_data.append(self.monitor_U)
monitor_data.append(self.monitor_V)
monitor_data.append(self.monitor_P)
monitor_data.append(self.monitor_T)
# Append one more array to accommodate temperature iterations (T_iter for PFM is different)
monitor_data.append(np.arange(T_iter + 1))
mass = self.calcMassBalance(N)
return (
self.U_col,
self.V_col,
self.T_col,
self.P_col,
timestamp,
monitor_data,
mass,
)
def velPotAll(self, N, dx):
# Velocity Potentials All() ---------------------
VelPot_Iter = self.PFM_parameter[1]
omega = self.solver_config[1]
# Solve for velocity potentials at each scalar cell using Gauss-Seidel
for k in range(1, VelPot_Iter + 1):
# Bottom and top fictitious halo cells
for i in range(1, N + 1):
self.phi[i, 0] = (1 - omega) * self.phi[i, 0] + omega * (
self.phi[i, 1] - dx * self.V[i, 0]
)
self.phi[i, N + 1] = (1 - omega) * self.phi[i, N + 1] + omega * (
self.phi[i, N] + dx * self.V[i, N]
)
# left and right fictitious halo cells
for j in range(1, N + 1):
self.phi[0, j] = (1 - omega) * self.phi[0, j] + omega * (
self.phi[1, j] - dx * self.U[0, j]
)
self.phi[N + 1, j] = (1 - omega) * self.phi[N + 1, j] + omega * (
self.phi[N, j] + dx * self.U[N, j]
)
# interior cells
for i in range(1, N + 1):
for j in range(1, N + 1):
self.phi[i, j] = (1 - omega) * self.phi[i, j] + (omega / 4) * (
self.phi[i + 1, j]
+ self.phi[i, j + 1]
+ self.phi[i - 1, j]
+ self.phi[i, j - 1]
)
# compute interior velocities from velocity potentials
for i in range(1, N):
for j in range(1, N + 1):
self.U[i, j] = (1 / dx) * (self.phi[i + 1, j] - self.phi[i, j])
for i in range(1, N + 1):
for j in range(1, N):
self.V[i, j] = (1 / dx) * (self.phi[i, j + 1] - self.phi[i, j])
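# velPotAll() is the potential-flow core of the model: it relaxes the discrete Laplace
# equation for the velocity potential phi with Gauss-Seidel/SOR (relaxation factor
# omega, VelPot_Iter sweeps), imposes the prescribed wall and inlet velocities through
# the fictitious halo cells, and then recovers U and V as finite-difference gradients
# of phi.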
def BernoulliPres(self, N):
# BernoulliPressure() --------------------------
# Interpolate Vectors() -----------------------
for i in range(1, N + 1):
for j in range(1, N + 1):
self.U_col[i - 1, j - 1] = (self.U[i - 1, j] + self.U[i, j]) / 2
self.V_col[i - 1, j - 1] = (self.V[i, j - 1] + self.V[i, j]) / 2
self.T_col[i - 1, j - 1] = self.T[i, j]
self.P_col[i - 1, j - 1] = self.P[i, j]
# compute relative pressure
for j in range(N, 0, -1):
for i in range(N, 0, -1):
self.P[i, j] = -(rho / 2) * (
self.U_col[i - 1, j - 1] ** 2 + self.V_col[i - 1, j - 1] ** 2
)
def scalePresBottom(self, N):
# ScalePressuresBottom() ------------------------
d = self.presCorrectionBottom()
L = self.opening[1]
# Scale pressures with new pressure correction value
for
updating. "
"Please wait a few seconds and try again.")
return
jobView.updating_now = True
try:
self.progress = ExecutionProgressDialog(self.vistrail_view)
self.progress.show()
if not self.jobMonitor.currentWorkflow():
self.create_job = True
result = self.execute_current_workflow(reason=reason, sinks=sinks)
self.progress.setValue(100)
finally:
self.progress.hide()
self.progress.deleteLater()
self.progress = None
self.create_job = False
self.jobMonitor.finishWorkflow()
jobView.updating_now = False
return result
def enable_missing_package(self, identifier, deps):
configuration = get_vistrails_configuration()
if getattr(configuration, 'enablePackagesSilently', False):
return True
msg = "VisTrails needs to enable package '%s'." % identifier
if len(deps) > 0:
msg += (" This will also enable the dependencies: %s."
" Do you want to enable these packages?" % (
", ".join(deps),))
else:
msg += " Do you want to enable this package?"
res = show_question('Enable package?',
msg,
[YES_BUTTON, NO_BUTTON],
YES_BUTTON)
if res == NO_BUTTON:
return False
return True
def install_missing_package(self, identifier):
res = show_question('Install package?',
"This pipeline contains a module"
" in package '%s', which"
" is not installed. Do you want to"
" install and enable that package?" % \
identifier, [YES_BUTTON, NO_BUTTON],
YES_BUTTON)
return res == YES_BUTTON
def change_selected_version(self, new_version, report_all_errors=True,
do_validate=True, from_root=False):
"""change_selected_version(new_version: int,
report_all_errors: boolean,
do_validate: boolean,
from_root: boolean)
Change the current vistrail version into new_version and emit a
notification signal.
NB: in most situations, the following post-condition holds:
>>> controller.change_selected_version(v)
>>> assert v == controller.current_version
In some occasions, however, the controller will not be able to
switch to the desired version. One example where this can
happen is when the selected version has obsolete modules (that
is, the currently installed package for those modules has
module upgrades). In these cases, change_selected_version will
return a new version which corresponds to a workflow that was
created by the upgrading mechanism that packages can provide.
"""
try:
self.do_version_switch(new_version, report_all_errors,
do_validate, from_root)
except InvalidPipeline, e:
# from vistrails.gui.application import get_vistrails_application
#
# def process_err(err):
# if isinstance(err, Package.InitializationFailed):
# QtGui.QMessageBox.critical(
# get_vistrails_application().builderWindow,
# 'Package load failed',
# 'Package "%s" failed during initialization. '
# 'Please contact the developer of that package '
# 'and report a bug.' % err.package.name)
# elif isinstance(err, MissingPackage):
# QtGui.QMessageBox.critical(
# get_vistrails_application().builderWindow,
# 'Unavailable package',
# 'Cannot find package "%s" in\n'
# 'list of available packages. \n'
# 'Please install it first.' % err._identifier)
# elif issubclass(err.__class__, MissingPort):
# msg = ('Cannot find %s port "%s" for module "%s" '
# 'in loaded package "%s". A different package '
# 'version might be necessary.') % \
# (err._port_type, err._port_name,
# err._module_name, err._package_name)
# QtGui.QMessageBox.critical(
# get_vistrails_application().builderWindow, 'Missing port',
# msg)
# else:
# QtGui.QMessageBox.critical(
# get_vistrails_application().builderWindow,
# 'Invalid Pipeline', str(err))
# VisTrails will not raise upgrade exceptions unless
# configured to do so. To get the upgrade requests,
# configuration option upgradeModules must be set to True.
exception_set = e.get_exception_set()
if len(exception_set) > 0:
# msg_box = QtGui.QMessageBox(get_vistrails_application().builderWindow)
# msg_box.setIcon(QtGui.QMessageBox.Warning)
# msg_box.setText("The current workflow could not be validated.")
# msg_box.setInformativeText("Errors occurred when trying to "
# "construct this workflow.")
# msg_box.setStandardButtons(QtGui.QMessageBox.Ok)
# msg_box.setDefaultButton(QtGui.QMessageBox.Ok)
# msg_box.setDetailedText(debug.format_exception(e))
# msg_box.exec_()
# text = "The current workflow could not be validated."
# debug.critical(text, e)
debug.critical("Error changing version", e)
# print 'got to exception set'
# # Process all errors as usual
# if report_all_errors:
# for exc in exception_set:
# print 'processing', exc
# process_err(exc)
# else:
# process_err(exception_set.__iter__().next())
except Exception, e:
import traceback
debug.critical('Unexpected Exception',
traceback.format_exc())
# FIXME: this code breaks undo/redo, and seems to be ok with normal
# pipeline manipulations so I am leaving it commented out for now
# if not self._current_terse_graph or \
# new_version not in self._current_terse_graph.vertices:
# self.recompute_terse_graph()
self.emit(QtCore.SIGNAL('versionWasChanged'), self.current_version)
def set_search(self, search, text=''):
""" set_search(search: SearchStmt, text: str) -> None
Change the current version tree search statement
"""
if self.search != search or self.search_str != text:
self.search = search
self.search_str = text
if self.search:
self.search.run(self.vistrail, '')
self.invalidate_version_tree(True)
if self.refine:
# need to recompute the graph because the refined items might
# have changed since last time
self.recompute_terse_graph()
self.invalidate_version_tree(True)
else:
self.invalidate_version_tree(False)
self.emit(QtCore.SIGNAL('searchChanged'))
def set_refine(self, refine):
""" set_refine(refine: bool) -> None
Set the refine state to True or False
"""
if self.refine!=refine:
self.refine = refine
# need to recompute the graph because the refined items might
# have changed since last time
self.recompute_terse_graph()
self.invalidate_version_tree(True)
def set_full_tree(self, full):
""" set_full_tree(full: bool) -> None
Set whether VisTrails should show a complete version tree or just a
terse tree
"""
if full != self.full_tree:
self.full_tree = full
self.invalidate_version_tree(True)
def recompute_terse_graph(self):
BaseController.recompute_terse_graph(self)
self._previous_graph_layout = copy.deepcopy(self._current_graph_layout)
self._current_graph_layout.layout_from(self.vistrail,
self._current_terse_graph)
def refine_graph(self, step=1.0):
""" refine_graph(step: float in [0,1]) -> (Graph, Graph)
Refine the graph of the current vistrail based the search
status of the controller. It also return the full graph as a
reference
"""
if self._current_full_graph is None:
self.recompute_terse_graph()
if not self.animate_layout:
return (self._current_terse_graph, self._current_full_graph,
self._current_graph_layout)
graph_layout = copy.deepcopy(self._current_graph_layout)
terse_graph = copy.deepcopy(self._current_terse_graph)
am = self.vistrail.actionMap
step = 1.0/(1.0+math.exp(-(step*12-6))) # use a logistic sigmoid function
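# (the remap keeps step in [0, 1] but eases the animation: step=0 maps to ~0.0025 and
# step=1 to ~0.9975 along an S-shaped curve)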
# Adding nodes to tree
for (c_id, c_node) in self._current_graph_layout.nodes.iteritems():
if self._previous_graph_layout.nodes.has_key(c_id):
p_node = self._previous_graph_layout.nodes[c_id]
else:
p_id = c_id
# Find closest child of contained in both graphs
while not self._previous_graph_layout.nodes.has_key(p_id):
# Should always have exactly one child
p_id = [to for (to, _) in \
self._current_full_graph.adjacency_list[p_id]
if (to in am) and \
not self.vistrail.is_pruned(to)][0]
p_node = self._previous_graph_layout.nodes[p_id]
# Interpolate position
x = p_node.p.x - c_node.p.x
y = p_node.p.y - c_node.p.y
graph_layout.move_node(c_id, x*(1.0-step), y*(1.0-step))
# Removing nodes from tree
for (p_id, p_node) in self._previous_graph_layout.nodes.iteritems():
if not self._current_graph_layout.nodes.has_key(p_id):
# Find closest parent contained in both graphs
shared_parent = p_id
while (shared_parent > 0 and
shared_parent not in self._current_graph_layout.nodes):
shared_parent = \
self._current_full_graph.parent(shared_parent)
# Find closest child contained in both graphs
c_id = p_id
while not self._current_graph_layout.nodes.has_key(c_id):
# Should always have exactly one child
c_id = [to for (to, _) in \
self._current_full_graph.adjacency_list[c_id]
if (to in am) and \
not self.vistrail.is_pruned(to)][0]
# Don't show edge that skips the disappearing nodes
if terse_graph.has_edge(shared_parent, c_id):
terse_graph.delete_edge(shared_parent, c_id)
# Add the disappearing node to the graph and layout
c_node = copy.deepcopy(self._current_graph_layout.nodes[c_id])
c_node.id = p_id
graph_layout.add_node(p_id, c_node)
terse_graph.add_vertex(p_id)
p_parent = self._current_full_graph.parent(p_id)
if not terse_graph.has_edge(p_id, p_parent):
terse_graph.add_edge(p_parent, p_id)
p_child = p_id
while p_child not in self._current_graph_layout.nodes:
# Should always have exactly one child
p_child = [to for (to, _) in \
self._current_full_graph.adjacency_list[p_child]
if (to in am) and \
not self.vistrail.is_pruned(to)][0]
if not terse_graph.has_edge(p_id, p_child):
terse_graph.add_edge(p_id, p_child)
# Interpolate position
x = p_node.p.x - c_node.p.x
y = p_node.p.y - c_node.p.y
graph_layout.move_node(p_id, x*(1.0-step), y*(1.0-step))
return (terse_graph, self._current_full_graph,
graph_layout)
##########################################################################
# undo/redo navigation
def _change_version_short_hop(self, new_version):
"""_change_version_short_hop is used internally to
change versions when we're moving exactly one action up or down.
This allows a few optimizations that improve interactivity."""
if self.current_version <> new_version:
# Instead of recomputing the terse graph, simply update it
# There are two variables in play:
# a) whether or not the destination node is currently on the
# terse tree (it will certainly be after the move)
# b) whether or not the current node will be visible (it
# certainly is now, since it's the current one)
dest_node_in_terse_tree = new_version in self._current_terse_graph.vertices
current = self.current_version
tree = self.vistrail.tree.getVersionTree()
# same logic as recompute_terse_graph except for current
children_count = len([x for (x, _) in tree.adjacency_list[current]
if (x in self.vistrail.actionMap and
not self.vistrail.is_pruned(x))])
current_node_will_be_visible = \
(self.full_tree or
self.vistrail.has_tag(self.current_version) or
children_count <> 1)
self.change_selected_version(new_version)
# case 1:
if not dest_node_in_terse_tree and \
not current_node_will_be_visible and not current == 0:
# we're going from one boring node to another,
# so just rename the node on the terse graph
self._current_terse_graph.rename_vertex(current, new_version)
self.replace_unnamed_node_in_version_tree(current, new_version)
else:
# bail, for now
self.recompute_terse_graph()
self.invalidate_version_tree(False)
def show_parent_version(self):
""" show_parent_version() -> None
Go back one from the current version and display it
"""
# NOTE cscheid: Slight change in the logic under refined views:
# before r1185, undo would back up more than one action in the
# presence of non-matching refined nodes. That seems wrong. Undo
# should always move one step only.
prev = None
try:
prev = self._current_full_graph.parent(self.current_version)
except Graph.VertexHasNoParentError:
prev = 0
self._change_version_short_hop(prev)
def show_child_version(self, which_child):
""" show_child_version(which_child: int) -> None
Go forward one version and display it. This is used in redo.
ONLY CALL
<filename>code/mesh_recons/fnnrecon.py
import numpy as np
import os, sys
import math, time
from scipy.interpolate import InterpolatedUnivariateSpline as iuspline
from matplotlib import pyplot as plt
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensorflow_probability as tfp
import mesh_tensorflow as mtf
import flowpm
import flowpm.mesh_ops as mpm
import flowpm.mtfpm as mtfpm
import flowpm.mesh_utils as mesh_utils
from astropy.cosmology import Planck15
from flowpm.tfpm import PerturbationGrowth
from flowpm import linear_field, lpt_init, nbody, cic_paint
sys.path.append('./utils/')
import tools
import diagnostics as dg
from fnn import *
##
cosmology=Planck15
np.random.seed(100)
tf.random.set_random_seed(200)
cscratch = "/global/cscratch1/sd/chmodi/flowpm/recon/"
tf.flags.DEFINE_integer("gpus_per_node", 8, "Number of GPU on each node")
tf.flags.DEFINE_integer("gpus_per_task", 8, "Number of GPU in each task")
tf.flags.DEFINE_integer("tasks_per_node", 1, "Number of task in each node")
tf.flags.DEFINE_integer("nc", 128, "Size of the cube")
tf.flags.DEFINE_integer("batch_size", 1, "Batch Size")
tf.flags.DEFINE_float("box_size", 400, "Batch Size")
tf.flags.DEFINE_float("a0", 0.1, "initial scale factor")
tf.flags.DEFINE_float("af", 1.0, "final scale factor")
tf.flags.DEFINE_integer("nsteps", 5, "Number of time steps")
tf.flags.DEFINE_bool("nbody", False, "Do nbody evolution")
tf.flags.DEFINE_string("suffix", "", "suffix for the folder name")
#pyramid flags
tf.flags.DEFINE_integer("dsample", 2, "downsampling factor")
tf.flags.DEFINE_integer("hsize", 32, "halo size")
#mesh flags
tf.flags.DEFINE_integer("nx", 4, "# blocks along x")
tf.flags.DEFINE_integer("ny", 2, "# blocks along y")
tf.flags.DEFINE_string("mesh_shape", "row:16", "mesh shape")
#tf.flags.DEFINE_string("layout", "nx:b1", "layout rules")
tf.flags.DEFINE_string("output_file", "timeline", "Name of the output timeline file")
FLAGS = tf.flags.FLAGS
nc, bs = FLAGS.nc, FLAGS.box_size
a0, a, nsteps =FLAGS.a0, FLAGS.af, FLAGS.nsteps
stages = np.linspace(a0, a, nsteps, endpoint=True)
fpath = cscratch + "fnn_nx%d_ny%d_mesh%s/"%(FLAGS.nx, FLAGS.ny, FLAGS.suffix)
print(fpath)
for ff in [fpath, fpath + '/figs']:
try: os.makedirs(ff)
except Exception as e: print (e)
numd = 1e-3
def recon_prototype(mesh, data, nc=FLAGS.nc, bs=FLAGS.box_size, batch_size=FLAGS.batch_size,
a0=FLAGS.a0, a=FLAGS.af, nsteps=FLAGS.nsteps, dtype=tf.float32):
"""
Prototype of function computing LPT deplacement.
Returns output tensorflow and mesh tensorflow tensors
"""
if dtype == tf.float32:
npdtype = "float32"
cdtype = tf.complex64
elif dtype == tf.float64:
npdtype = "float64"
cdtype = tf.complex128
print("Dtype : ", dtype, npdtype)
# Compute a few things first, using simple tensorflow
kny = 1*np.pi*nc/bs
R1, R2 = 3., 3*1.2
stages = np.linspace(a0, a, nsteps, endpoint=True)
#graph = mtf.Graph()
#mesh = mtf.Mesh(graph, "my_mesh")
# Define the named dimensions
# Parameters of the small scales decomposition
n_block_x = FLAGS.nx
n_block_y = FLAGS.ny
n_block_z = 1
halo_size = FLAGS.hsize
if halo_size >= 0.5*min(nc//n_block_x, nc//n_block_y, nc//n_block_z):
new_size = int(0.5*min(nc//n_block_x, nc//n_block_y, nc//n_block_z))
print('WARNING: REDUCING HALO SIZE from %d to %d'%(halo_size, new_size))
halo_size = new_size
# Parameters of the large scales decomposition
scalar = mtf.Dimension("scalar", 1)
fx_dim = mtf.Dimension("nx", nc)
fy_dim = mtf.Dimension("ny", nc)
fz_dim = mtf.Dimension("nz", nc)
tfx_dim = mtf.Dimension("tx", nc)
tfy_dim = mtf.Dimension("ty", nc)
tfz_dim = mtf.Dimension("tz", nc)
tx_dim = mtf.Dimension("tx_lr", nc)
ty_dim = mtf.Dimension("ty_lr", nc)
tz_dim = mtf.Dimension("tz_lr", nc)
nx_dim = mtf.Dimension('nx_block', n_block_x)
ny_dim = mtf.Dimension('ny_block', n_block_y)
nz_dim = mtf.Dimension('nz_block', n_block_z)
sx_dim = mtf.Dimension('sx_block', nc//n_block_x)
sy_dim = mtf.Dimension('sy_block', nc//n_block_y)
sz_dim = mtf.Dimension('sz_block', nc//n_block_z)
#k_dims = [tx_dim, ty_dim, tz_dim]
batch_dim = mtf.Dimension("batch", batch_size)
klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]
plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]
ipklin = iuspline(klin, plin)
pk_dim = mtf.Dimension("npk", len(plin))
pk = mtf.import_tf_tensor(mesh, plin.astype(npdtype), shape=[pk_dim])
# Compute necessary Fourier kernels
kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)
kx = mtf.import_tf_tensor(mesh, kvec[0].squeeze().astype('float32'), shape=[tfx_dim])
ky = mtf.import_tf_tensor(mesh, kvec[1].squeeze().astype('float32'), shape=[tfy_dim])
kz = mtf.import_tf_tensor(mesh, kvec[2].squeeze().astype('float32'), shape=[tfz_dim])
kv = [ky, kz, kx]
# kvec for low resolution grid
kvec_lr = flowpm.kernels.fftk([nc, nc, nc], symmetric=False)
kx_lr = mtf.import_tf_tensor(mesh, kvec_lr[0].squeeze().astype('float32'), shape=[tx_dim])
ky_lr = mtf.import_tf_tensor(mesh, kvec_lr[1].squeeze().astype('float32'), shape=[ty_dim])
kz_lr = mtf.import_tf_tensor(mesh, kvec_lr[2].squeeze().astype('float32'), shape=[tz_dim])
kv_lr = [ky_lr, kz_lr, kx_lr]
shape = [batch_dim, fx_dim, fy_dim, fz_dim]
lr_shape = [batch_dim, fx_dim, fy_dim, fz_dim]
hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]
part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]
#
# Begin simulation
## Compute initial initial conditions distributed
#initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)
fieldvar = mtf.get_variable(mesh,'linear', part_shape)
input_field = tf.placeholder(data.dtype, [batch_size, nc, nc, nc])
mtfinp = mtf.import_tf_tensor(mesh, input_field, shape=part_shape)
linearop = mtf.assign(fieldvar, mtfinp)
#field = fieldvar
initc = fieldvar
print("initc : ", initc)
# Here we can run our nbody
if FLAGS.nbody:
state = mtfpm.lpt_init_single(fieldvar, a0, kv_lr, halo_size, lr_shape, hr_shape, part_shape[1:], antialias=True,)
# Here we can run our nbody
final_state = mtfpm.nbody_single(state, stages, lr_shape, hr_shape, kv_lr, halo_size)
else:
final_state = mtfpm.lpt_init_single(initc, stages[-1], kv_lr, halo_size, lr_shape, hr_shape, part_shape[1:], antialias=True,)
# paint the field
final_field = mtf.zeros(mesh, shape=hr_shape)
for block_size_dim in hr_shape[-3:]:
final_field = mtf.pad(final_field, [halo_size, halo_size], block_size_dim.name)
final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)
# Halo exchange
for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):
final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim, halo_size)
# Remove borders
for block_size_dim in hr_shape[-3:]:
final_field = mtf.slice(final_field, halo_size, block_size_dim.size, block_size_dim.name)
final_field = mtf.slicewise(lambda x: x[:,0,0,0],
[final_field],
output_dtype=dtype,
output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],
name='my_dumb_reshape',
splittable_dims=part_shape[:-1]+hr_shape[:4])
##
x = final_field
ppars, mpars, kernel = setupfnn()
pwts, pbias, pmx, psx = ppars
mwts, mbias, mmx, msx, mmy, msy = mpars
msy, mmy = msy[0], mmy[0]
print("mmy : ", mmy)
size = 3
k_dims = [d.shape[0] for d in kv]
k_dims = [k_dims[2], k_dims[0], k_dims[1]]
tfnc, tfbs = float_to_mtf(nc*1., mesh, scalar), float_to_mtf(bs, mesh, scalar)
x1f = mesh_utils.r2c3d(x, k_dims, dtype=cdtype)
x1f = mtf.cwise(cwise_decic, [x1f] + kv + [tfnc, tfbs], output_dtype=cdtype)
x1d = mesh_utils.c2r3d(x1f, x.shape[-3:], dtype=dtype)
x1d = mtf.add(x1d, -1.)
x1f0 = mesh_utils.r2c3d(x1d, k_dims, dtype=cdtype)
x1f = mtf.cwise(cwise_fingauss, [x1f0, float_to_mtf(R1, mesh, scalar)] + kv + [tfnc, tfbs], output_dtype=cdtype)
x1 = mesh_utils.c2r3d(x1f, x1d.shape[-3:], dtype=dtype)
x2f = mtf.cwise(cwise_fingauss, [x1f0, float_to_mtf(R2, mesh, scalar)] + kv + [tfnc, tfbs], output_dtype=cdtype)
x2 = mesh_utils.c2r3d(x2f, x1d.shape[-3:], dtype=dtype)
x12 = x1-x2
width = tf.placeholder(tf.float32, shape=())
def apply_pwts(x, x1, x2):
#y = tf.expand_dims(x, axis=-1)
y = tf.nn.conv3d(tf.expand_dims(x, axis=-1), kernel, [1, 1, 1, 1, 1], 'SAME')
y1 = tf.nn.conv3d(tf.expand_dims(x1, axis=-1), kernel, [1, 1, 1, 1, 1], 'SAME')
y2 = tf.nn.conv3d(tf.expand_dims(x2, axis=-1), kernel, [1, 1, 1, 1, 1], 'SAME')
#y = tf.nn.conv3d(tf.expand_dims(tfwrap3D(x), -1), kernel, [1, 1, 1, 1, 1], 'VALID')
#y1 = tf.nn.conv3d(tf.expand_dims(tfwrap3D(x1), -1), kernel, [1, 1, 1, 1, 1], 'VALID')
#y2 = tf.nn.conv3d(tf.expand_dims(tfwrap3D(x12), -1), kernel, [1, 1, 1, 1, 1], 'VALID')
yy = tf.concat([y, y1, y2], axis=-1)
yy = yy - pmx
yy = yy / psx
yy1 = tf.nn.relu(tf.matmul(yy, pwts[0]) + pbias[0])
yy2 = tf.nn.relu(tf.matmul(yy1, pwts[1]) + pbias[1])
yy3 = tf.matmul(yy2, pwts[2]) + pbias[2]
pmodel = tf.nn.sigmoid(width * yy3)
return pmodel[...,0]
pmodel = mtf.slicewise(apply_pwts,
[x, x1, x12],
output_dtype=tf.float32,
output_shape=part_shape, # + [mtf.Dimension('c_dim', 81)],
name='apply_pwts',
splittable_dims=lr_shape[:-1]+hr_shape[1:4]+part_shape[1:3])
def apply_mwts(x, x1, x2):
#y = tf.expand_dims(x, axis=-1)
zz = tf.concat([tf.expand_dims(x, -1), tf.expand_dims(x1, -1), tf.expand_dims(x2, -1)], axis=-1)
zz = zz - mmx
zz = zz / msx
zz1 = tf.nn.elu(tf.matmul(zz, mwts[0]) + mbias[0])
zz2 = tf.nn.elu(tf.matmul(zz1, mwts[1]) + mbias[1])
zz3 = tf.matmul(zz2, mwts[2]) + mbias[2]
mmodel = zz3*msy + mmy
return mmodel[...,0]
mmodel = mtf.slicewise(apply_mwts,
[x, x1, x12],
output_dtype=tf.float32,
output_shape=part_shape, # + [mtf.Dimension('c_dim', 81)],
name='apply_mwts',
splittable_dims=lr_shape[:-1]+hr_shape[1:4]+part_shape[1:3])
model = pmodel*mmodel
mtfdata = mtf.import_tf_tensor(mesh, tf.convert_to_tensor(data), shape=shape)
# Get prior
#k_dims = [d.shape[0] for d in kv]
#k_dims = [k_dims[2], k_dims[0], k_dims[1]]
k_dims_pr = [d.shape[0] for d in kv]
k_dims_pr = [k_dims_pr[2], k_dims_pr[0], k_dims_pr[1]]
cfield = mesh_utils.r2c3d(fieldvar, k_dims_pr, dtype=cdtype)
def _cwise_prior(kfield, pk, kx, ky, kz):
kx = tf.reshape(kx, [-1, 1, 1])
ky = tf.reshape(ky, [1, -1, 1])
kz = tf.reshape(kz, [1, 1, -1])
kk = tf.sqrt((kx / bs * nc)**2 + (ky / bs * nc)**2 + (kz / bs * nc)**2)
kshape = kk.shape
kk = tf.reshape(kk, [-1])
pkmesh = tfp.math.interp_regular_1d_grid(x=kk, x_ref_min=1e-05, x_ref_max=1000.0,
y_ref=pk, grid_regularizing_transform=tf.log)
priormesh = tf.reshape(pkmesh, kshape)
return tf.abs(kfield) / priormesh**0.5
cpfield = mtf.cwise(_cwise_prior, [cfield, pk] + kv, output_dtype=tf.float32)
prior = mtf.reduce_sum(mtf.square(cpfield)) * bs**3 *nc**3
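# The block above is the usual Gaussian prior on the initial linear field: |delta(k)|
# is divided by sqrt(P(k)) (P interpolated from the tabulated Planck15 spectrum), and
# the squared, summed result -- with the bs**3 * nc**3 volume factors used in this
# code -- plays the role of -2 log p(initial conditions) up to constants.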
# Total loss
#diff = (model - mtfdata)
modelf = mesh_utils.r2c3d(model, k_dims, dtype=cdtype)
modelsmf = mtf.cwise(cwise_fingauss, [modelf, float_to_mtf(R1, mesh, scalar)] + kv + [tfnc, tfbs], output_dtype=cdtype)
modelsm = mesh_utils.c2r3d(modelsmf, x1d.shape[-3:], dtype=dtype)
#dataf = mesh_utils.r2c3d(mtfdata, k_dims, dtype=cdtype)
#datasmf = mtf.cwise(cwise_fingauss, [dataf, float_to_mtf(R1, mesh, scalar)] + kv + [tfnc, tfbs], output_dtype=cdtype)
#datasm = mesh_utils.c2r3d(datasmf, x1d.shape[-3:], dtype=dtype)
##Anneal
R0 = tf.placeholder(tf.float32, shape=())
M0 = tf.placeholder(tf.float32, shape=())
off, istd = tf.placeholder(tf.float32, shape=data.shape), tf.placeholder(tf.float32, shape=data.shape)
mtfoff = mtf.import_tf_tensor(mesh, off, shape=shape)
mtfistd = mtf.import_tf_tensor(mesh, istd, shape=shape)
diff = mtf.log(modelsm + M0) - mtf.log(mtfdata + M0)
#diff = diff / 0.25
#diff = (diff + mtfoff)*mtfistd #For some reason, doing things wrong this one
diff = (diff + mtfoff)/0.25
def _cwise_smooth(kfield, kx, ky, kz):
kx = tf.reshape(kx, [-1, 1, 1])
ky = tf.reshape(ky, [1, -1, 1])
kz = tf.reshape(kz, [1, 1, -1])
kk = (kx / bs * nc)**2 + (ky/ bs * nc)**2 + (kz/ bs * nc)**2
wts = tf.cast(tf.exp(- kk* (R0*bs/nc)**2), kfield.dtype)
return
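# The helper above is cut off at its return statement. For reference, a self-contained
# sketch of the same Gaussian low-pass idea, reusing the tf import and k-vector
# conventions of this module (the trailing ``return kfield * wts`` is an assumption,
# not the original code):
def cwise_smooth_sketch(kfield, kx, ky, kz, R, bs, nc):
    kx = tf.reshape(kx, [-1, 1, 1])
    ky = tf.reshape(ky, [1, -1, 1])
    kz = tf.reshape(kz, [1, 1, -1])
    kk = (kx / bs * nc) ** 2 + (ky / bs * nc) ** 2 + (kz / bs * nc) ** 2
    wts = tf.cast(tf.exp(-kk * (R * bs / nc) ** 2), kfield.dtype)
    return kfield * wts  # assumed completion: damp high-k modes with a Gaussian window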
"""
Calculate the bounding box of the polygons.
Returns
-------
out : Numpy array[2, 2] or None
Bounding box of this polygon in the form [[x_min, y_min],
[x_max, y_max]], or None if the polygon is empty.
"""
if len(self.polygons) == 0:
return None
return numpy.array(
(
(
min(pts[:, 0].min() for pts in self.polygons),
min(pts[:, 1].min() for pts in self.polygons),
),
(
max(pts[:, 0].max() for pts in self.polygons),
max(pts[:, 1].max() for pts in self.polygons),
),
)
)
def rotate(self, angle, center=(0, 0)):
"""
Rotate this object.
Parameters
----------
angle : number
The angle of rotation (in *radians*).
center : array-like[2]
Center point for the rotation.
Returns
-------
out : `PolygonSet`
This object.
"""
ca = numpy.cos(angle)
sa = numpy.sin(angle) * _mpone
c0 = numpy.array(center)
new_polys = []
for points in self.polygons:
pts = points - c0
new_polys.append(pts * ca + pts[:, ::-1] * sa + c0)
self.polygons = new_polys
return self
def scale(self, scalex, scaley=None, center=(0, 0)):
"""
Scale this object.
Parameters
----------
scalex : number
Scaling factor along the first axis.
scaley : number or None
Scaling factor along the second axis. If None, same as
`scalex`.
center : array-like[2]
Center point for the scaling operation.
Returns
-------
out : `PolygonSet`
This object.
"""
c0 = numpy.array(center)
s = scalex if scaley is None else numpy.array((scalex, scaley))
self.polygons = [(points - c0) * s + c0 for points in self.polygons]
return self
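    # Minimal usage sketch for the geometry helpers above (the constructor call
    # and the bounding-box method name are assumptions about this class):
    #
    #     ps = PolygonSet([numpy.array([(0, 0), (1, 0), (1, 1), (0, 1)])])
    #     ps.rotate(numpy.pi / 2).scale(2)
    #     ps.get_bounding_box()  # -> approximately [[-2, 0], [0, 2]]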
def to_gds(self, multiplier):
"""
Convert this object to a series of GDSII elements.
Parameters
----------
multiplier : number
A number that multiplies all dimensions written in the GDSII
elements.
Returns
-------
out : string
The GDSII binary string that represents this object.
"""
data = []
for ii in range(len(self.polygons)):
if len(self.polygons[ii]) > 8190:
warnings.warn(
"[GDSPY] Polygons with more than 8190 are not supported by the official GDSII specification. This GDSII file might not be compatible with all readers.",
stacklevel=4,
)
data.append(
struct.pack(
">4Hh2Hh",
4,
0x0800,
6,
0x0D02,
self.layers[ii],
6,
0x0E02,
self.datatypes[ii],
)
)
xy = numpy.empty((self.polygons[ii].shape[0] + 1, 2), dtype=">i4")
xy[:-1] = numpy.round(self.polygons[ii] * multiplier)
xy[-1] = xy[0]
i0 = 0
while i0 < xy.shape[0]:
i1 = min(i0 + 8190, xy.shape[0])
data.append(struct.pack(">2H", 4 + 8 * (i1 - i0), 0x1003))
data.append(xy[i0:i1].tostring())
i0 = i1
else:
data.append(
struct.pack(
">4Hh2Hh2H",
4,
0x0800,
6,
0x0D02,
self.layers[ii],
6,
0x0E02,
self.datatypes[ii],
12 + 8 * len(self.polygons[ii]),
0x1003,
)
)
xy = numpy.round(self.polygons[ii] * multiplier).astype(">i4")
data.append(xy.tostring())
data.append(xy[0].tostring())
data.append(struct.pack(">2H", 4, 0x1100))
return b"".join(data)
def area(self, by_spec=False):
"""
Calculate the total area of this polygon set.
Parameters
----------
by_spec : bool
If True, the return value is a dictionary with
``{(layer, datatype): area}``.
Returns
-------
out : number, dictionary
Area of this object.
"""
if by_spec:
path_area = {}
for poly, key in zip(self.polygons, zip(self.layers, self.datatypes)):
poly_area = 0
for ii in range(1, len(poly) - 1):
poly_area += (poly[0][0] - poly[ii + 1][0]) * (
poly[ii][1] - poly[0][1]
) - (poly[0][1] - poly[ii + 1][1]) * (poly[ii][0] - poly[0][0])
if key in path_area:
path_area[key] += 0.5 * abs(poly_area)
else:
path_area[key] = 0.5 * abs(poly_area)
else:
path_area = 0
for points in self.polygons:
poly_area = 0
for ii in range(1, len(points) - 1):
poly_area += (points[0][0] - points[ii + 1][0]) * (
points[ii][1] - points[0][1]
) - (points[0][1] - points[ii + 1][1]) * (
points[ii][0] - points[0][0]
)
path_area += 0.5 * abs(poly_area)
return path_area
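    # Rough example of area() for a unit square on layer 1, datatype 0 (the
    # constructor arguments are an assumption):
    #
    #     ps = PolygonSet([numpy.array([(0, 0), (1, 0), (1, 1), (0, 1)])], 1, 0)
    #     ps.area()              # -> 1.0
    #     ps.area(by_spec=True)  # -> {(1, 0): 1.0}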
def fracture(self, max_points=199, precision=1e-3):
"""
Slice these polygons in the horizontal and vertical directions
        so that each resulting piece has at most `max_points` vertices. This
operation occurs in place.
Parameters
----------
max_points : integer
Maximal number of points in each resulting polygon (at least
5 for the fracture to occur).
precision : float
            Desired precision for rounding vertex coordinates.
Returns
-------
out : `PolygonSet`
This object.
"""
if max_points > 4:
ii = 0
while ii < len(self.polygons):
if len(self.polygons[ii]) > max_points:
pts0 = sorted(self.polygons[ii][:, 0])
pts1 = sorted(self.polygons[ii][:, 1])
ncuts = len(pts0) // max_points
if pts0[-1] - pts0[0] > pts1[-1] - pts1[0]:
# Vertical cuts
cuts = [
pts0[int(i * len(pts0) / (ncuts + 1.0) + 0.5)]
for i in range(1, ncuts + 1)
]
chopped = clipper._chop(
self.polygons[ii], cuts, 0, 1 / precision
)
else:
# Horizontal cuts
cuts = [
pts1[int(i * len(pts1) / (ncuts + 1.0) + 0.5)]
for i in range(1, ncuts + 1)
]
chopped = clipper._chop(
self.polygons[ii], cuts, 1, 1 / precision
)
self.polygons.pop(ii)
layer = self.layers.pop(ii)
datatype = self.datatypes.pop(ii)
self.polygons.extend(
numpy.array(x) for x in itertools.chain.from_iterable(chopped)
)
npols = sum(len(c) for c in chopped)
self.layers.extend(layer for _ in range(npols))
self.datatypes.extend(datatype for _ in range(npols))
else:
ii += 1
return self
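    # Sketch of fracture() in use (vertex counts are illustrative): a polygon
    # with roughly 400 vertices and max_points=199 is chopped along its longer
    # axis into pieces of at most 199 vertices, each keeping its original
    # layer and datatype.
    #
    #     ps.fracture(max_points=199, precision=1e-3)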
def fillet(self, radius, points_per_2pi=128, max_points=199, precision=1e-3):
"""
        Round the corners of these polygons and fracture them into
        polygons with fewer vertices if necessary.
Parameters
----------
radius : number, array-like
Radius of the corners. If number: all corners filleted by
that amount. If array: specify fillet radii on a
per-polygon basis (length must be equal to the number of
polygons in this `PolygonSet`). Each element in the array
can be a number (all corners filleted by the same amount) or
another array of numbers, one per polygon vertex.
Alternatively, the array can be flattened to have one radius
per `PolygonSet` vertex.
points_per_2pi : integer
Number of vertices used to approximate a full circle. The
number of vertices in each corner of the polygon will be the
fraction of this number corresponding to the angle
encompassed by that corner with respect to 2 pi.
max_points : integer
Maximal number of points in each resulting polygon (at least
5, otherwise the resulting polygon is not fractured).
precision : float
            Desired precision for rounding vertex coordinates in case
of fracturing.
Returns
-------
out : `PolygonSet`
This object.
"""
two_pi = 2 * numpy.pi
fracture = False
if numpy.isscalar(radius):
radii = [[radius] * p.shape[0] for p in self.polygons]
else:
if len(radius) == len(self.polygons):
radii = []
for r, p in zip(radius, self.polygons):
if numpy.isscalar(r):
radii.append([r] * p.shape[0])
else:
if len(r) != p.shape[0]:
raise ValueError(
"[GDSPY] Wrong length in fillet radius list. Expected lengths are {} or {}; got {}.".format(
len(self.polygons), total, len(radius)
)
)
radii.append(r)
else:
total = sum(p.shape[0] for p in self.polygons)
if len(radius) != total:
raise ValueError(
"[GDSPY] Wrong length in fillet radius list. Expected lengths are {} or {}; got {}.".format(
len(self.polygons), total, len(radius)
)
)
radii = []
n = 0
for p in self.polygons:
radii.append(radius[n : n + p.shape[0]])
n += p.shape[0]
for jj in range(len(self.polygons)):
vec = self.polygons[jj].astype(float) - numpy.roll(self.polygons[jj], 1, 0)
length = (vec[:, 0] ** 2 + vec[:, 1] ** 2) ** 0.5
ii = numpy.flatnonzero(length)
if len(ii) < len(length):
self.polygons[jj] = numpy.array(self.polygons[jj][ii])
radii[jj] = [radii[jj][i] for i in ii]
vec = self.polygons[jj].astype(float) - numpy.roll(
self.polygons[jj], 1, 0
)
length = (vec[:, 0] ** 2 + vec[:, 1] ** 2) ** 0.5
vec[:, 0] = vec[:, 0] / length
vec[:, 1] = vec[:, 1] / length
dvec = numpy.roll(vec, -1, 0) - vec
norm = (dvec[:, 0] ** 2 + dvec[:, 1] ** 2) ** 0.5
ii = numpy.flatnonzero(norm)
dvec[ii, 0] = dvec[ii, 0] / norm[ii]
dvec[ii, 1] = dvec[ii, 1] / norm[ii]
dot = numpy.roll(vec, -1, 0) * vec
theta = numpy.arccos(dot[:, 0] + dot[:, 1])
ct = numpy.cos(theta * 0.5)
tt = numpy.tan(theta * 0.5)
new_points = []
for ii in range(-1, len(self.polygons[jj]) - 1):
if theta[ii] > 1e-6:
a0 = -vec[ii] * tt[ii] - dvec[ii] / ct[ii]
a0 = numpy.arctan2(a0[1], a0[0])
a1 = vec[ii + 1] * tt[ii] - dvec[ii] / ct[ii]
a1 = numpy.arctan2(a1[1], a1[0])
if a1 - a0 > numpy.pi:
a1 -= two_pi
elif a1 - a0 < -numpy.pi:
a1 += two_pi
n = max(
int(numpy.ceil(abs(a1 - a0) / two_pi * points_per_2pi) + 0.5), 2
)
a = numpy.linspace(a0, a1, n)
ll = radii[jj][ii] * tt[ii]
"python-ciscoconfparse",
"rubygem-chef-server-api",
"rubygem-extlib",
"rubygem-sprockets-helpers",
"rubygem-sqlite3",
"patterns-cloud",
"suse-openstack-cloud-user",
"python-fake-factory",
"rubygem-multipart-post",
"rubygem-ember-source",
"python-aioeventlet",
"yast2-crowbar",
"xtrabackup",
"openstack-dashboard-theme-HPE",
"rubygem-activeresource",
"suse-openstack-cloud-operations",
"bzr",
"hpe-helion-openstack-migration",
"rubygem-http-cookie",
"rubygem-net-http-digest_auth",
"python-Beaker",
"rubygem-dep_selector-0_1",
"rubygem-ohai-6",
"suse-openstack-cloud-security",
"_product:suse-openstack-cloud-crowbar-ftp-POOL-x86_64",
"python-gitdb2",
"ipxe",
"jsmn",
"python-pytidylib6",
"python-Beaver",
"rubygem-closure-compiler",
"ardana-cassandra",
"rubygem-mysql2",
"crowbar-hyperv",
"ardana-horizon",
"python-html",
"zookeeper-sources",
"documentation-suse-openstack-cloud",
"erlang-rebar-obs",
"venv-openstack-monasca-ceilometer",
"venv-openstack-keystone",
"hpe-helion-openstack-planning",
"crowbar-ha",
"rubygem-webmock",
"rubygem-http_parser.rb",
"rubygem-crowbar-client",
"python-termstyle",
"rubygem-merb-param-protection",
"crowbar",
"fence-agents",
"rubygem-chef-server",
"venv-openstack-ceilometer",
"venv-openstack-designate",
"crowbar-openstack",
"python-cinderlm",
"rubygem-fast_xs-0_7",
"rubygem-ember-rails",
"ardana-db",
"python-ardana-configurationprocessor",
"rubygem-sprockets-2_11",
"rubygem-merb-core",
},
"in_ibs_devel_but_not_in_obs": {
"rubygem-delayed_job",
"rubygem-activesupport-4_1",
"rubygem-raindrops",
"rubygem-bson-1_11",
"ardana-octavia",
"golang-github-jteeuwen-go-bindata",
"rubygem-backports",
"rubygem-chef",
"ardana-installer-ui",
"suse-openstack-cloud-monitor-msoperator_en",
"rubygem-mimemagic",
"rubygem-js-routes-0_9",
"ardana-keystone",
"rubygem-simplecov-html",
"_product:suse-openstack-cloud-crowbar-ftp-POOL-s390x",
"venv-openstack-horizon",
"ardana-cluster",
"rubygem-tilt-1_4",
"rubygem-archive",
"rubygem-multi_xml",
"rubygem-sprockets-rails",
"ardana-cinder",
"rubygem-hashie-3_3",
"hpe-helion-openstack-opsconsole",
"libsodium",
"caasp-openstack-heat-templates",
"rubygem-pg",
"hpe-helion-openstack-installation",
"ardana-neutron",
"rubygem-sinatra",
"rubygem-crack",
"rubygem-merb-helpers",
"rubygem-json-1_7",
"rubygem-moneta-0_6",
"rubygem-rack-protection",
"ardana-osconfig",
"suse-openstack-cloud-monitor-osoperator_en",
"ardana-extensions-dcn",
"rubygem-activerecord-4_1",
"rubygem-mail",
"python-tinydb",
"rubygem-actionpack-4_1",
"rubygem-fastercsv",
"rubygem-actionview-4_1",
"rubygem-diff-lcs",
"rubygem-arel",
"crowbar-core",
"rubygem-rack-test-0_6",
"venv-openstack-neutron",
"rubygem-kgio",
"python-lockfile",
"rubygem-autoprefixer-rails",
"rubygem-sidekiq",
"venv-openstack-manila",
"rubygem-yaml_db",
"python-promise",
"rubygem-rainbows-rails",
"rubygem-simple-navigation",
"rubygem-polyglot",
"rubygem-ffi",
"_product:hpe-helion-openstack-cd-cd-x86_64",
"ardana-glance",
"_product:suse-openstack-cloud-ftp-POOL-x86_64",
"ardana-manila",
"rubygem-chef-expander",
"supportutils-plugin-suse-openstack-cloud",
"rubygem-sprockets-2_12",
"crowbar-ceph",
"crowbar-core-branding-SOC",
"pdns",
"crowbar-init",
"rubygem-terminal-table",
"rubygem-mocha",
"suse-openstack-cloud-planning",
"skelcd-socc",
"ardana-cobbler",
"rubygem-uglifier-2_2",
"rubygem-hike-1_2",
"ruby.SLE_12",
"rubygem-redis-namespace",
"rubygem-net-ssh-multi-1_1",
"release-notes-suse-openstack-cloud",
"ardana-qa-ansible",
"ardana-service",
"skelcd-soc",
"rubygem-sass-3_2",
"venv-openstack-heat",
"rubygem-httparty",
"rubygem-rspec-mocks",
"_product:suse-openstack-cloud-cd-cd-x86_64",
"rubygem-ipaddress",
"rubygem-simplecov",
"rubygem-rake",
"gecode",
"rubygem-dotenv",
"_product:suse-openstack-cloud-crowbar-cd-cd-x86_64",
"rubygem-libxml-ruby",
"ardana-barbican",
"suse-openstack-cloud-upstream-user",
"rubygem-unf_ext",
"rubygem-bundler",
"rubygem-rest-client.cloud5",
"ardana-magnum",
"suse-openstack-cloud-monitor-overview_en",
"ardana-logging",
"rubygem-bunny",
"openstack-dashboard-theme-SUSE",
"ardana-monasca-transform",
"rubygem-merb-assets",
"venv-openstack-zaqar",
"_product:suse-openstack-cloud-release",
"rubygem-treetop-1_4",
"ardana-tls",
"venv-openstack-aodh",
"rubygem-rest-client",
"rubygem-knife-backup",
"ardana-monasca",
"memcached",
"ardana-opsconsole",
"venv-openstack-swift",
"_product:hpe-helion-openstack-ftp-POOL-x86_64",
"rubygem-metaclass",
"rubygem-mixlib-cli",
"rubygem-puma_worker_killer",
"rubygem-gem2rpm",
"crowbar-ui",
"pdns-common",
"rubygem-sprockets-standalone",
"rubygem-apipie-rails",
"rubygem-chef-solr",
"ruby-packaging-helper",
"rubygem-actionmailer-4_1",
"rubygem-tzinfo",
"python-python-daemon",
"rubygem-bunny-0_6",
"rubygem-mysql",
"venv-openstack-trove",
"python-yarb",
"rubygem-amqp-0_6",
"rubygem-ember-data-source",
"ardana-freezer",
"ardana-ironic",
"venv-openstack-barbican",
"rubygem-execjs",
"hpe-xsl-stylesheets",
"venv-openstack-sahara",
"rubygem-thin",
"rubygem-timers-1_1",
"rubygem-syslogger",
"python-venvjail",
"ardana-extensions-apicaci",
"rubygem-thor",
"ardana-installer-server",
"rubygem-chef-zero",
"python-conf_d",
"rubygem-daemons",
"_product:suse-openstack-cloud-crowbar-release",
"rubygem-eventmachine",
"rubygem-kwalify",
"rubygem-railties-4_1",
"rubygem-systemu",
"rubygem-net-ssh-gateway",
"rubygem-handlebars-source",
"ardana-service-ansible",
"suse-openstack-cloud-opsconsole",
"hpe-helion-openstack-operations",
"suse-openstack-cloud-deployment_en",
"openstack-octavia-amphora-image",
"venv-openstack-magnum",
"python-ardana-packager",
"ardana-heat",
"ardana-ceilometer",
"rubygem-dotenv-deployment",
"rubygem-redis",
"rubygem-rspec-rails",
"rubygem-mixlib-log",
"rubygem-rainbows",
"ardana",
"rubygem-activemodel-4_1",
"rubygem-connection_pool",
"rubygem-sinatra-contrib",
"rubygem-em-http-request-1_0",
"ardana-nova",
"rubygem-netrc",
"rubygem-systemu-2_5",
"rubygem-ruby-shadow",
"rubygem-uuidtools",
"suse-openstack-cloud-supplement_en",
"ardana-designate",
"_product:suse-openstack-cloud-crowbar-ftp-POOL-ppc64le",
"rubygem-rails-observers",
"venv-openstack-freezer",
"rubygem-cstruct",
"sleshammer",
"rubygem-barber",
"rubygem-activerecord-session_store",
"rubygem-rack-1_5",
"rubygem-simple_navigation_renderers",
"jeos-firstboot",
"rubygem-faraday",
"rubygem-brakeman",
"venv-openstack-octavia",
"venv-openstack-glance",
"rubygem-safe_yaml",
"python-python-engineio",
"ardana-spark",
"venv-openstack-nova",
"ardana-mq",
"rubygem-inifile",
"python-Flask-SocketIO",
"venv-openstack-cinder",
"rubygem-haml",
"rubygem-rspec-core",
"collectl",
"skelcd-hos",
"ardana-extensions-nsx",
"ardana-extensions-example",
"rubygem-unicorn",
"ardana-tempest",
"kubernetes-node-image-pause",
"rubygem-redcarpet",
"couchdb",
"python-smmap2",
"rubygem-xml-simple",
"_product:hpe-helion-openstack-release",
"ardana-opsconsole-ui",
"ansible1",
"rubygem-mixlib-config-1",
"rubygem-cookiejar",
"rubygem-ruby2ruby",
"rubygem-mixlib-authentication",
"rubygem-haml-rails-0_5",
"ardana-input-model",
"rubygem-rest-client-1_6",
"rubygem-addressable",
"python-python-socketio",
"rubygem-delayed_job_active_record",
"rubygem-docile",
"python-ardana-opsconsole-server",
"galera-3",
"_product:suse-openstack-cloud-crowbar-ftp-POOL-aarch64",
"rubygem-rack",
"_product",
"ruby-common",
"ardana-memcached",
"rubygem-unf",
"python-Pykka",
"rubygem-easy_diff",
"rubygem-highline",
"mariadb",
"python-socketIO-client",
"rubygem-font-awesome-sass",
"hpe-helion-openstack-user",
"rubygem-i18n-js",
"python-glance-check",
"rubygem-celluloid-0_15",
"galera-python-clustercheck",
"python-swiftlm",
"venv-openstack-murano",
"venv-openstack-monasca",
"rubygem-wsman",
"rubygem-rspec-expectations",
"rubygem-erubis",
"ruby2.1",
"rubygem-mongo",
"venv-openstack-ironic",
"openstack-ironic-image",
"rubygem-domain_name",
"rubygem-httmultiparty",
"rubygem-rspec-support",
"rubygem-em-socksify",
"rubygem-js-routes",
"rubygem-rails-4_1",
"rubygem-rack-mini-profiler",
"hpe-helion-openstack-security",
"ardana-swift",
"mariadb-connector-c",
"suse-openstack-cloud-installation",
"ardana-ansible",
"rubygem-get_process_mem",
"suse-openstack-cloud-upstream-admin",
"rubygem-puma",
"rubygem-sass-rails-4",
"rubygem-chef-server-api",
"rubygem-extlib",
"rubygem-sprockets-helpers",
"rubygem-sqlite3",
"patterns-cloud",
"suse-openstack-cloud-user",
"rubygem-multipart-post",
"rubygem-ember-source",
"yast2-crowbar",
"xtrabackup",
"openstack-dashboard-theme-HPE",
"rubygem-mime-types-1",
"rubygem-activeresource",
"rubygem-multi_json",
"suse-openstack-cloud-operations",
"hpe-helion-openstack-migration",
"rubygem-http-cookie",
"rubygem-net-http-digest_auth",
"rubygem-dep_selector-0_1",
"rubygem-ohai-6",
"suse-openstack-cloud-security",
"_product:suse-openstack-cloud-crowbar-ftp-POOL-x86_64",
"rubygem-thread_safe",
"python-gitdb2",
"ipxe",
"jsmn",
"python-Beaver",
"rubygem-merb-core",
"rubygem-closure-compiler",
"rubygem-i18n",
"ardana-cassandra",
"rubygem-jquery-rails",
"rubygem-mysql2",
"ardana-horizon",
"python-html",
"rubygem-net-ssh",
"documentation-suse-openstack-cloud",
"venv-openstack-monasca-ceilometer",
"rubygem-sprockets-2_11",
"rubygem-yajl-ruby",
"rubygem-builder",
"rubygem-active_model_serializers",
"venv-openstack-keystone",
"rubygem-byebug",
"hpe-helion-openstack-planning",
"crowbar-ha",
"rubygem-webmock",
"rubygem-http_parser.rb",
"rubygem-crowbar-client",
"rubygem-merb-param-protection",
"crowbar",
"rubygem-chef-server",
"venv-openstack-ceilometer",
"rubygem-mixlib-shellout",
"crowbar-openstack",
"python-cinderlm",
"rubygem-minitest",
"rubygem-fast_xs-0_7",
"rubygem-ember-rails",
"ardana-db",
"python-ardana-configurationprocessor",
"venv-openstack-designate",
"rubygem-arel-5",
"ardana-ses",
},
"in_obs_but_not_in_product": {
"golang-github-naoina-go-stringutil",
"python-Cython",
"python-colorama",
"python-ecdsa",
"python-group-based-policy-client",
"openvswitch",
"python-Flask-WTF",
"python-stestr",
"go1.14",
"python-Flask-HTTPAuth",
"python-warlock",
"python-Cycler",
"grafana-apache",
"python3-base",
"python-reportlab",
"python-senlinclient",
"metis",
"python-uritemplate",
"apache-rpm-macros-control",
"python-tabulate",
"python-dulwich",
"python-scp",
"python-vcversioner",
"python-jsonpointer",
"conntrack-tools",
"python-PyYAML",
"python-jsonschema",
"venv-openstack-tempest",
"venv-openstack-horizon:hpe",
"openstack-rally",
"python-WTForms",
"python-bottle",
"python-Flask-Bootstrap",
"influxdb-relay",
"python-pyinotify",
"go1.9",
"python-subprocess32",
"python-pyperclip",
"python-iniparse",
"wodim",
"python-textfsm",
"sle12-cloud-aggregates",
"python-pytest-expect",
"python-futures",
"openstack-refstack",
"python-ptyprocess",
"python-zope.event",
"python-blinker",
"python-cmd2",
"suitesparse",
"python-xvfbwrapper",
"python-oauthlib",
"haproxy",
"python-visitor",
"python-Flask-Admin",
"python-aci-integration-module",
"python-pycodestyle",
"python-botocore",
"python-ironic-inspector-client",
"python-glareclient",
"python-masakariclient",
"python-dominate",
"python-pexpect",
"python-scipy",
"python-acitoolkit",
"nodejs-common",
"python-gitdb",
"python-numpy",
"python-zope.interface",
"python-cliff-tablib",
"golang-github-naoina-toml",
"python-mox3",
"openstack-neutron-opflex-agent",
"python-nosehtmloutput",
"rpmlint-mini",
"python-hp3parclient",
"go1.4",
"python-google-api-python-client",
"python-pycparser",
"openblas",
"qhull",
"apache-rpm-macros",
"apache2-mod_wsgi",
"python-matplotlib",
"python-smmap",
"python-Flask-SQLAlchemy",
"nodejs6",
"python-heat-cfntools",
"python-netmiko",
"python-u-msgpack-python",
"python-pyldap",
"python-pycurl",
"python-jsonpatch",
"python-graphviz",
"python-functools32",
"python-lxml",
"dnsmasq",
"python3",
"python-backports",
},
"in_obs_but_not_in_ibs_devel": {
"python-python-memcached",
"python-cassandra-driver",
"python-docutils",
"python-colorama",
"python-fasteners",
"python-elasticsearch-curator",
"python-contextlib2",
"python-stestr",
"elasticsearch",
"python-testrepository",
"python-Cycler",
"grafana-apache",
"python-sphinxcontrib-pecanwsme",
"python-singledispatch",
"python-senlinclient",
"python-dulwich",
"python-XStatic-Angular-Schema-Form",
"crudini",
"python-nose",
"python-jsonpointer",
"python-cov-core",
"python-jsonschema",
"python-pecan",
"python-pydot",
"python-pytest-httpbin",
"python-django-formtools",
"openstack-rally",
"python-repoze.who",
"python-typing",
"python-keyring",
"python-py",
"python-Genshi",
"python-pyinotify",
"python-websocket-client",
"python-XStatic-smart-table",
"python-pyperclip",
"python-rjsmin",
"python-wrapt",
"python-wsgi_intercept",
"python-pytest-expect",
"python-futures",
"python-setuptools_scm",
"python-xvfbwrapper",
"python-ddt",
"python-unicodecsv",
"patterns-OpenStack",
"python-requests-aws",
"python-ironic-inspector-client",
"python-glareclient",
"python-mock",
"python-rfc3986",
"python-XStatic-mdi",
"python-SQLAlchemy-Utils",
"python-dogpile.cache",
"python-ply",
"python-gitdb",
"python-PyNaCl",
"python-pyasn1-modules",
"python-numpy",
"python-sphinxcontrib-websupport",
"python-cliff-tablib",
"python-mox3",
"python-pykerberos",
"python-kazoo",
"python-hp3parclient",
"java-monasca-common",
"python-XStatic-JQuery-Migrate",
"python-boto",
"python-idna",
"python-pymongo",
"python-smmap",
"go",
"python-pyngus",
"python-greenlet",
"python-httpbin",
"python-kafka-python",
"python-functools32",
"python-simplegeneric",
"python-yaql",
"python-funcparserlib",
"python-asn1crypto",
"python-scrypt",
"python-reno",
"golang-github-naoina-go-stringutil",
"qpid-proton",
"python-python-dateutil",
"python-hypothesis",
"python-Pint",
"python-ecdsa",
"python-click",
"python-group-based-policy-client",
"python-castellan",
"python-Sphinx",
"python-jsonpath-rw-ext",
"python-webcolors",
"python-certifi",
"python-mox",
"python-zake",
"python-setuptools",
"python-Pygments",
"python-reportlab",
"python-termcolor",
"metis",
"python-WSME",
"python-enum34",
"python-pykafka",
"python-setproctitle",
"python-networkx",
"python-python-logstash",
"python-PyYAML",
"python-XStatic-JQuery.quicksearch",
"venv-openstack-horizon:hpe",
"python-positional",
"liberasurecode",
"python-passlib",
"python-bottle",
"influxdb-relay",
"python-ipaddress",
"python-pysmi",
"java-monasca-common-kit",
"wodim",
"python-XStatic-Bootstrap-SCSS",
"python-blockdiag",
"python-process-tests",
"sle12-cloud-aggregates",
"python-requests-mock",
"python-betamax",
"erlang-rebar",
"python-XStatic-Angular-FileUpload",
"python-XStatic-Angular-lrdragndrop",
"python-pysendfile",
"python-sure",
"python-XStatic-jQuery",
"python-cursive",
"python-snowballstemmer",
"python-redis",
"python-sphinxcontrib-seqdiag",
"python-pyquery",
"python-UcsSdk",
"python-pytest-cov",
"python-tinyrpc",
"nodejs-common",
"python-XStatic-term.js",
"python-docker",
"python-nosehtmloutput",
"python-decorator",
"python-seqdiag",
"go1.4",
"python-monotonic",
"python-flaky",
"qhull",
"python-Babel",
"python-proboscis",
"python-cssselect",
"python-u-msgpack-python",
"python-pyldap",
"python-six",
"python-sphinxcontrib-httpdomain",
"python-toml",
"python-XStatic-Jasmine",
"python-ncclient",
"python-websockify",
"python-pydotplus",
"python-backports",
"python-XStatic-objectpath",
"python-pytest-mock",
"openvswitch",
"python-pyScss",
"python-logutils",
"python-xattr",
"python-jmespath",
"go1.14",
"python-cachetools",
"python-pytz",
"python-sqlparse",
"python-linecache2",
"python-virtualenv",
"python-ntplib",
"python-scp",
"python-backports_abc",
"python-vcversioner",
"conntrack-tools",
"python-xmltodict",
"python-pyparsing",
"python-PyMySQL",
"python-XStatic-Bootstrap-Datepicker",
"python-meld3",
"python-funcsigs",
"python-python-mimeparse",
"python-retrying",
"python-sphinx-testing",
"python-requests-kerberos",
"python-rcssmin",
"python-croniter",
"python-Tempita",
"python-enum-compat",
"python-traceback2",
"python-pytest-runner",
"python-pifpaf",
"python-appdirs",
"python-pyOpenSSL",
"python-zope.event",
"python-pika-pool",
"python-eventlet",
"python-sqlalchemy-migrate",
"python-Routes",
"python-gevent",
"python-case",
"python-pexpect",
"python-defusedxml",
"python-scipy",
"python-WebOb",
"python-zope.interface",
"golang-github-naoina-toml",
"python-testtools",
"python-elasticsearch",
"python-Jinja2",
"python-unittest2",
"openblas",
"python-deprecation",
"python-pathlib",
"python-nose-exclude",
"python-ujson",
"python-matplotlib",
"python-chardet",
"python-rsa",
"python-bcrypt",
"python-Paste",
"python-olefile",
"spark-kit",
"python-XStatic-Hogan",
"python-dnspython",
"python-suds-jurko",
"dnsmasq",
"python-XStatic-Spin",
"python-repoze.lru",
"python-django-appconf",
"python-imagesize",
"elasticsearch-kit",
"python-SecretStorage",
"python-vine",
"python-Cython",
"python-pip",
"python-alabaster",
"python-warlock",
"python-happybase",
"python-python-json-logger",
"erlang-retest",
"python-fixtures",
"python-sphinx_rtd_theme",
"python3-base",
"python-MySQL-python",
"python-requestsexceptions",
"apache-rpm-macros-control",
"python-netaddr",
"python-WSGIProxy2",
"python-simplejson",
"python-itsdangerous",
"python-os-testr",
"python-django-babel",
"venv-openstack-tempest",
"python-pycadf",
"python-jsonpath-rw",
"go1.9",
"python-MarkupSafe",
"python-subprocess32",
"python-pycrypto",
"python-daiquiri",
"python-iniparse",
"python-textfsm",
"python-extras",
"python-django-pyscss",
"python-uncertainties",
"openstack-refstack",
"python-ptyprocess",
"python-blinker",
"python-cmd2",
"suitesparse",
"python-django_compressor",
"python-msgpack-python",
"python-oauthlib",
"haproxy",
"python-pycodestyle",
"python-botocore",
"python-html5lib",
"python-cffi",
"python-influxdb",
"python-masakariclient",
"python3",
"python-beautifulsoup4",
"python-PyECLib",
"python-pika",
"python-WebTest",
"python-voluptuous",
"python-XStatic-Rickshaw",
"rpmlint-mini",
"python-XStatic-tv4",
"python-packaging",
"python-semantic_version",
"python-testscenarios",
"python-pretend",
"python-testresources",
"python-pycparser",
"apache-rpm-macros",
"python-PasteDeploy",
"python-psycopg2",
"python-iso8601",
"nodejs6",
"python-demjson",
"python-heat-cfntools",
"python-openstack-doc-tools",
"python-PySocks",
"python-netmiko",
"python-pycurl",
"python-jsonpatch",
"python-XStatic",
"python-oauth2client",
"python-microversion_parse",
"python-lxml",
"python-XStatic-JQuery.TableSorter",
"openstack-monasca-persister-java-kit",
"python-openstack.nose_plugin",
},
},
7: {
"in_product_but_not_in_devel": {
"python-numexpr",
"crudini",
"python-pyquery",
"python-greenlet",
"python-zake",
"python-pymemcache",
"python-pandas",
"python-testrepository",
"python-mox",
"python-traceback2",
"python-nose-cover3",
"python-tenacity",
"python-docutils",
"python-extras",
"python-cursive",
"python-oslo.privsep",
"python-websockify",
"python-thrift",
"erlang-sd_notify",
"python-jsonrpclib",
"python-swift3",
"python-ciscoconfparse",
"python-ldap3",
"python-python-editor",
"python-setproctitle",
"python-XStatic-objectpath",
"python-unittest2",
"python-funcsigs",
"python-PasteDeploy",
"python-croniter",
"jeos-firstboot",
"python-PyMySQL",
"python-nose",
"python-nose-exclude",
"python-neutron-lib",
"mongodb",
"python-discover",
"python-pymongo",
"python-pika-pool",
"python-pytest-cov",
"python-XStatic-tv4",
"apache2-mod_wsgi",
"python-uncertainties",
"python-stevedore",
"python-kafka-python",
"python-repoze.who",
"python-cov-core",
"liberasurecode",
"python-sphinxcontrib-seqdiag",
"python-alembic",
"python-os-client-config",
"python-sphinxcontrib-pecanwsme",
"erlang-retest",
"python-python-openid",
"python-coverage",
"python-overtest",
"python-mock",
"python-trollius",
"python-hp3parclient",
"python-MySQL-python",
"python-Routes",
"python-pyghmi",
"python-pytimeparse",
"bzr.SUSE_SLE-12-SP2_Update_Products_Cloud7_Update",
"python-WebTest",
"python-fake-factory",
"python-suds-jurko",
"python-happybase",
"python-cliff",
"python-linecache2",
"python-PyECLib",
"python-Pint",
"python-xmltodict",
"python-ldappool",
"python-sphinx_rtd_theme",
"python-pytidylib6",
"python-html5lib",
"python-XStatic-Angular-Schema-Form",
"python-logutils",
"python-webcolors",
"python-ironic-lib",
"python-termcolor",
"python-django-pyscss",
"python-Genshi",
"python-flup",
"python-Beaker",
"python-testtools",
"euca2ools",
"python-Bottleneck",
"python-microversion_parse",
"python-pydotplus",
"python-retrying",
"python-versiontools",
"python-beautifulsoup4",
"python-proboscis",
"python-openstackdocstheme",
"python-pifpaf",
"python-tooz",
"python-kazoo",
"python-positional",
"python-testscenarios",
"python-pyinotify",
"python-python-subunit",
"python-rfc3986",
"python-semantic_version",
"python-jsonpath-rw-ext",
"python-sysv_ipc",
"python-PySocks",
"python-automaton",
"python-Paste",
"python-networkx",
"erlang-rebar-obs",
"python-XStatic-Angular-FileUpload",
"python-pecan",
"python-Babel",
"python-cachetools",
"python-repoze.lru",
"python-requests",
"python-blockdiag",
"python-dnspython",
"python-appdirs",
"python-funcparserlib",
"python-dogpile.core",
"python-SQLAlchemy-Utils",
"python-requestsexceptions",
"python-WebOb",
"python-defusedxml",
"python-yaql",
"python-alabaster",
"python-apicapi",
"python-python-dateutil",
"python-httpretty",
"python-sphinxcontrib-docbookrestapi",
"python-singledispatch",
"python-python-memcached",
"python-castellan",
"python-monotonic",
"python-jsonpath-rw",
"python-pysendfile",
"python-requests-mock",
"python-click",
"python-ncclient",
"python-sphinxcontrib-httpdomain",
"python-netaddr",
"python-reno",
"python-debtcollector",
"python-osc-lib",
"python-Tempita",
"python-betamax",
"python-contextlib2",
"python-posix_ipc",
"python-fasteners",
"erlang-rebar",
"python-sqlparse",
"python-dogpile.cache",
"python-pysnmp",
"python-seqdiag",
"containment-rpm",
"js",
"python-futurist",
"python-snowballstemmer",
"python-pbr",
"python-hypothesis",
"python-openstack.nose_plugin",
"python-pyScss",
"python-requests-kerberos",
"python-os-cloud-config",
"python-psycopg2",
"python-kombu",
"python-WSGIProxy2",
"python-django-appconf",
"python-pysmi",
"python-anyjson",
"python-requests-aws",
"obs-service-tar_scm.SUSE_SLE-12-SP2_Update_Products_Cloud7_Update", # noqa: E501
"python-pykerberos",
"python-unicodecsv",
"python-pathlib",
"python-oslosphinx",
"python-pika",
"python-testresources",
"python-voluptuous",
"python-aioeventlet",
"python-docker-py",
"python-sqlalchemy-migrate",
"python-pydot",
"python-fixtures",
"python-WSME",
"python-UcsSdk",
"python-simplegeneric",
"python-passlib",
"xtrabackup",
"python-wrapt",
},
"in_ibs_devel_but_not_in_staging": set(),
"in_obs_but_not_in_staging": set(),
"in_rubygem_but_not_in_product": {
"rubygem-arel-5",
"rubygem-ruby-shadow",
"rubygem-uglifier-2_2",
"rubygem-rspec-core",
"rubygem-actionpack-4_1",
"rubygem-dotenv-deployment",
"rubygem-thor",
"rubygem-rspec-expectations",
"rubygem-yajl-ruby",
"rubygem-raindrops",
"rubygem-multi_json",
"ruby-common",
"rubygem-mail",
"rubygem-rest-client.cloud5",
"rubygem-builder",
"rubygem-tilt-1_4",
"rubygem-js-routes",
"rubygem-minitest",
"rubygem-rspec-support",
"rubygem-activemodel-4_1",
"rubygem-systemu-2_5",
"rubygem-mixlib-cli",
"rubygem-hike-1_2",
"rubygem-active_model_serializers",
"rubygem-moneta-0_6",
"ruby-packaging-helper",
"rubygem-haml",
"rubygem-rspec-mocks",
"ruby.SLE_12",
"rubygem-erubis",
"rubygem-haml-rails-0_5",
"rubygem-ipaddress",
"rubygem-bundler",
"rubygem-systemu",
"rubygem-thread_safe",
"rubygem-hashie-3_3",
"rubygem-tzinfo",
"rubygem-byebug",
"rubygem-polyglot",
"rubygem-sass-rails-4",
"rubygem-net-ssh",
"rubygem-rack-test-0_6",
"rubygem-actionmailer-4_1",
"rubygem-mixlib-log",
"rubygem-arel",
"rubygem-activesupport-4_1",
"rubygem-diff-lcs",
"rubygem-mixlib-authentication",
"rubygem-net-ssh-gateway",
"rubygem-mixlib-shellout",
"rubygem-jquery-rails",
"rubygem-js-routes-0_9",
"rubygem-rack",
"rubygem-highline",
"rubygem-sprockets-rails",
"rubygem-i18n",
"rubygem-rails-4_1",
"ruby2.1",
"rubygem-mime-types-1",
"rubygem-rack-1_5",
"rubygem-rake",
"rubygem-railties-4_1",
"rubygem-treetop-1_4",
"rubygem-gem2rpm",
"rubygem-activerecord-4_1",
"rubygem-sass-3_2",
"rubygem-actionview-4_1",
},
"in_ibs_devel_but_not_in_product": {
"crowbar-monitoring",
"openstack-monasca-persister",
"thrift",
"perl-Devel-CheckBin",
"elasticsearch-sources",
"perl-Class-Accessor",
"runc",
"perl-Sub-Name",
"python-keystone-json-assignment",
"docker",
"python-oauth2client",
"storm-sources",
"python-botocore",
"python-google-api-python-client",
"containerd",
"kafka-sources",
"zookeeper-sources",
"golang-github-jteeuwen-go-bindata",
},
"in_product_but_not_in_obs": {
"rubygem-get_process_mem",
"rubygem-addressable",
"rubygem-ohai-6",
"rubygem-apipie-rails",
"rubygem-sinatra-contrib",
"rubygem-pg",
"rubygem-httparty",
"rubygem-webmock",
"rubygem-metaclass",
"python-pymemcache",
"rubygem-mysql2",
"openstack-magnum-k8s-image",
"rubygem-brakeman",
"sles12sp2-docker-image",
"rubygem-mimemagic",
"rubygem-terminal-table",
"openstack-ironic-doc",
"rubygem-mysql",
"rubygem-unf_ext",
"_product:suse-openstack-cloud-ftp-POOL-s390x",
"rubygem-bunny-0_6",
"rubygem-docile",
"rubygem-handlebars-source",
"rubygem-wsman",
"rubygem-unicorn",
"rubygem-yaml_db",
"python-jsonrpclib",
"python-ciscoconfparse",
"rubygem-faraday",
"patterns-cloud",
"sleshammer",
"rubygem-fast_xs-0_7",
"skelcd-cloud",
"caasp-openstack-heat-templates",
"rubygem-netrc",
"rubygem-merb-helpers",
"rubygem-chef-expander",
"rubygem-thin",
"mariadb-connector-c",
"jeos-firstboot",
"rubygem-sprockets-standalone",
"suse-openstack-cloud-user_en",
"mongodb",
"rubygem-ember-data-source",
"python-discover",
"rubygem-barber",
"rubygem-uuidtools",
"rubygem-inifile",
"_product:suse-openstack-cloud-cd-cd-ppc64le",
"rubygem-http_parser.rb",
"_product:suse-openstack-cloud-ftp-POOL-x86_64",
"rubygem-delayed_job_active_record",
"_product:suse-openstack-cloud-cd-cd-x86_64",
"rubygem-simplecov-html",
"rubygem-activeresource",
"galera-3",
"rubygem-rest-client-1_6",
"galera-python-clustercheck",
"couchdb",
"rubygem-ffi",
"rubygem-rspec-rails",
"mariadb",
"rubygem-sprockets-helpers",
"jemalloc",
"rubygem-sprockets-2_12",
"rubygem-fastercsv",
"rubygem-closure-compiler",
"rubygem-redis",
"python-hp3parclient",
"python-yarb",
"rubygem-rainbows",
"rubygem-knife-backup",
"bzr.SUSE_SLE-12-SP2_Update_Products_Cloud7_Update",
"rubygem-kgio",
"rubygem-eventmachine",
"rubygem-dep_selector-0_1",
"release-notes-suse-openstack-cloud",
"rubygem-httmultiparty",
"rubygem-merb-assets",
"_product:openstack-cloud-magnum-orchestration-SP1-migration", # noqa: E501
"rubygem-timers-1_1",
"crowbar",
"_product:suse-openstack-cloud-release",
"rubygem-sprockets-2_11",
"crowbar-ceph",
"rubygem-font-awesome-sass",
"python-pytidylib6",
"rubygem-em-http-request-1_0",
"rubygem-net-http-digest_auth",
"_product:cloud-testing-release",
"python-sphinx_rtd_theme",
"rubygem-activerecord-session_store",
"rubygem-sqlite3",
"rubygem-chef-zero",
"rubygem-extlib",
"crowbar-core",
"rubygem-rack-mini-profiler",
"rubygem-rails-observers",
"crowbar-openstack",
"rubygem-celluloid-0_15",
"crowbar-hyperv",
"rubygem-ember-source",
"yast2-crowbar",
"rubygem-execjs",
"rubygem-archive",
"jsmn",
"gecode",
"rubygem-redis-namespace",
"rubygem-connection_pool",
"openstack-dashboard-theme-SUSE",
"rubygem-sidekiq",
"rubygem-syslogger",
"rubygem-json-1_7",
"rubygem-dotenv",
"crowbar-init",
"crowbar-ui",
"rubygem-simplecov",
"rubygem-net-ssh-multi-1_1",
"rubygem-chef-server",
"rubygem-mongo",
"suse-openstack-cloud-monitor-msoperator_en",
"suse-openstack-cloud-monitor-osoperator_en",
"suse-openstack-cloud-supplement_en",
"rubygem-multipart-post",
"_product:cloud-testing-cd-cd-aarch64",
"kubernetes-node-image-pause",
"python-sysv_ipc",
"rubygem-xml-simple",
"suse-openstack-cloud-admin_en",
"etcd",
"_product:cloud-testing-cd-cd-s390x",
"erlang-rebar-obs",
"rubygem-libxml-ruby",
"rubygem-mocha",
"rubygem-sinatra",
"rubygem-chef-solr",
"_product:suse-openstack-cloud-cd-cd-aarch64",
"rubygem-simple_navigation_renderers",
"_product:suse-openstack-cloud-ftp-POOL-ppc64le",
# "ts":1590228000000,
# "o":"9139.59",
# "c":"9131.94",
# "h":"9139.99",
# "l":"9121.71",
# "v":"25.20648"
# }
# }
#
data = self.safe_value(ohlcv, 'data', {})
return [
self.safe_integer(data, 'ts'),
self.safe_number(data, 'o'),
self.safe_number(data, 'h'),
self.safe_number(data, 'l'),
self.safe_number(data, 'c'),
self.safe_number(data, 'v'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'interval': self.timeframes[timeframe],
}
# if since and limit are not specified
# the exchange will return just 1 last candle by default
duration = self.parse_timeframe(timeframe)
options = self.safe_value(self.options, 'fetchOHLCV', {})
defaultLimit = self.safe_integer(options, 'limit', 500)
if since is not None:
request['from'] = since
if limit is None:
limit = defaultLimit
else:
limit = min(limit, defaultLimit)
request['to'] = self.sum(since, limit * duration * 1000, 1)
elif limit is not None:
request['n'] = limit # max 500
response = self.v1PublicGetBarhist(self.extend(request, params))
#
# {
# "code":0,
# "data":[
# {
# "m":"bar",
# "s":"BTC/USDT",
# "data":{
# "i":"1",
# "ts":1590228000000,
# "o":"9139.59",
# "c":"9131.94",
# "h":"9139.99",
# "l":"9121.71",
# "v":"25.20648"
# }
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
#
# public fetchTrades
#
# {
# "p":"9128.5", # price
# "q":"0.0030", # quantity
# "ts":1590229002385, # timestamp
        #         "bm":false, # if True, the buyer is the market maker; we only use this field to "define the side" of a public trade
# "seqnum":180143985289898554
# }
#
timestamp = self.safe_integer(trade, 'ts')
priceString = self.safe_string_2(trade, 'price', 'p')
amountString = self.safe_string(trade, 'q')
buyerIsMaker = self.safe_value(trade, 'bm', False)
makerOrTaker = 'maker' if buyerIsMaker else 'taker'
side = 'buy' if buyerIsMaker else 'sell'
market = self.safe_market(None, market)
return self.safe_trade({
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'id': None,
'order': None,
'type': None,
'takerOrMaker': makerOrTaker,
'side': side,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': None,
}, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['n'] = limit # max 100
response = self.v1PublicGetTrades(self.extend(request, params))
#
# {
# "code":0,
# "data":{
# "m":"trades",
# "symbol":"BTC-PERP",
# "data":[
# {"p":"9128.5","q":"0.0030","ts":1590229002385,"bm":false,"seqnum":180143985289898554},
# {"p":"9129","q":"0.0030","ts":1590229002642,"bm":false,"seqnum":180143985289898587},
# {"p":"9129.5","q":"0.0030","ts":1590229021306,"bm":false,"seqnum":180143985289899043}
# ]
# }
# }
#
records = self.safe_value(response, 'data', [])
trades = self.safe_value(records, 'data', [])
return self.parse_trades(trades, market, since, limit)
def parse_order_status(self, status):
statuses = {
'PendingNew': 'open',
'New': 'open',
'PartiallyFilled': 'open',
'Filled': 'closed',
'Canceled': 'canceled',
'Rejected': 'rejected',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "id": "16e607e2b83a8bXHbAwwoqDo55c166fa",
# "orderId": "16e85b4d9b9a8bXHbAwwoqDoc3d66830",
# "orderType": "Market",
# "symbol": "BTC/USDT",
# "timestamp": 1573576916201
# }
#
# {
# "ac": "FUTURES",
# "accountId": "<KEY>",
# "time": 1640819389454,
# "orderId": "a17e0874ecbdU0711043490bbtcpDU5X",
# "seqNum": -1,
# "orderType": "Limit",
# "execInst": "NULL_VAL",
# "side": "Buy",
# "symbol": "BTC-PERP",
# "price": "30000",
# "orderQty": "0.002",
# "stopPrice": "0",
# "stopBy": "ref-px",
# "status": "Ack",
# "lastExecTime": 1640819389454,
# "lastQty": "0",
# "lastPx": "0",
# "avgFilledPx": "0",
# "cumFilledQty": "0",
# "fee": "0",
# "cumFee": "0",
# "feeAsset": "",
# "errorCode": "",
# "posStopLossPrice": "0",
# "posStopLossTrigger": "market",
# "posTakeProfitPrice": "0",
# "posTakeProfitTrigger": "market",
# "liquidityInd": "n"
# }
#
# fetchOrder, fetchOpenOrders, fetchClosedOrders
#
# {
# "symbol": "BTC/USDT",
# "price": "8131.22",
# "orderQty": "0.00082",
# "orderType": "Market",
# "avgPx": "7392.02",
# "cumFee": "0.005152238",
# "cumFilledQty": "0.00082",
# "errorCode": "",
# "feeAsset": "USDT",
# "lastExecTime": 1575953151764,
# "orderId": "a16eee20b6750866943712zWEDdAjt3",
# "seqNum": 2623469,
# "side": "Buy",
# "status": "Filled",
# "stopPrice": "",
# "execInst": "NULL_VAL"
# }
#
# {
# "ac": "FUTURES",
# "accountId": "testabcdefg",
# "avgPx": "0",
# "cumFee": "0",
# "cumQty": "0",
# "errorCode": "NULL_VAL",
# "execInst": "NULL_VAL",
# "feeAsset": "USDT",
# "lastExecTime": 1584072844085,
# "orderId": "r170d21956dd5450276356bbtcpKa74",
# "orderQty": "1.1499",
# "orderType": "Limit",
# "price": "4000",
# "sendingTime": 1584072841033,
# "seqNum": 24105338,
# "side": "Buy",
# "status": "Canceled",
# "stopPrice": "",
# "symbol": "BTC-PERP"
# },
#
status = self.parse_order_status(self.safe_string(order, 'status'))
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market, '/')
timestamp = self.safe_integer_2(order, 'timestamp', 'sendingTime')
lastTradeTimestamp = self.safe_integer(order, 'lastExecTime')
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'orderQty')
average = self.safe_string(order, 'avgPx')
filled = self.safe_string_2(order, 'cumFilledQty', 'cumQty')
id = self.safe_string(order, 'orderId')
clientOrderId = self.safe_string(order, 'id')
if clientOrderId is not None:
if len(clientOrderId) < 1:
clientOrderId = None
type = self.safe_string_lower(order, 'orderType')
side = self.safe_string_lower(order, 'side')
feeCost = self.safe_number(order, 'cumFee')
fee = None
if feeCost is not None:
feeCurrencyId = self.safe_string(order, 'feeAsset')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
stopPrice = self.safe_number(order, 'stopPrice')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': None,
'average': average,
'filled': filled,
'remaining': None,
'status': status,
'fee': fee,
'trades': None,
}, market)
def fetch_trading_fees(self, params={}):
self.load_markets()
self.load_accounts()
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_string(account, 'id')
request = {
'account-group': accountGroup,
}
response = self.v1PrivateAccountGroupGetSpotFee(self.extend(request, params))
#
# {
# code: '0',
# data: {
# domain: 'spot',
# userUID: 'U1479576458',
# vipLevel: '0',
# fees: [
# {symbol: 'HT/USDT', fee: {taker: '0.001', maker: '0.001'}},
# {symbol: 'LAMB/BTC', fee: {taker: '0.002', maker: '0.002'}},
# {symbol: 'STOS/USDT', fee: {taker: '0.002', maker: '0.002'}},
# ...
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
fees = self.safe_value(data, 'fees', [])
result = {}
for i in range(0, len(fees)):
fee = fees[i]
marketId = self.safe_string(fee, 'symbol')
symbol = self.safe_symbol(marketId, None, '/')
takerMaker = self.safe_value(fee, 'fee', {})
result[symbol] = {
'info': fee,
'symbol': symbol,
'maker': self.safe_number(takerMaker, 'maker'),
'taker': self.safe_number(takerMaker, 'taker'),
}
return result
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
self.load_accounts()
market = self.market(symbol)
marketType = None
marketType, params = self.handle_market_type_and_params('createOrder', market, params)
options = self.safe_value(self.options, 'createOrder', {})
accountsByType = self.safe_value(self.options, 'accountsByType', {})
accountCategory = self.safe_string(accountsByType, marketType, 'cash')
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_value(account, 'id')
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'id')
reduceOnly = self.safe_value(params, 'execInst')
if reduceOnly is not None:
if (marketType != 'swap'):
raise InvalidOrder(self.id + ' createOrder() does not support reduceOnly for ' + marketType + ' orders, reduceOnly orders are supported for perpetuals only')
request = {
'account-group': accountGroup,
'account-category': accountCategory,
'symbol': market['id'],
'time': self.milliseconds(),
'orderQty': self.amount_to_precision(symbol, amount),
'orderType': type, # "limit", "market", "stop_market", "stop_limit"
'side': side, # "buy" or "sell"
# 'orderPrice': self.price_to_precision(symbol, price),
# 'stopPrice': self.price_to_precision(symbol, stopPrice), # required for stop orders
# 'postOnly': 'false', # 'false', 'true'
# 'timeInForce': 'GTC', # GTC, IOC, FOK
# 'respInst': 'ACK', # ACK, 'ACCEPT, DONE
# 'posStopLossPrice': position stop loss price( v2 swap orders only)
# 'posTakeProfitPrice': position take profit price(v2 swap orders only)
}
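        # Hypothetical call sketch (symbol, prices and sizes are illustrative,
        # not taken from exchange documentation):
        #
        #     exchange.create_order('BTC/USDT', 'stop_limit', 'buy', 0.001,
        #                           30000, {'stopPrice': 29500, 'timeInForce': 'PO'})
        #
        # 'stopPrice' is consumed below and forwarded in the request, while
        # 'timeInForce' == 'PO' (or postOnly=True) becomes request['postOnly'].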
if clientOrderId is not None:
request['id'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'id'])
if (type == 'limit') or (type == 'stop_limit'):
request['orderPrice'] = self.price_to_precision(symbol, price)
if (type == 'stop_limit') or (type == 'stop_market'):
stopPrice = self.safe_number(params, 'stopPrice')
if stopPrice is None:
raise InvalidOrder(self.id + ' createOrder() requires a stopPrice parameter for ' + type + ' orders')
else:
request['stopPrice'] = self.price_to_precision(symbol, stopPrice)
params = self.omit(params, 'stopPrice')
timeInForce = self.safe_string(params, 'timeInForce')
postOnly = self.safe_value(params, 'postOnly', False)
if (timeInForce == 'PO') or (postOnly):
request['postOnly'] = True
params = self.omit(params, ['postOnly', 'timeInForce'])
defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryPostOrder')
method = self.get_supported_mapping(marketType, {
'spot': defaultMethod,
'margin': defaultMethod,
'swap': 'v2PrivateAccountGroupPostFuturesOrder',
})
if method == 'v1PrivateAccountCategoryPostOrder':
if accountCategory is not None:
request['category'] = accountCategory
else:
request['account-category'] = accountCategory
response = getattr(self, method)(self.extend(request, params))
#
# AccountCategoryPostOrder
#
# {
# "code": 0,
# "data": {
# "ac": "MARGIN",
# "accountId": "<KEY>",
# "action": "place-order",
# "info": {
# "id": "16e607e2b83a8bXHbAwwoqDo55c166fa",
# "orderId": "16e85b4d9b9a8bXHbAwwoqDoc3d66830",
# "orderType": "Market",
# "symbol": "BTC/USDT",
# "timestamp": 1573576916201
# },
# "status": "Ack"
# }
# }
#
# AccountGroupPostFuturesOrder
#
# {
# "code": 0,
# "data": {
# "meta": {
# "id": "",
# "action": "place-order",
# "respInst": "ACK"
# },
# "order": {
# "ac": "FUTURES",
# "accountId": "<KEY>",
| |
m.c93 = Constraint(expr= m.b5 - m.b13 + m.b40 <= 1)
m.c94 = Constraint(expr= m.b5 - m.b15 + m.b41 <= 1)
m.c95 = Constraint(expr= m.b5 - m.b17 + m.b42 <= 1)
m.c96 = Constraint(expr= m.b5 - m.b19 + m.b43 <= 1)
m.c97 = Constraint(expr= m.b5 - m.b21 + m.b44 <= 1)
m.c98 = Constraint(expr= m.b5 - m.b23 + m.b45 <= 1)
m.c99 = Constraint(expr= m.b5 - m.b25 + m.b46 <= 1)
m.c100 = Constraint(expr= m.b7 - m.b9 + m.b47 <= 1)
m.c101 = Constraint(expr= m.b7 - m.b11 + m.b48 <= 1)
m.c102 = Constraint(expr= m.b7 - m.b13 + m.b49 <= 1)
m.c103 = Constraint(expr= m.b7 - m.b15 + m.b50 <= 1)
m.c104 = Constraint(expr= m.b7 - m.b17 + m.b51 <= 1)
m.c105 = Constraint(expr= m.b7 - m.b19 + m.b52 <= 1)
m.c106 = Constraint(expr= m.b7 - m.b21 + m.b53 <= 1)
m.c107 = Constraint(expr= m.b7 - m.b23 + m.b54 <= 1)
m.c108 = Constraint(expr= m.b7 - m.b25 + m.b55 <= 1)
m.c109 = Constraint(expr= m.b9 - m.b11 + m.b56 <= 1)
m.c110 = Constraint(expr= m.b9 - m.b13 + m.b57 <= 1)
m.c111 = Constraint(expr= m.b9 - m.b15 + m.b58 <= 1)
m.c112 = Constraint(expr= m.b9 - m.b17 + m.b59 <= 1)
m.c113 = Constraint(expr= m.b9 - m.b19 + m.b60 <= 1)
m.c114 = Constraint(expr= m.b9 - m.b21 + m.b61 <= 1)
m.c115 = Constraint(expr= m.b9 - m.b23 + m.b62 <= 1)
m.c116 = Constraint(expr= m.b9 - m.b25 + m.b63 <= 1)
m.c117 = Constraint(expr= m.b11 - m.b13 + m.b64 <= 1)
m.c118 = Constraint(expr= m.b11 - m.b15 + m.b65 <= 1)
m.c119 = Constraint(expr= m.b11 - m.b17 + m.b66 <= 1)
m.c120 = Constraint(expr= m.b11 - m.b19 + m.b67 <= 1)
m.c121 = Constraint(expr= m.b11 - m.b21 + m.b68 <= 1)
m.c122 = Constraint(expr= m.b11 - m.b23 + m.b69 <= 1)
m.c123 = Constraint(expr= m.b11 - m.b25 + m.b70 <= 1)
m.c124 = Constraint(expr= m.b13 - m.b15 + m.b71 <= 1)
m.c125 = Constraint(expr= m.b13 - m.b17 + m.b72 <= 1)
m.c126 = Constraint(expr= m.b13 - m.b19 + m.b73 <= 1)
m.c127 = Constraint(expr= m.b13 - m.b21 + m.b74 <= 1)
m.c128 = Constraint(expr= m.b13 - m.b23 + m.b75 <= 1)
m.c129 = Constraint(expr= m.b13 - m.b25 + m.b76 <= 1)
m.c130 = Constraint(expr= m.b15 - m.b17 + m.b77 <= 1)
m.c131 = Constraint(expr= m.b15 - m.b19 + m.b78 <= 1)
m.c132 = Constraint(expr= m.b15 - m.b21 + m.b79 <= 1)
m.c133 = Constraint(expr= m.b15 - m.b23 + m.b80 <= 1)
m.c134 = Constraint(expr= m.b15 - m.b25 + m.b81 <= 1)
m.c135 = Constraint(expr= m.b17 - m.b19 + m.b82 <= 1)
m.c136 = Constraint(expr= m.b17 - m.b21 + m.b83 <= 1)
m.c137 = Constraint(expr= m.b17 - m.b23 + m.b84 <= 1)
m.c138 = Constraint(expr= m.b17 - m.b25 + m.b85 <= 1)
m.c139 = Constraint(expr= m.b19 - m.b21 + m.b86 <= 1)
m.c140 = Constraint(expr= m.b19 - m.b23 + m.b87 <= 1)
m.c141 = Constraint(expr= m.b19 - m.b25 + m.b88 <= 1)
m.c142 = Constraint(expr= m.b21 - m.b23 + m.b89 <= 1)
m.c143 = Constraint(expr= m.b21 - m.b25 + m.b90 <= 1)
m.c144 = Constraint(expr= m.b23 - m.b25 + m.b91 <= 1)
m.c145 = Constraint(expr= m.b26 - m.b27 + m.b37 <= 1)
m.c146 = Constraint(expr= m.b26 - m.b28 + m.b38 <= 1)
m.c147 = Constraint(expr= m.b26 - m.b29 + m.b39 <= 1)
m.c148 = Constraint(expr= m.b26 - m.b30 + m.b40 <= 1)
m.c149 = Constraint(expr= m.b26 - m.b31 + m.b41 <= 1)
m.c150 = Constraint(expr= m.b26 - m.b32 + m.b42 <= 1)
m.c151 = Constraint(expr= m.b26 - m.b33 + m.b43 <= 1)
m.c152 = Constraint(expr= m.b26 - m.b34 + m.b44 <= 1)
m.c153 = Constraint(expr= m.b26 - m.b35 + m.b45 <= 1)
m.c154 = Constraint(expr= m.b26 - m.b36 + m.b46 <= 1)
m.c155 = Constraint(expr= m.b27 - m.b28 + m.b47 <= 1)
m.c156 = Constraint(expr= m.b27 - m.b29 + m.b48 <= 1)
m.c157 = Constraint(expr= m.b27 - m.b30 + m.b49 <= 1)
m.c158 = Constraint(expr= m.b27 - m.b31 + m.b50 <= 1)
m.c159 = Constraint(expr= m.b27 - m.b32 + m.b51 <= 1)
m.c160 = Constraint(expr= m.b27 - m.b33 + m.b52 <= 1)
m.c161 = Constraint(expr= m.b27 - m.b34 + m.b53 <= 1)
m.c162 = Constraint(expr= m.b27 - m.b35 + m.b54 <= 1)
m.c163 = Constraint(expr= m.b27 - m.b36 + m.b55 <= 1)
m.c164 = Constraint(expr= m.b28 - m.b29 + m.b56 <= 1)
m.c165 = Constraint(expr= m.b28 - m.b30 + m.b57 <= 1)
m.c166 = Constraint(expr= m.b28 - m.b31 + m.b58 <= 1)
m.c167 = Constraint(expr= m.b28 - m.b32 + m.b59 <= 1)
m.c168 = Constraint(expr= m.b28 - m.b33 + m.b60 <= 1)
m.c169 = Constraint(expr= m.b28 - m.b34 + m.b61 <= 1)
m.c170 = Constraint(expr= m.b28 - m.b35 + m.b62 <= 1)
m.c171 = Constraint(expr= m.b28 - m.b36 + m.b63 <= 1)
m.c172 = Constraint(expr= m.b29 - m.b30 + m.b64 <= 1)
m.c173 = Constraint(expr= m.b29 - m.b31 + m.b65 <= 1)
m.c174 = Constraint(expr= m.b29 - m.b32 + m.b66 <= 1)
m.c175 = Constraint(expr= m.b29 - m.b33 + m.b67 <= 1)
m.c176 = Constraint(expr= m.b29 - m.b34 + m.b68 <= 1)
m.c177 = Constraint(expr= m.b29 - m.b35 + m.b69 <= 1)
m.c178 = Constraint(expr= m.b29 - m.b36 + m.b70 <= 1)
m.c179 = Constraint(expr= m.b30 - m.b31 + m.b71 <= 1)
m.c180 = Constraint(expr= m.b30 - m.b32 + m.b72 <= 1)
m.c181 = Constraint(expr= m.b30 - m.b33 + m.b73 <= 1)
m.c182 = Constraint(expr= m.b30 - m.b34 + m.b74 <= 1)
m.c183 = Constraint(expr= m.b30 - m.b35 + m.b75 <= 1)
m.c184 = Constraint(expr= m.b30 - m.b36 + m.b76 <= 1)
m.c185 = Constraint(expr= m.b31 - m.b32 + m.b77 <= 1)
m.c186 = Constraint(expr= m.b31 - m.b33 + m.b78 <= 1)
m.c187 = Constraint(expr= m.b31 - m.b34 + m.b79 <= 1)
m.c188 = Constraint(expr= m.b31 - m.b35 + m.b80 <= 1)
m.c189 = Constraint(expr= m.b31 - m.b36 + m.b81 <= 1)
m.c190 = Constraint(expr= m.b32 - m.b33 + m.b82 <= 1)
m.c191 = Constraint(expr= m.b32 - m.b34 + m.b83 <= 1)
m.c192 = Constraint(expr= m.b32 - m.b35 + m.b84 <= 1)
m.c193 = Constraint(expr= m.b32 - m.b36 + m.b85 <= 1)
m.c194 = Constraint(expr= m.b33 - m.b34 + m.b86 <= 1)
m.c195 = Constraint(expr= m.b33 - m.b35 + m.b87 <= 1)
m.c196 = Constraint(expr= m.b33 - m.b36 + m.b88 <= 1)
m.c197 = Constraint(expr= m.b34 - m.b35 + m.b89 <= 1)
m.c198 = Constraint(expr= m.b34 - m.b36 + m.b90 <= 1)
m.c199 = Constraint(expr= m.b35 - m.b36 + m.b91 <= 1)
m.c200 = Constraint(expr= m.b37 - m.b38 + m.b47 <= 1)
m.c201 = Constraint(expr= m.b37 - m.b39 + m.b48 <= 1)
m.c202 = Constraint(expr= m.b37 - m.b40 + m.b49 <= 1)
m.c203 = Constraint(expr= m.b37 - m.b41 + m.b50 <= 1)
m.c204 = Constraint(expr= m.b37 - m.b42 + m.b51 <= 1)
m.c205 = Constraint(expr= m.b37 - m.b43 + m.b52 <= 1)
m.c206 = Constraint(expr= m.b37 - m.b44 + m.b53 <= 1)
m.c207 = Constraint(expr= m.b37 - m.b45 + m.b54 <= 1)
m.c208 = Constraint(expr= m.b37 - m.b46 + m.b55 <= 1)
m.c209 = Constraint(expr= m.b38 - m.b39 + m.b56 <= 1)
m.c210 = Constraint(expr= m.b38 - m.b40 + m.b57 <= 1)
m.c211 = Constraint(expr= m.b38 - m.b41 + m.b58 <= 1)
m.c212 = Constraint(expr= m.b38 - m.b42 + m.b59 <= 1)
m.c213 = Constraint(expr= m.b38 - m.b43 + m.b60 <= 1)
m.c214 = Constraint(expr= m.b38 - m.b44 + m.b61 <= 1)
m.c215 = Constraint(expr= m.b38 - m.b45 + m.b62 <= 1)
m.c216 = Constraint(expr= m.b38 - m.b46 + m.b63 <= 1)
m.c217 = Constraint(expr= m.b39 - m.b40 + m.b64 <= 1)
m.c218 = Constraint(expr= m.b39 - m.b41 + m.b65 <= 1)
m.c219 = Constraint(expr= m.b39 - m.b42 + m.b66 <= 1)
m.c220 = Constraint(expr= m.b39 - m.b43 + m.b67 <= 1)
m.c221 = Constraint(expr= m.b39 - m.b44 + m.b68 <= 1)
m.c222 = Constraint(expr= m.b39 - m.b45 + m.b69 <= 1)
m.c223 = Constraint(expr= m.b39 - m.b46 + m.b70 <= 1)
m.c224 = Constraint(expr= m.b40 - m.b41 + m.b71 <= 1)
m.c225 = Constraint(expr= m.b40 - m.b42 + m.b72 <= 1)
m.c226 = Constraint(expr= m.b40 - m.b43 + m.b73 <= 1)
m.c227 = Constraint(expr= m.b40 - m.b44 + m.b74 <= 1)
m.c228 = Constraint(expr= m.b40 - m.b45 + m.b75 <= 1)
m.c229 = Constraint(expr= m.b40 - m.b46 + m.b76 <= 1)
m.c230 = Constraint(expr= m.b41 - m.b42 + m.b77 <= 1)
m.c231 = Constraint(expr= m.b41 - m.b43 + m.b78 <= 1)
m.c232 = Constraint(expr= m.b41 - m.b44 + m.b79 <= 1)
m.c233 = Constraint(expr= m.b41 - m.b45 + m.b80 <= 1)
m.c234 = Constraint(expr= m.b41 - m.b46 + m.b81 <= 1)
m.c235 = Constraint(expr= m.b42 - m.b43 + m.b82 <= 1)
# microstrategy_api/task_proc/task_proc.py
import re
import urllib.parse
import warnings
from enum import Enum
import time
from fnmatch import fnmatch
from typing import Optional, List, Set, Union
import requests
import logging
from bs4 import BeautifulSoup
from microstrategy_api.task_proc.document import Document
from microstrategy_api.task_proc.privilege_types import PrivilegeTypes, PrivilegeTypesIDDict
from microstrategy_api.task_proc.report import Report
from microstrategy_api.task_proc.attribute import Attribute
from microstrategy_api.task_proc.bit_set import BitSet
from microstrategy_api.task_proc.exceptions import MstrClientException
from microstrategy_api.task_proc.executable_base import ExecutableBase
from microstrategy_api.task_proc.object_type import ObjectType, ObjectTypeIDDict, ObjectSubTypeIDDict, ObjectSubType
BASE_PARAMS = {'taskEnv': 'xml', 'taskContentType': 'xml'}
class TaskProc(object):
"""
Class encapsulating base logic for the MicroStrategy Task Proc API
"""
def __init__(self,
base_url,
username=None,
                 password=None,
server=None,
project_source=None, # deprecated
project_name=None,
session_state=None,
concurrent_max=5,
max_retries=3,
retry_delay=2,
):
"""
Initialize the MstrClient by logging in and retrieving a session.
Arguments
----------
base_url (str):
base url of form http://hostname/MicroStrategy/asp/TaskProc.aspx
username (str):
username for project
password (str):
            password for project
server (str):
The machine name (or IP) of the MicroStrategy Intelligence Server to connect to.
project_name (str):
The name of the MicroStrategy project to connect to.
"""
self.log = logging.getLogger("{mod}.{cls}".format(mod=self.__class__.__module__, cls=self.__class__.__name__))
if 'TaskProc' in base_url:
if base_url[-1] != '?':
base_url += '?'
self._base_url = base_url
self.cookies = None
self.trace = False
self.retry_delay = retry_delay
self.max_retries = max_retries
self.concurrent_max = concurrent_max
self.server = server
self.project_name = project_name
self.username = username
self.password = password
self.__messages_to_retry_list = None
if session_state is None:
if project_source is not None:
warnings.warn('project_source parameter is deprecated, use server parameter instead')
if self.server is None:
self.server = project_source
else:
warnings.warn('both project_source deprecated param and server parameter provided!'
' server parameter value used')
else:
if self.server is None:
raise ValueError('Neither server nor project_source (deprecated) parameter provided!')
if self.username is not None:
self.login()
else:
self.login_guest()
else:
self._session = session_state
def __str__(self):
return 'MstrClient session: {}'.format(self._session)
@property
def _messages_to_retry(self):
if self.__messages_to_retry_list is None:
regex_list = \
[
'There are too many auditor handles at the moment. Please try again later.',
'There is possible deadlock. Please try to run the report later.',
'Failed to create job.',
'.* Number of jobs has exceeded maximum for project .*',
'Maximum number of executing jobs exceeded .*',
]
self.__messages_to_retry_list = [re.compile(pattern) for pattern in regex_list]
return self.__messages_to_retry_list
@property
def base_url(self):
return self._base_url
def login(self,
server: str=None,
project_name: str=None,
username: str=None,
password: str=None,
):
"""
Login to taskproc API
Arguments
----------
server (str):
The machine name (or IP) of the MicroStrategy Intelligence Server to connect to.
project_name (str):
The name of the MicroStrategy project to connect to.
username (str):
username for project
password (str):
password for project
"""
if server:
self.server = server
if project_name:
self.project_name = project_name
if username:
self.username = username
if password:
self.password = password
# getSessionState is used instead of login because we can set the rws parameter that way.
# arguments = {
# 'taskId': 'login',
# 'server': self.server,
# 'project': self.project_name,
# 'userid': self.username,
# 'password': self.password
# }
arguments = {
'taskId': 'getSessionState',
'server': self.server,
'project': self.project_name,
'uid': self.username,
'pwd': self.password,
'rws': self.concurrent_max,
}
self.log.debug("logging in.")
response = self.request(arguments)
if self.trace:
self.log.debug("logging in returned %s" % response)
# self._session_state = response.find('sessionState')
self._session = response.find('max-state').string
def login_guest(self,
server: str=None,
project_name: str=None,
):
"""
Login to the taskproc API as a guest user
Arguments
----------
server (str):
The machine name (or IP) of the MicroStrategy Intelligence Server to connect to.
project_name (str):
The name of the MicroStrategy project to connect to.
"""
if server:
self.server = server
if project_name:
self.project_name = project_name
arguments = {
'taskId': 'getSessionState',
'server': self.server,
'project': self.project_name,
'authMode': 8,
'rws': self.concurrent_max,
}
self.log.debug("logging in as guest")
response = self.request(arguments)
if self.trace:
self.log.debug("logging in returned %s" % response)
# self._session_state = response.find('sessionState')
self._session = response.find('max-state').string
@property
def session(self):
return self._session
class SystemFolders(Enum): # EnumDSSXMLFolderNames
"""
This interface defines the enumeration constants used to specify the folder names internally defined in MicroStrategy 7.
"""
PublicObjects = 1 # DssXmlFolderNamePublicObjects Specifies the folder "Public Objects".
PublicConsolidations = 2 # DssXmlFolderNamePublicConsolidations Specifies the folder "Consolidations" under the folder "Public Objects".
PublicCustomGroups = 3 # DssXmlFolderNamePublicCustomGroups Specifies the folder "Custom Groups" under the folder "Public Objects".
PublicFilters = 4 # DssXmlFolderNamePublicFilters Specifies the folder "Filters" under the folder "Public Objects".
PublicMetrics = 5 # DssXmlFolderNamePublicMetrics Specifies the folder "Metrics" under the folder "Public Objects".
PublicPrompts = 6 # DssXmlFolderNamePublicPrompts Specifies the folder "Prompts" under the folder "Public Objects".
PublicReports = 7 # DssXmlFolderNamePublicReports Specifies the folder "Reports" under the folder "Public Objects".
PublicSearches = 8 # DssXmlFolderNamePublicSearches Specifies the folder "Searches" under the folder "Public Objects".
PublicTemplates = 9 # DssXmlFolderNamePublicTemplates Specifies the folder "Templates" under the folder "Public Objects".
TemplateObjects = 10 # DssXmlFolderNameTemplateObjects Specifies the folder "Template Objects".
TemplateConsolidations = 11 # DssXmlFolderNameTemplateConsolidations Specifies the folder "Consolidations" under the folder "Template Objects".
TemplateCustomGroups = 12 # DssXmlFolderNameTemplateCustomGroups Specifies the folder "Custom Groups" under the folder "Template Objects".
TemplateFilters = 13 # DssXmlFolderNameTemplateFilters Specifies the folder "Filters" under the folder "Template Objects".
TemplateMetrics = 14 # DssXmlFolderNameTemplateMetrics Specifies the folder "Metrics" under the folder "Template Objects".
TemplatePrompts = 15 # DssXmlFolderNameTemplatePrompts Specifies the folder "Prompts" under the folder "Template Objects".
TemplateReports = 16 # DssXmlFolderNameTemplateReports Specifies the folder "Reports" under the folder "Template Objects".
TemplateSearches = 17 # DssXmlFolderNameTemplateSearches Specifies the folder "Searches" under the folder "Template Objects".
TemplateTemplates = 18 # DssXmlFolderNameTemplateTemplates Specifies the folder "Templates" under the folder "Template Objects".
ProfileObjects = 19 # DssXmlFolderNameProfileObjects Specifies the folder "Profile" of a user.
ProfileReports = 20 # DssXmlFolderNameProfileReports Specifies the folder "Reports" under the folder "Profile" of a user.
ProfileAnswers = 21 # DssXmlFolderNameProfileAnswers Specifies the folder "Answers" under the folder "Profile" of a user.
ProfileFavorites = 22 # DssXmlFolderNameProfileFavorites Specifies the folder "Favorites" under the folder "Profile" of a user.
ProfileOther = 23 # DssXmlFolderNameProfileOther Specifies the folder "Other" under the folder "Profile" of a user.
SchemaObjects = 24 # DssXmlFolderNameSchemaObjects Specifies the folder "Schema Objects".
SchemaAttributeForms = 25 # DssXmlFolderNameSchemaAttributeForms Specifies the folder "Attribute Forms" under the folder "Schema Objects".
SchemaAttributes = 26 # DssXmlFolderNameSchemaAttributes Specifies the folder "Attributes" under the folder "Schema Objects".
SchemaColumns = 27 # DssXmlFolderNameSchemaColumns Specifies the folder "Columns" under the folder "Schema Objects".
SchemaDataExplorer = 28 # DssXmlFolderNameSchemaDataExplorer Specifies the folder "Data Explorer" under the folder "Schema Objects".
SchemaFacts = 29 # DssXmlFolderNameSchemaFacts Specifies the folder "Facts" under the folder "Schema Objects".
SchemaFunctions = 30 # DssXmlFolderNameSchemaFunctions Specifies the folder "Functions" under the folder "Schema Objects".
SchemaHierarchies = 31 # DssXmlFolderNameSchemaHierarchies Specifies the folder "Hierarchies" under the folder "Schema Objects".
SchemaPartitionFilters = 32 # DssXmlFolderNameSchemaPartitionFilters Specifies the folder "Partition Filters" under the folder "Schema Objects".
SchemaPartitionMappings = 33 # DssXmlFolderNameSchemaPartitionMappings Specifies the folder "Partition Mappings" under the folder "Schema Objects".
SchemaSubtotals = 34 # DssXmlFolderNameSchemaSubtotals Specifies the folder "Subtotals" under the folder "Schema Objects".
SchemaTables = 35 # DssXmlFolderNameSchemaTables Specifies the folder "Tables" under the folder "Schema Objects".
SchemaWarehouseTables = 36 # DssXmlFolderNameSchemaWarehouseTables Specifies the folder "Warehouse Tables" under the folder "Schema Objects".
SchemaTransformationAttributes = 37 # DssXmlFolderNameSchemaTransformationAttributes Specifies the folder "Transformation Attributes" under the folder "Schema Objects".
SchemaTransformations = 38 # DssXmlFolderNameSchemaTransformations Specifies the folder "Transformations" under the folder "Schema Objects".
Root = 39 # DssXmlFolderNameRoot Specifies the root folder of the project.
SchemaFunctionsNested = 40 # DssXmlFolderNameSchemaFunctionsNested Specifies the "Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaBasicFunctions = 41 # DssXmlFolderNameSchemaBasicFunctions Specifies the "Basic Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaDateAndTimeFunctions = 42 # DssXmlFolderNameSchemaDateAndTimeFunctions Specifies the "Date and Time Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaInternalFunctions = 43 # DssXmlFolderNameSchemaInternalFunctions Specifies the "Internal Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaNullZeroFunctions = 44 # DssXmlFolderNameSchemaNullZeroFunctions Specifies the "Null/Zero Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaOlapFunctions = 45 # DssXmlFolderNameSchemaOlapFunctions Specifies the "OLAP Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaRankAndNTileFunctions = 46 # DssXmlFolderNameSchemaRankAndNTileFunctions Specifies the "Rank and NTile Functions" folder nested several levels deep in the "Schema Objects" folder.
SchemaStringFunctions = 47 # DssXmlFolderNameSchemaStringFunctions Specifies the "String Functions" folder
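# Minimal usage sketch for the client defined above, assuming a reachable TaskProc
# endpoint; the URL, server, project and credentials are placeholders, not values
# from this module.
if __name__ == '__main__':
    client = TaskProc(
        base_url='http://hostname/MicroStrategy/asp/TaskProc.aspx',
        server='INTELLIGENCE_SERVER',
        project_name='ExampleProject',
        username='example_user',
        password='example_password',
    )
    # session token obtained via the getSessionState task during login
    print(client.session)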
<filename>DNN/Intention/NeuralNet_BinaryClass-Intention.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 15 21:56:08 2020
@author: <NAME>
"""
# STEP1----------------- # Importing the libraries------------
#-------------------------------------------------------------
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler # for preprocessing the data
from sklearn.ensemble import RandomForestClassifier # Random forest classifier
from sklearn.tree import DecisionTreeClassifier # for Decision Tree classifier
from sklearn.svm import SVC # for SVM classification
from sklearn.decomposition import PCA
from sklearn.preprocessing import OneHotEncoder, LabelEncoder # # Encoding categorical variables
from sklearn.compose import ColumnTransformer, make_column_transformer #labelencoder class takes cat. var. and assign value to them
from sklearn.pipeline import Pipeline
from sklearn.utils import resample
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split # to split the data
from sklearn.model_selection import KFold # For cross validation
from sklearn.model_selection import GridSearchCV # for tuning hyperparameters; tries every combination of the given parameters
from sklearn.model_selection import RandomizedSearchCV # also for tuning hyperparameters, but samples random combinations of parameters
from sklearn.metrics import confusion_matrix,recall_score,precision_recall_curve,auc,roc_curve,roc_auc_score,classification_report
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.ensemble import AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.metrics import f1_score
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.utils.validation import check_random_state
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import GradientBoostingClassifier
from rgf.sklearn import RGFClassifier
from sklearn.metrics import accuracy_score # for evaluating classification accuracy
# STEP2------------------# Importing the DATASET ------------
#------------------------------------------------------------
# Load the data exported from iMotions: set the working directory to the raw CSV files
os.chdir(r"C:\ML4TakeOver\Data\RawData")
directory = os.getcwd()
#dataFrame_takeover_feature = pd.read_csv('takeover_cleaned_feature4ML.csv', index_col=[0])
dataFrame_takeover_feature = pd.read_csv('takeover4ML.csv', index_col=[0])
dataset = dataFrame_takeover_feature
chunk_users = ['015_M3', '015_m2', '015_M1', '014_M3', #Select a handful of ppl for saving resource
'014_M2', '014_m1']
chunk_dataset = dataset[dataset['Name'].isin(chunk_users)]
dataset = chunk_dataset
dataset.shape
###### ======================================Encoding notes=======================================
# Alarm Type: TA =2, NoA =1, FA = 0 , Z = 3
# TakeOver : TK =1 , NTK= 0
# Alarm : 339.0 =339.0, 103.0= 4, 332.0=14, 259.0=11, 16.0=2, 178.0=6, 284.0=12,
# 213.0=9, 323.0=13, 185.0=7, 84.0=3, 137.0=5, 5.0=1, 191.0=8, 254.0=10
# Mode : +1 (Auto)= +1, -1(Manual)= 0
## STEP3========================= Exploring the data, mainly the Label (Takeover) ====================
## ===================================================================================================
# let's check the "Takeover" distributions
sns.countplot("TOT_Class",data=dataset)
# Let's check the Percentage for "ReactionTime"
Count_FastRT = len(dataset[dataset["TOT_Class"]== 0 ]) # Faster: <4000
Count_LowRT = len(dataset[dataset["TOT_Class"]== 1 ]) # Slower: >4000
Percentage_of_FastRT = Count_FastRT/(Count_FastRT+Count_LowRT)
print("Percentage_of_FastRT, 0 = ",Percentage_of_FastRT*100)
Percentage_of_SlowRT= Count_LowRT/(Count_FastRT+Count_LowRT)
print("Percentage_of_SlowRT, 1 = ",Percentage_of_SlowRT*100)
# the amount related to valid "TakeOver" and "None-Takeover"
Amount_SlowRT = dataset[dataset["TOT_Class"]== 1] #Slower
Amount_FastRT = dataset[dataset["TOT_Class"]== 0] #Faster
plt.figure(figsize=(10,6))
plt.subplot(121)
Amount_SlowRT.plot.hist(title="SlowReaction", legend =None)
plt.subplot(122)
Amount_FastRT.plot.hist(title="FastReaction",legend =None)
# Pandas offers three correlation coefficients out of the box: 1) Pearson's 2) Spearman rank 3) Kendall Tau
pearson = dataset.corr(method='pearson')
# assume the target attribute ('Takeover') corresponds to the last row of the matrix; take its correlations with the other attributes
corr_with_target = pearson.iloc[-1][:]
# attributes sorted from the most predictive
predictivity = corr_with_target.sort_values(ascending=False)
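# A short illustration of the two rank-based alternatives mentioned above; they are
# drop-in replacements for method='pearson' and are more robust to outliers and
# monotone non-linear relationships (sketch only; not used later in the script).
spearman = dataset.corr(method='spearman')
kendall = dataset.corr(method='kendall')
print(spearman.iloc[-1][:].sort_values(ascending=False).head())
print(kendall.iloc[-1][:].sort_values(ascending=False).head())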
## STEP4=========================-# Prepration for Machine Learning algorithms=========================
## ====================================================================================================
# Drop useless features for ML
dataset = dataset.drop(['Timestamp','index','ID', 'Name', 'EventSource', 'ManualGear','EventW','EventN','GazeDirectionLeftY','Alarm',
'GazeDirectionLeftX', 'GazeDirectionRightX', 'GazeDirectionRightY','CurrentBrake',
'PassBy','RangeN'], axis=1) #ManualGear has only "one" value
#EventW is pretty similar to EventN
dataset.shape
#---------------------------------------------------------
# convert the categorical columns to the 'object' dtype
# so they are treated as categorical rather than numeric
dataset['LeftLaneType'] = dataset.LeftLaneType.astype(object)
dataset['RightLaneType'] = dataset.RightLaneType.astype(object)
dataset['TOT_Class'] = dataset.TOT_Class.astype(object)
dataset['Coming_Alarm'] = dataset.Coming_Alarm.astype(object)
dataset['Takeover'] = dataset.Takeover.astype(object)
dataset['Coming_AlarmType'] = dataset.Coming_AlarmType.astype(object)
dataset['NDTask'] = dataset.NDTask.astype(object)
#****** Drop features that happen after the Alarm (anything occurring after the alarm would leak into takeover prediction) ****************
dataset = dataset.drop(['Mode','AlarmDuration','Coming_Alarm'], axis=1) # Coming Alarm maybe helpful for ReactionTime
# Check the reaction time values in each category of Alarm
print('FalseAlarm ReactionTime:', dataset[dataset['Coming_AlarmType']== 'FA'].ReactionTime.mean()) # 2007.2
print('TrueAlarm ReactionTime:', dataset[dataset['Coming_AlarmType']== 'TA'].ReactionTime.mean()) # 4712.5
print('NoAlarm ReactionTime:', dataset[dataset['Coming_AlarmType']== 'NoA'].ReactionTime.mean()) # 5003.5
# How many times they takeover in each alarm
len(dataset[dataset['Coming_AlarmType']== 'FA'][dataset['Takeover']=='TK'].ReactionTime.unique()) #92
len(dataset[dataset['Coming_AlarmType']== 'FA'][dataset['Takeover']=='NTK'].ReactionTime.unique())
len(dataset[dataset['Coming_AlarmType']== 'TA'][dataset['Takeover']=='TK'].ReactionTime.unique()) #355
len(dataset[dataset['Coming_AlarmType']== 'TA'][dataset['Takeover']=='NTK'].ReactionTime.unique())
len(dataset[dataset['Coming_AlarmType']== 'NoA'][dataset['Takeover']=='TK'].ReactionTime.unique()) #81
len(dataset[dataset['Coming_AlarmType']== 'NoA'][dataset['Takeover']=='NTK'].ReactionTime.unique())
dataFrame_takeover_feature[dataFrame_takeover_feature['Coming_AlarmType']== 'NoA'][
dataFrame_takeover_feature['Takeover']=='NTK'].Name.value_counts()
# Drop Reaction Time feature
dataset = dataset.drop(['ReactionTime','Coming_AlarmType'], axis=1)
# ------------------------------------------------------.
# takeover (NT, TK) is our target
input_data = dataset.iloc[:, dataset.columns != 'Takeover']
X = input_data
y = dataset[['Takeover']].values.ravel()
# ======================================= Encoding Categorical variables =========================
# # Encoding categorical variables
from sklearn.preprocessing import StandardScaler,LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer, make_column_transformer #labelencoder class takes cat. var. and assign value to them
# List of all Categorical features
Cat_Features= ['LeftLaneType','RightLaneType','NDTask']
# Get the column index of the categorical features
categorical_features = []
for i in Cat_Features:
position = dataset.columns.get_loc(i)
categorical_features.append(position)
print(categorical_features)
# Get the column index of the Contin. features
conti_features = []
Cont_Filter = dataset.dtypes!=object
Cont_Filter = dataset.columns.where(Cont_Filter).tolist()
Cont_Filter_Cleaned = [name for name in Cont_Filter if str(name) !='nan']
for i in Cont_Filter_Cleaned:
position = dataset.columns.get_loc(i)
conti_features.append(position)
print(conti_features)
# How many columns will be needed for each categorical feature?
print(dataset[Cat_Features].nunique(),
'There are',"--",sum(dataset[Cat_Features].nunique().loc[:]),"--",'groups in the whole dataset')
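# Sanity-check sketch for the count above: one-hot encoding produces one output
# column per category level, so the encoded width should equal that total
# (illustrative only; the real encoding is done in the pipeline below).
_ohe = OneHotEncoder(sparse=False)
_encoded = _ohe.fit_transform(dataset[Cat_Features])
print(_encoded.shape[1], 'one-hot columns vs',
      sum(dataset[Cat_Features].nunique()), 'category levels in total')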
# ===============================Create pipeline for data transformatin (normalize numeric, and hot encoder categorical)
# =============================================================================
from sklearn.pipeline import make_pipeline
numeric = make_pipeline(
StandardScaler())
categorical = make_pipeline(
# handles categorical features
# sparse = False output an array not sparse matrix
OneHotEncoder(sparse=False))  # note: all category levels are kept; pass drop='first' to actually avoid the dummy variable trap
# creates a simple preprocessing pipeline (that will be combined in a full prediction pipeline below)
# to scale the numerical features and one-hot encode the categorical features.
preprocess = make_column_transformer((numeric, Cont_Filter_Cleaned),
(categorical, ['LeftLaneType', 'RightLaneType', 'NDTask']),  # 'Coming_AlarmType' was dropped above
remainder='passthrough')
# =============================================================================
# Taking care of splitting
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
# =============================================================================
# SVM with an RBF kernel is usually tuned via two parameters: gamma and C.
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}] # C: the Cost parameter, Gamma: Control Bias and variance
# A large gamma makes the RBF kernel act more locally: lower bias but higher variance (overfitting risk); a small gamma does the opposite.
# A large C penalises misclassification more heavily, which also trades lower bias for higher variance; a small C regularises more strongly.
tuned_parameters2 = [{'kernel': ['linear'], 'C': [1, 100]}]
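# A standalone illustration of the bias/variance trade-off described above,
# using the synthetic make_classification data imported earlier (a sketch only;
# it does not touch the takeover dataset).
from sklearn.model_selection import validation_curve
X_toy, y_toy = make_classification(n_samples=500, n_features=10, random_state=0)
train_scores, cv_scores = validation_curve(
    SVC(kernel='rbf', gamma='scale'), X_toy, y_toy,
    param_name='C', param_range=[0.01, 0.1, 1, 10, 100], cv=5)
# a widening gap between training and CV accuracy as C grows signals overfitting (high variance)
print(train_scores.mean(axis=1) - cv_scores.mean(axis=1))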
model = make_pipeline(
preprocess,
SVC())
##### Try Simple Version ##############
from sklearn import svm
clf = svm.SVC()
# split first; X_train / y_train do not exist until this point
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=27)
X_train = preprocess.fit_transform(X_train)
grid_result = clf.fit(X_train, y_train)
X_test = preprocess.transform(X_test)  # reuse the transformer fitted on the training data
clf.predict(X_test)
## we should try this: https://machinelearningmastery.com/multi-class-classification-tutorial-keras-deep-learning-library/
##############
############################
##########################################
########################################################
######################################################################
# GridSearchCV over the SVC parameter space, using the default 5-fold cross-validation.
scores = ['precision', 'recall']
best_params = []
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(
SVC(), tuned_parameters2, scoring='%s_macro' % score
)
# X_train was already transformed by `preprocess` above, so it is used directly here
grid_result = clf.fit(X_train, y_train)
best_params.append(grid_result.best_params_)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
# X_test was already transformed by `preprocess` above
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# =============================================================================
# ================= Resampling the imbalanced Label of "TakeOver" ========================================
#==========================================================================================================
# We create the preprocessing pipelines for both numeric and categorical data.
from sklearn.pipeline import Pipeline
from sklearn.utils import resample
numeric_features = Cont_Filter_Cleaned
numeric_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_features = ['LeftLaneType', 'RightLaneType', 'NDTask']  # 'Coming_AlarmType' was dropped earlier
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)])
# Append classifier to preprocessing pipeline.
# Separate input features and target
y = dataset.Takeover
X = dataset.drop('Takeover', axis=1)
# setting up testing and training sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=27)
# concatenate our training data back together
X = pd.concat([X_train, y_train], axis=1)
# separate minority and majority classes
take_over = X[X.Takeover=='TK']
not_takeover = X[X.Takeover=='NTK']
# upsample minority
not_takeover_upsampled = resample(not_takeover,
replace=True, # sample with replacement
n_samples=len(take_over), # match number in majority class
random_state=27) # reproducible results
# combine majority and upsampled minority
upsampled = pd.concat([take_over, not_takeover_upsampled])
# check new class counts
upsampled.Takeover.value_counts() #713585
# trying logistic regression again with the balanced dataset
y_train = upsampled.Takeover
X_train = upsampled.drop('Takeover', axis=1)
##### LOGISTIC REGRESSION ###############################
#########################################################
# Now we have a full prediction pipeline.
clf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', LogisticRegression())])
y_score = clf.fit(X_train, y_train)
print("model score: %.3f" % clf.score(X_test, y_test)) # model score: 0.846
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
# # =============================================================================
# example of one hot encoding for a neural network
from pandas import read_csv
import keras
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
import h5py
import pytest
# Check the GPU availability
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
# Assigning values to X, Y
y = dataset.TOT_Class
X = dataset.drop('TOT_Class', axis=1)
# setting up testing and training sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=27)
# concatenate our training data back together
X = pd.concat([X_train, y_train], axis=1)
# separate minority and majority classes
FastRT = X[X.TOT_Class==0]
SlowRT = X[X.TOT_Class==1]
# upsample minority
not_takeover_upsampled = resample(FastRT,
replace=True, # sample with replacement
n_samples=len(SlowRT), # match number in majority class
random_state=27) # reproducible results
# combine majority and upsampled minority
upsampled = pd.concat([SlowRT, not_takeover_upsampled])
# check new class counts
upsampled.TOT_Class.value_counts()
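# The Keras imports above are not otherwise used in this excerpt; the sketch below
# shows the kind of binary classifier they suggest, trained on the upsampled
# TOT_Class data (mirroring the Takeover section). The layer sizes, epochs and
# batch size are assumptions, not values taken from the original script.
from scipy import sparse as sp
y_train_bal = upsampled.TOT_Class.astype('float32')
X_train_bal = upsampled.drop('TOT_Class', axis=1)
X_train_nn = preprocessor.fit_transform(X_train_bal)
X_test_nn = preprocessor.transform(X_test)
if sp.issparse(X_train_nn):  # the one-hot step may return a sparse matrix
    X_train_nn = X_train_nn.toarray()
    X_test_nn = X_test_nn.toarray()
nn_model = Sequential([
    Dense(64, activation='relu', input_shape=(X_train_nn.shape[1],)),
    Dense(32, activation='relu'),
    Dense(1, activation='sigmoid'),
])
nn_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
early_stop = EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True)
nn_model.fit(X_train_nn, y_train_bal, validation_split=0.2, epochs=20,
             batch_size=256, callbacks=[early_stop], verbose=0)
print(nn_model.evaluate(X_test_nn, y_test.astype('float32'), verbose=0))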
"""
This test suite contains tests to validate certificate create/edit/delete with
different possible way and with different roles of users.
Test requirement:
Below Env variables need to be set
CATTLE_TEST_URL - url to rancher server
ADMIN_TOKEN - Admin token from rancher
USER_TOKEN - User token from rancher
RANCHER_CLUSTER_NAME - Cluster name to run test on
RANCHER_VALID_TLS_KEY - takes authentic certificate key base64 encoded
RANCHER_VALID_TLS_CERT - takes authentic certificate base64 encoded
RANCHER_BYO_TLS_KEY - takes self signed certificate key base64 encoded
RANCHER_BYO_TLS_CERT - takes self signed certificate base64 encoded
AWS_HOSTED_ZONE_ID - Zone Id in AWS route53 where route53 will be created.
RANCHER_TEST_RBAC - To enable rbac tests
"""
from .common import (ApiError, CLUSTER_MEMBER, CLUSTER_OWNER, create_kubeconfig,
create_ns, create_project_and_ns,
get_cluster_client_for_token, get_project_client_for_token,
get_user_client, get_user_client_and_cluster, if_test_rbac,
PROJECT_OWNER, PROJECT_MEMBER, PROJECT_READ_ONLY,
random_test_name, rbac_get_namespace, rbac_get_project,
rbac_get_user_token_by_role, TEST_IMAGE, USER_TOKEN,
validate_ingress_using_endpoint, validate_workload,
wait_for_ingress_to_active, base64, TEST_IMAGE_PORT)
from lib.aws import AmazonWebServices
from pathlib import Path
import pytest
import os
import time
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None,
"c_client": None, "cert_valid": None, "cert_ssc": None,
"cert_allns_valid": None, "cert_allns_ssc": None, "node_id": None}
route_entry_53_1 = random_test_name('auto-valid') + '.qa.rancher.space'
route_entry_53_2 = random_test_name('auto-ssc') + '.qa.rancher.space'
def get_ssh_key(ssh_key_name):
home = str(Path.home())
path = '{}/.ssh/{}'.format(home, ssh_key_name)
if os.path.exists(path):
with open(path, 'r') as f:
ssh_key = f.read()
return ssh_key
def get_private_key(env_var, key_name):
key = os.environ.get(env_var)
if key is not None:
return base64.b64decode(key).decode("utf-8")
else:
return get_ssh_key(key_name)
rancher_private_key = get_private_key('RANCHER_VALID_TLS_KEY',
'privkey.pem')
rancher_cert = get_private_key('RANCHER_VALID_TLS_CERT', 'fullchain.pem')
rancher_ssc_private_key = get_private_key('RANCHER_BYO_TLS_KEY',
'key.pem')
rancher_ssc_cert = get_private_key('RANCHER_BYO_TLS_CERT', 'cert.pem')
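# Hedged helper sketch: how a base64-encoded value for RANCHER_VALID_TLS_KEY /
# RANCHER_VALID_TLS_CERT could be produced from local PEM files (file names are
# placeholders; when the env variables are unset, the fallback above reads ~/.ssh/<name>).
def encode_pem_for_env(pem_path):
    with open(pem_path, 'rb') as pem_file:
        return base64.b64encode(pem_file.read()).decode('utf-8')
# Example: os.environ['RANCHER_VALID_TLS_CERT'] = encode_pem_for_env('fullchain.pem')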
rbac_role_list = [
CLUSTER_OWNER,
CLUSTER_MEMBER,
PROJECT_OWNER,
PROJECT_MEMBER,
PROJECT_READ_ONLY
]
@pytest.mark.usefixtures("create_project_client")
class TestCertificate:
@pytest.fixture(autouse=True)
def certificate_test_setup(self):
"""
Set-up/teardown fixture that runs before and after each test in the class.
It creates the base workload before the test and, afterwards, deletes the
workloads and ingress objects created during the test.
"""
self.p_client = namespace["p_client"]
self.ns = namespace["ns"]
self.c_client = namespace["c_client"]
self.cluster = namespace["cluster"]
self.project = namespace["project"]
self.certificate_valid = namespace["cert_valid"]
self.certificate_ssc = namespace["cert_ssc"]
self.certificate_all_ns_valid = namespace["cert_allns_valid"]
self.certificate_all_ns_ssc = namespace["cert_allns_ssc"]
self.node_id = namespace["node_id"]
wl_name = random_test_name("workload-test")
wl_con = [{"name": "wk1-test",
"image": TEST_IMAGE}]
scheduling = {"node": {"nodeId": self.node_id}}
self.workload = self.p_client.create_workload(
name=wl_name, containers=wl_con, namespaceId=self.ns.id,
scheduling=scheduling
)
self.ingress = None
self.workload_2 = None
yield
self.p_client.delete(self.workload)
if self.workload_2 is not None:
self.p_client.delete(self.workload_2)
if self.ingress is not None:
self.p_client.delete(self.ingress)
def test_certificate_create_validcert_for_single_ns(self):
"""
Test steps:
1. Validate the workload available in ns-certificate namespace
2. Create an ingress including trusted certificate scoped for current
namespace and route53 host.
3. validate the ingress using endpoint
"""
ingress_name = random_test_name("ingress-test")
host = route_entry_53_1
path = "/name.html"
rule = {"host": host,
"paths": [{"path": path, "workloadIds": [self.workload.id],
"targetPort": TEST_IMAGE_PORT}]}
tls = {"certificateId": self.certificate_valid.id, "hosts": [host]}
validate_workload(self.p_client, self.workload, "deployment",
self.ns.name)
self.ingress = self.p_client.create_ingress(
name=ingress_name, namespaceId=self.ns.id, rules=[rule], tls=[tls]
)
wait_for_ingress_to_active(self.p_client, self.ingress)
validate_ingress_using_endpoint(
self.p_client, self.ingress, [self.workload], certcheck=True)
def test_certificate_create_validcert_for_all_ns(self):
"""
Test steps:
1. Validate the workload available in ns-certificate namespace
2. Create an ingress including trusted certificate scoped for all
namespace and route53 host.
3. validate the ingress using endpoint
"""
ingress_name = random_test_name("ingress-test")
host = route_entry_53_1
path = "/name.html"
rule = {"host": host,
"paths": [{"path": path, "workloadIds": [self.workload.id],
"targetPort": TEST_IMAGE_PORT}]
}
tls = {"certificateId": self.certificate_all_ns_valid.id,
"hosts": [host]
}
validate_workload(self.p_client, self.workload, "deployment",
self.ns.name)
self.ingress = self.p_client.create_ingress(
name=ingress_name, namespaceId=self.ns.id, rules=[rule], tls=[tls]
)
wait_for_ingress_to_active(self.p_client, self.ingress)
validate_ingress_using_endpoint(
self.p_client, self.ingress, [self.workload], certcheck=True)
def test_certificate_create_validcert_for_all_ns_2(self):
"""
Test steps:
1. Create a namespace
2. Create a workload in namespace created above.
3. Validate the workload.
4. Create an ingress including trusted certificate scoped for all
namespace and route53 host.
5. validate the ingress using endpoint
"""
wl_name = random_test_name("workload-test")
wl_con = [{"name": "wk2-test",
"image": TEST_IMAGE}]
scheduling = {"node": {"nodeId": self.node_id}}
ns_2 = create_ns(self.c_client, self.cluster, self.project)
self.workload_2 = self.p_client.create_workload(
name=wl_name, containers=wl_con, namespaceId=ns_2.id,
scheduling=scheduling
)
validate_workload(self.p_client, self.workload_2, "deployment",
ns_2.name)
ingress_name = random_test_name("ingress-test")
host = route_entry_53_1
path = "/name.html"
rule = {"host": host,
"paths": [{"path": path, "workloadIds": [self.workload_2.id],
"targetPort": TEST_IMAGE_PORT}]
}
tls = {"certificateId": self.certificate_all_ns_valid.id,
"hosts": [host]
}
self.ingress = self.p_client.create_ingress(
name="{}-2".format(ingress_name), namespaceId=ns_2.id,
rules=[rule], tls=[tls]
)
wait_for_ingress_to_active(self.p_client, self.ingress)
validate_ingress_using_endpoint(
self.p_client, self.ingress, [self.workload_2], certcheck=True)
def test_certificate_create_ssc_for_single_ns(self):
"""
Test steps:
1. Validate the workload available in ns-certificate namespace
2. Create an ingress including self signed certificate scoped for
current namespace and route53 host.
3. validate the ingress using endpoint
"""
validate_workload(self.p_client, self.workload, "deployment",
self.ns.name)
ingress_name = random_test_name("ingress-test")
host = route_entry_53_2
path = "/name.html"
rule = {"host": host,
"paths": [{"path": path, "workloadIds": [self.workload.id],
"targetPort": TEST_IMAGE_PORT}]}
tls = {"certificateId": self.certificate_ssc.id, "hosts": [host]}
self.ingress = self.p_client.create_ingress(
name=ingress_name, namespaceId=self.ns.id, rules=[rule], tls=[tls]
)
wait_for_ingress_to_active(self.p_client, self.ingress)
# validate_ingress(host, path)
validate_ingress_using_endpoint(
self.p_client, self.ingress, [self.workload], certcheck=True,
is_insecure=True
)
def test_certificate_create_ssc_for_all_ns(self):
"""
Test steps:
1. Validate the workload available in ns-certificate namespace
2. Create an ingress including self signed certificate scoped for
all namespace and route53 host.
3. validate the ingress using endpoint
"""
ingress_name = random_test_name("ingress-test")
host = route_entry_53_2
path = "/name.html"
rule = {"host": host,
"paths": [{"path": path, "workloadIds": [self.workload.id],
"targetPort": TEST_IMAGE_PORT}]
}
tls = {"certificateId": self.certificate_all_ns_ssc.id, "hosts": [host]}
self.ingress = self.p_client.create_ingress(
name=ingress_name, namespaceId=self.ns.id, rules=[rule], tls=[tls]
)
wait_for_ingress_to_active(self.p_client, self.ingress)
validate_ingress_using_endpoint(
self.p_client, self.ingress, [self.workload], certcheck=True,
is_insecure=True
)
def test_certificate_create_ssc_for_all_ns_2(self):
"""
Test steps:
1. Create a namespace
2. Create a workload in namespace created above.
3. Validate the workload.
4. Create an ingress including trusted certificate scoped for all
namespace and route53 host.
5. validate the ingress using endpoint
"""
wl_name = random_test_name("workload-test")
wl_con = [{"name": "wk2-test",
"image": TEST_IMAGE}]
scheduling = {"node": {"nodeId": self.node_id}}
ns_2 = create_ns(self.c_client, self.cluster, self.project)
self.workload_2 = self.p_client.create_workload(
name=wl_name, containers=wl_con, namespaceId=ns_2.id,
scheduling=scheduling
)
validate_workload(self.p_client, self.workload_2, "deployment",
ns_2.name)
ingress_name = random_test_name("ingress-test")
host = route_entry_53_2
path = "/name.html"
rule = {"host": host,
"paths": [{"path": path, "workloadIds": [self.workload_2.id],
"targetPort": TEST_IMAGE_PORT}]
}
tls = {"certificateId": self.certificate_all_ns_ssc.id, "hosts": [host]}
self.ingress = self.p_client.create_ingress(
name="{}-2".format(ingress_name), namespaceId=ns_2.id, rules=[rule],
tls=[tls])
wait_for_ingress_to_active(self.p_client, self.ingress)
validate_ingress_using_endpoint(
self.p_client, self.ingress, [self.workload_2], certcheck=True,
is_insecure=True
)
def test_certificate_edit_ssc_to_valid_for_single_ns(self):
"""
Test steps:
1. Create an ingress pointing to self signed certificate scoped to
current namespace.
2. Update the certificate key to trusted.
3. Reload the certificate.
4. Update the ingress.
5. validate the ingress using endpoint.
"""
ingress_name = random_test_name("ingress-test")
host_1 = route_entry_53_2
host_2 = route_entry_53_1
path = "/name.html"
rule_1 = {"host": host_1,
"paths": [{"path": path, "workloadIds": [self.workload.id],
"targetPort": TEST_IMAGE_PORT}]}
rule_2 = {"host": host_2,
"paths": [{"path": path, "workloadIds": [self.workload.id],
"targetPort": TEST_IMAGE_PORT}]}
tls = {"certificateId": self.certificate_ssc.id, "hosts": [host_1]}
tls_2 = {"certificateId": self.certificate_ssc.id, "hosts": [host_2]}
self.ingress = self.p_client.create_ingress(
name=ingress_name, namespaceId=self.ns.id, rules=[rule_1],
tls=[tls]
)
wait_for_ingress_to_active(self.p_client, self.ingress)
self.p_client.update(
self.certificate_ssc, key=rancher_private_key, certs=rancher_cert
)
self.p_client.reload(self.certificate_ssc)
self.p_client.update(self.ingress, rules=[rule_2], tls=[tls_2])
self.p_client.reload(self.ingress)
wait_for_ingress_to_active(self.p_client, self.ingress)
validate_ingress_using_endpoint(
self.p_client, self.ingress, [self.workload], certcheck=True)
def test_certificate_edit_ssc_to_valid_cert_for_all_ns(self):
"""
Test steps:
1. Create an ingress pointing to self signed certificate scoped to
all namespace.
2. Update the certificate key to trusted.
3. Reload the certificate.
4. Update the ingress.
5. validate the ingress using endpoint.
"""
ingress_name = random_test_name("ingress-test")
host_1 = route_entry_53_2
host_2 = route_entry_53_1
path = "/name.html"
rule_1 = {"host": host_1,
"paths": [{"path": path, "workloadIds": [self.workload.id],
"targetPort": TEST_IMAGE_PORT}]
}
rule_2 = {"host": host_2,
"paths": [{"path": path, "workloadIds": [self.workload.id],
"targetPort": TEST_IMAGE_PORT}]
}
tls = {"certificateId": self.certificate_all_ns_ssc.id,
"hosts": [host_1]}
tls_2 = {"certificateId": self.certificate_all_ns_ssc.id,
"hosts": [host_2]}
self.ingress = self.p_client.create_ingress(
name=ingress_name, namespaceId=self.ns.id, rules=[rule_1],
tls=[tls]
)
wait_for_ingress_to_active(self.p_client, self.ingress)
self.p_client.update(
self.certificate_all_ns_ssc, key=rancher_private_key,
certs=rancher_cert
)
self.p_client.reload(self.certificate_all_ns_ssc)
self.p_client.update(self.ingress, rules=[rule_2], tls=[tls_2])
self.p_client.reload(self.ingress)
wait_for_ingress_to_active(self.p_client, self.ingress)
validate_ingress_using_endpoint(
self.p_client, self.ingress, [self.workload], certcheck=True)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_create_certificate(self, role):
"""
Test steps:
1. Create a certificate scoped to all namespaces, for each role
2. Delete the certificate
"""
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
p_client = get_project_client_for_token(project, token)
cert_name = random_test_name("cert-rbac")
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
p_client.create_certificate(
name=cert_name, key=rancher_private_key,
certs=rancher_cert
)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
else:
certificate_allns_valid = p_client.create_certificate(
name=cert_name, key=rancher_private_key,
certs=rancher_cert
)
assert certificate_allns_valid.issuer == 'R3'
# Delete the certificate
p_client.delete(certificate_allns_valid)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_create_namespaced_certificate(self, role):
"""
Test steps:
1. Create a certificate scoped to a single namespace, for each role
2. Delete the certificate
"""
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, token)
cert_name = random_test_name("cert-rbac")
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
p_client.create_namespaced_certificate(
name=cert_name, key=rancher_private_key,
certs=rancher_cert,
namespaceId=ns['name']
)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
else:
certificate_valid = p_client.create_namespaced_certificate(
name=cert_name, key=rancher_private_key, certs=rancher_cert,
namespaceId=ns['name']
)
assert certificate_valid.issuer == 'R3'
# Delete the certificate
p_client.delete(certificate_valid)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_list_namespaced_certificate(self, role):
"""
Test steps:
1. Create a certificate for a single namespace as cluster owner, for each role
2. List the
958361, 958367, 958369, 958381, 958393,
958423, 958439, 958459, 958481, 958487, 958499, 958501, 958519,
958523, 958541, 958543, 958547, 958549, 958553, 958577, 958609,
958627, 958637, 958667, 958669, 958673, 958679, 958687, 958693,
958729, 958739, 958777, 958787, 958807, 958819, 958829, 958843,
958849, 958871, 958877, 958883, 958897, 958901, 958921, 958931,
958933, 958957, 958963, 958967, 958973, 959009, 959083, 959093,
959099, 959131, 959143, 959149, 959159, 959173, 959183, 959207,
959209, 959219, 959227, 959237, 959263, 959267, 959269, 959279,
959323, 959333, 959339, 959351, 959363, 959369, 959377, 959383,
959389, 959449, 959461, 959467, 959471, 959473, 959477, 959479,
959489, 959533, 959561, 959579, 959597, 959603, 959617, 959627,
959659, 959677, 959681, 959689, 959719, 959723, 959737, 959759,
959773, 959779, 959801, 959809, 959831, 959863, 959867, 959869,
959873, 959879, 959887, 959911, 959921, 959927, 959941, 959947,
959953, 959969, 960017, 960019, 960031, 960049, 960053, 960059,
960077, 960119, 960121, 960131, 960137, 960139, 960151, 960173,
960191, 960199, 960217, 960229, 960251, 960259, 960293, 960299,
960329, 960331, 960341, 960353, 960373, 960383, 960389, 960419,
960467, 960493, 960497, 960499, 960521, 960523, 960527, 960569,
960581, 960587, 960593, 960601, 960637, 960643, 960647, 960649,
960667, 960677, 960691, 960703, 960709, 960737, 960763, 960793,
960803, 960809, 960829, 960833, 960863, 960889, 960931, 960937,
960941, 960961, 960977, 960983, 960989, 960991, 961003, 961021,
961033, 961063, 961067, 961069, 961073, 961087, 961091, 961097,
961099, 961109, 961117, 961123, 961133, 961139, 961141, 961151,
961157, 961159, 961183, 961187, 961189, 961201, 961241, 961243,
961273, 961277, 961283, 961313, 961319, 961339, 961393, 961397,
961399, 961427, 961447, 961451, 961453, 961459, 961487, 961507,
961511, 961529, 961531, 961547, 961549, 961567, 961601, 961613,
961619, 961627, 961633, 961637, 961643, 961657, 961661, 961663,
961679, 961687, 961691, 961703, 961729, 961733, 961739, 961747,
961757, 961769, 961777, 961783, 961789, 961811, 961813, 961817,
961841, 961847, 961853, 961861, 961871, 961879, 961927, 961937,
961943, 961957, 961973, 961981, 961991, 961993, 962009, 962011,
962033, 962041, 962051, 962063, 962077, 962099, 962119, 962131,
962161, 962177, 962197, 962233, 962237, 962243, 962257, 962267,
962303, 962309, 962341, 962363, 962413, 962417, 962431, 962441,
962447, 962459, 962461, 962471, 962477, 962497, 962503, 962509,
962537, 962543, 962561, 962569, 962587, 962603, 962609, 962617,
962623, 962627, 962653, 962669, 962671, 962677, 962681, 962683,
962737, 962743, 962747, 962779, 962783, 962789, 962791, 962807,
962837, 962839, 962861, 962867, 962869, 962903, 962909, 962911,
962921, 962959, 962963, 962971, 962993, 963019, 963031, 963043,
963047, 963097, 963103, 963121, 963143, 963163, 963173, 963181,
963187, 963191, 963211, 963223, 963227, 963239, 963241, 963253,
963283, 963299, 963301, 963311, 963323, 963331, 963341, 963343,
963349, 963367, 963379, 963397, 963419, 963427, 963461, 963481,
963491, 963497, 963499, 963559, 963581, 963601, 963607, 963629,
963643, 963653, 963659, 963667, 963689, 963691, 963701, 963707,
963709, 963719, 963731, 963751, 963761, 963763, 963779, 963793,
963799, 963811, 963817, 963839, 963841, 963847, 963863, 963871,
963877, 963899, 963901, 963913, 963943, 963973, 963979, 964009,
964021, 964027, 964039, 964049, 964081, 964097, 964133, 964151,
964153, 964199, 964207, 964213, 964217, 964219, 964253, 964259,
964261, 964267, 964283, 964289, 964297, 964303, 964309, 964333,
964339, 964351, 964357, 964363, 964373, 964417, 964423, 964433,
964463, 964499, 964501, 964507, 964517, 964519, 964531, 964559,
964571, 964577, 964583, 964589, 964609, 964637, 964661, 964679,
964693, 964697, 964703, 964721, 964753, 964757, 964783, 964787,
964793, 964823, 964829, 964861, 964871, 964879, 964883, 964889,
964897, 964913, 964927, 964933, 964939, 964967, 964969, 964973,
964981, 965023, 965047, 965059, 965087, 965089, 965101, 965113,
965117, 965131, 965147, 965161, 965171, 965177, 965179, 965189,
965191, 965197, 965201, 965227, 965233, 965249, 965267, 965291,
965303, 965317, 965329, 965357, 965369, 965399, 965401, 965407,
965411, 965423, 965429, 965443, 965453, 965467, 965483, 965491,
965507, 965519, 965533, 965551, 965567, 965603, 965611, 965621,
965623, 965639, 965647, 965659, 965677, 965711, 965749, 965759,
965773, 965777, 965779, 965791, 965801, 965843, 965851, 965857,
965893, 965927, 965953, 965963, 965969, 965983, 965989, 966011,
966013, 966029, 966041, 966109, 966113, 966139, 966149, 966157,
966191, 966197, 966209, 966211, 966221, 966227, 966233, 966241,
966257, 966271, 966293, 966307, 966313, 966319, 966323, 966337,
966347, 966353, 966373, 966377, 966379, 966389, 966401, 966409,
966419, 966431, 966439, 966463, 966481, 966491, 966499, 966509,
966521, 966527, 966547, 966557, 966583, 966613, 966617, 966619,
966631, 966653, 966659, 966661, 966677, 966727, 966751, 966781,
966803, 966817, 966863, 966869, 966871, 966883, 966893, 966907,
966913, 966919, 966923, 966937, 966961, 966971, 966991, 966997,
967003, 967019, 967049, 967061, 967111, 967129, 967139, 967171,
967201, 967229, 967259, 967261, 967289, 967297, 967319, 967321,
967327, 967333, 967349, 967361, 967363, 967391, 967397, 967427,
967429, 967441, 967451, 967459, 967481, 967493, 967501, 967507,
967511, 967529, 967567, 967583, 967607, 967627, 967663, 967667,
967693, 967699, 967709, 967721, 967739, 967751, 967753, 967763,
967781, 967787, 967819, 967823, 967831, 967843, 967847, 967859,
967873, 967877, 967903, 967919, 967931, 967937, 967951, 967961,
967979, 967999, 968003, 968017, 968021, 968027, 968041, 968063,
968089, 968101, 968111, 968113, 968117, 968137, 968141, 968147,
968159, 968173, 968197, 968213, 968237, 968239, 968251, 968263,
968267, 968273, 968291, 968299, 968311, 968321, 968329, 968333,
968353, 968377, 968381, 968389, 968419, 968423, 968431, 968437,
968459, 968467, 968479, 968501, 968503, 968519, 968521, 968537,
968557, 968567, 968573, 968593, 968641, 968647, 968659, 968663,
968689, 968699, 968713, 968729, 968731, 968761, 968801, 968809,
968819, 968827, 968831, 968857, 968879, 968897, 968909, 968911,
968917, 968939, 968959, 968963, 968971, 969011, 969037, 969041,
969049, 969071, 969083, 969097, 969109, 969113, 969131, 969139,
969167, 969179, 969181, 969233, 969239, 969253, 969257, 969259,
969271, 969301, 969341, 969343, 969347, 969359, 969377, 969403,
969407, 969421, 969431, 969433, 969443, 969457, 969461, 969467,
969481, 969497, 969503, 969509, 969533, 969559, 969569, 969593,
969599, 969637, 969641, 969667, 969671, 969677, 969679, 969713,
969719, 969721, 969743, 969757, 969763, 969767, 969791, 969797,
969809, 969821, 969851, 969863, 969869, 969877, 969889, 969907,
969911, 969919, 969923, 969929, 969977, 969989, 970027, 970031,
970043, 970051, 970061, 970063, 970069, 970087, 970091, 970111,
970133, 970147, 970201, 970213, 970217, 970219, 970231, 970237,
970247, 970259, 970261, 970267, 970279, 970297, 970303, 970313,
970351, 970391, 970421, 970423, 970433, 970441, 970447, 970457,
970469, 970481, 970493, 970537, 970549, 970561, 970573, 970583,
970603, 970633, 970643, 970657, 970667, 970687, 970699, 970721,
970747, 970777, 970787, 970789, 970793, 970799, 970813, 970817,
970829, 970847, 970859, 970861, 970867, 970877, 970883, 970903,
970909, 970927, 970939, 970943, 970961, 970967, 970969, 970987,
970997, 970999, 971021, 971027, 971029, 971039, 971051, 971053,
971063, 971077, 971093, 971099, 971111, 971141, 971143, 971149,
971153, 971171, 971177, 971197, 971207, 971237, 971251, 971263,
971273, 971279, 971281, 971291, 971309, 971339, 971353, 971357,
971371, 971381, 971387, 971389, 971401, 971419, 971429, 971441,
971473, 971479, 971483, 971491, 971501, 971513, 971521, 971549,
971561, 971563, 971569, 971591, 971639, 971651, 971653, 971683,
971693, 971699, 971713, 971723, 971753, 971759, 971767, 971783,
971821, 971833, 971851, 971857, 971863, 971899, 971903, 971917,
971921, 971933, 971939, 971951, 971959, 971977, 971981, 971989,
972001, 972017, 972029, 972031, 972047, 972071, 972079, 972091,
972113, 972119, 972121, 972131, 972133, 972137, 972161, 972163,
972197, 972199, 972221, 972227, 972229, 972259, 972263, 972271,
972277, 972313, 972319, 972329, 972337, 972343, 972347, 972353,
972373, 972403, 972407, 972409, 972427, 972431, 972443, 972469,
972473, 972481, 972493, 972533, 972557, 972577, 972581, 972599,
972611, 972613, 972623, 972637, 972649, 972661, 972679, 972683,
972701, 972721, 972787, 972793, 972799, 972823, 972827, 972833,
972847, 972869, 972887, 972899, 972901, 972941, 972943, 972967,
972977, 972991, 973001, 973003, 973031, 973033, 973051, 973057,
973067, 973069, 973073, 973081, 973099, 973129, 973151, 973169,
973177, 973187, 973213, 973253, 973277, 973279, 973283, 973289,
973321, 973331, 973333, 973367, 973373, 973387, 973397, 973409,
973411, 973421, 973439, 973459, 973487, 973523, 973529, 973537,
973547, 973561, 973591, 973597, 973631, 973657, 973669, 973681,
973691, 973727, 973757, 973759, 973781, 973787, 973789, 973801,
973813, 973823, 973837, 973853, 973891, 973897, 973901, 973919,
973957, 974003, 974009, 974033, 974041, 974053, 974063, 974089,
974107, 974123, 974137, 974143, 974147, 974159, 974161, 974167,
974177, 974179, 974189, 974213, 974249, 974261, 974269, 974273,
974279, 974293, 974317, 974329, 974359, 974383, 974387, 974401,
974411, 974417, 974419, 974431, 974437, 974443, 974459, 974473,
974489, 974497, 974507, 974513, 974531, 974537, 974539, 974551,
974557, 974563, 974581, 974591, 974599, 974651, 974653, 974657,
974707, 974711, 974713, 974737, 974747, 974749, 974761, 974773,
974803, 974819, 974821, 974837, 974849, 974861, 974863, 974867,
974873, 974879, 974887, 974891, 974923, 974927, 974957, 974959,
974969, 974971, 974977, 974983, 974989, 974999, 975011, 975017,
975049, 975053, 975071, 975083, 975089, 975133, 975151, 975157,
975181, 975187, 975193, 975199, 975217, 975257, 975259, 975263,
975277, 975281, 975287, 975313, 975323, 975343, 975367, 975379,
975383, 975389, 975421, 975427, 975433, 975439, 975463, 975493,
975497, 975509, 975521, 975523, 975551, 975553, 975581, 975599,
975619, 975629, 975643, 975649, 975661, 975671, 975691, 975701,
975731, 975739, 975743,
<gh_stars>0
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import re
import uuid
import logging
from django.db import models
from django.db.models import signals
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.urls import reverse
from django.core.files.storage import FileSystemStorage
from pinax.ratings.models import OverallRating
from tinymce.models import HTMLField
from geonode.base.models import ResourceBase, ResourceBaseManager, resourcebase_post_save
from geonode.people.utils import get_valid_user
from geonode.utils import check_shp_columnnames
from geonode.security.models import PermissionLevelMixin
from geonode.security.utils import remove_object_permissions
from geonode.notifications_helper import (
send_notification,
get_notification_recipients)
from ..services.enumerations import CASCADED
from ..services.enumerations import INDEXED
logger = logging.getLogger("geonode.layers.models")
shp_exts = ['.shp', ]
csv_exts = ['.csv']
kml_exts = ['.kml']
vec_exts = shp_exts + csv_exts + kml_exts
cov_exts = ['.tif', '.tiff', '.geotiff', '.geotif', '.asc']
TIME_REGEX = (
('[0-9]{8}', _('YYYYMMDD')),
('[0-9]{8}T[0-9]{6}', _("YYYYMMDD'T'hhmmss")),
('[0-9]{8}T[0-9]{6}Z', _("YYYYMMDD'T'hhmmss'Z'")),
)
TIME_REGEX_FORMAT = {
'[0-9]{8}': '%Y%m%d',
'[0-9]{8}T[0-9]{6}': '%Y%m%dT%H%M%S',
'[0-9]{8}T[0-9]{6}Z': '%Y%m%dT%H%M%SZ'
}
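# Illustration of how TIME_REGEX_FORMAT pairs a regex with its strptime pattern;
# this helper is a sketch and is not referenced elsewhere in the module.
def _parse_time_token(token):
    """Parse a mosaic time token such as '20160101T120000Z' (illustrative only)."""
    from datetime import datetime
    for regex, strptime_format in TIME_REGEX_FORMAT.items():
        if re.fullmatch(regex, token):
            return datetime.strptime(token, strptime_format)
    return None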
# these are only used if there is no user-configured value in the settings
_DEFAULT_CASCADE_WORKSPACE = "cascaded-services"
_DEFAULT_WORKSPACE = "cascaded-services"
class Style(models.Model, PermissionLevelMixin):
"""Model for storing styles.
"""
name = models.CharField(_('style name'), max_length=255, unique=True)
sld_title = models.CharField(max_length=255, null=True, blank=True)
sld_body = models.TextField(_('sld text'), null=True, blank=True)
sld_version = models.CharField(
_('sld version'),
max_length=12,
null=True,
blank=True)
sld_url = models.CharField(_('sld url'), null=True, max_length=1000)
workspace = models.CharField(max_length=255, null=True, blank=True)
def __str__(self):
return f"{self.name}"
def absolute_url(self):
if self.sld_url:
if self.sld_url.startswith(
settings.OGC_SERVER['default']['LOCATION']):
return self.sld_url.split(
settings.OGC_SERVER['default']['LOCATION'], 1)[1]
elif self.sld_url.startswith(settings.OGC_SERVER['default']['PUBLIC_LOCATION']):
return self.sld_url.split(
settings.OGC_SERVER['default']['PUBLIC_LOCATION'], 1)[1]
return self.sld_url
else:
logger.error(
f"SLD URL is empty for Style {self.name}")
return None
def get_self_resource(self):
"""Get associated resource base."""
# Associate this model with resource
try:
layer = self.layer_styles.first()
""":type: Layer"""
return layer.get_self_resource()
except Exception:
return None
class LayerManager(ResourceBaseManager):
def __init__(self):
models.Manager.__init__(self)
class UploadSession(models.Model):
"""Helper class to keep track of uploads.
"""
resource = models.ForeignKey(ResourceBase, blank=True, null=True, on_delete=models.CASCADE)
date = models.DateTimeField(auto_now=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
processed = models.BooleanField(default=False)
error = models.TextField(blank=True, null=True)
traceback = models.TextField(blank=True, null=True)
context = models.TextField(blank=True, null=True)
def successful(self):
return self.processed and self.error is None
def __str__(self):
_s = f"[Upload session-id: {self.id}]"
try:
_s += f" - {self.resource.title}"
except Exception:
pass
return f"{_s}"
def __unicode__(self):
return f"{self.__str__()}"
class Layer(ResourceBase):
"""
Layer (inherits ResourceBase fields)
"""
PERMISSIONS = {
'write': [
'change_layer_data',
'change_layer_style',
]
}
# internal fields
objects = LayerManager()
workspace = models.CharField(_('Workspace'), max_length=128)
store = models.CharField(_('Store'), max_length=128)
storeType = models.CharField(_('Storetype'), max_length=128)
name = models.CharField(_('Name'), max_length=128)
typename = models.CharField(_('Typename'), max_length=128, null=True, blank=True)
is_mosaic = models.BooleanField(_('Is mosaic?'), default=False)
has_time = models.BooleanField(_('Has time?'), default=False)
has_elevation = models.BooleanField(_('Has elevation?'), default=False)
time_regex = models.CharField(
_('Time regex'),
max_length=128,
null=True,
blank=True,
choices=TIME_REGEX)
elevation_regex = models.CharField(_('Elevation regex'), max_length=128, null=True, blank=True)
default_style = models.ForeignKey(
Style,
on_delete=models.SET_NULL,
related_name='layer_default_style',
null=True,
blank=True)
styles = models.ManyToManyField(Style, related_name='layer_styles')
remote_service = models.ForeignKey("services.Service", null=True, blank=True, on_delete=models.CASCADE)
charset = models.CharField(max_length=255, default='UTF-8')
upload_session = models.ForeignKey(UploadSession, blank=True, null=True, on_delete=models.CASCADE)
use_featureinfo_custom_template = models.BooleanField(
_('use featureinfo custom template?'),
help_text=_('specifies whether or not to use a custom GetFeatureInfo template.'),
default=False
)
featureinfo_custom_template = HTMLField(
_('featureinfo custom template'),
help_text=_('the custom GetFeatureInfo template HTML contents.'),
unique=False,
blank=True,
null=True)
def is_vector(self):
return self.storeType == 'dataStore'
def get_upload_session(self):
return self.upload_session
@property
def processed(self):
self.upload_session = UploadSession.objects.filter(resource=self).first()
if self.upload_session:
return self.upload_session.processed
else:
return True
@property
def display_type(self):
if self.storeType == "dataStore":
return "Vector Data"
elif self.storeType == "coverageStore":
return "Raster Data"
else:
return "Data"
@property
def data_model(self):
if hasattr(self, 'modeldescription_set'):
lmd = self.modeldescription_set.all()
if lmd.exists():
return lmd.get().get_django_model()
return None
@property
def data_objects(self):
if self.data_model is not None:
return self.data_model.objects.using('datastore')
return None
@property
def ows_url(self):
if self.remote_service is not None and self.remote_service.method == INDEXED:
result = self.remote_service.service_url
else:
result = f"{settings.OGC_SERVER['default']['PUBLIC_LOCATION']}ows"
return result
@property
def ptype(self):
return self.remote_service.ptype if self.remote_service else "gxp_wmscsource"
@property
def service_typename(self):
if self.remote_service is not None:
return f"{self.remote_service.name}:{self.alternate}"
else:
return self.alternate
@property
def attributes(self):
if self.attribute_set and self.attribute_set.count():
_attrs = self.attribute_set
else:
_attrs = Attribute.objects.filter(layer=self)
return _attrs.exclude(attribute='the_geom').order_by('display_order')
# layer geometry type.
@property
def gtype(self):
# return attribute type without 'gml:' and 'PropertyType'
if self.attribute_set and self.attribute_set.count():
_attrs = self.attribute_set
else:
_attrs = Attribute.objects.filter(layer=self)
if _attrs.filter(attribute='the_geom').exists():
_att_type = _attrs.filter(attribute='the_geom').first().attribute_type
_gtype = re.match(r'\(\'gml:(.*?)\',', _att_type)
return _gtype.group(1) if _gtype else None
return None
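    # Illustrative note (editor's addition, not from the original source): assuming the stored
    # attribute_type is a stringified tuple such as "('gml:MultiPolygonPropertyType', ...)",
    # the regex above captures 'MultiPolygonPropertyType'; when the value does not start with
    # "('gml:", re.match finds nothing and gtype returns None.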
def get_base_file(self):
"""Get the shp or geotiff file for this layer.
"""
# If there was no upload_session return None
try:
if self.upload_session is None:
return None, None
except Exception:
return None, None
base_exts = [x.replace('.', '') for x in cov_exts + vec_exts]
base_files = self.upload_session.layerfile_set.filter(
name__in=base_exts)
base_files_count = base_files.count()
# If there are no files in the upload_session return None
if base_files_count == 0:
return None, None
msg = f'There should only be one main file (.shp or .geotiff or .asc), found {base_files_count}'
assert base_files_count == 1, msg
# we need to check, for shapefile, if column names are valid
list_col = None
if self.storeType == 'dataStore':
valid_shp, wrong_column_name, list_col = check_shp_columnnames(
self)
if wrong_column_name:
msg = f'Shapefile has an invalid column name: {wrong_column_name}'
else:
msg = _('File cannot be opened, maybe check the encoding')
        # AF: Removing the assertion since, if the original file does not exist anymore,
        # it would no longer be possible to update the Metadata
# assert valid_shp, msg
# no error, let's return the base files
return base_files.get(), list_col
def get_absolute_url(self):
return reverse(
'layer_detail',
args=(f"{self.store}:{self.alternate}",)
)
def attribute_config(self):
# Get custom attribute sort order and labels if any
cfg = {}
visible_attributes = self.attribute_set.visible()
if (visible_attributes.count() > 0):
cfg["getFeatureInfo"] = {
"fields": [lyr.attribute for lyr in visible_attributes],
"propertyNames": {lyr.attribute: lyr.attribute_label for lyr in visible_attributes},
"displayTypes": {lyr.attribute: lyr.featureinfo_type for lyr in visible_attributes}
}
if self.use_featureinfo_custom_template:
cfg["ftInfoTemplate"] = self.featureinfo_custom_template
return cfg
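    # Illustrative sketch (editor's addition; attribute names and labels are hypothetical):
    # for a layer whose visible attributes are "name" and "population", attribute_config()
    # would return roughly:
    #   {"getFeatureInfo": {"fields": ["name", "population"],
    #                       "propertyNames": {"name": "Name", "population": "Population"},
    #                       "displayTypes": {"name": "type_property", "population": "type_property"}},
    #    "ftInfoTemplate": "<div>...</div>"}   # only present when use_featureinfo_custom_template is True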
def __str__(self):
return f"{self.alternate}"
class Meta:
# custom permissions,
# change and delete are standard in django-guardian
permissions = (
('change_layer_data', 'Can edit layer data'),
('change_layer_style', 'Can change layer style'),
)
# Permission Level Constants
# LEVEL_NONE inherited
LEVEL_READ = 'layer_readonly'
LEVEL_WRITE = 'layer_readwrite'
LEVEL_ADMIN = 'layer_admin'
def maps(self):
from geonode.maps.models import MapLayer
return MapLayer.objects.filter(name=self.alternate)
@property
def class_name(self):
return self.__class__.__name__
def view_count_up(self, user, do_local=False):
""" increase view counter, if user is not owner and not super
@param user which views layer
@type User model
@param do_local - do local counter update even if pubsub is enabled
@type bool
"""
if user == self.owner or user.is_superuser:
return
if not do_local:
from geonode.messaging import producer
producer.viewing_layer(str(user), str(self.owner), self.id)
else:
Layer.objects.filter(id=self.id)\
.update(popular_count=models.F('popular_count') + 1)
class LayerFile(models.Model):
"""Helper class to store original files.
"""
upload_session = models.ForeignKey(UploadSession, on_delete=models.CASCADE)
name = models.CharField(max_length=255)
base = models.BooleanField(default=False)
file = models.FileField(
upload_to='layers/%Y/%m/%d',
storage=FileSystemStorage(
base_url=settings.LOCAL_MEDIA_URL),
max_length=255)
class AttributeManager(models.Manager):
"""Helper class to access filtered attributes
"""
def visible(self):
return self.get_queryset().filter(
visible=True).order_by('display_order')
class Attribute(models.Model):
"""
Auxiliary model for storing layer attributes.
This helps reduce the need for runtime lookups
to other servers, and lets users customize attribute titles,
sort order, and visibility.
"""
layer = models.ForeignKey(
Layer,
blank=False,
null=False,
unique=False,
on_delete=models.CASCADE,
related_name='attribute_set')
attribute = models.CharField(
_('attribute name'),
help_text=_('name of attribute as stored in shapefile/spatial database'),
max_length=255,
blank=False,
null=True,
unique=False)
description = models.CharField(
_('attribute description'),
help_text=_('description of attribute to be used in metadata'),
max_length=255,
blank=True,
null=True)
attribute_label = models.CharField(
_('attribute label'),
help_text=_('title of attribute as displayed in GeoNode'),
max_length=255,
blank=True,
null=True,
unique=False)
attribute_type = models.CharField(
_('attribute type'),
help_text=_('the data type of the attribute (integer, string, geometry, etc)'),
max_length=50,
blank=False,
null=False,
default='xsd:string',
unique=False)
visible = models.BooleanField(
_('visible?'),
help_text=_('specifies if the attribute should be displayed in identify results'),
default=True)
display_order = models.IntegerField(
_('display order'),
help_text=_('specifies the order in which attribute should be displayed in identify results'),
default=1)
"""
Attribute FeatureInfo-Type list
"""
TYPE_PROPERTY = 'type_property'
TYPE_HREF = 'type_href'
TYPE_IMAGE = 'type_image'
TYPE_VIDEO_MP4 = 'type_video_mp4'
TYPE_VIDEO_OGG = 'type_video_ogg'
TYPE_VIDEO_WEBM = 'type_video_webm'
TYPE_VIDEO_3GP = 'type_video_3gp'
TYPE_VIDEO_FLV = 'type_video_flv'
TYPE_VIDEO_YOUTUBE = 'type_video_youtube'
TYPE_AUDIO = 'type_audio'
TYPE_IFRAME = 'type_iframe'
TYPES = ((TYPE_PROPERTY, _("Label"),),
| |
<filename>intrinsic/dashboard.py
from plagcomps.intrinsic.featureextraction import FeatureExtractor
from plagcomps.evaluation.intrinsic import evaluate_n_documents
from plagcomps.shared.util import IntrinsicUtility
from plagcomps.shared.util import BaseUtility
from plagcomps.dbconstants import username
from plagcomps.dbconstants import password
from plagcomps.dbconstants import dbname
import datetime
import time
import itertools
import os.path
import cPickle
import glob
import pprint
import numpy
import sqlalchemy
from sqlalchemy import Table, Column, Sequence, Integer, String, Float, DateTime, Boolean, and_, cast
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.orm import sessionmaker
DASHBOARD_VERSION = 3
DASHBOARD_WEIGHTING_FILENAME = 'weighting_schemes/scheme*.pkl'
printer = pprint.PrettyPrinter(indent=3)
Base = declarative_base()
class IntrinsicTrial(Base):
    '''
    One row per intrinsic plagiarism-detection trial: stores the trial's parameters
    (features, atom type, clustering, weights) and its results (AUC, precision, recall, etc.).
    '''
__tablename__ = 'intrinsic_trials'
id = Column(Integer, Sequence("intrinsic_trials_id_seq"), primary_key=True)
# Parameters
atom_type = Column(String)
cluster_type = Column(String)
features = Column(ARRAY(String))
# Only used when features are weighted
feature_weights = Column(ARRAY(Float))
feature_weights_file = Column(String)
first_doc_num = Column(Integer)
n = Column(Integer)
min_len = Column(Integer)
# Metadata
# 'intrinsic' or 'extrinsic'
figure_path = Column(String)
timestamp = Column(DateTime)
version_number = Column(Integer)
corpus = Column(String)
cheating = Column(Boolean)
# Actual results
time_elapsed = Column(Float)
auc = Column(Float)
precision = Column(Float)
recall = Column(Float)
fmeasure = Column(Float)
granularity = Column(Float)
overall = Column(Float)
threshold = Column(Float)
def __init__(self, **args):
'''
All arguments are wrapped as keywords in the **args argument to avoid a HUGE parameter
list.
Note that required arguments are accessed using ['name'] accessors, which
will raise an error if the key is not present in the dictionary. This is on
purpose, since these arguments are REQUIRED!
Arguments accessed with .get('name', default_val) are optional/have default values
that are assumed unless specified otherwise
'''
self.atom_type = args['atom_type']
self.cluster_type = args['cluster_type']
self.features = args['features']
# Only used when features are weighted
self.feature_weights = args.get('feature_weights', []) # either raw weights on the features or weights for the individual confidences
self.feature_weights_file = args.get('feature_weights_file', '')
self.first_doc_num = args.get('first_doc_num', 0)
self.n = args['n']
self.min_len = args.get('min_len', 0)
# Metadata
# 'intrinsic' or 'extrinsic'
self.timestamp = datetime.datetime.now()
self.version_number = args['version_number']
self.corpus = args.get('corpus', 'intrinsic')
self.cheating = args.get('cheating', False)
# Actual results
self.time_elapsed = args['time_elapsed']
self.auc = args.get('auc', None)
self.figure_path = args.get('figure_path', None)
# Benno definitions
self.precision = args.get('precision', None)
self.recall = args.get('recall', None)
self.fmeasure = args.get('fmeasure', None)
self.granularity = args.get('granularity', None)
self.overall = args.get('overall', None)
self.threshold = args.get('threshold', None)
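    # Editor's illustration (hypothetical values): the required keys are exactly the ones read
    # with ['name'] above, so a minimal construction would look like
    #   IntrinsicTrial(atom_type='nchars', cluster_type='outlier',
    #                  features=['punctuation_percentage'], n=50,
    #                  version_number=DASHBOARD_VERSION, time_elapsed=12.3)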
def get_feature_sets():
'''
Returns a list containing every set of features we want to test. Since we want to test
each feature individually, for example, we will return something like:
[['feat1'], ['feat2'], ..., ['feat1', 'feat2']]
'''
all_features = FeatureExtractor.get_all_feature_function_names()
individual_features = [[feat] for feat in all_features]
# Test all features as a feature set, as well
all_sets = individual_features + [all_features]
return all_sets
def all_k_sets_of_features(k=2):
all_features = FeatureExtractor.get_all_feature_function_names()
k_sets = [list(combo) for combo in itertools.combinations(all_features, k)]
return k_sets
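# Editor's illustration (hypothetical feature names): if the extractor exposed
# ['avg_word_len', 'punct_pct', 'stopword_pct'], get_feature_sets() would return
# [['avg_word_len'], ['punct_pct'], ['stopword_pct'],
#  ['avg_word_len', 'punct_pct', 'stopword_pct']]
# and all_k_sets_of_features(k=2) would return the three unordered pairs.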
def run_one_trial(feature_set, atom_type, cluster_type, k, first_doc_num, n, min_len, cheating, eval_method='roc'):
'''
Runs <evaluate_n_documents> and saves trial to DB
'''
session = Session()
version_number = DASHBOARD_VERSION
trial_results = {
'atom_type' : atom_type,
'cluster_type' : cluster_type,
'features' : feature_set,
'first_doc_num' : first_doc_num,
'n' : n,
'min_len' : min_len,
'version_number' : version_number
}
if eval_method == 'roc':
start = time.time()
path, auc = evaluate_n_documents(feature_set, cluster_type, k, atom_type, n, min_len=min_len, cheating=cheating, eval_method=eval_method)
end = time.time()
time_elapsed = end - start
further_params = {
'time_elapsed' : time_elapsed,
'auc' : auc,
'figure_path' : os.path.basename(path),
'cheating' : cheating
}
trial_results.update(further_params)
trial = IntrinsicTrial(**trial_results)
session.add(trial)
elif eval_method == 'prec_recall':
start = time.time()
thresh_prec_avgs, thresh_recall_avgs, thresh_fmeasure_avgs, thresh_granularity_avgs, thresh_overall_avgs = \
evaluate_n_documents(feature_set, cluster_type, k, atom_type, n, min_len=min_len, cheating=cheating, eval_method=eval_method)
end = time.time()
time_elapsed = end - start
for thresh in thresh_prec_avgs.keys():
precision = thresh_prec_avgs[thresh]
recall = thresh_recall_avgs[thresh]
fmeasure = thresh_fmeasure_avgs[thresh]
granularity = thresh_granularity_avgs[thresh]
overall = thresh_overall_avgs[thresh]
further_params = {
'threshold' : thresh,
'time_elapsed' : time_elapsed,
'precision' : precision,
'recall' : recall,
'fmeasure' : fmeasure,
'granularity' : granularity,
'overall' : overall
}
# Thanks to http://stackoverflow.com/questions/6005066/adding-dictionaries-together-python
one_trial_params = dict(trial_results, **further_params)
# print 'Would populate with:'
# printer.pprint(one_trial_params)
# print '-'*40
trial = IntrinsicTrial(**one_trial_params)
session.add(trial)
print 'Made a trial!'
session.commit()
session.close()
def run_one_trial_weighted(feature_set, feature_set_weights, feature_weights_filename, atom_type, cluster_type, k, first_doc_num, n, min_len, cheating):
'''
Runs <evaluate_n_documents> using the given raw feature weights or confidence
    weights, and saves the trial to the DB.
'''
session = Session()
start = time.time()
if cluster_type == "combine_confidences":
path, auc, _, _, _, _, _ = evaluate_n_documents(feature_set, cluster_type, k, atom_type, n, min_len=min_len, feature_confidence_weights=feature_set_weights, cheating=cheating)
else:
path, auc, _, _, _, _, _ = evaluate_n_documents(feature_set, cluster_type, k, atom_type, n, min_len=min_len, feature_weights=feature_set_weights, cheating=cheating)
end = time.time()
time_elapsed = end - start
version_number = DASHBOARD_VERSION
trial_results = {
'atom_type' : atom_type,
'cluster_type' : cluster_type,
'features' : feature_set,
'feature_weights' : feature_set_weights,
'feature_weights_file' : feature_weights_filename,
'first_doc_num' : first_doc_num,
'n' : n,
'min_len' : min_len,
'figure_path' : os.path.basename(path),
'version_number' : version_number,
'time_elapsed' : time_elapsed,
'auc' : auc,
'cheating' : cheating
}
trial = IntrinsicTrial(**trial_results)
session.add(trial)
print 'Made a weighted trial!'
session.commit()
session.close()
return trial
def run_all_dashboard(num_files, cheating=False, feature_set=None, eval_method='roc'):
'''
Runs through all parameter options as listed below, writing results to DB as it goes
'''
if feature_set:
feature_set_options = feature_set
else:
feature_set_options = get_feature_sets()
atom_type_options = [
'nchars',
# 'paragraph'
]
cluster_type_options = [
'outlier',
# 'kmeans'
]
# For now, test on all documents (not just "long" ones)
min_len_options = [0]
for feature_set, atom_type, cluster_type, min_len in \
itertools.product(feature_set_options, atom_type_options, cluster_type_options, min_len_options):
print feature_set, atom_type, cluster_type, min_len
params = {
'atom_type' : atom_type,
'cluster_type' : cluster_type,
'feature_set' : feature_set,
'first_doc_num' : 0,
'n' : num_files,
'min_len' : min_len,
'k' : 2,
'cheating' : cheating
}
trial = run_one_trial(eval_method=eval_method, **params)
# run_all_weighting_schemes(num_files, atom_type_options, cluster_type_options, min_len_options, cheating)
def run_all_weighting_schemes(num_files, atom_types, cluster_types, min_len_options, cheating):
'''
    Reads the weighting schemes from the pickled scheme files (DASHBOARD_WEIGHTING_FILENAME) and writes the results to the DB.
'''
# weighting_schemes list contains entries like (weighting_type, [feature_set], [feature_weights]) (weighting_type = {confidence_weights, raw_weights})
weighting_schemes = []
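    # Editor's illustration (hypothetical feature names/weights): each pickled scheme is expected
    # to unpack as (weighting_type, feature_set, feature_weights), e.g.
    #   ('raw_weights', ['avg_word_len', 'punct_pct'], [0.7, 0.3])
    # and the loop below appends the source filename as a fourth element.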
weighting_scheme_filenames = glob.glob(os.path.join(os.path.dirname(__file__), DASHBOARD_WEIGHTING_FILENAME))
weighting_scheme_filenames.sort()
for filepath in weighting_scheme_filenames:
f = open(filepath, 'rb')
scheme = cPickle.load(f)
scheme = (scheme[0], scheme[1], scheme[2], filepath.rsplit("/", 1)[1])
weighting_schemes.append(scheme)
f.close()
for scheme, atom_type, min_len in itertools.product(weighting_schemes, atom_types, min_len_options):
used_confidence_weights = False
for cluster_type in cluster_types:
if scheme[0] == "confidence_weights":
if used_confidence_weights:
continue
cluster_type = "combine_confidences"
used_confidence_weights = True
print scheme[1], scheme[2], scheme[3], atom_type, cluster_type, min_len
params = {
'atom_type' : atom_type,
'cluster_type' : cluster_type,
'feature_set' : scheme[1],
'feature_set_weights' : scheme[2],
'feature_weights_filename' : scheme[3],
'first_doc_num' : 0,
'n' : num_files,
'min_len' : min_len,
'k' : 2,
'cheating' : cheating
}
print
trial = run_one_trial_weighted(**params)
def get_latest_dashboard():
'''
TODO finish this -- should grab/display latest dashboard runs broken
down by various params. Perhaps like:
| PARAGRAPH | NCHARS |
| kmeans | outlier | kmeans | outlier |
'''
feature_set_options = get_feature_sets()
atom_type_options = [
'nchars',
'paragraph'
]
cluster_type_options = [
'outlier',
'kmeans'
]
for feature_set, atom_type, cluster_type, min_len in \
itertools.product(feature_set_options, atom_type_options, cluster_type_options, min_len_options):
print feature_set, atom_type, cluster_type, min_len
try:
q = session.query(IntrinsicTrial).filter(
and_(IntrinsicTrial.atom_type == atom_type,
IntrinsicTrial.cluster_type == cluster_type,
IntrinsicTrial.features == feature_set,
IntrinsicTrial.min_len == min_len)).order_by(IntrinsicTrial.timestamp)
latest_matching_trial = q.first()
except sqlalchemy.orm.exc.NoResultFound, e:
            print 'Didn\'t find a trial for %s, %s, min_len = %i' % (atom_type, cluster_type, min_len)
print 'Using'
def get_pairwise_results(atom_type, cluster_type, n, min_len, feature_set=None, cheating=False, write_output=False):
'''
Generates a table for the results of all feature pairs.
'''
all_features = FeatureExtractor.get_all_feature_function_names()
if not feature_set:
feature_set = list(itertools.combinations(all_features, 2))
feature_set += [(x,x) for x in all_features]
session = Session()
values = []
results = {}
for feature_pair in feature_set:
if feature_pair[0] == feature_pair[1]:
feature_pair = [feature_pair[0]]
trial = _get_latest_trial(atom_type, cluster_type, n, min_len, list(feature_pair), cheating, session)
if trial:
results[tuple(feature_pair)] = round(trial.auc, 4)
values.append(trial.auc)
else:
results[tuple(feature_pair)] = "n/a"
mean = numpy.array(values).mean()
stdev = numpy.array(values).std()
columns = all_features
rows = all_features
cells = []
for feature_a in rows:
row = []
for feature_b in columns:
if feature_a == feature_b:
row.append(results[tuple([feature_a])])
else:
if (feature_a, feature_b) in results:
row.append(results[(feature_a, feature_b)])
elif (feature_b, feature_a) in results:
row.append(results[(feature_b, feature_a)])
else:
row.append('???')
cells.append(row)
# Is html table the best way to view it?
html = '<html><head></head><body>'
html += '<h1>Pairwise Feature Results</h1>'
html += '<p>DASHBOARD_VERSION = ' + str(DASHBOARD_VERSION) + '</p>'
html += '<p>cheating = ' + str(cheating) + '</p>'
html += '<p>atom_type = ' + str(atom_type) + '</p>'
html += '<p>cluster_type = ' + str(cluster_type) + '</p>'
html += '<p>n >= ' + | |
0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.32097,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.97548,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.078721,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.264519,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.361795,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.243305,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.392443,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.198092,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.83384,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.222802,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.93269,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0683509,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0102053,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.105844,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0754746,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.174194,
'Execution Unit/Register Files/Runtime Dynamic': 0.0856799,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.242667,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.611647,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.20106,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00145074,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00145074,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00130694,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000529647,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.0010842,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00529262,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0123608,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0725557,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.61516,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.205219,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.246432,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.05766,
'Instruction Fetch Unit/Runtime Dynamic': 0.54186,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.018408,
'L2/Runtime Dynamic': 0.00388542,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.88173,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.27705,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0855594,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0855595,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.28576,
'Load Store Unit/Runtime Dynamic': 1.78456,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.210975,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.42195,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0748756,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0750482,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.286954,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0339504,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.571686,
'Memory Management Unit/Runtime Dynamic': 0.108999,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 20.4557,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.179799,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0131654,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.121393,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
| |
= CONFIG.FILTERS.SOBEL_3x3
# find best threshold for first level
for sigma in range(100, 175, 25):
s = sigma / 100
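        # Editor's note: range(100, 175, 25) yields 100, 125 and 150, so the Gaussian
        # sigma s sweeps the values 1.0, 1.25 and 1.5.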
blured_img = Application.do_gaussian_blur_image_job(port_input_name='GREY', sigma=s,
port_output_name='BLURED_S_' + str(s).replace('.', '_'))
for low in range(70, 150, 10):
for high in range(90, 200, 10):
# for high in [90]:
if low < high:
canny_result = Application.do_canny_config_job(port_input_name=blured_img, edge_detector=edge, canny_config=CONFIG.CANNY_VARIANTS.MANUAL_THRESHOLD,
low_manual_threshold = low, high_manual_threshold=high, canny_config_value=None,
port_output_name='CANNY_' + edge + '_S_' + str(s).replace('.', '_') + '_L_' + str(low) + '_H_' + str(high),
do_blur=False)
list_to_save.append(canny_result + '_L0')
Application.create_config_file()
Application.configure_save_pictures(ports_to_save=list_to_save, job_name_in_port=False)
# Application.configure_save_pictures(ports_to_save='ALL', job_name_in_port=True)
# Application.run_application()
# Do bsds benchmarking
    # Beware not to activate job_name_in_port in Application.configure_save_pictures
# Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
# gt_location='TestData/BSR/BSDS500/data/groundTruth/' + dataset,
# raw_image='TestData/BSR/BSDS500/data/images/' + dataset,
# jobs_set=list_to_save, do_thinning=False)
Utils.plot_first_cpm_results(prefix='FINAL', level='L0', order_by='f1', name='canny_sigma_results_finder',
suffix_to_cut_legend='_L0',
list_of_data=list_to_save, number_of_series=25,
replace_list=[('CANNY_SOBEL_3x3', ''), ('_S_', ' S='), ('_L_', ' L='), ('_H_', ' H='), ('_L0', ''), ('_', '.')],
inputs=[''], self_contained_list=True,
save_plot=True, show_plot=False)
Utils.close_files()
# def main_canny(dataset):
# Application.delete_folder_appl_out()
# Benchmarking.delete_folder_benchmark_out()
#
# Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/' + dataset)
#
# list_to_save = []
#
# Application.do_get_image_job(port_output_name='RAW')
# Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GREY')
#
# first_order_edge = [
# CONFIG.FILTERS.PIXEL_DIFF_3x3, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_3x3
# , CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_5x5, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_7x7
# , CONFIG.FILTERS.PIXEL_DIFF_5x5, CONFIG.FILTERS.PIXEL_DIFF_7x7
#
# , CONFIG.FILTERS.SOBEL_3x3, CONFIG.FILTERS.SOBEL_5x5, CONFIG.FILTERS.SOBEL_7x7
# , CONFIG.FILTERS.SOBEL_DILATED_5x5, CONFIG.FILTERS.SOBEL_DILATED_7x7
#
# , CONFIG.FILTERS.PREWITT_3x3, CONFIG.FILTERS.PREWITT_5x5, CONFIG.FILTERS.PREWITT_7x7
# , CONFIG.FILTERS.PREWITT_DILATED_5x5, CONFIG.FILTERS.PREWITT_DILATED_7x7
#
# , CONFIG.FILTERS.KIRSCH_3x3, CONFIG.FILTERS.KIRSCH_5x5
# , CONFIG.FILTERS.KIRSCH_DILATED_5x5, CONFIG.FILTERS.KIRSCH_DILATED_7x7
#
# , CONFIG.FILTERS.KITCHEN_MALIN_3x3
# , CONFIG.FILTERS.KITCHEN_MALIN_DILATED_5x5, CONFIG.FILTERS.KITCHEN_MALIN_DILATED_7x7
#
# , CONFIG.FILTERS.KAYYALI_3x3
# , CONFIG.FILTERS.KAYYALI_DILATED_5x5, CONFIG.FILTERS.KAYYALI_DILATED_7x7
#
# , CONFIG.FILTERS.SCHARR_3x3, CONFIG.FILTERS.SCHARR_5x5
# , CONFIG.FILTERS.SCHARR_DILATED_5x5, CONFIG.FILTERS.SCHARR_DILATED_7x7
#
# , CONFIG.FILTERS.KROON_3x3
# , CONFIG.FILTERS.KROON_DILATED_5x5, CONFIG.FILTERS.KROON_DILATED_7x7
#
# , CONFIG.FILTERS.ORHEI_3x3, CONFIG.FILTERS.ORHEI_B_5x5
# , CONFIG.FILTERS.ORHEI_DILATED_5x5, CONFIG.FILTERS.ORHEI_DILATED_7x7
# ]
#
# s = 1.25
# # find best threshold for first level
# for edge in first_order_edge:
# blured_img = Application.do_gaussian_blur_image_job(port_input_name='GREY', sigma=s,
# port_output_name='BLURED_S_' + str(s).replace('.', '_'))
# Application.do_max_pixel_image_job(port_input_name=blured_img, port_output_name='MAX_' + blured_img)
# canny_result = Application.do_canny_ratio_threshold_job(port_input_name=blured_img, edge_detector=edge,
# port_output_name='CANNY_' + edge + '_S_' + str(s).replace('.', '_'),
# canny_config_value='MAX_' + blured_img, do_blur=False)
# list_to_save.append(canny_result + '_L0')
#
# Application.create_config_file()
# Application.configure_save_pictures(ports_to_save=list_to_save, job_name_in_port=False)
# # Application.configure_save_pictures(ports_to_save='ALL', job_name_in_port=True)
# Application.run_application()
#
# # Do bsds benchmarking
# # Beware not to activate job_name_in_port in Application.configure_save_pictures
# Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
# gt_location='TestData/BSR/BSDS500/data/groundTruth/' + dataset,
# raw_image='TestData/BSR/BSDS500/data/images/' + dataset,
# jobs_set=list_to_save, do_thinning=False)
#
# Utils.plot_first_cpm_results(prefix='FINAL', level='L0', order_by='f1', name='canny_results',
# suffix_to_cut_legend='_S_1_25_L0', prefix_to_cut_legend='CANNY_',
# list_of_data=list_to_save, number_of_series=40,
# replace_list=[('SEPARATED_PIXEL_DIFFERENCE_', 'Separated Px Dif '),
# ('PIXEL_DIFFERENCE_', 'Pixel Dif '),
# ('PREWITT_', 'Prewitt '), ('KIRSCH_', 'Kirsch '), ('SOBEL_', 'Sobel '),
# ('SCHARR_', 'Scharr '), ('KROON_', 'Kroon '), ('ORHEI_V1_', 'Orhei '),
# ('ORHEI_', 'Orhei '),
# ('KITCHEN_', 'Kitchen '), ('KAYYALI_', 'Kayyali '),
# ('DILATED_', 'dilated ')],
# inputs=[''], self_contained_list=True,
# save_plot=True, show_plot=False)
#
# Utils.create_latex_cpm_table(list_of_data=list_to_save, name_of_table='canny_latex_table_results', print_to_console=True,
# header_list=['Variant', '', '3x3', '5x5', 'Dilated 5x5', '7x7', 'Dilated 7x7'],
# prefix_data_name='FINAL', suffix_data_name='BLURED', level_data_name='L0',
# version_data_name=['3x3', '5x5', 'DILATED_5x5', '7x7', 'DILATED_7x7'],
# data_per_variant=['R', 'P', 'F1'], version_separation='DILATED')
#
# Utils.close_files()
def main_canny_2(dataset):
# Application.delete_folder_appl_out()
# Benchmarking.delete_folder_benchmark_out()
Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/' + dataset)
list_to_save = []
Application.do_get_image_job(port_output_name='RAW')
Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GREY')
first_order_edge = [
CONFIG.FILTERS.PIXEL_DIFF_3x3, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_3x3
, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_5x5, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_7x7
, CONFIG.FILTERS.PIXEL_DIFF_5x5, CONFIG.FILTERS.PIXEL_DIFF_7x7
, CONFIG.FILTERS.SOBEL_3x3, CONFIG.FILTERS.SOBEL_5x5, CONFIG.FILTERS.SOBEL_7x7
, CONFIG.FILTERS.SOBEL_DILATED_5x5, CONFIG.FILTERS.SOBEL_DILATED_7x7
, CONFIG.FILTERS.PREWITT_3x3, CONFIG.FILTERS.PREWITT_5x5, CONFIG.FILTERS.PREWITT_7x7
, CONFIG.FILTERS.PREWITT_DILATED_5x5, CONFIG.FILTERS.PREWITT_DILATED_7x7
, CONFIG.FILTERS.KIRSCH_3x3, CONFIG.FILTERS.KIRSCH_5x5
, CONFIG.FILTERS.KIRSCH_DILATED_5x5, CONFIG.FILTERS.KIRSCH_DILATED_7x7
, CONFIG.FILTERS.KITCHEN_MALIN_3x3
, CONFIG.FILTERS.KITCHEN_MALIN_DILATED_5x5, CONFIG.FILTERS.KITCHEN_MALIN_DILATED_7x7
, CONFIG.FILTERS.KAYYALI_3x3
, CONFIG.FILTERS.KAYYALI_DILATED_5x5, CONFIG.FILTERS.KAYYALI_DILATED_7x7
, CONFIG.FILTERS.SCHARR_3x3, CONFIG.FILTERS.SCHARR_5x5
, CONFIG.FILTERS.SCHARR_DILATED_5x5, CONFIG.FILTERS.SCHARR_DILATED_7x7
, CONFIG.FILTERS.KROON_3x3
, CONFIG.FILTERS.KROON_DILATED_5x5, CONFIG.FILTERS.KROON_DILATED_7x7
, CONFIG.FILTERS.ORHEI_3x3, CONFIG.FILTERS.ORHEI_B_5x5
, CONFIG.FILTERS.ORHEI_DILATED_5x5, CONFIG.FILTERS.ORHEI_DILATED_7x7
]
s = 1.5
# find best threshold for first level
for edge in first_order_edge:
blured_img = Application.do_gaussian_blur_image_job(port_input_name='GREY', sigma=s,
port_output_name='BLURED_S_' + str(s).replace('.', '_'))
low = 80
high = 90
canny_result = Application.do_canny_config_job(port_input_name=blured_img, edge_detector=edge, canny_config=CONFIG.CANNY_VARIANTS.MANUAL_THRESHOLD,
low_manual_threshold = low, high_manual_threshold=high, canny_config_value=None,
port_output_name='CANNY_' + edge + '_S_' + str(s).replace('.', '_') + '_L_' + str(low) + '_H_' + str(high),
do_blur=False)
list_to_save.append(canny_result + '_L0')
Application.create_config_file()
Application.configure_save_pictures(ports_to_save=list_to_save, job_name_in_port=True)
# Application.configure_save_pictures(ports_to_save='ALL', job_name_in_port=True)
# Application.run_application()
# Do bsds benchmarking
    # Beware not to activate job_name_in_port in Application.configure_save_pictures
# Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
# gt_location='TestData/BSR/BSDS500/data/groundTruth/' + dataset,
# raw_image='TestData/BSR/BSDS500/data/images/' + dataset,
# jobs_set=list_to_save, do_thinning=False)
Utils.plot_first_cpm_results(prefix='FINAL', level='L0', order_by='f1', name='canny_results',
suffix_to_cut_legend='_S_1_5_L_80_H_90_L0', prefix_to_cut_legend='CANNY_',
list_of_data=list_to_save, number_of_series=40,
replace_list=[('SEPARATED_PIXEL_DIFFERENCE_', 'Separated Px Dif '),
('PIXEL_DIFFERENCE_', 'Pixel Dif '),
('PREWITT_', 'Prewitt '), ('KIRSCH_', 'Kirsch '), ('SOBEL_', 'Sobel '),
('SCHARR_', 'Scharr '), ('KROON_', 'Kroon '), ('ORHEI_V1_', 'Orhei '),
('ORHEI_', 'Orhei '),
('KITCHEN_', 'Kitchen '), ('KAYYALI_', 'Kayyali '),
('DILATED_', 'dilated ')],
inputs=[''], self_contained_list=True,
save_plot=True, show_plot=False)
Utils.create_latex_cpm_table(list_of_data=list_to_save, name_of_table='canny_latex_table_results', print_to_console=True,
header_list=['Variant', '', '3x3', '5x5', 'Dilated 5x5', '7x7', 'Dilated 7x7'],
prefix_data_name='CA', suffix_data_name='BLURED', level_data_name='L0',
version_data_name=['3x3', '5x5', 'DILATED_5x5', '7x7', 'DILATED_7x7'],
data_per_variant=['R', 'P', 'F1'], version_separation='DILATED')
Utils.close_files()
def main_param_shen_finder(dataset):
# Application.delete_folder_appl_out()
# Benchmarking.delete_folder_benchmark_out()
Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/' + dataset)
list_to_save = []
Application.do_get_image_job(port_output_name='RAW')
Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GREY')
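    # Editor's note: the nested sweep below evaluates 2 (s) x 3 (w) x 2 (r) x 3 (th) x 1 (thr)
    # = 36 Shen-Castan parameter combinations per input image.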
for s in [0.5, 0.9]:
for w in [5, 7, 11]:
for r in [0.5, 0.9]:
for th in [0, 0.5, 0.9]:
for thr in [4]:
edge_result = Application.do_shen_castan_job(port_input_name='GREY',
laplacian_kernel=CONFIG.FILTERS_SECOND_ORDER.LAPLACE_1,
laplacian_threhold=thr, smoothing_factor=s, zc_window_size=w,
thinning_factor=th, ratio=r,
port_output_name='SHEN_CASTAN_' + 'THR_' + str(thr).replace('.', '_')
+ '_S_' + str(s).replace('.', '_') + '_W_' + str(w) +
'_R_' + str(r).replace('.', '_') + '_TH_' + str(
th).replace('.', '_'))
list_to_save.append(edge_result + '_L0')
Application.create_config_file()
Application.configure_save_pictures(ports_to_save=list_to_save, job_name_in_port=False)
# Application.configure_save_pictures(ports_to_save='ALL', job_name_in_port=True)
# Application.run_application()
# Do bsds benchmarking
    # Beware not to activate job_name_in_port in Application.configure_save_pictures
# Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
# gt_location='TestData/BSR/BSDS500/data/groundTruth/' + dataset,
# raw_image='TestData/BSR/BSDS500/data/images/' + dataset,
# jobs_set=list_to_save, do_thinning=False)
Utils.plot_first_cpm_results(prefix='', level='L0', order_by='f1', name='shen_tunning',
list_of_data=list_to_save, number_of_series=25,
suffix_to_cut_legend='_L0',
replace_list=[('SHEN_CASTAN_', ''), ('THR_', ' Thr='), ('_S_', ' S='), ('_W_', ' W='),
('_R_', ' R='), ('_TH_', ' Tn='), ('_', '.')],
inputs=[''], self_contained_list=True, set_legend_left=False,
save_plot=True, show_plot=False)
Utils.close_files()
def main_shen_edges(dataset):
# Application.delete_folder_appl_out()
# Benchmarking.delete_folder_benchmark_out()
Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/' + dataset)
list_to_save = []
Application.do_get_image_job(port_output_name='RAW')
Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GREY')
laplace_edges = [
CONFIG.FILTERS_SECOND_ORDER.LAPLACE_1, CONFIG.FILTERS_SECOND_ORDER.LAPLACE_5x5_1
, CONFIG.FILTERS_SECOND_ORDER.LAPLACE_DILATED_5x5_1, CONFIG.FILTERS_SECOND_ORDER.LAPLACE_DILATED_7x7_1
, CONFIG.FILTERS_SECOND_ORDER.LAPLACE_2, CONFIG.FILTERS_SECOND_ORDER.LAPLACE_5x5_2
, CONFIG.FILTERS_SECOND_ORDER.LAPLACE_DILATED_5x5_2, CONFIG.FILTERS_SECOND_ORDER.LAPLACE_DILATED_7x7_2
, CONFIG.FILTERS_SECOND_ORDER.LAPLACE_3
, CONFIG.FILTERS_SECOND_ORDER.LAPLACE_DILATED_5x5_3, CONFIG.FILTERS_SECOND_ORDER.LAPLACE_DILATED_7x7_3
, CONFIG.FILTERS_SECOND_ORDER.LAPLACE_4
, CONFIG.FILTERS_SECOND_ORDER.LAPLACE_DILATED_5x5_4, CONFIG.FILTERS_SECOND_ORDER.LAPLACE_DILATED_7x7_4
, CONFIG.FILTERS_SECOND_ORDER.LAPLACE_5
, CONFIG.FILTERS_SECOND_ORDER.LAPLACE_DILATED_5x5_5, CONFIG.FILTERS_SECOND_ORDER.LAPLACE_DILATED_7x7_5
]
thr = 4
s = 0.9
w = 7
th = 0.5
r = 0.9
for edge in laplace_edges:
edge_result = Application.do_shen_castan_job(port_input_name='GREY', laplacian_kernel=edge,
laplacian_threhold=thr, smoothing_factor=s, zc_window_size=w,
thinning_factor=th, ratio=r,
port_output_name='SHEN_CASTAN_' + edge)
list_to_save.append(edge_result + '_L0')
Application.create_config_file()
Application.configure_save_pictures(ports_to_save=list_to_save, job_name_in_port=False)
# Application.configure_save_pictures(ports_to_save='ALL', job_name_in_port=True)
# Application.run_application()
# Do bsds benchmarking
    # Beware not to activate job_name_in_port in Application.configure_save_pictures
# Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
# gt_location='TestData/BSR/BSDS500/data/groundTruth/' + dataset,
# raw_image='TestData/BSR/BSDS500/data/images/' + dataset,
# jobs_set=list_to_save, do_thinning=False)
Utils.plot_first_cpm_results(prefix='FINAL', level='L0', order_by='f1', name='shen_edge_results',
list_of_data=list_to_save, number_of_series=30,
inputs=[''], self_contained_list=True,
replace_list=[('SHEN_CASTAN_LAPLACE_', ''), ('_DILATED_', ' Dilated '), ('_3x3', ' 3x3'), ('_5x5', ' 5x5'),
('_L0', '')],
prefix_to_cut_legend='FINAL_', suffix_to_cut_legend='_GREY_L0',
save_plot=True, show_plot=False)
Utils.create_latex_cpm_table(list_of_data=list_to_save, name_of_table='shen_latex_table_results', print_to_console=True,
header_list=['Variant', '', '3x3', '5x5', 'Dilated 5x5', 'Dilated 7x7'],
list_of_series=['LAPLACE_V1', 'LAPLACE_V2', 'LAPLACE_V3', 'LAPLACE_V4', 'LAPLACE_V5'],
prefix_data_name='FINAL', suffix_data_name='GREY', level_data_name='L0',
version_data_name=['3x3', '5x5', 'DILATED_5x5', 'DILATED_7x7'], version_separation='DILATED',
data_per_variant=['R', 'P', 'F1']
)
Utils.close_files()
def main_ed_parsing(dataset):
"""
    Main function of the framework. Please look in example_main for all the functions
    you can use.
"""
Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/' + dataset)
# Application.delete_folder_appl_out()
# Benchmarking.delete_folder_benchmark_out()
Application.do_get_image_job(port_output_name='RAW')
Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')
list = []
first_order_edge = [
CONFIG.FILTERS.SOBEL_3x3
]
for edge in first_order_edge:
for kernel_gaus in [3, 5, 7, 9]:
for grad_thr in [10, 30, 40, 50, 60, 70, 90, 110, 130, 150]:
for anc_thr in [10, 20, 30, 40, 60]:
for sc_int in [1, 3, 5]:
blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', kernel_size=kernel_gaus, sigma=0)
e3, e4 = Application.do_edge_drawing_mod_job(port_input_name=blur, operator=edge,
gradient_thr=grad_thr, anchor_thr=anc_thr, scan_interval=sc_int,
max_edges=100, max_points_edge=100)
list.append(e3 + '_L0')
Application.create_config_file()
Application.configure_save_pictures(ports_to_save=list)
# Application.configure_show_pictures(ports_to_show=list, time_to_show=0)
# Application.run_application()
# Do bsds benchmarking
    # Beware not to activate job_name_in_port in Application.configure_save_pictures
# Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
# gt_location='TestData/BSR/BSDS500/data/groundTruth/test',
# raw_image='TestData/BSR/BSDS500/data/images/test',
# jobs_set=list, do_thinning=False)
Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='ed_finder_thr',
list_of_data=list, number_of_series=25,
inputs=[''], self_contained_list=True, set_legend_left=False,
suffix_to_cut_legend='_S_0_GRAY_RAW_L0',
replace_list=[('EDGE_DRAWING_MOD_THR_', 'TG='), ('_ANC_THR_', ' TA='), ('_SCAN_', ' SI='), ('_SOBEL_3x3_GAUSS_BLUR_K_', ' GK=')],
save_plot=True, show_plot=False, set_all_to_legend=False)
Utils.close_files()
def main_ededge(dataset):
"""
    Main function of the framework. Please look in example_main for all the functions
    you can use.
"""
Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/' + dataset)
# Application.delete_folder_appl_out()
# Benchmarking.delete_folder_benchmark_out()
Application.do_get_image_job(port_output_name='RAW')
Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')
blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', sigma=0, kernel_size=9)
list_to_eval_edge = []
first_order_edge = [
CONFIG.FILTERS.PIXEL_DIFF_3x3, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_3x3
, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_5x5, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_7x7
, CONFIG.FILTERS.PIXEL_DIFF_5x5, CONFIG.FILTERS.PIXEL_DIFF_7x7
, CONFIG.FILTERS.SOBEL_3x3, CONFIG.FILTERS.SOBEL_5x5, CONFIG.FILTERS.SOBEL_7x7
, CONFIG.FILTERS.SOBEL_DILATED_5x5, CONFIG.FILTERS.SOBEL_DILATED_7x7
, CONFIG.FILTERS.PREWITT_3x3, CONFIG.FILTERS.PREWITT_5x5, CONFIG.FILTERS.PREWITT_7x7
, CONFIG.FILTERS.PREWITT_DILATED_5x5, CONFIG.FILTERS.PREWITT_DILATED_7x7
, CONFIG.FILTERS.KIRSCH_3x3, CONFIG.FILTERS.KIRSCH_5x5
, CONFIG.FILTERS.KIRSCH_DILATED_5x5, CONFIG.FILTERS.KIRSCH_DILATED_7x7
, CONFIG.FILTERS.KITCHEN_MALIN_3x3
, CONFIG.FILTERS.KITCHEN_MALIN_DILATED_5x5, CONFIG.FILTERS.KITCHEN_MALIN_DILATED_7x7
, CONFIG.FILTERS.KAYYALI_3x3
, CONFIG.FILTERS.KAYYALI_DILATED_5x5, CONFIG.FILTERS.KAYYALI_DILATED_7x7
, CONFIG.FILTERS.SCHARR_3x3, CONFIG.FILTERS.SCHARR_5x5
, CONFIG.FILTERS.SCHARR_DILATED_5x5, CONFIG.FILTERS.SCHARR_DILATED_7x7
, CONFIG.FILTERS.KROON_3x3
, CONFIG.FILTERS.KROON_DILATED_5x5, CONFIG.FILTERS.KROON_DILATED_7x7
, CONFIG.FILTERS.ORHEI_3x3, CONFIG.FILTERS.ORHEI_B_5x5
, CONFIG.FILTERS.ORHEI_DILATED_5x5, CONFIG.FILTERS.ORHEI_DILATED_7x7
]
for edge in first_order_edge:
for gr_thr in [50]:
for anc_thr in [10]:
e1, e2, = Application.do_edge_drawing_mod_job(port_input_name=blur, operator=edge,
gradient_thr=gr_thr, anchor_thr=anc_thr, scan_interval=1,
max_edges=100, max_points_edge=100)
list_to_eval_edge.append(e1 + '_L0')
Application.create_config_file(verbose=False)
Application.configure_save_pictures(job_name_in_port=False, ports_to_save='ALL')
# Application.configure_show_pictures(ports_to_show=list_to_save, time_to_show=200)
# Application.run_application()
# Do bsds benchmarking
    # Beware not to activate job_name_in_port in Application.configure_save_pictures
# Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',
# gt_location='TestData/BSR/BSDS500/data/groundTruth/' + dataset,
# raw_image='TestData/BSR/BSDS500/data/images/' + dataset,
# jobs_set=list_to_eval_edge, do_thinning=False)
Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='ed_results',
list_of_data=list_to_eval_edge, number_of_series=50,
inputs=[''], self_contained_list=True, set_legend_left=False,
suffix_to_cut_legend='_S_0_GRAY_RAW_L0',
replace_list=[('EDGE_DRAWING_MOD_THR_50_ANC_THR_10_SCAN_1_', ''),
('SEPARATED_PIXEL_DIFFERENCE_', 'Separated Px Dif '),
('PIXEL_DIFFERENCE_', 'Pixel Dif '),
('PREWITT_', 'Prewitt '), ('KIRSCH_', 'Kirsch '), ('SOBEL_', 'Sobel '),
('SCHARR_', 'Scharr '), ('KROON_', 'Kroon '), ('ORHEI_V1_', 'Orhei '),
('ORHEI_', 'Orhei '),
('KITCHEN_', 'Kitchen '), ('KAYYALI_', 'Kayyali '),
('DILATED_', 'dilated '),
('_GAUSS_BLUR_K_9', '')],
save_plot=True, show_plot=False, set_all_to_legend=False)
# Utils.create_latex_cpm_table_list()
Utils.close_files()
if __name__ == "__main__":
# dataset = 'test'
dataset = | |
import pytest
from polyline import ClosedPolyline
def test_polygon_init_empty():
path = ClosedPolyline()
assert not len(path)
def test_polygon_init_nonempty():
"""You can give any number of points to a path at instantiation"""
points = [(0, 0), (5, 0), (5, 5), (5, 10), (10, 10)]
path = ClosedPolyline(*points)
assert len(path) == len(points)
def test_polyline_number_of_line_segments():
"""an open polyline has as many line segments as it has points"""
points = [(0, 0), (10, 0), (10, 10)]
path = ClosedPolyline(*points)
assert len(path) == len(list(path.line_segments()))
def test_polygon_init_with_list():
"""You can instantiate a ClosedPolyline with a list of points (one argument)"""
points = [(0, 0), (5, 0), (5, 5), (5, 10), (10, 10)]
path1 = ClosedPolyline(points)
path2 = ClosedPolyline(*points)
assert path1 == path2
def test_polygon_init_with_polygon():
"""You can instantiate a ClosedPolyline with another ClosedPolyline instance"""
points = [(0, 0), (5, 0), (5, 5), (5, 10), (10, 10)]
path1 = ClosedPolyline(points)
path2 = ClosedPolyline(path1)
assert path1 == path2
assert path1 is not path2
def test_polygon_line_segments():
"""You can give any number of points to a path at instantiation"""
points = [(0, 0), (5, 0), (5, 5), (5, 10), (10, 10)]
path = ClosedPolyline(*points)
assert len(list(path.line_segments())) == len(points)
def test_polygon_insert_point():
points = [(0, 0), (10, 0), (10, 10)]
path = ClosedPolyline(*points)
the_point = (5, 0)
path.insert(the_point)
assert len(path) == len(points) + 1
assert the_point in path
def test_polygon_insert_point_between_last_and_first():
"""the point to be inserted may lie on the line from the last to the first point."""
points = [(0, 0), (10, 0), (10, 10), (0, 10)]
path = ClosedPolyline(*points)
the_point = (0, 5)
path.insert(the_point)
assert len(path) == len(points) + 1
assert the_point in path
def test_polygon_insert_point_in_correct_position1():
"""A new point on an existing line segment is added between the
endpoints of that line segment
"""
points = [(0, 0), (0, 10), (10, 10)]
path = ClosedPolyline(*points)
first_point = (0, 5)
path.insert(first_point)
second_point = (5, 10)
path.insert(second_point)
assert path.points.index(first_point) == 1
assert path.points.index(second_point) == 3
def test_polygon_insert_point_in_correct_position2():
"""A new point on an existing line segment is added between the
endpoints of that line segment: works also between last and first point
"""
points = [(0, 0), (0, 10), (10, 10), (10, 0)]
path = ClosedPolyline(*points)
the_point = (5, 0)
path.insert(the_point)
assert path.points.index(the_point) == 4
def test_polygon_insert_point_error():
"""A new point is refused if it is not on an existing line segment"""
points = [(0, 0), (0, 10), (10, 10), (10, 0)]
path = ClosedPolyline(*points)
the_point = (5, 5)
with pytest.raises(ValueError) as exc:
path.insert(the_point)
assert "not on path" in str(exc)
def test_polygon_can_insert_point_at_specific_linesegment():
"""You can tell a polygon to split a specified line segment by a point,
even if the new point is not 'on' an existing line segment
"""
points = [(0, 0), (0, 10), (10, 10), (10, 0)]
path = ClosedPolyline(*points)
the_point = (5, 5)
path.insert(the_point, after=(0, 0))
assert path.points[1] == the_point
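# Hedged sketch (editor's addition, not part of the tested module): one way
# ClosedPolyline.insert(point, after=None) could satisfy the tests above, assuming the
# vertices live in self.points in order:
#   - if 'after' is given, splice the new point in directly after that vertex;
#   - otherwise scan line_segments() for a segment that contains the point and splice it
#     between that segment's endpoints, raising e.g. ValueError("not on path") when none matches.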
def test_polygon_replace_makes_new_path():
points = [(0, 0), (0, 5), (0, 10), (10, 10), (10, 0), (10, 5)]
path = ClosedPolyline(*points)
new_points = [(0, 5), (10, 5)]
new_path = path.replace(new_points)
assert new_path is not path
def test_polygon_replace_part1():
"""A single line segment that is given as a replacement will be part of the new ClosedPolyline"""
points = [(0, 0), (0, 5), (0, 10), (10, 10), (10, 5), (10, 0)]
path = ClosedPolyline(*points)
new_points = [(0, 5), (10, 5)]
# replace the "bottom" part of the path with the line across the middle ((0,5), (10,5))
new_path = path.replace(new_points)
assert tuple(new_points) in new_path.line_segments()
def test_polygon_replace_part_replaces():
"""The segments that shall be replaced are not in the resulting ClosedPolyline"""
points = [(0, 0), (0, 5), (0, 10), (10, 10), (10, 5), (10, 0)]
path = ClosedPolyline(*points)
new_points = [(0, 5), (10, 5)]
# replace the "bottom" part of the path with the line across the middle ((0,5), (10,5))
expected = [(0, 0), (0, 5), (10, 5), (10, 0)]
new_path = path.replace(new_points)
assert (0, 10) not in new_path.points
assert (10, 10) not in new_path.points
assert expected == new_path.points
def test_polygon_replace_part_replaces_across_ends():
"""The segments that shall be replaced are not in the resulting ClosedPolyline.
This also works when the part to be replaced wraps around from the end of the list to the start."""
points = [(10, 10), (10, 5), (10, 0), (0, 0), (0, 5), (0, 10)]
path = ClosedPolyline(*points)
new_points = [(0, 5), (10, 5)]
# replace the "bottom" part of the path with the line across the middle ((0,5), (10,5))
expected = [(0, 5), (10, 5), (10, 0), (0, 0)]
new_path = path.replace(new_points)
assert (0, 10) not in new_path.points
assert (10, 10) not in new_path.points
assert expected == new_path.points
def test_polygon_replace_requires_points_on_the_polygon():
points = [(10, 10), (10, 5), (10, 0), (0, 0), (0, 5), (0, 10)]
path = ClosedPolyline(*points)
new_points = [(0, 3), (10, 5)]
with pytest.raises(ValueError) as exc:
path.replace(new_points)
assert "must be on polygon" in str(exc)
def test_polygon_split1():
"""Like .replace(), .split() returns a new polygon with the relevant part replaced
by the polyline given, but it returns a pair of polygons"""
points = [(0, 0), (0, 5), (0, 10), (10, 10), (10, 5), (10, 0)]
path = ClosedPolyline(*points)
new_points = [(0, 5), (10, 5)]
# replace the "bottom" part of the path with the line across the middle ((0,5), (10,5))
new_path, _ = path.split(new_points)
assert tuple(new_points) in new_path.line_segments()
def test_polygon_split2():
"""The second polygon returned by .split() is the 'other half' that is defined by the
splitting path. There the splitting path is reversed in order to keep the overall
orientation of each polygon consistent"""
points = [(0, 0), (0, 5), (0, 10), (10, 10), (10, 5), (10, 0)]
path = ClosedPolyline(*points)
new_points = [(0, 5), (10, 5)]
# replace the "bottom" part of the path with the line across the middle ((0,5), (10,5))
_, new_path = path.split(new_points)
assert type(new_path) == ClosedPolyline
assert tuple(reversed(new_points)) in new_path.line_segments()
def test_polygon_split3():
"""The first and second polygons returned by .split() are indeed complements"""
points = [(0, 0), (0, 5), (0, 10), (10, 10), (10, 5), (10, 0)]
path = ClosedPolyline(*points)
new_points = [(0, 5), (10, 5)]
# replace the "bottom" part of the path with the line across the middle ((0,5), (10,5))
path1, path2 = path.split(new_points)
assert set(path1).intersection(set(path2)) == set(new_points)
def test_polygon_surrounds1():
points = [(0, 0), (0, 10), (10, 10), (10, 0)]
path = ClosedPolyline(*points)
the_point = (5, 5)
assert path.surrounds(the_point)
def test_polygon_surrounds2():
points = [(0, 0), (0, 10), (5, 10), (5, 1), (7, 1), (7, 10), (10, 10), (10, 0)]
path = ClosedPolyline(*points)
the_point = (2, 2)
assert path.surrounds(the_point)
def test_polygon_surrounds_not1():
points = [(0, 0), (0, 10), (10, 10), (10, 0)]
path = ClosedPolyline(*points)
the_point = (5, 15)
assert not path.surrounds(the_point)
def test_polygon_surrounds_not2():
points = [(0, 0), (0, 10), (5, 10), (5, 1), (7, 1), (7, 10), (10, 10), (10, 0)]
path = ClosedPolyline(*points)
the_point = (6, 2)
assert not path.surrounds(the_point)
def test_polygon_surrounds_edge1():
points = [(0, 0), (0, 10), (10, 10), (10, 0)]
path = ClosedPolyline(*points)
the_point = (5, 0)
assert path.surrounds(the_point)
def test_polygon_surrounds_edge2():
points = [(0, 0), (0, 10), (10, 10), (10, 0)]
path = ClosedPolyline(*points)
the_point = (0, 5)
assert path.surrounds(the_point)
def test_polygon_surrounds_not_edge_right():
points = [(0, 0), (0, 10), (10, 10), (10, 0)]
path = ClosedPolyline(*points)
the_point = (10, 0)
assert not path.surrounds(the_point)
def test_polygon_surrounds_not_edge_bottom():
points = [(0, 0), (0, 10), (10, 10), (10, 0)]
path = ClosedPolyline(*points)
the_point = (0, 10)
assert not path.surrounds(the_point)
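# The four edge tests above pin down the boundary convention of surrounds():
# the midpoints (5, 0) and (0, 5) of the two edges that meet at (0, 0) count as
# surrounded, while the corner points (10, 0) and (0, 10) do not.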
def test_polygon_reversed():
"""reversed() can be used on Polygons and behaves as expected."""
points = [(0, 0), (0, 10), (10, 10), (10, 0)]
path = ClosedPolyline(*points)
assert [p for p in reversed(path)] == list(reversed(points))
def test_instantiate_reversed_polygon():
"""reversed() can be used on Polygons and behaves as expected."""
points = [(0, 0), (0, 10), (10, 10), (10, 0)]
path = ClosedPolyline(*points)
htap = ClosedPolyline(*reversed(path))
assert [p for p in htap] == list(reversed(points))
def test_polygon_area1():
points = [(0, 0), (10, 0), (10, 10), (0, 10)]
path = ClosedPolyline(*points)
assert path.area() == 100
def test_polygon_area2():
points = [
(0, 0),
(10, 0),
(10, 10),
(8, 10),
(8, 5),
(3, 5),
(3, 10), # cut out 5x5
(0, 10),
]
path = ClosedPolyline(*points)
assert path.area() == 100 - 25
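# The two area tests above (and test_polygon_area3 below) are consistent with
# the classic shoelace formula. A minimal sketch of such a computation, shown
# only as an illustration and not necessarily how ClosedPolyline.area() is
# actually implemented:
def _shoelace_area(points):
    """Absolute area of a closed polygon given as a list of (x, y) tuples."""
    total = 0.0
    for (x1, y1), (x2, y2) in zip(points, points[1:] + points[:1]):
        total += x1 * y2 - x2 * y1
    return abs(total) / 2.0
# e.g. _shoelace_area([(0, 0), (10, 0), (10, 10), (0, 10)]) == 100.0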
def test_polygon_area3():
points = [
(0, 0),
(10, 0),
(10, 3),
(5, | |
<reponame>carderne/descarteslabs-python
# -*- coding: utf-8 -*-
import pytest
import responses
import textwrap
import warnings
from datetime import datetime
from mock import patch
from descarteslabs.client.exceptions import BadRequestError
from .. import properties
from ..attributes import AttributeValidationError, ListAttribute
from ..band import DerivedBand
from ..catalog_base import DocumentState, DeletedObjectError
from ..image_upload import ImageUploadStatus
from ..product import (
Product,
Resolution,
TaskState,
TaskStatus,
DeletionTaskStatus,
UpdatePermissionsTaskStatus,
)
from .base import ClientTestCase
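# Test pattern used throughout this class: the `responses` library intercepts
# the catalog client's HTTP calls, while ClientTestCase (from .base, not shown
# here) presumably provides the mock_response(), get_request() and
# get_request_body() helpers plus the self.client and self.url fixtures.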
class TestProduct(ClientTestCase):
def test_constructor(self):
p = Product(
id="p1", name="Test Product", start_datetime="2019-01-01", tags=["tag"]
)
assert p.id == "p1"
assert p.name == "Test Product"
assert p.tags == ["tag"]
assert p.state == DocumentState.UNSAVED
def test_repr_non_ascii(self):
p = Product(id="plieades", name="Pléiades")
p_repr = repr(p)
match_str = """\
Product: Pléiades
id: plieades
* Not up-to-date in the Descartes Labs catalog. Call `.save()` to save or update this record."""
assert p_repr.strip("\n") == textwrap.dedent(match_str)
def test_resolution(self):
p = Product(
id="p1",
name="Test Product",
resolution_min=Resolution(value=10.0, unit="meters"),
_saved=True,
)
assert isinstance(p.resolution_min, Resolution)
assert not p.is_modified
p.tags = ["tag"]
assert p.is_modified
def test_resolution_new(self):
p = Product(
id="p1",
name="Test Product",
resolution_min={"value": 10.0, "unit": "miles"},
_saved=True,
)
assert p.resolution_min.unit == "miles"
with pytest.raises(AttributeValidationError):
Resolution(value=15.0, unit="miles")
@responses.activate
def test_list(self):
self.mock_response(
responses.PUT,
{
"meta": {"count": 1},
"data": [
{
"attributes": {
"owners": ["org:descarteslabs"],
"name": "<NAME>",
"readers": [],
"revisit_period_minutes_min": None,
"revisit_period_minutes_max": None,
"modified": "2019-06-10T18:48:13.066192Z",
"created": "2019-06-10T18:48:13.066192Z",
"start_datetime": "2019-01-01T00:00:00Z",
"writers": [],
"end_datetime": None,
"description": None,
"resolution_min": {"value": 10.0, "unit": "meters"},
},
"type": "product",
"id": "descarteslabs:test",
}
],
"jsonapi": {"version": "1.0"},
"links": {"self": "https://example.com/catalog/v2/products"},
},
)
r = list(Product.search(client=self.client))
assert len(r) == 1
product = r[0]
assert responses.calls[0].request.url == self.url + "/products"
assert product.id == "descarteslabs:test"
assert isinstance(product.created, datetime)
assert isinstance(product.resolution_min, Resolution)
with pytest.raises(AttributeValidationError):
product.created = "2018-06-10T18:48:13.066192Z"
assert isinstance(product.start_datetime, datetime)
@responses.activate
def test_list_no_results(self):
self.mock_response(
responses.PUT,
{
"meta": {"count": 0},
"data": [],
"jsonapi": {"version": "1.0"},
"links": {"self": "https://example.com/catalog/v2/products"},
},
)
r = list(Product.search(client=self.client))
assert r == []
@responses.activate
def test_save(self):
self.mock_response(
responses.POST,
{
"data": {
"attributes": {
"owners": ["org:descarteslabs"],
"name": "My Test Product",
"readers": [],
"revisit_period_minutes_min": None,
"revisit_period_minutes_max": None,
"modified": "2019-06-10T18:48:13.066192Z",
"created": "2019-06-10T18:48:13.066192Z",
"start_datetime": "2019-01-01T00:00:00Z",
"writers": [],
"end_datetime": None,
"description": None,
"resolution_min": {"value": 10.0, "unit": "meters"},
},
"type": "product",
"id": "descarteslabs:test",
},
"jsonapi": {"version": "1.0"},
},
)
p = Product(id="p1", name="Test Product", client=self.client)
assert p.state == DocumentState.UNSAVED
p.save()
assert responses.calls[0].request.url == self.url + "/products"
assert p.state == DocumentState.SAVED
# id updated on initial save
assert "p1" != p.id
assert isinstance(p.start_datetime, datetime)
@responses.activate
def test_save_dupe(self):
self.mock_response(
responses.POST,
{
"errors": [
{
"status": "400",
"detail": "A document with id `descarteslabs:p1` already exists.",
"title": "Bad request",
}
],
"jsonapi": {"version": "1.0"},
},
status=400,
)
p = Product(id="p", name="Test Product", client=self.client)
with pytest.raises(BadRequestError):
p.save()
@responses.activate
def test_an_update(self):
self.mock_response(
responses.GET,
{
"data": {
"attributes": {
"owners": ["org:descarteslabs"],
"name": "<NAME>",
"readers": [],
"modified": "2019-06-11T23:59:46.800792Z",
"created": "2019-06-11T23:52:35.114938Z",
"start_datetime": None,
"writers": [],
"end_datetime": None,
"description": "A descriptive description",
},
"type": "product",
"id": "descarteslabs:my-product",
},
"jsonapi": {"version": "1.0"},
},
)
p1 = Product.get("descarteslabs:my-product", client=self.client)
assert p1.state == DocumentState.SAVED
p1_repr = repr(p1)
match_str = """\
Product: My Product
id: descarteslabs:my-product
created: Tue Jun 11 23:52:35 2019"""
assert p1_repr.strip("\n") == textwrap.dedent(match_str)
p1.description = "An updated description"
assert p1.state == DocumentState.MODIFIED
self.mock_response(
responses.PATCH,
{
"data": {
"attributes": {
"owners": ["org:descarteslabs"],
"name": "<NAME>",
"readers": [],
"modified": "2019-06-11T23:59:46.800792Z",
"created": "2019-06-11T23:52:35.114938Z",
"start_datetime": None,
"writers": [],
"end_datetime": None,
"description": "An updated description",
},
"type": "product",
"id": "descarteslabs:my-product",
},
"jsonapi": {"version": "1.0"},
},
)
p1_repr = repr(p1)
match_str = """\
Product: My Product
id: descarteslabs:my-product
created: Tue Jun 11 23:52:35 2019
* Not up-to-date in the Descartes Labs catalog. Call `.save()` to save or update this record."""
assert p1_repr.strip("\n") == textwrap.dedent(match_str)
p1.save()
assert self.get_request_body(1) == {
"data": {
"type": "product",
"id": "descarteslabs:my-product",
"attributes": {"description": "An updated description"},
}
}
@responses.activate
def test_delete(self):
p = Product(
id="descarteslabs:my-product",
name="My Product",
client=self.client,
_saved=True,
)
self.mock_response(
responses.DELETE,
{
"meta": {"message": "Object successfully deleted"},
"jsonapi": {"version": "1.0"},
},
)
p.delete()
assert p.state == DocumentState.DELETED
@responses.activate
def test_delete_non_existent(self):
p = Product(
id="ne-my-product", name="Non-existent", client=self.client, _saved=True
)
self.mock_response(responses.DELETE, self.not_found_json, status=404)
with pytest.raises(DeletedObjectError):
p.delete()
@responses.activate
def test_exists(self):
# head request, no JSON is returned
self.mock_response(responses.HEAD, {})
assert Product.exists("my-id:id", client=self.client)
assert (
responses.calls[0].request.url
== "https://example.com/catalog/v2/products/my-id:id"
)
@responses.activate
def test_exists_false(self):
self.mock_response(responses.HEAD, self.not_found_json, status=404)
assert not Product.exists("my-id:id", client=self.client)
assert (
responses.calls[0].request.url
== "https://example.com/catalog/v2/products/my-id:id"
)
@responses.activate
def test_get_unknown_attribute(self):
self.mock_response(
responses.GET,
{
"data": {
"attributes": {
"owners": ["org:descarteslabs"],
"name": "My Product",
"readers": [],
"modified": "2019-06-11T23:59:46.800792Z",
"created": "2019-06-11T23:52:35.114938Z",
"start_datetime": None,
"writers": [],
"end_datetime": None,
"description": "A descriptive description",
"foobar": "unkown",
},
"type": "product",
"id": "descarteslabs:my-product",
},
"jsonapi": {"version": "1.0"},
},
)
p = Product.get("descarteslabs:my-product", client=self.client)
assert not hasattr(p, "foobar")
@responses.activate
def test_create_product_delete_task(self):
p = Product(id="p1", name="Test Product", client=self.client)
self.mock_response(
responses.POST,
{
"data": {
"attributes": {"status": "RUNNING"},
"type": "product_delete_task",
"id": "descarteslabs:test-product",
},
"jsonapi": {"version": "1.0"},
},
status=201,
)
r = p.delete_related_objects()
req = responses.calls[0].request
assert r.status == TaskState.RUNNING
assert (
req.url
== "https://example.com/catalog/v2/products/p1/delete_related_objects"
)
assert req.body == b'{"data": {"type": "product_delete_task"}}'
@responses.activate
def test_no_objects_to_delete(self):
p = Product(id="p1", name="Test Product", client=self.client)
self.mock_response(
responses.POST,
{
"errors": [
{
"status": "204",
"detail": "A 'delete related objects' operation is not needed: p1",
"title": "No related objects found",
}
],
"jsonapi": {"version": "1.0"},
},
status=204,
)
r = p.delete_related_objects()
assert not r
def test_abstract_status_class(self):
with pytest.raises(TypeError):
TaskStatus()
@responses.activate
def test_get_delete_status(self):
p = Product(id="p1", name="Test Product", client=self.client)
self.mock_response(
responses.GET,
{
"data": {
"attributes": {
"status": "SUCCESS",
"start_datetime": "2019-08-10T00:10:17.528903Z",
"errors": None,
"duration_in_seconds": 0.36756521779382323,
"objects_deleted": 2,
},
"type": "product_delete_task",
"id": "p1",
},
"jsonapi": {"version": "1.0"},
},
)
r = p.get_delete_status()
assert r.status == TaskState.SUCCEEDED
assert isinstance(r, DeletionTaskStatus)
status_repr = repr(r)
match_str = """\
p1 delete task status: SUCCESS
- started: 2019-08-10T00:10:17.528903Z
- took 0.3676 seconds
- 2 objects deleted"""
assert status_repr.strip("\n") == textwrap.dedent(match_str)
@responses.activate
def test_update_related_acls_task(self):
p = Product(id="p1", name="Test Product", client=self.client, _saved=True)
self.mock_response(
responses.POST,
{
"data": {
"type": "product_update_acls",
"attributes": {"status": "RUNNING"},
"id": "p1",
},
"jsonapi": {"version": "1.0"},
},
status=201,
)
r = p.update_related_objects_permissions(
owners=["org:descarteslabs"], readers=["group:public"]
)
assert r.status == TaskState.RUNNING
req = self.get_request(0)
assert (
req.url
== "https://example.com/catalog/v2/products/p1/update_related_objects_acls"
)
body_attributes = self.get_request_body(0)["data"]["attributes"]
assert body_attributes["readers"] == ["group:public"]
assert body_attributes["owners"] == ["org:descarteslabs"]
assert body_attributes["writers"] is None
@responses.activate
def test_update_related_acls_using_listattribute(self):
p = Product(
id="p1",
name="Test Product",
owners=["user:owner"],
readers=["user:reader"],
writers=["user:writer"],
client=self.client,
_saved=True,
)
self.mock_response(
responses.POST,
{
"data": {
"type": "product_update_acls",
"attributes": {"status": "RUNNING"},
"id": "p1",
},
"jsonapi": {"version": "1.0"},
},
status=201,
)
assert isinstance(p.owners, ListAttribute)
assert isinstance(p.readers, ListAttribute)
p.update_related_objects_permissions(owners=p.owners, readers=p.readers)
@responses.activate
def test_update_related_acls_with_single_value(self):
owner = "org:descarteslabs"
reader = "group:public"
self.mock_response(
responses.POST,
{
"data": {
"type": "product_update_acls",
"attributes": {"status": "RUNNING"},
"id": "p1",
},
"jsonapi": {"version": "1.0"},
},
status=201,
)
p = Product(id="p1", name="Test Product", client=self.client, _saved=True)
p.update_related_objects_permissions(owners=owner, readers=reader)
body_attributes = self.get_request_body(0)["data"]["attributes"]
assert body_attributes["owners"] == [owner]
assert body_attributes["readers"] == [reader]
@responses.activate
def test_update_acls_task_status(self):
p = Product(
id="p1",
name="Test Product",
readers=["group:public"],
client=self.client,
_saved=True,
)
self.mock_response(
responses.GET,
{
"data": {
"type": "product_update_acls",
"attributes": {
"start_datetime": "2019-09-17T21:53:07.348000Z",
"duration_in_seconds": 0.0153,
"status": "SUCCESS",
"objects_updated": 1,
"errors": None,
},
"id": "descarteslabs:prod4",
},
"jsonapi": {"version": "1.0"},
},
)
r = p.get_update_permissions_status()
assert isinstance(r, UpdatePermissionsTaskStatus)
assert r.status == TaskState.SUCCEEDED
status_repr = repr(r)
match_str = """\
p1 update permissions task status: SUCCESS
- started: 2019-09-17T21:53:07.348000Z
- took 0.0153 seconds
- 1 objects updated"""
assert status_repr.strip("\n") == textwrap.dedent(match_str)
@responses.activate
@patch(
"descarteslabs.catalog.product.UpdatePermissionsTaskStatus._POLLING_INTERVAL", 1
)
def test_wait_for_completion(self):
p = Product(id="p1", name="Test Product", client=self.client, _saved=True)
self.mock_response(
responses.GET,
{
"data": {
"type": "product_update_acls",
"id": "p1",
"attributes": {"status": "RUNNING"},
},
"jsonapi": {"version": "1.0"},
},
)
self.mock_response(
responses.GET,
{
"data": {
"type": "product_update_acls",
"id": "p1",
"attributes": {"status": "RUNNING"},
},
"jsonapi": {"version": "1.0"},
},
)
self.mock_response(
responses.GET,
{
"data": {
"type": "product_update_acls",
"id": "p1",
"attributes": {
"status": "SUCCESS",
"errors": None,
"duration_in_seconds": 0.012133697,
"objects_updated": 1,
"start_datetime": "2019-09-18T00:27:43.230000Z",
},
},
"jsonapi": {"version": "1.0"},
},
)
update_status = p.get_update_permissions_status()
assert update_status.status == TaskState.RUNNING
update_status.wait_for_completion()
assert update_status.status == TaskState.SUCCEEDED
@responses.activate
def test_image_uploads(self):
product_id = "p1"
self.mock_response(
responses.GET,
{
"data": {
"attributes": {
"readers": [],
"writers": [],
"owners": ["org:descarteslabs"],
"modified": "2019-06-11T23:31:33.714883Z",
"created": "2019-06-11T23:31:33.714883Z",
},
"type": "product",
"id": product_id,
},
"jsonapi": {"version": "1.0"},
},
)
self.mock_response(
responses.PUT,
{
"meta": {"count": 1},
"data": [
{
"type": "image_upload",
"id": "1",
"attributes": {
"created": "2020-01-01T00:00:00.000000Z",
"modified": "2020-01-01T00:00:00.000000Z",
"product_id": product_id,
"image_id": product_id + ":image",
"start_datetime": "2020-01-01T00:00:00Z",
"end_datetime": "2020-01-01T00:00:00Z",
"status": ImageUploadStatus.SUCCESS.value,
},
},
{
"type": "image_upload",
"id": "2",
"attributes": {
"created": "2020-01-01T00:00:00.000000Z",
"modified": "2020-01-01T00:00:00.000000Z",
"product_id": product_id,
"image_id": | |
<reponame>darcy-xiao/TupleNet
from __future__ import print_function
import os
import sys
import subprocess
import time
import struct
import socket
from optparse import OptionParser
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(parent_dir)
from lcp.flow_common import table_note_dict
from lcp import flow_common
TUPLENET_DIR = ''
TUPLENET_ENTITY_VIEW_DIR = 'entity_view/'
UNKNOW_SYMBOL = "<UNKNOW>"
logical_view = None
etcd_env = os.environ.copy()
etcd_env['ETCDCTL_API'] = '3'
etcd_endpoints = "localhost:2379"
def errprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def call_popen(cmd, shell=False):
child = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=etcd_env)
output = child.communicate()
if child.returncode:
raise RuntimeError("error executing %s" % (cmd))
if len(output) == 0 or output[0] is None:
output = ""
else:
output = output[0].strip()
return output
def call_prog(prog, args_list):
cmd = [prog] + args_list
return call_popen(cmd)
def etcdctl(*args):
args = ['--endpoints={}'.format(etcd_endpoints)] + list(args)
return call_prog("etcdctl", args)
def etcdctl_lease(key, value, ttl):
key = TUPLENET_DIR + 'communicate/push/' + key
lease_str = etcdctl('lease', 'grant', str(ttl))
lease = lease_str.split(' ')[1]
etcdctl('put', '--lease={}'.format(lease), key, value)
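# For example (illustrative values only), etcdctl_lease('chassis-A/1234',
# 'cmd=pkt_trace,packet=...,port=lsp1', 10) puts the key
# TUPLENET_DIR + 'communicate/push/chassis-A/1234' with a 10-second lease;
# this is how etcdctl_config_pkt_trace() below hands a trace command to the
# target chassis.
#
# split_hop_path() reassembles the per-chassis trace fragments read back from
# etcd into one ordered path by following each hop's tunnel source IP (tun_src)
# and source port id.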
def split_hop_path(other_hop_path, prev_hop_tun_ip, prev_hop_src_port_id):
trace_path = []
total_trace_num = 0
try_num = 0
for _, path in other_hop_path.items():
total_trace_num += len(path)
while len(trace_path) != total_trace_num and try_num < 100:
try_num += 1
for chassis_id, path in other_hop_path.items():
if len(path) == 0:
continue
# only need to check first trace path in each hop
table_id = int(path[0]["table_id"])
src_port_id = path[0]["src_port_id"]
tun_src = path[0]["tun_src"]
if tun_src != prev_hop_tun_ip or \
(table_id != flow_common.TABLE_LSP_TRACE_EGRESS_IN and table_id != flow_common.TABLE_LRP_TRACE_INGRESS_OUT) or \
src_port_id != prev_hop_src_port_id:
continue
final_idx = 0
for i in xrange(len(path)):
trace = path[i]
if trace["tun_src"] != prev_hop_tun_ip:
break
trace_path.append(trace)
final_idx = i
# check if next trace not in same hop
if int(trace["table_id"]) == \
flow_common.TABLE_LSP_TRACE_INGRESS_OUT and \
i + 1 < len(path) and \
path[i+1]["src_port_id"] != trace["src_port_id"]:
break
# remove the traces just consumed into trace_path from this chassis' pending list
other_hop_path[chassis_id] = path[final_idx+1:]
prev_hop_tun_ip = get_ip_by_chassis(trace_path[-1]["chassis_id"])
prev_hop_src_port_id = trace_path[-1]["src_port_id"]
return trace_path
def get_ip_by_chassis(chassis_id):
for i in xrange(0, len(logical_view), 2):
if logical_view[i] != TUPLENET_ENTITY_VIEW_DIR + 'chassis/{}'.format(chassis_id):
continue
properties = logical_view[i + 1].split(',')
for p in properties:
p = p.split('=')
pname = p[0]
if pname == 'ip':
ip = p[1]
return ip
def find_chassis_by_port(lport):
global logical_view
chassis_id = None
output = etcdctl('get', '--prefix', TUPLENET_ENTITY_VIEW_DIR)
output = output.split('\n')
if len(output) < 2:
errprint('cannot get enough data from etcd')
return
for i in xrange(0, len(output), 2):
key = output[i].split('/')
if key[-2] != 'lsp' or key[-1] != lport:
continue
properties = output[i + 1].split(',')
for p in properties:
p = p.split('=')
pname = p[0]
if pname == 'chassis':
chassis_id = p[1]
break
logical_view = output
return chassis_id
def etcdctl_config_pkt_trace(lport, packet):
chassis_id = find_chassis_by_port(lport)
if chassis_id is None:
errprint("cannot found logical port %s pin on a chassis" % lport)
return
cmd_id = int(time.time() * 100) & 0xffff
key = chassis_id + '/{}'.format(cmd_id)  # assumption: the exact key suffix was redacted in the source; the command id is appended here
value = "cmd=pkt_trace,packet={},port={}".format(packet, lport)
etcdctl_lease(key, value, 10)
return cmd_id
def etcdctl_read_cmd_result(cmd_id):
output = etcdctl('get', '--prefix',
TUPLENET_DIR + 'communicate/cmd_result/{}/'.format(cmd_id))
output = output.split('\n')
if len(output) < 2:
errprint("cannot read any cmd result from etcd")
return []
trace_info = []
for i in xrange(0, len(output), 2):
key = output[i].split('/')
chassis_id = key[-1]
seq_n = int(key[-2])
value = output[i + 1]
trace_info.append((chassis_id, value, seq_n))
trace_info = sorted(trace_info, key = lambda t:(t[0], t[2]))
first_hop_path = []
other_hop_path = {}
for i in xrange(len(trace_info)):
chassis_id, trace_path, _ = trace_info[i]
table_id, datapath_id, src_port_id, dst_port_id, tun_src = parse_trace_path(trace_path)
# the first hop should get no tun_src
if tun_src != '0.0.0.0':
if not other_hop_path.has_key(chassis_id):
other_hop_path[chassis_id] = []
other_hop_path[chassis_id].append({"table_id":table_id,
"datapath_id":datapath_id,
"src_port_id":src_port_id,
"dst_port_id":dst_port_id,
"tun_src":tun_src,
"chassis_id":chassis_id})
continue
first_hop_path.append({"table_id":table_id,
"datapath_id":datapath_id,
"src_port_id":src_port_id,
"dst_port_id":dst_port_id,
"tun_src":tun_src,
"chassis_id":chassis_id})
first_hop_chassis = first_hop_path[0]["chassis_id"]
prev_hop_src_port_id = first_hop_path[-1]["src_port_id"]
prev_hop_tun_ip = get_ip_by_chassis(first_hop_chassis)
trace_path = split_hop_path(other_hop_path, prev_hop_tun_ip,
prev_hop_src_port_id)
trace_path = first_hop_path + trace_path
# TODO: replace the current datapath with the previous one; by the time a packet
# enters TABLE_LRP_TRACE_EGRESS_OUT its datapath id has already been changed to
# the next pipeline's datapath id
for i in xrange(len(trace_path)):
trace = trace_path[i]
if int(trace["table_id"]) == flow_common.TABLE_LRP_TRACE_EGRESS_OUT and i > 0:
prev_datapath = trace_path[i-1]["datapath_id"]
trace["datapath_id"] = prev_datapath
return trace_path
def find_datapath_by_id(datapath_id):
for i in xrange(0, len(logical_view), 2):
key = logical_view[i].split('/')
if key[-2] != 'LR' and key[-2] != 'LS':
continue
datapath_name = logical_view[i]
properties = logical_view[i + 1].split(',')
for p in properties:
p = p.split('=')
pname = p[0]
pval = p[1]
if pname == 'id' and pval == datapath_id:
return datapath_name
def find_port_by_id(datapath_name, port_id):
if port_id == '0':
return UNKNOW_SYMBOL
for i in xrange(0, len(logical_view), 2):
if not logical_view[i].startswith(datapath_name):
continue
if logical_view[i] == datapath_name:
# the LS/LR, not lsp,lrp
continue
port_name = logical_view[i].split('/')[-1]
properties = logical_view[i + 1].split(',')
for p in properties:
p = p.split('=')
pname = p[0]
pval = p[1]
if pname == 'ip':
ip_int = struct.unpack("!L", socket.inet_aton(pval))[0]
if str(ip_int & 0xffff) == port_id:
return port_name
def parse_trace_path(trace_path):
properties = trace_path.split(',')
for p in properties:
p = p.split('=')
pname = p[0]
pval = p[1]
if pname == 'table_id':
table_id = pval
continue
if pname == 'datapath_id':
datapath_id = pval
continue
if pname == 'src_port_id':
src_port_id = pval
continue
if pname == 'dst_port_id':
dst_port_id = pval
continue
if pname == 'tun_src':
ip_int = int(pval)
tun_src = socket.inet_ntoa(struct.pack('I',socket.htonl(ip_int)))
continue
return table_id, datapath_id, src_port_id, dst_port_id, tun_src
def run_pkt_trace(lport, packet):
cmd_id = etcdctl_config_pkt_trace(lport, packet)
try:
cmd_id = int(cmd_id)
except Exception as err:
errprint('config pkt trace cmd hit error')
return
time.sleep(5)
trace_path = etcdctl_read_cmd_result(cmd_id)
for trace in trace_path:
datapath_name = find_datapath_by_id(trace["datapath_id"])
src_port_name = find_port_by_id(datapath_name, trace["src_port_id"])
dst_port_name = find_port_by_id(datapath_name, trace["dst_port_id"])
entity_name = datapath_name.split('/')[-1]
entity_type = datapath_name.split('/')[-2]
stage = table_note_dict[int(trace["table_id"])]
trace = "type:{},pipeline:{},from:{},to:{},stage:{},chassis:{}".format(
entity_type, entity_name, src_port_name, dst_port_name,
stage, trace["chassis_id"])
print(trace)
def cal_checksum(header):
header = struct.unpack("!10H", header)
sum_num = 0
reverse_str = ""
for h in header:
sum_num += h
if sum_num > 0xffff:
sum_num &= 0xffff
sum_num += 1
sum_num = "{:0>16b}".format(sum_num)
for i in xrange(16):
if sum_num[i] == "0":
reverse_str += "1"
else:
reverse_str += "0"
reverse = int(reverse_str, 2)
header = struct.pack("!H", reverse)
return header
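# cal_checksum() above computes the standard ones'-complement IPv4 header
# checksum through binary-string manipulation. An equivalent, more direct form
# is sketched below for comparison only (it assumes the same 20-byte header
# input and is not used by the rest of this script):
def cal_checksum_compact(header):
    words = struct.unpack("!10H", header)
    total = sum(words)
    while total >> 16:  # fold carries back into 16 bits
        total = (total & 0xffff) + (total >> 16)
    return struct.pack("!H", ~total & 0xffff)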
def construct_icmp(src_mac, dst_mac, src_ip, dst_ip):
src_mac = src_mac.split(":")
dst_mac = dst_mac.split(":")
for i in xrange(6):
src_mac[i] = int(src_mac[i], 16)
dst_mac[i] = int(dst_mac[i], 16)
src_ip = struct.unpack("!L", socket.inet_aton(src_ip))[0]
dst_ip = struct.unpack("!L", socket.inet_aton(dst_ip))[0]
src_mac = struct.pack("6B", src_mac[0], src_mac[1], src_mac[2],
src_mac[3], src_mac[4], src_mac[5])
dst_mac = struct.pack("6B", dst_mac[0], dst_mac[1], dst_mac[2],
dst_mac[3], dst_mac[4], dst_mac[5])
l2_proto = struct.pack("!H", 0x0800)
eth_header = dst_mac + src_mac + l2_proto
l3_head = struct.pack("8B", 0x45, 0x00, 0x00, 0x54,
0x00, 0x00, 0x40, 0x00)
ttl = struct.pack("B", 9)
protocol = struct.pack("B", 1)
ip_checksum = struct.pack("BB", 0, 0)
src_ip = struct.pack("!L", src_ip)
dst_ip = struct.pack("!L", dst_ip)
ip_checksum = cal_checksum(l3_head + ttl + protocol +
ip_checksum + src_ip + dst_ip)
ip_header = l3_head + ttl + protocol + ip_checksum + src_ip + dst_ip
icmp_type = struct.pack("!H", 0x0800)
icmp_chksum = struct.pack("!H", 0x8510)
icmp_id = struct.pack("!H", 0x5fbf)
icmp_seq = struct.pack("!H", 0x0001)
icmp_data = struct.pack("B", 1)
for i in range(2, 57):
icmp_data += struct.pack("B", i)
icmp_payload = icmp_type + icmp_chksum + icmp_id + icmp_seq + icmp_data
icmp_packet = eth_header + ip_header + icmp_payload
icmp = struct.unpack("98B", icmp_packet)
icmp_str = ""
for i in icmp:
icmp_str += "{:02x}".format(i)
return icmp_str
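# Typical use of the helpers above (the MAC/IP/port values here are made up;
# real ones come from the command line options parsed below):
#   packet = construct_icmp("f2:01:00:00:00:01", "f2:01:00:00:00:02",
#                           "10.0.0.1", "10.0.0.2")
#   run_pkt_trace("lsp-vm1", packet)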
if __name__ == "__main__":
usage = """usage: python %prog [options]
-j, --port inject src port
-p, --prefix prefix path in etcd
--src_mac source macaddress of packet
--dst_mac destination macaddress of packet
--src_ip source ip address of packet
--dst_ip destination ip address of packet
-d, --header packet header and payload"""
parser = OptionParser(usage)
parser.add_option("-j", "--port", dest = "inject_port",
action = "store", type = "string",
default = "",
help = "which port you want inject packet in")
parser.add_option("-p", "--prefix", dest = "path_prefix",
action = "store", type = "string",
default = "/tuplenet/", help = "etcd tuplenet prefix path")
parser.add_option("--src_mac", dest = "src_mac",
action = "store", type = "string",
default = "", help = "source macaddress of packet")
parser.add_option("--dst_mac", dest = "dst_mac",
action = "store", type = "string",
default = "", help = "destination macaddress of packet")
parser.add_option("--src_ip", dest = "src_ip",
action = "store", type = "string",
default = "", help = "source ip address of packet")
parser.add_option("--dst_ip", dest = "dst_ip",
action = "store", type = "string",
default = "", help = "destination ip address of packet")
parser.add_option("-d", "--header", dest = "packet",
action = "store", type = "string",
default = "",
help = "packet header and payload, it should be hex")
parser.add_option("--endpoints", dest = "endpoints",
action = "store", type = "string",
default = "localhost:2379",
help = "etcd endpoints, default is localhost:2379")
<gh_stars>0
import os
import sys
import time
import glob
import numpy as np
import torch
import util
import logging
import argparse
import torch.nn as nn
import torch.utils
import utils
from matplotlib import pyplot as plt
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from models.model import Informer
from util.metrics import metric
from torch.utils.data import DataLoader
from architect1 import Architect
from data.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_Pred
from util.tools import EarlyStopping, adjust_learning_rate
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--name', required=True)
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=20, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=12, help='num of training epochs')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--unrolled', action='store_true', default=True, help='use one-step unrolled validation loss')
parser.add_argument('--arch_learning_rate', type=float, default=0.1, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=0, help='weight decay for arch encoding')
parser.add_argument('--lambda_par', type=float, default=1.0, help='unlabeled ratio')
parser.add_argument('--data', type=str, required=True, default='ETTh1', help='data')
parser.add_argument('--root_path', type=str, default='/home/LAB/gaoch/asdf/data/ETDataset/ETT-small/', help='root path of the data file')
parser.add_argument('--data_path', type=str, default='ETTh1.csv', help='data file')
parser.add_argument('--features', type=str, default='M',
help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')
parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')
parser.add_argument('--freq', type=str, default='h',
help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')
parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')
parser.add_argument('--seq_len', type=int, default=96, help='input sequence length of Informer encoder')
parser.add_argument('--label_len', type=int, default=48, help='start token length of Informer decoder')
parser.add_argument('--pred_len', type=int, default=24, help='prediction sequence length')
# Informer decoder input: concat[start token series(label_len), zero padding series(pred_len)]
parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')
parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')
parser.add_argument('--c_out', type=int, default=7, help='output size')
parser.add_argument('--d_model', type=int, default=512, help='dimension of model')
parser.add_argument('--n_heads', type=int, default=8, help='num of heads')
parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')
parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')
parser.add_argument('--s_layers', type=str, default='3,2,1', help='num of stack encoder layers')
parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')
parser.add_argument('--factor', type=int, default=5, help='probsparse attn factor')
parser.add_argument('--padding', type=int, default=0, help='padding type')
parser.add_argument('--distil', action='store_false',
help='whether to use distilling in encoder, using this argument means not using distilling',
default=True)
parser.add_argument('--dropout', type=float, default=0.05, help='dropout')
parser.add_argument('--attn', type=str, default='prob', help='attention used in encoder, options:[prob, full]')
parser.add_argument('--embed', type=str, default='timeF',
help='time features encoding, options:[timeF, fixed, learned]')
parser.add_argument('--activation', type=str, default='gelu', help='activation')
parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')
parser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')
parser.add_argument('--mix', action='store_false', help='use mix attention in generative decoder', default=True)
parser.add_argument('--cols', type=str, nargs='+', help='file list')
parser.add_argument('--num_workers', type=int, default=0, help='data loader num workers')
parser.add_argument('--itr', type=int, default=8, help='experiments times')
parser.add_argument('--train_epochs', type=int, default=6, help='train epochs')
parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')
parser.add_argument('--patience', type=int, default=3, help='early stopping patience')
parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')
parser.add_argument('--des', type=str, default='test', help='exp description')
parser.add_argument('--loss', type=str, default='mse', help='loss function')
parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')
parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training',
default=False)
parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)
parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')
parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')
# other settings
args = parser.parse_args()
args.path = os.path.join('run/search/', os.environ["SLURM_JOBID"])
args.save = args.path
try:
os.makedirs(args.path)
except FileExistsError:
pass
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
def main():
np.random.seed(args.seed)
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled=True
torch.cuda.manual_seed(args.seed)
logging.info('gpu device = %d' % args.gpu)
logging.info("args = %s", args)
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu) if not args.use_multi_gpu else args.devices
device = torch.device('cuda:{}'.format(args.gpu))
data_parser = {
'ETTh1': {'data': 'ETTh1.csv', 'T': 'OT', 'M': [7, 7, 7], 'S': [1, 1, 1], 'MS': [7, 7, 1]},
'ETTh2': {'data': 'ETTh2.csv', 'T': 'OT', 'M': [7, 7, 7], 'S': [1, 1, 1], 'MS': [7, 7, 1]},
'ETTm1': {'data': 'ETTm1.csv', 'T': 'OT', 'M': [7, 7, 7], 'S': [1, 1, 1], 'MS': [7, 7, 1]},
'ETTm2': {'data': 'ETTm2.csv', 'T': 'OT', 'M': [7, 7, 7], 'S': [1, 1, 1], 'MS': [7, 7, 1]},
'WTH': {'data': 'WTH.csv', 'T': 'WetBulbCelsius', 'M': [12, 12, 12], 'S': [1, 1, 1], 'MS': [12, 12, 1]},
'ECL': {'data': 'ECL.csv', 'T': 'MT_320', 'M': [321, 321, 321], 'S': [1, 1, 1], 'MS': [321, 321, 1]},
'Solar': {'data': 'solar_AL.csv', 'T': 'POWER_136', 'M': [137, 137, 137], 'S': [1, 1, 1], 'MS': [137, 137, 1]},
}
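# Each data_parser entry maps the --features mode to (enc_in, dec_in, c_out):
# 'M' = multivariate in, multivariate out; 'S' = univariate in, univariate out;
# 'MS' = multivariate in, univariate out. 'T' names the target column.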
if args.data in data_parser.keys():
data_info = data_parser[args.data]
args.data_path = data_info['data']
args.target = data_info['T']
args.enc_in, args.dec_in, args.c_out = data_info[args.features]
args.s_layers = [int(s_l) for s_l in args.s_layers.replace(' ', '').split(',')]
args.detail_freq = args.freq
args.freq = args.freq[-1:]
mses, maes = [[],[],[]], [[],[],[]]
for i in range(args.itr):
teacher = Informer(args.enc_in, args.dec_in, args.c_out, args.seq_len, args.label_len, args.pred_len, args.factor,
args.d_model, args.n_heads, args.e_layers, args.d_layers, args.d_ff, args.dropout, args.attn,
args.embed, args.freq, args.activation, args.output_attention, args.distil, args.mix,
device, args).float().cuda()
assistant = Informer(args.enc_in, args.dec_in, args.c_out, args.seq_len, args.label_len, args.pred_len, args.factor,
args.d_model, args.n_heads, args.e_layers, args.d_layers, args.d_ff, args.dropout, args.attn,
args.embed, args.freq, args.activation, args.output_attention, args.distil, args.mix,
device, args).float().cuda()
student = Informer(args.enc_in, args.dec_in, args.c_out, args.seq_len, args.label_len, args.pred_len, args.factor,
args.d_model, args.n_heads, args.e_layers, args.d_layers, args.d_ff, args.dropout, args.attn,
args.embed, args.freq, args.activation, args.output_attention, args.distil, args.mix,
device, args).float().cuda()
criterion_t = nn.MSELoss().cuda()
criterion_a = nn.MSELoss().cuda()
criterion_s = nn.MSELoss().cuda()
cus_loss = nn.MSELoss().cuda()
optimizer_t = torch.optim.Adam(teacher.W(), 0.00005, weight_decay=1e-2)
optimizer_a = torch.optim.Adam(assistant.W(), 0.00005, weight_decay=1e-2)
optimizer_s = torch.optim.Adam(student.W(), 0.00005, weight_decay=1e-2)
trn_data, trn_loader = _get_data(flag='train')
val_data, val_loader = _get_data(flag='val')
unl_data, unl_loader = _get_data(flag='train')
test_data, test_loader = _get_data(flag='test')
architect = Architect(teacher, assistant, student, args, device)
early_stopping = [EarlyStopping(patience=args.patience, verbose=True, tag=i) for i in range(3)]
STAT_arch = []
STAT_arch_grad = []
STAT_arch_std = []
for epoch in range(args.epochs):
logging.info('epoch %d', epoch)
# training
train(trn_loader, val_loader, unl_loader, test_loader, teacher, assistant, student, architect,
criterion_t, criterion_a, criterion_s, cus_loss,
optimizer_t, optimizer_a, optimizer_s, args.learning_rate, epoch, early_stopping, i, STAT_arch, STAT_arch_grad, STAT_arch_std) # todo: learning_rate ->lr
# validation
test(teacher, 'teacher:')
test(assistant, 'assistant:')
test(student, 'student:')
# adjust_learning_rate(optimizer_t, epoch + 1, args)
# adjust_learning_rate(optimizer_a, epoch + 1, args)
# adjust_learning_rate(optimizer_s, epoch + 1, args)
if early_stopping[0].early_stop and early_stopping[1].early_stop and early_stopping[2].early_stop:
print("EARLY_stopping")
break
# plt.figure()
# plt.subplot(211)
# plt.plot(STAT_arch)
# plt.title('arch_parameters mean')
# plt.xlabel('step')
# plt.subplot(212)
# plt.plot(STAT_arch_std)
# plt.title('arch_parameters std')
# plt.xlabel('step')
# plt.savefig(args.path + '/' + 'arch{}.jpg'.format(i))
#
# np.save(args.path + '/' + 'arch{}.npy'.format(i), teacher.architect_param123.detach().squeeze().cpu().numpy())
# np.save(args.path + '/' + 'arch_mean{}.npy'.format(i), torch.tensor(STAT_arch_grad).cpu().numpy())
best_teacher_path = args.path + '/checkpoint0.pth'
best_assistant_path = args.path + '/checkpoint1.pth'
best_student_path = args.path + '/checkpoint2.pth'
teacher.load_state_dict(torch.load(best_teacher_path))
assistant.load_state_dict(torch.load(best_assistant_path))
student.load_state_dict(torch.load(best_student_path))
mse_t, mae_t = test(teacher)
mse_a, mae_a = test(assistant)
mse_s, mae_s = test(student)
mses[0].append(mse_t)
maes[0].append(mae_t)
mses[1].append(mse_a)
maes[1].append(mae_a)
mses[2].append(mse_s)
maes[2].append(mae_s)
# args.lambda_par += 0.1
logging.info('MSE Final {} MAE Final {}'.format(torch.tensor(mses).mean(dim=-1), torch.tensor(maes).mean(dim=-1)))
def train(trn_loader, val_loader, unl_loader, test_loader, teacher, assistant, student, architect,
criterion_t, criterion_a, criterion_s, cus_loss, optimizer_t, optimizer_a, optimizer_s, lr, epoch, early_stopping, i,
STAT_arch, STAT_arch_grad, STAT_arch_std):
loss_counter = utils.AvgrageMeter()
data_count = 0
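# One step of the loop below: the architect updates the architecture/importance
# parameters from train, val and unlabeled batches; the teacher is trained on
# labeled data; the assistant fits the teacher's predictions on unlabeled data
# plus the labeled loss; the student fits the assistant's predictions the same
# way. Both distillation terms are scaled by --lambda_par.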
for step, trn_data in enumerate(trn_loader):
teacher.train()
# get a random minibatch from the search queue with replacement
try:
val_data = next(val_iter)
except:
val_iter = iter(val_loader)
val_data = next(val_iter)
# get a random minibatch from the unlabeled queue with replacement
try:
unl_data = next(unl_iter)
except:
unl_iter = iter(unl_loader)
unl_data = next(unl_iter)
implicit_grads = architect.step_all3(trn_data, val_data, unl_data, lr, optimizer_t, optimizer_a, optimizer_s, args.unrolled, data_count, step%40==0)
# STAT_arch_grad.append(implicit_grads[0].mean().item())
STAT_arch.append(teacher.architect_param123.mean().item())
STAT_arch_std.append(teacher.architect_param123.std().item())
optimizer_t.zero_grad()
logit_t, true = _process_one_batch(trn_data, teacher)
# loss_t = critere(criterion_t, teacher, logit_t, true, data_count)
loss_t = criterion_t(logit_t, true)
loss_t.backward()
optimizer_t.step()
##########################################################################################################
optimizer_a.zero_grad()
teacher.eval()
logit_t, _ = _process_one_batch(unl_data, teacher)
teacher.train()
logit_a, _ = _process_one_batch(unl_data, assistant)
loss_a1 = cus_loss(logit_a, logit_t)
logit_a, true = _process_one_batch(trn_data, assistant)
loss_a2 = criterion_a(logit_a, true)
loss_a = args.lambda_par * loss_a1 + loss_a2
loss_a.backward()
optimizer_a.step()
##########################################################################################################
optimizer_s.zero_grad()
assistant.eval()
logit_a, true = _process_one_batch(unl_data, assistant)
assistant.train()
logit_s, true = _process_one_batch(unl_data, student)
loss_s1 = cus_loss(logit_s, logit_a.detach())
logit_s, true = _process_one_batch(trn_data, student)
loss_s2 = criterion_s(logit_s, true)
loss_s = args.lambda_par * loss_s1 + loss_s2
loss_s.backward()
optimizer_s.step()
##########################################################################################################
if step % args.report_freq == 0:
logging.info("\tstep: {}, epoch: {} | loss: {:.7f}".format(step, epoch, loss_t.item()))
loss_counter.update(loss_t.item())
data_count += args.batch_size
vali_loss_t = vali(val_loader, criterion_t, teacher)
vali_loss_a = vali(val_loader, criterion_a, assistant)
vali_loss_s = vali(val_loader, criterion_s, student)
test_loss = vali(test_loader, criterion_t, teacher)
logging.info("Epoch: {} | Train Loss: {:.7f} Vali Loss: {:.7f} Test Loss: {:.7f} Assis_val Loss: {:.7f} Stud_val Loss: {:.7f}".format(
epoch, loss_counter.avg, vali_loss_t, test_loss, vali_loss_a, vali_loss_s))
early_stopping[0](vali_loss_t, teacher, args.path)
early_stopping[1](vali_loss_a, assistant, args.path)
early_stopping[2](vali_loss_s, student, args.path)
def test(teacher, message=''):
test_data, test_loader = _get_data(flag='test')
teacher.eval()
preds = []
trues = []
for i, test_d in enumerate(test_loader):
pred, true = _process_one_batch(test_d, teacher)
preds.append(pred.detach().cpu().numpy())
trues.append(true.detach().cpu().numpy())
preds = np.array(preds)
trues = np.array(trues)
preds = preds.reshape((-1, preds.shape[-2], preds.shape[-1]))
trues = trues.reshape((-1, trues.shape[-2], trues.shape[-1]))
# result save
folder_path = './results/'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
mae, mse, rmse, mape, mspe = metric(preds, trues)
logging.info(message + 'mse:{}, mae:{}'.format(mse, mae))
np.save(folder_path + 'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))
np.save(folder_path + 'pred.npy', preds)
np.save(folder_path + 'true.npy', trues)
return mse, mae
def | |
# =================================================================
# Copyright (C) 2021-2021 52°North Spatial Information Research GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Based on pygeoapi's RasterioProvider
# https://github.com/geopython/pygeoapi/blob/master/pygeoapi/provider/rasterio_.py
#
# =================================================================
import logging
import json
# ToDo move to OdcConnector somehow
from datacube.utils.geometry import CRS as CRS_DATACUBE, BoundingBox
from pygeoapi.provider.base import (BaseProvider,
ProviderConnectionError,
ProviderGenericError,
ProviderQueryError,
ProviderInvalidQueryError,
ProviderNoDataError)
from pyproj import CRS, Transformer
from rasterio import Affine
from rasterio.io import MemoryFile
from .connector import OdcConnector
from .utils import meter2degree
import numpy as np
LOGGER = logging.getLogger(__name__)
CAST_MAP = {
'uint8': 'int16',
'uint16': 'int32',
'uint32': 'int64'
}
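# CAST_MAP exists because the scipy-based netCDF writer used in query() cannot
# serialize unsigned integer types, so unsigned arrays are widened to the next
# larger signed type before writing. Note that uint64 has no entry and would
# still fail.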
TYPE_URI_MAP = {
'int8': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/signedByte',
'int16': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/signedShort',
'int32': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/signedInt',
'int64': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/signedLong',
'uint8': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/unsignedByte',
'uint16': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/unsignedShort',
'uint32': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/unsignedInt',
'uint64': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/unsignedLong',
'float16': 'http://defs.opengis.net/vocprez/object?uri=http://www.opengis.net/def/dataType/OGC/0/float16',
'float32': 'http://defs.opengis.net/vocprez/object?uri=http://www.opengis.net/def/dataType/OGC/0/float32',
'float64': 'http://defs.opengis.net/vocprez/object?uri=http://www.opengis.net/def/dataType/OGC/0/float64',
'float128': 'http://defs.opengis.net/vocprez/object?uri=http://www.opengis.net/def/dataType/OGC/0/float128',
'double': 'http://defs.opengis.net/vocprez/object?uri=http%3A//www.opengis.net/def/dataType/OGC/0/double'
}
class OpenDataCubeCoveragesProvider(BaseProvider):
"""OpenDataCube Provider
This provider plugin maps an OGC collection to an ODC product
"""
def __init__(self, provider_def):
"""
Initialize object
:param provider_def: provider definition
:returns: pygeoapi.provider.rasterio_.RasterioProvider
"""
super().__init__(provider_def)
self.dc = OdcConnector()
if self.data not in self.dc.list_product_names():
raise ProviderGenericError("Configured product '{}' is not contained in OpenDataCube instance"
.format(self.data))
LOGGER.info('Start initializing product {}'.format(self.data))
try:
# datacube.utils.geometry.CRS
self.crs_obj = None
self.native_format = provider_def['format']['name']
self._coverage_properties = self._get_coverage_properties(self._get_bbox())
self._measurement_properties = self._get_measurement_properties()
# axes, crs and num_bands is need for coverage providers
# (see https://github.com/geopython/pygeoapi/blob/master/pygeoapi/provider/base.py#L65)
self.axes = self._coverage_properties['axes']
self.crs = self._coverage_properties['crs_uri']
self.num_bands = self._coverage_properties['num_bands']
self.fields = [field['name'] for field in self._measurement_properties]
LOGGER.info('Finished initializing product {}'.format(self.data))
except Exception as err:
LOGGER.warning(err)
raise ProviderConnectionError(err)
def query(self, range_subset=[], subsets={}, bbox=[], datetime_=None,
format_='json', **kwargs):
"""
Extract data from collection
:param range_subset: list of bands
:param subsets: dict of subset names with lists of ranges
:param bbox: bounding box [minx,miny,maxx,maxy]
:param datetime_: temporal (datestamp or extent)
:param format_: data format of output
:returns: coverage data as dict of CoverageJSON or native format
"""
# ---------------- #
# Query parameters (https://ogcapi.ogc.org/coverages/overview.html)
# url: {datasetAPI}/collections/{coverageid}/coverage
# Subset with well-defined ranges for named axes
# ?subset=Lat(40:50),Lon(10: 20)
# ?subset=time("2019-03-27")
# Band subset
# ?rangeSubset=B02,B03,B04
# Bbox (in WGS84 or WGS84h)
# ?bbox=10,40,20,50
# Scaling
# ?scaleSize=Lon(800),Lat(400)
# ?scaleFactor=2
# ?scaleAxes=Lon(2)
# ---------------- #
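# Illustrative requests against this provider (the collection id is whatever
# the pygeoapi configuration maps to this ODC product; parameter values are
# examples only):
#   .../collections/<collection>/coverage?bbox=10,40,20,50&f=json
#   .../collections/<collection>/coverage?subset=Lat(40:50),Lon(10:20)&rangeSubset=B02,B03&f=GeoTIFF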
bands = range_subset
LOGGER.info('Bands: {}, subsets: {}, bbox: {}'.format(bands, subsets, bbox))
# initial bbox, full extent of collection
minx, miny, maxx, maxy = self._coverage_properties['bbox']
if all([not bands, not subsets, not bbox]):
LOGGER.info('No parameters specified')
if all([self._coverage_properties['x_axis_label'] not in subsets,
self._coverage_properties['y_axis_label'] not in subsets,
not bbox]):
msg = 'spatial subsetting via bbox parameter or subset is mandatory'
LOGGER.warning(msg)
raise ProviderInvalidQueryError(msg)
if all([self._coverage_properties['x_axis_label'] in subsets,
self._coverage_properties['y_axis_label'] in subsets,
len(bbox) > 0]):
msg = 'bbox and subsetting by coordinates are exclusive'
LOGGER.warning(msg)
raise ProviderInvalidQueryError(msg)
# -------------- #
# Spatial subset #
# -------------- #
if len(bbox) > 0:
# fixed by specification
crs_src = CRS.from_epsg(4326)
crs_dest = CRS.from_epsg(self.crs_obj.to_epsg())
LOGGER.debug('Source EPSG: {}'.format(crs_src.to_epsg()))
LOGGER.debug('Target EPSG: {}'.format(crs_dest.to_epsg()))
if crs_src == crs_dest:
LOGGER.info('source bbox CRS and data CRS are the same')
minx, miny, maxx, maxy = bbox
else:
LOGGER.info('source bbox CRS and data CRS are different')
LOGGER.info('reprojecting bbox into native coordinates')
minxbox, minybox, maxxbox, maxybox = bbox
t = Transformer.from_crs(crs_src, crs_dest, always_xy=True)
minx, miny = t.transform(minxbox, minybox)
maxx, maxy = t.transform(maxxbox, maxybox)
LOGGER.info('Source coordinates in {}: {}'.format(
crs_src.to_epsg(),
[minxbox, minybox, maxxbox, maxybox]))
LOGGER.info('Destination coordinates in {}: {}'.format(
crs_dest.to_epsg(),
[minx, miny, maxx, maxy]))
elif (self._coverage_properties['x_axis_label'] in subsets and
self._coverage_properties['y_axis_label'] in subsets):
LOGGER.info('Creating spatial subset')
x = self._coverage_properties['x_axis_label']
y = self._coverage_properties['y_axis_label']
minx = subsets[x][0]
maxx = subsets[x][1]
miny = subsets[y][0]
maxy = subsets[y][1]
# ToDo consider resolution in next development iteration
if minx > maxx or miny > maxy:
msg = 'spatial subsetting invalid min > max'
LOGGER.warning(msg)
raise ProviderInvalidQueryError(msg)
if self.data != 'landsat8_c2_l2':
if self.crs_obj.projected:
max_allowed_delta = 7500
else:
max_allowed_delta = 0.125
if maxx - minx > max_allowed_delta:
msg = 'spatial subsetting too large {}. please request max {}'.format(maxx - minx, max_allowed_delta)
LOGGER.warning(msg)
raise ProviderInvalidQueryError(msg)
if maxy - miny > max_allowed_delta:
msg = 'spatial subsetting too large {}. please request max {}'.format(maxy - miny, max_allowed_delta)
LOGGER.warning(msg)
raise ProviderInvalidQueryError(msg)
# ---------------------- #
# Load data via datacube #
# ---------------------- #
# Note:
# - resolution and align expect the following coordinate order: (y, x)
# - datacube.Datacube.load accepts all of the following parameters for spatial subsets independent of the crs:
# 'latitude' or 'lat' or 'y' / 'longitude' or 'lon' or 'long' or 'x'
# - See for details on parameters and load() method:
# https://datacube-core.readthedocs.io/en/latest/dev/api/generate/datacube.Datacube.load.html#datacube-datacube-load
params = {
'crs': 'epsg:{}'.format(self.crs_obj.to_epsg()),
'x': (minx, maxx),
'y': (miny, maxy),
"align": (abs(self._coverage_properties['resy'] / 2),
abs(self._coverage_properties['resx'] / 2)),
'resolution': (self._coverage_properties['resy'], self._coverage_properties['resx']),
'output_crs': 'epsg:{}'.format(self.crs_obj.to_epsg()),
# 'resampling': 'nearest' # nearest is the default value
}
if len(bands) > 0:
params['measurements'] = bands
# ToDo: enable output in different crs? Does API Coverages support this?
# ToDo: check if re-projection is necessary
LOGGER.debug('RAW params for dc.load:\n{}'.format(json.dumps(params, indent=4)))
LOGGER.debug('self.data: "{}"'.format(self.data))
LOGGER.debug('Load data from ODC...')
dataset = self.dc.load(product=self.data, **params)
if len(list(dataset.keys())) == 0:
LOGGER.debug('...request resulted in empty dataset')
raise ProviderNoDataError('An empty dataset was returned. Please check your request.')
else:
LOGGER.debug('...received data')
# Use 'dataset.time.attrs.pop('units', None)' to prevent the following error:
# "ValueError: failed to prevent overwriting existing key units in attrs on variable 'time'.
# This is probably an encoding field used by xarray to describe how a variable is serialized.
# To proceed, remove this key from the variable's attributes manually."
# Check for existence to "prevent AttributeError: 'Dataset' object has no attribute 'time'"
if hasattr(dataset, 'time') and dataset.time is not None and hasattr(dataset.time, 'attrs') and \
dataset.time.attrs is not None:
dataset.time.attrs.pop('units', None)
# ----------------- #
# Return data #
# ----------------- #
if len(bands) == 0:
# if no bands are specified in the request ODC loads all bands by default
bands = list(dataset.keys())
out_meta = {
'bbox': [minx, miny, maxx, maxy],
'width': abs((maxx - minx) / self._coverage_properties['resx']),
'height': abs((maxy - miny) / self._coverage_properties['resy']),
'bands': bands
}
if self.options is not None:
LOGGER.info('Adding dataset options')
for key, value in self.options.items():
out_meta[key] = value
LOGGER.debug('Processed dataset')
if format_ == 'json':
LOGGER.info('Creating output in CoverageJSON')
return self.gen_covjson(out_meta, dataset)
elif format_.lower() == 'geotiff':
LOGGER.info('Returning data as GeoTIFF')
# ToDo: check if there is more than one time slice
out_meta['driver'] = 'GTiff'
out_meta['crs'] = self.crs_obj.to_epsg()
out_meta['dtype'] = self._measurement_properties[0]['dtype']
out_meta['nodata'] = self._measurement_properties[0]['nodata']
out_meta['count'] = len(bands)
out_meta['transform'] = Affine(self._coverage_properties['resx'],
0.0,
minx,
0.0,
self._coverage_properties['resy'],
maxy)
LOGGER.debug("out_meta:\n{}".format(json.dumps(out_meta, indent=4)))
LOGGER.debug('Writing to in-memory file')
with MemoryFile() as memfile:
with memfile.open(**out_meta) as dest:
# input is expected as (bands, rows, cols)
dest.write(np.stack(
[dataset.squeeze(dim='time', drop=True)[band].values for band in bands],
axis=0)
)
LOGGER.debug('Finished writing to in-memory file')
return memfile.read()
else:
LOGGER.info('Returning data as netCDF')
# ToDo: what if different measurements have different dtypes?
for data_var in dataset.data_vars:
dtype = dataset[data_var].dtype.name
break
# scipy cannot save arrays with unsigned type to netCDF
if dtype.startswith('u'):
dataset = dataset.astype(CAST_MAP[dtype], copy=False)
# Note: "If no path is provided, this function returns the resulting netCDF file as bytes; in this case,
# we need to use scipy, which does not support netCDF version 4 (the default format becomes NETCDF3_64BIT)."
# (http://xarray.pydata.org/en/stable/generated/xarray.Dataset.to_netcdf.html)
# ToDo: implement netCDF version 4 option using in-memory file with lib netCDF4
# (http://unidata.github.io/netcdf4-python/#in-memory-diskless-datasets)
# see also https://stackoverflow.com/questions/46433812/simple-conversion-of-netcdf4-dataset-to-xarray-dataset
return dataset.to_netcdf()
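    # Note on CAST_MAP (defined elsewhere in this module): as an assumption,
    # it maps unsigned numpy dtype names to signed ones that are wide enough,
    # for example {'uint8': 'int16', 'uint16': 'int32', 'uint32': 'int64'},
    # so that scipy's NETCDF3 writer, which cannot store unsigned integers,
    # only ever sees signed data.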
def gen_covjson(self, metadata, dataset):
"""
Generate coverage as CoverageJSON representation
:param metadata: coverage metadata
:param dataset: xarray Dataset object
:returns: dict of CoverageJSON representation
"""
# ToDo: support time dimension
LOGGER.info('Creating CoverageJSON domain')
minx, miny, maxx, maxy = metadata['bbox']
cj = {
'type': 'Coverage',
'domain': {
'type': 'Domain',
'domainType': 'Grid',
'axes': {
'x': {
'start': minx,
'stop': maxx,
'num': metadata['width']
},
'y': {
'start': miny,
'stop': maxy,
'num': metadata['height']
}
},
'referencing': [{
'coordinates': ['x', 'y'],
'system': {
'type': self._coverage_properties['crs_type'],
'id': self._coverage_properties['crs_uri']
}
}]
},
'parameters': {},
'ranges': {}
# Source repository: functor/operator
# File: plsync/mlabconfig_test.py
"""Tests for mlabconfig."""
import contextlib
import logging
import mlabconfig
import mock
import optparse
import os
from planetlab import model
import StringIO
import textwrap
import time
import unittest
@contextlib.contextmanager
def OpenStringIO(sio):
"""Creates a StringIO object that is context aware.
OpenStringIO is useful for testing functions that open and write to a file.
Example:
@mock.patch('__builtin__.open')
def test_some_function(self, mock_open):
output = StringIO.StringIO()
mock_open.return_value = OpenStringIO(output)
some_function()
self.assertEqual(output.getvalue(), 'Expected content')
Args:
sio: StringIO.StringIO, the instance returned by 'open'.
"""
try:
yield sio
finally:
# Do not close the StringIO object, so testers can access getvalue().
pass
class BracketTemplateTest(unittest.TestCase):
def setUp(self):
self.vars = {'var1': 'Spot', 'var2': 'Dog'}
def test_substitute_when_template_is_correct(self):
tmpl = mlabconfig.BracketTemplate('{{var1}} is a {{var2}}')
actual = tmpl.safe_substitute(self.vars)
self.assertEqual(actual, 'Spot is a Dog')
def test_substitute_when_template_is_broken(self):
tmpl = mlabconfig.BracketTemplate('var1}} is a {{var2')
actual = tmpl.safe_substitute(self.vars)
self.assertEqual(actual, 'var1}} is a {{var2')
def test_substitute_when_template_is_shell(self):
tmpl1 = mlabconfig.BracketTemplate('$var1 == {{var1}}')
tmpl2 = mlabconfig.BracketTemplate('${var2} == {{var2}}')
actual1 = tmpl1.safe_substitute(self.vars)
actual2 = tmpl2.safe_substitute(self.vars)
self.assertEqual(actual1, '$var1 == Spot')
self.assertEqual(actual2, '${var2} == Dog')
def test_substitute_without_value_returns_unchanged_template(self):
tmpl = mlabconfig.BracketTemplate('{{evaluated}} {{unevaluated}}')
actual = tmpl.safe_substitute({'evaluated': 'okay'})
self.assertEqual(actual, 'okay {{unevaluated}}')
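# The tests above pin down the behaviour expected of mlabconfig.BracketTemplate:
# '{{name}}' placeholders are substituted when a value is known, unknown
# placeholders and broken bracket pairs are left untouched, and shell-style
# '$var' syntax is ignored. A minimal sketch of a class with that behaviour
# (an illustration only, not the actual mlabconfig implementation) could be:
import re
class _ExampleBracketTemplate(object):
    _PLACEHOLDER = re.compile(r'{{(\w+)}}')
    def __init__(self, template):
        self.template = template
    def safe_substitute(self, values):
        def _replace(match):
            key = match.group(1)
            return str(values[key]) if key in values else match.group(0)
        return self._PLACEHOLDER.sub(_replace, self.template)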
class MlabconfigTest(unittest.TestCase):
def setUp(self):
self.users = [('User', 'Name', '<EMAIL>')]
self.sites = [model.makesite('abc01',
'192.168.1.0',
'2400:1002:4008::',
'Some City',
'US',
36.850000,
74.783000,
self.users,
nodegroup='MeasurementLabCentos')]
self.attrs = [model.Attr('MeasurementLabCentos', disk_max='60000000')]
# Turn off logging output during testing (unless CRITICAL).
logging.disable(logging.ERROR)
def assertContainsItems(self, results, expected_items):
"""Asserts that every element of expected is present in results."""
for expected in expected_items:
self.assertIn(expected, results)
def assertDoesNotContainsItems(self, results, unexpected_items):
"""Asserts that every element of unexpected is NOT in results."""
for unexpected in unexpected_items:
self.assertNotIn(unexpected, results)
def test_export_mlab_host_ips(self):
# Setup synthetic user, site, and experiment configuration data.
experiments = [model.Slice(name='abc_bar',
index=1,
attrs=self.attrs,
users=self.users,
use_initscript=True,
ipv6='all')]
# Assign experiments to nodes.
for node in self.sites[0]['nodes'].values():
experiments[0].add_node_address(node)
expected_results = [
{'hostname': 'mlab1.abc01.measurement-lab.org', 'ipv4': '192.168.1.9', 'ipv6': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b'},
{'hostname': 'mlab2.abc01.measurement-lab.org', 'ipv4': '192.168.1.22', 'ipv6': 'fc00:db20:35b:7399::5'},
{'hostname': 'mlab3.abc01.measurement-lab.org', 'ipv4': '192.168.1.35', 'ipv6': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'},
{'hostname': 'bar.abc.mlab1.abc01.measurement-lab.org', 'ipv4': '192.168.1.11', 'ipv6': 'fdf8:f53e:61e4::18'},
{'hostname': 'bar.abc.mlab2.abc01.measurement-lab.org', 'ipv4': '192.168.1.24', 'ipv6': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'},
{'hostname': 'bar.abc.mlab3.abc01.measurement-lab.org', 'ipv4': '192.168.1.37', 'ipv6': 'fc00:db20:35b:7399::5'},
]
results = mlabconfig.export_mlab_host_ips(self.sites, experiments)
self.assertItemsEqual(results, expected_results)
def test_export_mlab_site_stats(self):
expected_results = [{"city": "Some City",
"metro": ["abc01", "abc"],
"country": "US",
"site": "abc01",
"longitude": 74.783,
"latitude": 36.85,
"roundrobin": False}]
sitestats = mlabconfig.export_mlab_site_stats(self.sites)
self.assertItemsEqual(sitestats, expected_results)
def test_export_router_and_switch_records(self):
output = StringIO.StringIO()
expected_results = [
mlabconfig.format_a_record('r1.abc01', '192.168.1.1'),
mlabconfig.format_a_record('s1.abc01', '192.168.1.2'),
]
mlabconfig.export_router_and_switch_records(output, self.sites)
results = output.getvalue().split('\n')
self.assertContainsItems(results, expected_results)
def test_export_pcu_records(self):
output = StringIO.StringIO()
expected_results = [
mlabconfig.format_a_record('mlab1d.abc01', '192.168.1.4'),
mlabconfig.format_a_record('mlab2d.abc01', '192.168.1.5'),
mlabconfig.format_a_record('mlab3d.abc01', '192.168.1.6'),
]
mlabconfig.export_pcu_records(output, self.sites)
results = output.getvalue().split('\n')
self.assertContainsItems(results, expected_results)
def test_export_server_records(self):
output = StringIO.StringIO()
# This is a subset of expected results.
expected_results = [
mlabconfig.format_a_record('mlab1.abc01', '192.168.1.9'),
mlabconfig.format_a_record('mlab2v4.abc01', '192.168.1.22'),
mlabconfig.format_aaaa_record('mlab3.abc01', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'),
mlabconfig.format_aaaa_record('mlab1v6.abc01', 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b')
]
mlabconfig.export_server_records(output, self.sites)
results = output.getvalue().split('\n')
self.assertContainsItems(results, expected_results)
def test_export_experiment_records(self):
output = StringIO.StringIO()
experiments = [model.Slice(name='abc_bar',
index=1,
attrs=self.attrs,
users=self.users,
use_initscript=True,
ipv6='all')]
expected_results = [
mlabconfig.format_a_record('bar.abc.abc01', '192.168.1.11'),
mlabconfig.format_a_record('bar.abc.mlab2.abc01', '192.168.1.24'),
mlabconfig.format_a_record('bar.abcv4.abc01', '192.168.1.11'),
mlabconfig.format_a_record('bar.abc.mlab2v4.abc01', '192.168.1.24'),
mlabconfig.format_aaaa_record('bar.abc.abc01',
'fdf8:f53e:61e4::18'),
mlabconfig.format_aaaa_record('bar.abc.abc01',
'fc00:db20:35b:7399::5'),
mlabconfig.format_aaaa_record('bar.abc.mlab3.abc01',
'fc00:db20:35b:7399::5'),
mlabconfig.format_aaaa_record('bar.abcv6.abc01',
'fdf8:f53e:61e4::18'),
mlabconfig.format_aaaa_record('bar.abc.mlab1v6.abc01',
'fdf8:f53e:61e4::18'),
]
mlabconfig.export_experiment_records(output, self.sites, experiments)
results = output.getvalue().split('\n')
self.assertContainsItems(results, expected_results)
def test_export_experiment_records_flattened(self):
output = StringIO.StringIO()
experiments = [model.Slice(name='abc_foo',
index=1,
attrs=self.attrs,
users=self.users,
use_initscript=True,
ipv6='all')]
expected_results = [
mlabconfig.format_a_record('foo-abc-mlab2-abc01', '192.168.1.24'),
mlabconfig.format_a_record('foo-abc-mlab2v4-abc01', '192.168.1.24'),
mlabconfig.format_aaaa_record('foo-abc-mlab3-abc01',
'fc00:db20:35b:7399::5'),
mlabconfig.format_aaaa_record('foo-abc-mlab1v6-abc01',
'fdf8:f53e:61e4::18'),
]
unexpected_results = [
mlabconfig.format_a_record('foo-abc-abc01', '192.168.1.24'),
mlabconfig.format_a_record('foo-abcv4-abc01', '192.168.1.24'),
mlabconfig.format_aaaa_record('foo-abc-abc01',
'fc00:db20:35b:7399::5'),
mlabconfig.format_aaaa_record('foo-abcv6-abc01',
'fdf8:f53e:61e4::18'),
]
mlabconfig.SSL_EXPERIMENTS = ['abc_foo']
mlabconfig.export_experiment_records(output, self.sites, experiments)
results = output.getvalue().split('\n')
# We are using custom functions here because of the size of the results
# list. The results list will contain 50+ items. Using the built-in
# assertItemsEqual() would require creating a very large, unwieldy
# expected_results list. Using the custom functions allows us to not
# have to verify the entirety of results, but simply assert that certain
# key items are in the results. This is likely sufficient because most
# of the items in results are redundant in form.
self.assertContainsItems(results, expected_results)
self.assertDoesNotContainsItems(results, unexpected_results)
@mock.patch.object(mlabconfig, 'get_revision')
def test_serial_rfc1912(self, mock_get_revision):
# Fri Oct 31 00:45:00 2015 UTC.
# 45-minutes should result in 03.
ts = 1446252300
mock_get_revision.return_value = '03'
serial = mlabconfig.serial_rfc1912(time.gmtime(ts))
self.assertEqual('2015103103', serial)
@mock.patch.object(os.path, 'exists')
@mock.patch('__builtin__.open')
def test_get_revision_when_saved_prefix_is_old_and_revision_is_reset(
self, mopen, mock_exists):
prefix = '20151031'
# All open and disk I/O is mocked out.
# Pretend the file exists already.
mock_exists.return_value = True
# Hold the fake writer so we can check how it was called.
mock_writer = mock.mock_open()
# Open is called twice, once to read, and then to write.
mopen.side_effect = [
mock.mock_open(
# Saved prefix is older than current prefix.
read_data='{"prefix": "20140931", "revision": 1}').return_value,
mock_writer.return_value
]
s = mlabconfig.get_revision(prefix, '/tmp/fakepath')
self.assertEqual(s, '00')
mock_writer.return_value.write.assert_called_once_with(
'{"prefix": "20151031", "revision": 0}')
@mock.patch.object(os.path, 'exists')
@mock.patch('__builtin__.open')
def test_get_revision_when_file_exists_increments_revision(self, mopen,
mock_exists):
prefix = '20151031'
# All open and disk I/O is mocked out.
# Pretend the file exists already.
mock_exists.return_value = True
# Hold the fake writer so we can check how it was called.
mock_writer = mock.mock_open()
# Open is called twice, once to read, and then to write.
mopen.side_effect = [
mock.mock_open(
read_data='{"prefix": "20151031", "revision": 1}').return_value,
mock_writer.return_value
]
s = mlabconfig.get_revision(prefix, '/tmp/fakepath')
self.assertEqual(s, '02')
mock_writer.return_value.write.assert_called_once_with(
'{"prefix": "20151031", "revision": 2}')
@mock.patch.object(os.path, 'exists')
@mock.patch('__builtin__.open')
def test_get_revision_when_file_is_corrupt_default_values_saved(
self, mopen, mock_exists):
prefix = '20151031'
# All open and disk I/O is mocked out.
# Pretend the file exists already.
mock_exists.return_value = True
# Hold the fake writer so we can check how it was called.
mock_writer = mock.mock_open()
# Open is called twice, once to read, and then to write.
mopen.side_effect = [
mock.mock_open(read_data='THIS IS NOT JSON').return_value,
mock_writer.return_value
]
s = mlabconfig.get_revision(prefix, '/tmp/fakepath')
self.assertEqual(s, '00')
mock_writer.return_value.write.assert_called_once_with(
'{"prefix": "20151031", "revision": 0}')
def test_export_mlab_zone_header(self):
options = optparse.Values()
options.value = 'middle'
output = StringIO.StringIO()
header = StringIO.StringIO('before; %(value)s; after')
mlabconfig.export_mlab_zone_header(output, header, options)
self.assertEqual(output.getvalue(), 'before; middle; after')
@mock.patch('__builtin__.open')
def test_export_mlab_server_network_config(self, mock_open):
stdout = StringIO.StringIO()
name_tmpl = '{{hostname}}-foo.ipxe'
input_tmpl = StringIO.StringIO('ip={{ip}} ; echo ${ip} {{extra}}')
file_output = StringIO.StringIO()
mock_open.return_value = OpenStringIO(file_output)
mlabconfig.export_mlab_server_network_config(
stdout, self.sites, name_tmpl, input_tmpl, 'mlab1.abc01',
{'extra': 'value'})
self.assertEqual(
file_output.getvalue(), 'ip=192.168.1.9 ; echo ${ip} value')
@mock.patch('__builtin__.open')
def test_export_scraper_kubernetes_config(self, mock_open):
virtual_output_files = {}
def create_new_fake_file(*args):
virtual_output_files[args[0]] = StringIO.StringIO()
return OpenStringIO(virtual_output_files[args[0]])
mock_open.side_effect = create_new_fake_file
experiments = [model.Slice(name='abc_foo',
index=1,
attrs=self.attrs,
users=self.users,
use_initscript=True,
rsync_modules=['test1', 'test2'],
ipv6='all')]
for node in self.sites[0]['nodes'].values():
experiments[0].add_node_address(node)
output_template = textwrap.dedent("""\
host: {{rsync_host}}
site: {{site_safe}}
node: {{node_safe}}
experiment: {{experiment}}
module: {{rsync_module}}
""")
filename_template = ('deployment/{{site_safe}}-{{node_safe}}-'
'{{experiment_safe}}-{{rsync_module}}.yml')
mlabconfig.export_scraper_kubernetes_config(filename_template,
experiments,
output_template,
None)
expected_output = {
'deployment/abc01-mlab1-foo-abc-test1.yml': textwrap.dedent("""\
host: foo.abc.mlab1.abc01.measurement-lab.org
site: abc01
node: mlab1
experiment: foo.abc
module: test1"""),
'deployment/abc01-mlab1-foo-abc-test2.yml': textwrap.dedent("""\
host: foo.abc.mlab1.abc01.measurement-lab.org
site: abc01
node: mlab1
experiment: foo.abc
module: test2"""),
'deployment/abc01-mlab2-foo-abc-test1.yml': textwrap.dedent("""\
host: foo.abc.mlab2.abc01.measurement-lab.org
site: abc01
node: mlab2
experiment: foo.abc
module: test1"""),
'deployment/abc01-mlab2-foo-abc-test2.yml': textwrap.dedent("""\
host: foo.abc.mlab2.abc01.measurement-lab.org
site: abc01
node: mlab2
experiment: foo.abc
module: test2"""),
'deployment/abc01-mlab3-foo-abc-test1.yml': textwrap.dedent("""\
host: foo.abc.mlab3.abc01.measurement-lab.org
site: abc01
node: mlab3
experiment: foo.abc
module: test1"""),
'deployment/abc01-mlab3-foo-abc-test2.yml': textwrap.dedent("""\
host: foo.abc.mlab3.abc01.measurement-lab.org
site: abc01
node: mlab3
experiment: foo.abc
module: test2""")
}
self.assertEqual(set(expected_output.keys()),
set(virtual_output_files.keys()))
for fname, contents in expected_output.items():
self.assertIn(fname, virtual_output_files)
self.assertEqual(contents.strip(),
virtual_output_files[fname].getvalue().strip())
@mock.patch('__builtin__.open')
def test_export_scraper_kubernetes_config_subset(self, mock_open):
virtual_output_files = {}
def create_new_fake_file(*args):
virtual_output_files[args[0]] = StringIO.StringIO()
return OpenStringIO(virtual_output_files[args[0]])
mock_open.side_effect = create_new_fake_file
experiments = [model.Slice(name='abc_foo',
index=1,
attrs=self.attrs,
users=self.users,
use_initscript=True,
rsync_modules=['test1', 'test2'],
ipv6='all')]
for node in self.sites[0]['nodes'].values():
experiments[0].add_node_address(node)
output_template = textwrap.dedent("""\
machine: {{machine}}
host: {{rsync_host}}
site: {{site}}
node: {{node}}
experiment: {{experiment}}
module: {{rsync_module}}
""")
filename_template = ('deployment/{{site}}-{{node}}-'
'{{experiment_safe}}-{{rsync_module}}.yml')
mlabconfig.export_scraper_kubernetes_config(filename_template,
experiments,
output_template,
".*mlab3.*")
expected_output = {
'deployment/abc01-mlab3-foo-abc-test1.yml': textwrap.dedent("""\
machine: mlab3.abc01.measurement-lab.org
host: foo.abc.mlab3.abc01.measurement-lab.org
site: abc01
node: mlab3
experiment: foo.abc
module: test1"""),
'deployment/abc01-mlab3-foo-abc-test2.yml': textwrap.dedent("""\
machine: mlab3.abc01.measurement-lab.org
host: foo.abc.mlab3.abc01.measurement-lab.org
site: abc01
node: mlab3
experiment: foo.abc
module: test2""")
}
self.assertEqual(set(expected_output.keys()),
set(virtual_output_files.keys()))
for fname, contents in expected_output.items():
self.assertIn(fname, virtual_output_files)
self.assertEqual(contents.strip(),
virtual_output_files[fname].getvalue().strip())
def test_select_prometheus_experiment_targets_includes_all_experiments(
self):
# Setup synthetic user, site, and experiment configuration data.
experiments = [model.Slice(name='abc_bar',
index=1,
attrs=self.attrs,
users=self.users,
use_initscript=True,
ipv6='all')]
# Assign experiments to nodes.
for node in self.sites[0]['nodes'].values():
experiments[0].add_node_address(node)
expected_targets = [
{
'labels': {
'experiment': 'bar.abc',
'machine': 'mlab2.abc01.measurement-lab.org'
},
'targets': [
'bar.abc.mlab2.abc01.measurement-lab.org:9090'
]
},
{
'labels': {
'experiment': 'bar.abc',
'machine': 'mlab1.abc01.measurement-lab.org'
},
'targets': [
'bar.abc.mlab1.abc01.measurement-lab.org:9090'
]
},
{
'labels': {
'experiment': 'bar.abc',
'machine': 'mlab3.abc01.measurement-lab.org'
},
'targets': [
'bar.abc.mlab3.abc01.measurement-lab.org:9090'
]
}
]
actual_targets = mlabconfig.select_prometheus_experiment_targets(
experiments, None, ['{{hostname}}:9090'], {}, False, False, '')
self.assertEqual(len(actual_targets), 3)
self.assertItemsEqual(expected_targets, actual_targets)
def test_select_prometheus_experiment_targets_includes_selected(self):
# Setup synthetic user, site, and experiment configuration data.
experiments = [model.Slice(name='abc_bar',
index=1,
attrs=self.attrs,
users=self.users,
use_initscript=True,
ipv6='all')]
# Assign experiments to nodes.
for node in self.sites[0]['nodes'].values():
experiments[0].add_node_address(node)
expected_targets = [
{
'labels': {
'machine': 'mlab2.abc01.measurement-lab.org',
'experiment': 'bar.abc'
},
                'targets':
# New source file (GitHub stars: 1-10)
import os
import sys
import math
import pytest
import numpy as np
import osmnx as ox
import logging as lg
import networkx as nx
import anprx.core as core
import anprx.helpers as helpers
import anprx.exceptions as exceptions
from anprx.constants import Units
###
###
def get_lat_lng():
latitudes = [54.97092396,54.97080711]
longitudes = [-1.622966153, -1.622935367]
return latitudes, longitudes
def get_points():
latitudes, longitudes = get_lat_lng()
point1 = core.Point(lat = latitudes[0],
lng = longitudes[0])
point2 = core.Point(lat = latitudes[1],
lng = longitudes[1])
return (point1, point2)
def get_bbox(size):
if size == "small":
return core.BBox(54.97092396, 54.97080711,
-1.622966153, -1.622935367)
elif size == "medium":
return core.BBox(*ox.bbox_from_point(
point= (54.97351405, -1.62545930208892),
distance = 500))
elif size == "uk":
return core.BBox(59.478568831926395, 49.82380908513249,
-10.8544921875, 2.021484375)
else:
raise ValueError("No such bbox size")
def assert_bbox_almost_equal(bbox1, bbox2, decimal = 5):
np.testing.assert_almost_equal(bbox1.north, bbox2.north, decimal = decimal)
np.testing.assert_almost_equal(bbox1.south, bbox2.south, decimal = decimal)
np.testing.assert_almost_equal(bbox1.west, bbox2.west, decimal = decimal)
np.testing.assert_almost_equal(bbox1.east, bbox2.east, decimal = decimal)
def get_network(distance = 1000, center = (54.97351, -1.62545)):
network_pickle_filename = "tests/data/test_network_USB_{}.pkl".format(distance)
if os.path.exists(network_pickle_filename):
network = nx.read_gpickle(path = network_pickle_filename)
else:
network = ox.graph_from_point(
center_point = center,
distance = distance, #meters
distance_type='bbox',
network_type="drive_service")
nx.write_gpickle(G = network, path = network_pickle_filename)
return network
def test_bbox_area_small():
bbox = get_bbox(size = "small")
expected_area_km2 = 2.55e-05
observed_area_km2_simple = core.get_bbox_area(
bbox = bbox,
unit = Units.km,
method = "simple")
observed_area_km2_sins = core.get_bbox_area(
bbox = bbox,
unit = Units.km,
method = "sins")
expected_area_m2 = 2.55e-05 * 1e6
observed_area_m2_simple = core.get_bbox_area(
bbox = bbox,
unit = Units.m,
method = "simple")
observed_area_m2_sins = core.get_bbox_area(
bbox = bbox,
unit = Units.m,
method = "sins")
np.testing.assert_almost_equal(expected_area_km2, observed_area_km2_simple, decimal = 6)
np.testing.assert_almost_equal(expected_area_m2, observed_area_m2_simple, decimal = 1)
np.testing.assert_almost_equal(expected_area_km2, observed_area_km2_sins, decimal = 6)
np.testing.assert_almost_equal(expected_area_m2, observed_area_m2_sins, decimal = 1)
def test_bbox_area_large():
bbox = get_bbox(size = "uk")
expected_area_km2 = 888000
observed_area_km2_simple = core.get_bbox_area(
bbox = bbox,
unit = Units.km,
method = "simple")
observed_area_km2_sins = core.get_bbox_area(
bbox = bbox,
unit = Units.km,
method = "sins")
expected_area_m2 = 888000 * 1e6
observed_area_m2_simple = core.get_bbox_area(
bbox = bbox,
unit = Units.m,
method = "simple")
observed_area_m2_sins = core.get_bbox_area(
bbox = bbox,
unit = Units.m,
method = "sins")
np.testing.assert_almost_equal(expected_area_km2, observed_area_km2_simple, decimal = -5)
np.testing.assert_almost_equal(expected_area_m2, observed_area_m2_simple, decimal = -10)
np.testing.assert_almost_equal(expected_area_km2, observed_area_km2_sins, decimal = -5)
np.testing.assert_almost_equal(expected_area_m2, observed_area_m2_sins, decimal = -10)
def test_meanpoint():
point1, point2 = get_points()
meanpoint = core.get_meanpoint([point1, point2])
np.testing.assert_almost_equal(54.97086, meanpoint.lat, decimal=5)
np.testing.assert_almost_equal(-1.622945, meanpoint.lng, decimal=5)
def test_empty_bbox_from_points():
with pytest.raises(ValueError):
core.bbox_from_points([])
def test_small_bbox_from_points():
point1, point2 = get_points()
bbox = get_bbox(size = "small")
nw = core.Point(bbox.north, bbox.west)
sw = core.Point(bbox.south, bbox.west)
ne = core.Point(bbox.north, bbox.east)
se = core.Point(bbox.south, bbox.east)
points = [nw, sw, ne, se]
bbox = core.bbox_from_points(points)
expected_bbox = core.BBox(*ox.bbox_from_point(
point= core.get_meanpoint([point1, point2]),
distance = 100))
assert_bbox_almost_equal(bbox, expected_bbox)
def test_large_bbox_from_points():
bbox = get_bbox(size = "uk")
nw = core.Point(bbox.north, bbox.west)
sw = core.Point(bbox.south, bbox.west)
ne = core.Point(bbox.north, bbox.east)
se = core.Point(bbox.south, bbox.east)
points = [nw, sw, ne, se]
with pytest.raises(exceptions.BBoxAreaSafetyError):
core.bbox_from_points(points)
def test_bbox_from_points_no_margins():
bbox = get_bbox(size = "medium")
nw = core.Point(bbox.north, bbox.west)
sw = core.Point(bbox.south, bbox.west)
ne = core.Point(bbox.north, bbox.east)
se = core.Point(bbox.south, bbox.east)
points = [nw, sw, ne, se]
bbox = core.bbox_from_points(points, rel_margins = core.RelativeMargins(0,0,0,0))
expected_bbox = core.BBox(*ox.bbox_from_point(
point= (54.97351405, -1.62545930208892),
distance = 500))
assert_bbox_almost_equal(bbox, expected_bbox)
def test_bbox_from_points_with_margins():
bbox = get_bbox(size = "medium")
nw = core.Point(bbox.north, bbox.west)
sw = core.Point(bbox.south, bbox.west)
ne = core.Point(bbox.north, bbox.east)
se = core.Point(bbox.south, bbox.east)
points = [nw, sw, ne, se]
bbox = core.bbox_from_points(points)
expected_bbox = core.BBox(*ox.bbox_from_point(
point= (54.97351405, -1.62545930208892),
distance = 500))
assert_bbox_almost_equal(bbox, expected_bbox, decimal = 3)
def test_edges_from_osmid():
expected_osmids = \
[37899441,
461119586,
4725926,
4692270,
4655478,
2544439,
31992849]
network = get_network(distance = 1000)
all_osmids = list(helpers.flatten(network.edges(data = "osmid")))
assert not set(expected_osmids).isdisjoint(set(all_osmids))
edges = list(core.edges_from_osmid(network, expected_osmids))
returned_osmids = set(helpers.flatten(map(lambda edge: network[edge.u][edge.v][edge.k]["osmid"], edges)))
assert not set(returned_osmids).isdisjoint(set(expected_osmids))
def test_distance_to_edge():
point1, point2 = get_points()
network = get_network(distance = 1000)
edge = core.Edge(u = 826286632,
v = 29825878,
k = 0)
assert \
core.distance_to_edge(
network = network,
edge = edge,
point = point1,
method = core.EdgeDistanceMethod.farthest_node) \
< 100
assert \
core.distance_to_edge(
network = network,
edge = edge,
point = point2,
method = core.EdgeDistanceMethod.mean_of_distances) \
< 100
def test_lvector():
origin, actual_point = get_points()
lvector = core.as_lvector(origin, actual_point)
desired_point = core.from_lvector(origin, lvector)
np.testing.assert_almost_equal(
actual_point,
desired_point,
decimal = 7)
def test_nodes_and_edges_in_range():
point1, point2 = get_points()
network = get_network(distance = 1000)
nn_ids, nn_distances = core.get_nodes_in_range(network, [point1, point2], 100)
assert len(nn_ids) == 2
assert len(nn_distances) == 2
assert len(nn_ids[0]) > 0
assert len(nn_ids[1]) > 0
assert len(nn_distances[0]) == len(nn_ids[0])
assert len(nn_distances[1]) == len(nn_ids[1])
edges = core.get_edges_in_range(network, nn_ids)
assert len(edges) == 2
assert len(edges[0]) >= len(nn_ids[0])
assert len(edges[1]) >= len(nn_ids[1])
def test_filter_by_address_and_get_local_coordinate_system():
network = get_network(distance = 1000)
address = "Pitt Street, Newcastle Upon Tyne, UK"
point = core.Point(lat = 54.974537, lng = -1.625644)
nn_ids, nn_distances = core.get_nodes_in_range(network, [point], 100)
nn_edges = core.get_edges_in_range(network, nn_ids)[0]
all_nodes = { edge[0] for edge in nn_edges } | \
{ edge[1] for edge in nn_edges }
assert len(all_nodes) > len(nn_ids[0])
candidate_edges = core.filter_by_address(network, nn_edges, address)
assert len(candidate_edges) < len(nn_edges)
candidate_nodes = { edge[0] for edge in candidate_edges } | \
{ edge[1] for edge in candidate_edges }
nodes_lvectors, edges_lvectors = \
core.local_coordinate_system(
network = network,
origin = point,
nodes = candidate_nodes,
edges = candidate_edges)
assert len(nodes_lvectors) == len(candidate_nodes)
assert len(edges_lvectors) == len(candidate_edges)
for id in candidate_nodes:
ox_distance = ox.great_circle_vec(
lat1 = network.node[id]['y'],
lng1 = network.node[id]['x'],
lat2 = point.lat,
lng2 = point.lng)
lvector = nodes_lvectors[id]
lvector_distance = math.sqrt(lvector[0] ** 2 + lvector[1] ** 2)
np.testing.assert_almost_equal(
ox_distance,
lvector_distance,
decimal = 6)
def test_gen_lsystem_recursive():
network = get_network(distance = 1000)
neighborless_point = core.Point(lat=54.959224, lng=-1.663313)
with pytest.raises(exceptions.ZeroNeighborsError):
lsystem = core.gen_lsystem(
network,
origin = neighborless_point,
radius = 40)
def test_estimate_camera_edge():
network = get_network(distance = 1000)
point = core.Point(lat = 54.974537, lng = -1.625644)
lsystem = core.gen_lsystem(network, point, 40)
assert 'nnodes' in lsystem
assert 'nedges' in lsystem
assert 'cedges' in lsystem
assert 'lnodes' in lsystem
assert 'ledges' in lsystem
camera_edge, p_cedges, samples = \
core.estimate_camera_edge(network,
lsystem,
nsamples = 100,
return_samples = True)
assert camera_edge is not None
assert p_cedges is not None
assert samples is not None
assert set(p_cedges.keys()) == set(lsystem['cedges'])
assert set(samples.keys()) == set(lsystem['cedges'])
for element in samples.values():
assert len(element) == 2
assert len(element[0]) == 100 + 1
assert len(element[1]) == 100 + 1
##
##
##
points_1q = np.array([(2,2), (9,1), (1,9), (0,1), (3,0)],
dtype = [('x', 'i8'), ('y', 'i8')])
points_2q = np.array([(-2,2), (-9,1), (-1,9)],
dtype = [('x', 'i8'), ('y', 'i8')])
points_3q = np.array([(-2,-2), (-9,-1), (-1,-9), (0,-1), (-3,0)],
dtype = [('x', 'i8'), ('y', 'i8')])
points_4q = np.array([(2,-2), (9,-1), (1,-9)],
dtype = [('x', 'i8'), ('y', 'i8')])
def test_direction_of_flow_q1_q2():
q1_q2 = np.array(np.meshgrid(points_1q, points_2q, indexing = 'xy')).T.reshape(-1,2)
for q1,q2 in q1_q2:
assert core.flow_of_closest_lane(q1,q2,
left_handed = True) == (q1,q2)
assert core.flow_of_closest_lane(q2,q1,
left_handed = True) == (q1,q2)
assert core.flow_of_closest_lane(q1,q2,
left_handed = False) == (q2,q1)
assert core.flow_of_closest_lane(q2,q1,
left_handed = False) == (q2,q1)
def test_direction_of_flow_q2_q3():
q2_q3 = np.array(np.meshgrid(points_2q, points_3q, indexing = 'xy')).T.reshape(-1,2)
for q2,q3 in q2_q3:
assert core.flow_of_closest_lane(q2,q3,
left_handed = True) == (q2,q3)
assert core.flow_of_closest_lane(q3,q2,
left_handed = True) == (q2,q3)
assert core.flow_of_closest_lane(q2,q3,
left_handed = False) == (q3,q2)
assert core.flow_of_closest_lane(q3,q2,
left_handed = False) == (q3,q2)
def test_direction_of_flow_q3_q4():
q3_q4 = np.array(np.meshgrid(points_3q, points_4q, indexing = 'xy')).T.reshape(-1,2)
for q3,q4 in q3_q4:
assert core.flow_of_closest_lane(q3,q4,
left_handed = True) == (q3,q4)
assert core.flow_of_closest_lane(q4,q3,
left_handed = True) == (q3,q4)
assert core.flow_of_closest_lane(q3,q4,
left_handed = False) == (q4,q3)
assert core.flow_of_closest_lane(q4,q3,
left_handed = False) == (q4,q3)
def test_direction_of_flow_q4_q1():
q4_q1 = np.array(np.meshgrid(points_4q, points_1q, indexing = 'xy')).T.reshape(-1,2)
for q4,q1 in q4_q1:
assert core.flow_of_closest_lane(q4,q1,
left_handed = True) == (q4,q1)
assert core.flow_of_closest_lane(q1,q4,
left_handed = True) == (q4,q1)
assert core.flow_of_closest_lane(q4,q1,
left_handed = False) == (q1,q4)
assert core.flow_of_closest_lane(q1,q4,
left_handed = False) == (q1,q4)
def test_get_dead_end_nodes():
network = get_network(distance = 1000)
dead_end_nodes = core.get_dead_end_nodes(network)
assert len(dead_end_nodes) > 0
core.remove_dead_end_nodes(network)
for node in dead_end_nodes:
assert not network.has_node(node)
def test_add_address_details(monkeypatch):
dummy_address_details = {
'road' : 'Spring Street',
'suburb' : 'Arthur\'s Hill',
'place_rank' : 26,
'class' : 'highway',
'type' : 'residential',
'importance' : '0.1',
'postcode' : 'NE4 5TB'
}
network = get_network(distance = 1000)
subnetwork = network.subgraph([4519161284, 4519161278])
monkeypatch.setattr('anprx.nominatim.lookup_address',
lambda osmids,entity,drop_keys,email:
[dummy_address_details] * len(osmids))
subnetwork = core.add_address_details(subnetwork)
for (u,v,k,d) in subnetwork.edges(keys = True, data = True):
assert all(item in d.items() for item in dummy_address_details.items())
def test_enrich_network(monkeypatch):
def mock_osmnx_elevation(G, api_key, max_locations_per_batch=350,
pause_duration=0.02):
nx.set_node_attributes(G, 100, 'elevation')
return G
dummy_address_details = {
'road' : 'Spring Street',
'suburb' : 'Arthur\'s Hill',
'place_rank' : 26,
'class' : 'highway',
'type' : 'residential',
'importance' : '0.1',
'postcode' : 'NE4 5TB'
}
network = get_network(distance = 1000)
monkeypatch.setattr('anprx.nominatim.lookup_address',
lambda osmids,entity,drop_keys,email:
| |
from time import sleep
from pysphere import VIServer, VIProperty, VITask, FaultTypes, MORTypes
from pysphere.vi_virtual_machine import VIVirtualMachine
from pysphere.resources import VimService_services as VI
from pysphere.resources.vi_exception import VIException, VIApiException
from pysphere.vi_mor import VIMor
import ssl
import pypacksrc
import re, subprocess
def vs_connect(host, user, password, unverify=True):
if unverify:
try:
ssl._create_default_https_context = ssl._create_unverified_context
except:
pass
con = VIServer()
con.connect(host, user,password,'/var/log/pysphere.log')
return con
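# A minimal usage sketch (illustrative only): vs_connect is normally paired
# with an explicit disconnect once the helpers below have done their work.
def example_list_datacenters(vCenterserver, user, password):
    """Illustrative helper: return the datacenter names visible to this user."""
    con = vs_connect(vCenterserver, user, password)
    try:
        return con.get_datacenters().values()
    finally:
        con.disconnect()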
def find_vm(vCenterserver, user, password, name):
con = vs_connect(vCenterserver, user, password, unverify=True)
try:
vm = con.get_vm_by_name(name)
return vm
except VIException:
return None
def get_RP_by_name(host, user, password, name):
con = vs_connect(host, user, password, unverify=True)
rps = con.get_resource_pools()
for mor, path in rps.iteritems():
if re.match('.*%s' % name,path):
return mor
return None
def run_post_script(name,ip, post_script):
retcode = subprocess.call([post_script,name,ip])
if retcode < 0:
resp = 'ERROR: %s %s %s : Returned a non-zero result' % (post_script,name,ip)
return resp
def get_vm_ip_addresses(vCenterserver, username, password,vm_name, ipv6=False, maxwait=120):
vm_obj = find_vm(vCenterserver, username, password, vm_name)
net_info = None
waitcount = 0
while net_info is None:
if waitcount > maxwait:
break
net_info = vm_obj.get_property('net',False)
waitcount += 5
sleep(5)
if net_info:
return net_info
return None
def get_NIC_address_per_connected_net(vCenterserver, username, password,vm_name, net_name, ipv6=False, maxwait=120):
vm_obj = find_vm(vCenterserver, username, password, vm_name)
net_info = None
waitcount = 0
while net_info is None:
if waitcount > maxwait:
break
net_info = vm_obj.get_property('net',False)
waitcount += 5
sleep(5)
if net_info:
for i in range(len(net_info)):
for ip in net_info[i]['ip_addresses']:
                # Skip IPv6 link-local (fe80::/10) addresses.
                if ipv6 and re.match('\d{1,4}\:.*',ip) and not re.match('fe80\:.*',ip):
if(net_info[i]['network']==net_name):
return ip
elif not ipv6 and re.match('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}',ip) and ip != '127.0.0.1':
if(net_info[i]['network']==net_name):
return ip
return None
def get_dvSwitchs_by_DCname(vCenterserver, username, password, datacentername):
con = vs_connect(vCenterserver, username, password)
dcmor = [k for k,v in con.get_datacenters().items() if v==datacentername][0]
dcprops = VIProperty(con, dcmor)
nfmor = dcprops.networkFolder._obj
dvswitch_mors = con._retrieve_properties_traversal(property_names=['name'],from_node=nfmor, obj_type = 'DistributedVirtualSwitch')
respdict={}
for dvswitch_mor in dvswitch_mors:
respdict[dvswitch_mor.PropSet[0]._val] = dvswitch_mor.Obj
return respdict
def get_dvSwitchuuid_by_dvsname_and_DC(vCenterserver, username, password, datacentername, dvSname):
con = vs_connect(vCenterserver, username, password)
dcmor = [k for k,v in con.get_datacenters().items() if v==datacentername][0]
dcprops = VIProperty(con, dcmor)
nfmor = dcprops.networkFolder._obj
dvswitch_mors = con._retrieve_properties_traversal(property_names=['name',"uuid"],from_node=nfmor, obj_type = 'DistributedVirtualSwitch')
for dvswitch_mor in dvswitch_mors:
if dvswitch_mor.PropSet[0]._val == dvSname:
return dvswitch_mor.PropSet[1]._val
return "Failure, dvswitch not found"
def get_portgroupname_by_ref(vCenterserver, username, password,datacentername, pgRef):
con = vs_connect(vCenterserver, username, password)
dcmor = [k for k,v in con.get_datacenters().items() if v==datacentername][0]
dcprops = VIProperty(con, dcmor)
nfmor = dcprops.networkFolder._obj
portgroup_mors = con._retrieve_properties_traversal(property_names=['name','key'],from_node=nfmor, obj_type = 'DistributedVirtualPortgroup')
for portgroup_mor in portgroup_mors:
ref=portgroup_mor.get_element_propSet()[0].get_element_val()
if ref==pgRef:
return portgroup_mor.get_element_propSet()[1].get_element_val()
return None
def get_portgroupref_by_name(vCenterserver, username, password,datacentername, PGname):
con = vs_connect(vCenterserver, username, password)
dcmor = [k for k,v in con.get_datacenters().items() if v==datacentername][0]
dcprops = VIProperty(con, dcmor)
nfmor = dcprops.networkFolder._obj
portgroup_mors = con._retrieve_properties_traversal(property_names=['name','key'],from_node=nfmor, obj_type = 'DistributedVirtualPortgroup')
for portgroup_mor in portgroup_mors:
name = portgroup_mor.get_element_propSet()[1].get_element_val()
if name==PGname:
return portgroup_mor.get_element_propSet()[0].get_element_val()
return None
def get_portgroup_by_dvSwitchname(vCenterserver, username, password, datacentername, dvSwitchname):
con = vs_connect(vCenterserver, username, password)
dcmor = [k for k,v in con.get_datacenters().items() if v==datacentername][0]
dcprops = VIProperty(con, dcmor)
nfmor = dcprops.networkFolder._obj
portgroup_mors = con._retrieve_properties_traversal(property_names=['name','portgroup'],from_node=nfmor, obj_type = 'VmwareDistributedVirtualSwitch')
RespDic={}
for portgroup_mor in portgroup_mors:
if (portgroup_mor.get_element_propSet()[0].get_element_val()==dvSwitchname):
pgRefs = portgroup_mor.get_element_propSet()[1].get_element_val().ManagedObjectReference
for pgRef in pgRefs:
portgroup_mors = con._retrieve_properties_traversal(property_names=['name','key'],from_node=nfmor, obj_type = 'DistributedVirtualPortgroup')
for portgroup_mor in portgroup_mors:
ref=portgroup_mor.get_element_propSet()[0].get_element_val()
if ref==pgRef:
name = portgroup_mor.get_element_propSet()[1].get_element_val()
RespDic[name]=pgRef
return RespDic
def create_portgroup_in_host(vCenterserver, username, password, host, pgname, vswitchname, vlan_id):
resp = "succeeded"
con = None
try:
con = vs_connect(vCenterserver, username, password)
hostmor = [k for k, v in con.get_hosts().items() if v == host][0]
prop = VIProperty(con, hostmor)
network_system = prop.configManager.networkSystem._obj
request = VI.AddPortGroupRequestMsg()
_this = request.new__this(network_system)
_this.set_attribute_type(network_system.get_attribute_type())
request.set_element__this(_this)
portgrp = request.new_portgrp()
portgrp.set_element_name(pgname)
portgrp.set_element_vlanId(int(vlan_id))
portgrp.set_element_vswitchName(vswitchname)
portgrp.set_element_policy(portgrp.new_policy())
request.set_element_portgrp(portgrp)
con._proxy.AddPortGroup(request)
except Exception, error:
resp = str_remove_specialchars(error)
if con:
con.disconnect()
return resp
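# Illustrative wrapper around create_portgroup_in_host (all argument values
# are supplied by the caller; nothing here refers to a real environment):
def example_create_untagged_portgroup(vCenterserver, username, password, host, pgname, vswitchname):
    # VLAN id 0 means untagged traffic; 4095 would turn the port group into a trunk.
    return create_portgroup_in_host(vCenterserver, username, password, host,
                                    pgname, vswitchname, 0)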
def get_standardvS_by_DCname(vCenterserver, username, password, datacentername):
con = vs_connect(vCenterserver, username, password)
dcmor = [k for k,v in con.get_datacenters().items() if v==datacentername][0]
dcprops = VIProperty(con, dcmor)
nfmor = dcprops.networkFolder._obj
dvswitch_mors = con._retrieve_properties_traversal(property_names=['name'],from_node=nfmor, obj_type = 'Network')
respdict={}
for dvswitch_mor in dvswitch_mors:
var=dvswitch_mor.get_element_obj().lower()
if 'network' in var :
respdict[dvswitch_mor.PropSet[0]._val] = dvswitch_mor.Obj
return respdict
def vs_find_datacenter_by_name(vCenterserver, user, password, name):
response = "failure datcenter not found"
if name.isspace() or not(name) or (name=="None"):
return "None"
con = None
try:
con = vs_connect(vCenterserver, user, password)
rps = con.get_datacenters()
for mor, path in rps.iteritems():
if re.match('.*%s' % name, mor):
response = str(path)
break
except Exception, error:
response = str_remove_specialchars( error )
if con:
con.disconnect()
return response
def str_remove_specialchars( s ):
resp = None
if hasattr(s, 'status') and hasattr(s, 'message'):
resp = "provider.status: " + str(s.status) + " provider.message: failure "+ str(s.message)
else:
resp = "failure " + str(s)
response = resp
response = response.replace(pypacksrc.dcvt_delimiter," ")
return response
def add_nic_vm_and_connect_to_net(vCenterserver, username, password, datacentername, vm, dvswitch_uuid, portgroupKey, network_name="VM Network", nic_type="vmxnet3", network_type="standard"):
### add a NIC
# The network Name must be set as the device name to create the NIC.
# Different network card types are: "VirtualE1000", "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet", "VirtualNmxnet2", "VirtualVmxnet3"
net_device = None
con = vs_connect(vCenterserver, username, password)
vm_obj = con.get_vm_by_name(vm,datacenter=datacentername)
if not vm_obj:
raise Exception("VM %s not found" % vm)
#Find nic device
for dev in vm_obj.properties.config.hardware.device:
if dev._type in ["VirtualE1000", "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet","VirtualNmxnet2", "VirtualVmxnet3"]:
net_device = dev._obj
break
request = VI.ReconfigVM_TaskRequestMsg()
_this = request.new__this(vm_obj._mor)
_this.set_attribute_type(vm_obj._mor.get_attribute_type())
request.set_element__this(_this)
spec = request.new_spec()
dev_change = spec.new_deviceChange()
dev_change.set_element_device(net_device)
#dev_change.set_element_operation("edit")
if network_name:
dev_change.set_element_operation("add")
if nic_type == "e1000":
nic_ctlr = VI.ns0.VirtualE1000_Def("nic_ctlr").pyclass()
elif nic_type == "e1000e":
nic_ctlr = VI.ns0.VirtualE1000e_Def("nic_ctlr").pyclass()
elif nic_type == "pcnet32":
nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
elif nic_type == "vmxnet":
nic_ctlr = VI.ns0.VirtualVmxnet_Def("nic_ctlr").pyclass()
elif nic_type == "vmxnet2":
nic_ctlr = VI.ns0.VirtualVmxnet2_Def("nic_ctlr").pyclass()
elif nic_type == "vmxnet3":
nic_ctlr = VI.ns0.VirtualVmxnet3_Def("nic_ctlr").pyclass()
if network_type == "standard":
# Standard switch
nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def("nic_backing").pyclass()
nic_backing.set_element_deviceName(network_name)
elif network_type == "dvs":
nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def("nic_backing_port").pyclass()
nic_backing_port.set_element_switchUuid(dvswitch_uuid)
nic_backing_port.set_element_portgroupKey(portgroupKey)
# http://www.vmware.com/support/developer/vc-sdk/visdk400pubs/ReferenceGuide/vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo.html
nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def("nic_backing").pyclass()
nic_backing.set_element_port(nic_backing_port)
# How they do it in powershell
# http://www.lucd.info/2010/03/04/dvswitch-scripting-part-8-get-and-set-network-adapters/
# How they do it in ruby
# https://github.com/fog/fog/pull/1431/files
nic_ctlr.set_element_addressType("generated")
nic_ctlr.set_element_backing(nic_backing)
nic_ctlr.set_element_key(4)
dev_change.set_element_device(nic_ctlr)
spec.set_element_deviceChange([dev_change])
request.set_element_spec(spec)
ret = con._proxy.ReconfigVM_Task(request)._returnval
#Wait for the task to finish
task = VITask(ret, con)
status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_SUCCESS:
return "VM successfully reconfigured"
elif status == task.STATE_ERROR:
return "failure reconfiguring vm: " + str(task.get_error_message())
else:
return "failure reconfiguring vm network_name is mandatory"
def disconnect_nic_from_network(vCenterserver, username, password, datacentername, vmname, dvswitch_uuid, portgroupKey, network_name="VM Network", nic_type="vmxnet3", network_type="standard"):
con = vs_connect(vCenterserver, username, password)
vm_obj = con.get_vm_by_name(vmname, datacenter=datacentername)
    # Disconnect the third adapter if it is connected to the given network
    # (for example "VM Network").
    device_name = "Network adapter 3"
#Find Virtual Nic device
net_device = None
    for dev in vm_obj.properties.config.hardware.device:
        # deviceInfo.label is the adapter name ("Network adapter 3") and
        # deviceInfo.summary is the network it is attached to.
        if (dev._type in ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32", "VirtualVmxnet", "VirtualNmxnet2", "VirtualVmxnet3"]
                and dev.deviceInfo.label == device_name
                and dev.deviceInfo.summary == network_name):
            net_device = dev._obj
break
    if not net_device:
        con.disconnect()
        raise Exception("The VM seems to lack a matching virtual NIC")
    # Disconnect the device by marking it as not connected.
    net_device.Connectable.Connected = False
#Invoke ReconfigVM_Task
request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm_obj._mor)
    _this.set_attribute_type(vm_obj._mor.get_attribute_type())
request.set_element__this(_this)
spec = request.new_spec()
dev_change = spec.new_deviceChange()
dev_change.set_element_device(net_device)
dev_change.set_element_operation("edit")
spec.set_element_deviceChange([dev_change])
request.set_element_spec(spec)
    ret = con._proxy.ReconfigVM_Task(request)._returnval
    # Wait for the task to finish.
    task = VITask(ret, con)
status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_SUCCESS:
print "VM successfully reconfigured"
elif status == task.STATE_ERROR:
print "Error reconfiguring vm:", task.get_error_message()
    con.disconnect()
def get_vm_nics(vCenterserver, username, password, datacentername, vm_name):
" To reteive status VM should vm power on "
con = vs_connect(vCenterserver, username, password)
net_device = None
vm_obj = con.get_vm_by_name(vm_name,datacenter=datacentername)
if not vm_obj:
raise Exception("VM %s not found" % vm_name)
respdict ={}
sVSName = None
dvs = None
#Find nic device
for dev in vm_obj.properties.config.hardware.device:
if (dev._type in ["VirtualE1000", "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet","VirtualNmxnet2", "VirtualVmxnet3"]
and hasattr(dev, "backing") and hasattr(dev.backing, "deviceName")):
label = dev.deviceInfo.label
sVSName = str(dev.backing.deviceName)
net_device = dev._obj
status= net_device.Connectable.Connected
respdict[label]=[sVSName,status]
if (dev._type in ["VirtualE1000", "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet","VirtualNmxnet2", "VirtualVmxnet3"]
and hasattr(dev, "backing") and hasattr(dev.backing, "port")):
label = dev.deviceInfo.label
#label=unicode(label1, "utf-8")
pgRef = str(dev.backing.port.portgroupKey)
PGname = get_portgroupname_by_ref(vCenterserver, username, password,datacentername, pgRef)
net_device = dev._obj
status = net_device.Connectable.Connected
respdict[label]=[PGname,status]
if (dev._type in ["VirtualE1000", "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet","VirtualNmxnet2", "VirtualVmxnet3"]
and not hasattr(dev.backing, "deviceName")
and not hasattr(dev.backing, "port")
):
label = dev.deviceInfo.label
respdict[label]=["No connexion","no status"]
return respdict
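# Illustrative return value of get_vm_nics (adapter labels and network names
# are examples only): each entry maps a NIC label to the network backing it
# and its connection state.
#   {'Network adapter 1': ['VM Network', True],
#    'Network adapter 2': ['dvPortGroup-100', False]}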
def remove_nic_vm(vCenterserver, username, password, datacentername, vm_name, networklabel):
con = vs_connect(vCenterserver, username, password)
net_device = None
vm_obj = con.get_vm_by_name(vm_name,datacenter=datacentername)
if not vm_obj:
raise Exception("VM %s not found" % vm_name)
#Find nic device
for dev in vm_obj.properties.config.hardware.device:
if (dev._type in ["VirtualE1000", "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet","VirtualNmxnet2", "VirtualVmxnet3"]
and hasattr(dev, "backing")
and dev.deviceInfo.label == networklabel):
net_device = dev._obj
break
if not net_device:
raise Exception("The vm_name seems to lack a Virtual Nic")
request = VI.ReconfigVM_TaskRequestMsg()
_this = request.new__this(vm_obj._mor)
_this.set_attribute_type(vm_obj._mor.get_attribute_type())
request.set_element__this(_this)
spec | |
# Source repository: whoarethebritons/appscale
""" Utility functions used by the AdminServer. """
import errno
import json
import hmac
import logging
import os
import shutil
import socket
import tarfile
from appscale.common.constants import HTTPCodes
from appscale.common.constants import InvalidConfiguration
from appscale.common.constants import VERSION_PATH_SEPARATOR
from appscale.taskqueue import constants as tq_constants
from appscale.taskqueue.constants import InvalidQueueConfiguration
from kazoo.exceptions import NoNodeError
from . import constants
from .constants import (
CustomHTTPError,
GO,
JAVA,
SOURCES_DIRECTORY,
Types,
UNPACK_ROOT
)
from .instance_manager.utils import copy_modified_jars
from .instance_manager.utils import remove_conflicting_jars
logger = logging.getLogger(__name__)
def assert_fields_in_resource(required_fields, resource_name, resource):
""" Ensures the resource contains the required fields.
Args:
required_fields: An iterable specifying the required fields.
resource_name: A string specifying the resource name.
resource: A dictionary containing the resource details.
Raises:
CustomHTTPError if there are missing fields.
"""
def missing_field(prefix, group, resource_part):
field_name = group.pop(0)
if field_name not in resource_part:
return '.'.join([prefix, field_name])
if not group:
return
prefix += '.{}'.format(field_name)
return missing_field(prefix, group, resource_part[field_name])
missing_fields = []
for group in required_fields:
field = missing_field(resource_name, group.split('.'), resource)
if field is not None:
missing_fields.append(field)
if not missing_fields:
return
message = 'The request is invalid.'
description = 'This field is required.'
if len(missing_fields) == 1:
message = '{}: {}'.format(missing_fields[0], description)
violations = [{'field': field, 'description': description}
for field in missing_fields]
raise CustomHTTPError(
HTTPCodes.BAD_REQUEST,
message=message,
status='INVALID_ARGUMENT',
details=[{'@type': Types.BAD_REQUEST, 'fieldViolations': violations}])
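# Example (the field names below are hypothetical, for illustration only):
#   assert_fields_in_resource(['id', 'deployment.zip.sourceUrl'], 'version',
#                             {'id': 'v1', 'deployment': {'zip': {}}})
# raises a CustomHTTPError whose field violation names
# 'version.deployment.zip.sourceUrl', because dotted entries in
# required_fields describe nested dictionary keys.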
def version_contains_field(version, field):
""" Checks if the given dictionary contains the given field.
Args:
version: A dictionary containing version details.
field: A string representing a key path.
Returns:
A boolean indicating whether or not the version contains the field.
"""
version_fragment = version
for field_part in field.split('.'):
try:
version_fragment = version_fragment[field_part]
except KeyError:
return False
return True
def apply_mask_to_version(given_version, desired_fields):
""" Reduces a version to the desired fields.
Example:
given_version: {'runtime': 'python27',
'appscaleExtensions': {'httpPort': 80}}
desired_fields: ['appscaleExtensions.httpPort']
output: {'appscaleExtensions': {'httpPort': 80}}
Args:
given_version: A dictionary containing version details.
desired_fields: A list of strings representing key paths.
Returns:
A dictionary containing some version details.
"""
masked_version = {}
for field in desired_fields:
if not version_contains_field(given_version, field):
continue
given_version_part = given_version
masked_version_part = masked_version
field_parts = field.split('.')
for index, field_part in enumerate(field_parts):
if field_part not in masked_version_part:
if index == (len(field_parts) - 1):
masked_version_part[field_part] = given_version_part[field_part]
elif isinstance(given_version_part[field_part], dict):
masked_version_part[field_part] = {}
elif isinstance(given_version_part[field_part], list):
masked_version_part[field_part] = []
given_version_part = given_version_part[field_part]
masked_version_part = masked_version_part[field_part]
return masked_version
def canonical_path(path, base=os.curdir):
""" Resolves a path, following symlinks.
Args:
path: A string specifying a file system location.
base: The path against which to resolve relative paths.
Returns:
A string specifying a file system location.
"""
return os.path.realpath(os.path.abspath(os.path.join(base, path)))
def valid_link(link_name, link_target, base):
""" Checks if a link points to a location that resides within base.
Args:
link_name: A string specifying the location of the link.
link_target: A string specifying the target of the link.
base: A string specifying the root path of the archive.
Returns:
A boolean indicating whether or not the link is valid.
"""
tip = canonical_path(os.path.dirname(link_name), base)
target = canonical_path(os.path.join(tip, link_target), base)
return target.startswith(base)
def ensure_path(path):
""" Ensures directory exists.
Args:
path: A string specifying the path to ensure.
"""
try:
os.makedirs(os.path.join(path))
except OSError as os_error:
if os_error.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def extract_source(revision_key, location, runtime):
""" Unpacks an archive from a given location.
Args:
revision_key: A string specifying the revision key.
location: A string specifying the location of the source archive.
runtime: A string specifying the revision's runtime.
Raises:
IOError if version source archive does not exist.
InvalidSource if the source archive is not valid.
"""
revision_base = os.path.join(UNPACK_ROOT, revision_key)
ensure_path(os.path.join(revision_base, 'log'))
app_path = os.path.join(revision_base, 'app')
ensure_path(app_path)
if runtime == JAVA:
config_file_name = 'appengine-web.xml'
def is_version_config(path):
return path.endswith(config_file_name)
else:
config_file_name = 'app.yaml'
def is_version_config(path):
return canonical_path(path, app_path) == os.path.join(app_path, config_file_name)
with tarfile.open(location, 'r:gz') as archive:
# Check if the archive is valid before extracting it.
has_config = False
for file_info in archive:
file_name = file_info.name
if not canonical_path(file_name, app_path).startswith(app_path):
raise constants.InvalidSource(
'Invalid location in archive: {}'.format(file_name))
if file_info.issym() or file_info.islnk():
if not valid_link(file_name, file_info.linkname, app_path):
raise constants.InvalidSource(
'Invalid link in archive: {}'.format(file_name))
if is_version_config(file_name):
has_config = True
if not has_config:
raise constants.InvalidSource(
'Archive must have {}'.format(config_file_name))
archive.extractall(path=app_path)
if runtime == GO:
try:
shutil.move(os.path.join(app_path, 'gopath'), revision_base)
except IOError:
logger.debug(
'{} does not have a gopath directory'.format(revision_key))
if runtime == JAVA:
remove_conflicting_jars(app_path)
copy_modified_jars(app_path)
def port_is_open(host, port):
""" Checks if the given port is open.
Args:
host: A string specifying the location of the host.
port: An integer specifying the port to check.
Returns:
A boolean indicating whether or not the port is open.
"""
  sock = socket.socket()
  try:
    result = sock.connect_ex((host, port))
  finally:
    sock.close()
  return result == 0
def rename_source_archive(project_id, service_id, version):
""" Renames the given source archive to keep track of it.
Args:
project_id: A string specifying a project ID.
service_id: A string specifying a service ID.
version: A dictionary containing version details.
Returns:
A string specifying the new location of the archive.
"""
new_filename = VERSION_PATH_SEPARATOR.join(
[project_id, service_id, version['id'],
'{}.tar.gz'.format(version['revision'])])
new_location = os.path.join(SOURCES_DIRECTORY, new_filename)
os.rename(version['deployment']['zip']['sourceUrl'], new_location)
return new_location
def remove_old_archives(project_id, service_id, version):
""" Cleans up old revision archives.
Args:
project_id: A string specifying a project ID.
service_id: A string specifying a service ID.
version: A dictionary containing version details.
"""
prefix = VERSION_PATH_SEPARATOR.join(
[project_id, service_id, version['id']])
current_name = os.path.basename(version['deployment']['zip']['sourceUrl'])
old_sources = [os.path.join(SOURCES_DIRECTORY, archive) for archive
in os.listdir(SOURCES_DIRECTORY)
if archive.startswith(prefix) and archive < current_name]
for archive in old_sources:
os.remove(archive)
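# Typical flow (a sketch, not taken verbatim from the callers): after a
# successful deployment the uploaded archive is renamed to a revision-specific
# name, the version's sourceUrl is updated, and older archives for the same
# version are pruned.
#   new_location = rename_source_archive(project_id, service_id, version)
#   version['deployment']['zip']['sourceUrl'] = new_location
#   remove_old_archives(project_id, service_id, version)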
def assigned_locations(zk_client):
""" Discovers the locations assigned for all existing versions.
Args:
zk_client: A KazooClient.
Returns:
A set containing used ports.
"""
try:
project_nodes = [
'/appscale/projects/{}'.format(project)
for project in zk_client.get_children('/appscale/projects')]
except NoNodeError:
project_nodes = []
service_nodes = []
for project_node in project_nodes:
project_id = project_node.split('/')[3]
try:
new_service_ids = zk_client.get_children(
'{}/services'.format(project_node))
except NoNodeError:
continue
service_nodes.extend([
'/appscale/projects/{}/services/{}'.format(project_id, service_id)
for service_id in new_service_ids])
version_nodes = []
for service_node in service_nodes:
project_id = service_node.split('/')[3]
service_id = service_node.split('/')[5]
try:
new_version_ids = zk_client.get_children(
'{}/versions'.format(service_node))
except NoNodeError:
continue
version_nodes.extend([
constants.VERSION_NODE_TEMPLATE.format(
project_id=project_id, service_id=service_id, version_id=version_id)
for version_id in new_version_ids])
locations = set()
for version_node in version_nodes:
try:
version = json.loads(zk_client.get(version_node)[0])
except NoNodeError:
continue
# Extensions and ports should always be defined when written to a node.
extensions = version['appscaleExtensions']
locations.add(extensions['httpPort'])
locations.add(extensions['httpsPort'])
locations.add(extensions['haproxyPort'])
return locations
def assign_ports(old_version, new_version, zk_client):
""" Assign ports for a version.
Args:
old_version: A dictionary containing version details.
new_version: A dictionary containing version details.
zk_client: A KazooClient.
Returns:
A dictionary specifying the ports to reserve for the version.
"""
old_extensions = old_version.get('appscaleExtensions', {})
old_http_port = old_extensions.get('httpPort')
old_https_port = old_extensions.get('httpsPort')
haproxy_port = old_extensions.get('haproxyPort')
new_extensions = new_version.get('appscaleExtensions', {})
new_http_port = new_extensions.get('httpPort')
new_https_port = new_extensions.get('httpsPort')
# If this is not the first revision, and the client did not request
# particular ports, just use the ports from the last revision.
if old_http_port is not None and new_http_port is None:
new_http_port = old_http_port
if old_https_port is not None and new_https_port is None:
new_https_port = old_https_port
# If the ports have not changed, do not check for conflicts.
if (new_http_port == old_http_port and new_https_port == old_https_port and
haproxy_port is not None):
return {'httpPort': new_http_port, 'httpsPort': new_https_port,
'haproxyPort': haproxy_port}
taken_locations = assigned_locations(zk_client)
# Consider the version's old ports as available.
taken_locations.discard(old_http_port)
taken_locations.discard(old_https_port)
# If ports were requested, make sure they are available.
if new_http_port is not None and new_http_port in taken_locations:
raise CustomHTTPError(HTTPCodes.BAD_REQUEST,
message='Requested httpPort is already taken')
if new_https_port is not None and new_https_port in taken_locations:
raise CustomHTTPError(HTTPCodes.BAD_REQUEST,
message='Requested httpsPort is already taken')
if new_http_port is None:
try:
new_http_port = next(port for port in constants.AUTO_HTTP_PORTS
if port not in taken_locations)
except StopIteration:
raise CustomHTTPError(HTTPCodes.INTERNAL_ERROR,
message='Unable to find HTTP port for version')
if new_https_port is None:
try:
new_https_port = next(port for port in constants.AUTO_HTTPS_PORTS
if port not in taken_locations)
except StopIteration:
raise CustomHTTPError(HTTPCodes.INTERNAL_ERROR,
message='Unable to find HTTPS port for version')
if haproxy_port is None:
try:
haproxy_port = next(port for port in constants.HAPROXY_PORTS
if port not in taken_locations)
except StopIteration:
raise CustomHTTPError(HTTPCodes.INTERNAL_ERROR,
message='Unable to find HAProxy port for version')
return {'httpPort': new_http_port, 'httpsPort': new_https_port,
'haproxyPort': haproxy_port}
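# Illustrative sketch, not part of the original module: a typical assign_ports
# call when redeploying a version. The version dictionaries and port numbers are
# made-up examples; a real KazooClient is required for the conflict-check path.
def example_assign_ports(zk_client):
  old_version = {'appscaleExtensions': {'httpPort': 8080, 'httpsPort': 4380,
                                        'haproxyPort': 10000}}
  new_version = {'appscaleExtensions': {}}
  # No ports were requested and nothing changed, so the old ports are reused and
  # ZooKeeper is not consulted for conflicts.
  return assign_ports(old_version, new_version, zk_client)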
def validate_job(job):
""" Checks if a cron job configuration is valid.
Args:
job: A dictionary containing cron job configuration details.
Raises:
InvalidConfiguration if configuration is invalid.
"""
required_fields = ('schedule', 'url')
supported_fields = ('description', 'schedule', 'url', | |
city, Montana",1808),
("East Missoula CDP, Montana",2132),
("Edgar CDP, Montana",103),
("Ekalaka town, Montana",373),
("Elkhorn CDP, Montana",0),
("Elliston CDP, Montana",230),
("Elmo CDP, Montana",177),
("Emigrant CDP, Montana",233),
("Ennis town, Montana",925),
("Eureka town, Montana",1405),
("Evaro CDP, Montana",416),
("Evergreen CDP, Montana",7907),
("Fairfield town, Montana",654),
("Fairview town, Montana",978),
("Fallon CDP, Montana",68),
("Finley Point CDP, Montana",471),
("Flaxville town, Montana",34),
("Florence CDP, Montana",581),
("Forest Hill Village CDP, Montana",240),
("Forsyth city, Montana",1673),
("Fort Belknap Agency CDP, Montana",1514),
("Fort Benton city, Montana",1462),
("Fortine CDP, Montana",321),
("Fort Peck town, Montana",218),
("Fort Shaw CDP, Montana",125),
("Fort Smith CDP, Montana",107),
("Four Corners CDP, Montana",4336),
("Fox Lake CDP, Montana",79),
("Frazer CDP, Montana",330),
("Frenchtown CDP, Montana",2330),
("Froid town, Montana",206),
("Fromberg town, Montana",398),
("Gallatin Gateway CDP, Montana",868),
("Gallatin River Ranch CDP, Montana",105),
("Gardiner CDP, Montana",971),
("Garrison CDP, Montana",52),
("Geraldine town, Montana",202),
("Geyser CDP, Montana",95),
("Gibson Flats CDP, Montana",219),
("Gildford CDP, Montana",158),
("Glasgow city, Montana",3348),
("Glendive city, Montana",5232),
("Grass Range town, Montana",126),
("Great Falls city, Montana",58990),
("Greycliff CDP, Montana",96),
("Hamilton city, Montana",4628),
("Happys Inn CDP, Montana",102),
("Hardin city, Montana",3842),
("Harlem city, Montana",857),
("Harlowton city, Montana",1076),
("Harrison CDP, Montana",144),
("Havre city, Montana",9762),
("Havre North CDP, Montana",670),
("Hays CDP, Montana",996),
("Heart Butte CDP, Montana",571),
("Hebgen Lake Estates CDP, Montana",88),
("Helena city, Montana",31212),
("Helena Flats CDP, Montana",999),
("Helena Valley Northeast CDP, Montana",3201),
("Helena Valley Northwest CDP, Montana",4163),
("Helena Valley Southeast CDP, Montana",8067),
("Helena Valley West Central CDP, Montana",8277),
("Helena West Side CDP, Montana",1320),
("Heron CDP, Montana",133),
("Herron CDP, Montana",210),
("Highwood CDP, Montana",197),
("Hingham town, Montana",100),
("Hinsdale CDP, Montana",285),
("Hobson city, Montana",198),
("Hot Springs town, Montana",506),
("Hungry Horse CDP, Montana",575),
("Huntley CDP, Montana",471),
("Huson CDP, Montana",18),
("Hysham town, Montana",293),
("Indian Springs CDP, Montana",63),
("Inverness CDP, Montana",41),
("Ismay town, Montana",0),
("Jardine CDP, Montana",33),
("Jefferson City CDP, Montana",521),
("Jette CDP, Montana",174),
("Joliet town, Montana",475),
("Joplin CDP, Montana",242),
("Jordan town, Montana",473),
("<NAME>ap city, Montana",89),
("Kalispell city, Montana",22621),
("Kerr CDP, Montana",84),
("Kevin town, Montana",126),
("Kicking Horse CDP, Montana",74),
("Kila CDP, Montana",321),
("King Arthur Park CDP, Montana",1239),
("Kings Point CDP, Montana",113),
("Klein CDP, Montana",75),
("Kremlin CDP, Montana",79),
("Lake Mary Ronan CDP, Montana",77),
("Lakeside CDP, Montana",2160),
("Lame Deer CDP, Montana",2156),
("Laurel city, Montana",6849),
("Lavina town, Montana",212),
("Lewistown city, Montana",5895),
("Lewistown Heights CDP, Montana",559),
("Libby city, Montana",2663),
("Lima town, Montana",147),
("Lincoln CDP, Montana",898),
("Lindisfarne CDP, Montana",255),
("Little Bitterroot Lake CDP, Montana",144),
("Little Browning CDP, Montana",260),
("Livingston city, Montana",7478),
("Lockwood CDP, Montana",7479),
("Lodge Grass town, Montana",419),
("Lodge Pole CDP, Montana",232),
("Logan CDP, Montana",31),
("Lolo CDP, Montana",3780),
("Loma CDP, Montana",82),
("Lonepine CDP, Montana",162),
("Malmstrom AFB CDP, Montana",4116),
("Malta city, Montana",2090),
("Manhattan town, Montana",1506),
("Marion CDP, Montana",930),
("Martin City CDP, Montana",391),
("Martinsdale CDP, Montana",26),
("Marysville CDP, Montana",67),
("Maxville CDP, Montana",214),
("Medicine Lake town, Montana",284),
("Melstone town, Montana",101),
("Miles City city, Montana",8576),
("Missoula city, Montana",72125),
("Montana City CDP, Montana",2796),
("Moore town, Montana",183),
("Muddy CDP, Montana",626),
("Musselshell CDP, Montana",57),
("Nashua town, Montana",315),
("Neihart town, Montana",40),
("Niarada CDP, Montana",11),
("North Browning CDP, Montana",2720),
("Noxon CDP, Montana",252),
("Old Agency CDP, Montana",88),
("Olney CDP, Montana",225),
("Opheim town, Montana",89),
("Orchard Homes CDP, Montana",5859),
("Outlook town, Montana",87),
("Ovando CDP, Montana",73),
("Pablo CDP, Montana",2655),
("Paradise CDP, Montana",189),
("Park City CDP, Montana",746),
("Parker School CDP, Montana",522),
("Philipsburg town, Montana",630),
("Piltzville CDP, Montana",433),
("Pinesdale town, Montana",955),
("Pioneer Junction CDP, Montana",849),
("Plains town, Montana",893),
("Plentywood city, Montana",1858),
("Plevna town, Montana",226),
("Polson city, Montana",4843),
("Ponderosa Pines CDP, Montana",328),
("Pony CDP, Montana",125),
("Poplar city, Montana",896),
("Power CDP, Montana",186),
("Pray CDP, Montana",751),
("Pryor CDP, Montana",557),
("Rader Creek CDP, Montana",195),
("Radersburg CDP, Montana",112),
("Ravalli CDP, Montana",38),
("Red Lodge city, Montana",2277),
("Reed Point CDP, Montana",277),
("Reserve CDP, Montana",34),
("Rexford town, Montana",109),
("Richey town, Montana",187),
("Riverbend CDP, Montana",358),
("Roberts CDP, Montana",295),
("Rocky Boy's Agency CDP, Montana",475),
("Rocky Boy West CDP, Montana",878),
("Rocky Point CDP, Montana",97),
("Rollins CDP, Montana",218),
("Ronan city, Montana",1985),
("Roscoe CDP, Montana",32),
("Rosebud CDP, Montana",76),
("Roundup city, Montana",1817),
("Roy CDP, Montana",119),
("Rudyard CDP, Montana",126),
("Ryegate town, Montana",195),
("Saco town, Montana",239),
("Saddle Butte CDP, Montana",55),
("St. Ignatius town, Montana",746),
("St. Marie CDP, Montana",489),
("St. Pierre CDP, Montana",327),
("St. Regis CDP, Montana",305),
("St. Xavier CDP, Montana",97),
("Sand Coulee CDP, Montana",276),
("Sangrey CDP, Montana",373),
("Santa Rita CDP, Montana",267),
("Savage CDP, Montana",283),
("Scobey city, Montana",1162),
("Sedan CDP, Montana",97),
("Seeley Lake CDP, Montana",1256),
("Shawmut CDP, Montana",56),
("Shelby city, Montana",3160),
("Shepherd CDP, Montana",595),
("Sheridan town, Montana",879),
("Sidney city, Montana",6475),
("Silesia CDP, Montana",10),
("Silver Gate CDP, Montana",25),
("Simms CDP, Montana",354),
("Somers CDP, Montana",1088),
("South Browning CDP, Montana",1522),
("South Glastonbury CDP, Montana",351),
("South Hills CDP, Montana",700),
("Spokane Creek CDP, Montana",344),
("Springdale CDP, Montana",57),
("Springhill CDP, Montana",175),
("Stanford town, Montana",330),
("Starr School CDP, Montana",292),
("Stevensville town, Montana",2163),
("Stockett CDP, Montana",224),
("Stryker CDP, Montana",38),
("Sula CDP, Montana",93),
("Sunburst town, Montana",276),
("Sun Prairie CDP, Montana",1515),
("Sun River CDP, Montana",88),
("Superior town, Montana",690),
("Swan Lake CDP, Montana",171),
("Sweet Grass CDP, Montana",55),
("Sylvanite CDP, Montana",70),
("Terry town, Montana",782),
("The Silos CDP, Montana",682),
("Thompson Falls city, Montana",1231),
("Three Forks city, Montana",1922),
("Toston CDP, Montana",79),
("Townsend city, Montana",2069),
("Trego CDP, Montana",554),
("Trout Creek CDP, Montana",150),
("Troy city, Montana",687),
("Turah CDP, Montana",394),
("Turner CDP, Montana",84),
("Turtle Lake CDP, Montana",325),
("Twin Bridges town, Montana",305),
("Ulm CDP, Montana",632),
("Valier town, Montana",617),
("Vaughn CDP, Montana",730),
("Victor CDP, Montana",753),
("Virginia City town, Montana",204),
("Walkerville town, Montana",737),
("Weeksville CDP, Montana",72),
("Westby town, Montana",139),
("West Glacier CDP, Montana",159),
("West Glendive CDP, Montana",1803),
("West Havre CDP, Montana",217),
("West Kootenai CDP, Montana",64),
("West Yellowstone town, Montana",1016),
("Wheatland CDP, Montana",398),
("Whitefish city, Montana",7309),
("Whitehall town, Montana",881),
("White Haven CDP, Montana",490),
("White Sulphur Springs city, Montana",939),
("Whitewater CDP, Montana",124),
("Wibaux town, Montana",556),
("Willow Creek CDP, Montana",247),
("Wilsall CDP, Montana",322),
("Wineglass CDP, Montana",411),
("Winifred town, Montana",118),
("Winnett town, Montana",165),
("Winston CDP, Montana",245),
("Wisdom CDP, Montana",96),
("Wolf Point city, Montana",2799),
("Woods Bay CDP, Montana",800),
("Worden CDP, Montana",668),
("Wye CDP, Montana",664),
("Wyola CDP, Montana",194),
("Yaak CDP, Montana",282),
("Zortman CDP, Montana",30),
("Abie village, Nebraska",82),
("Adams village, Nebraska",517),
("Ainsworth city, Nebraska",1722),
("Albion city, Nebraska",1501),
("Alda village, Nebraska",758),
("Alexandria village, Nebraska",153),
("Allen village, Nebraska",348),
("Alliance city, Nebraska",8313),
("Alma city, Nebraska",1378),
("Alvo village, Nebraska",138),
("Ames CDP, Nebraska",69),
("Amherst village, Nebraska",158),
("Anoka village, Nebraska",14),
("Anselmo village, Nebraska",164),
("Ansley village, Nebraska",470),
("Arapahoe city, Nebraska",1206),
("Arcadia village, Nebraska",316),
("Archer CDP, Nebraska",42),
("Arlington village, Nebraska",1534),
("Arnold village, Nebraska",750),
("Arthur village, Nebraska",118),
("Ashland city, Nebraska",2447),
("Ashton village, Nebraska",245),
("Aten CDP, Nebraska",24),
("Atkinson city, Nebraska",1418),
("Atlanta village, Nebraska",90),
("Auburn city, Nebraska",3322),
("Aurora city, Nebraska",4481),
("Avoca village, Nebraska",265),
("Axtell village, Nebraska",731),
("Ayr village, Nebraska",85),
("Bancroft village, Nebraska",481),
("Barada village, Nebraska",12),
("Barneston village, Nebraska",110),
("Bartlett village, Nebraska",112),
("Bartley village, Nebraska",342),
("Bassett city, Nebraska",659),
("Battle Creek city, Nebraska",1339),
("Bayard city, Nebraska",1013),
("Bazile Mills village, Nebraska",32),
("Beatrice city, Nebraska",12307),
("Beaver City city, Nebraska",495),
("Beaver Crossing village, Nebraska",367),
("Bee village, Nebraska",233),
("Beemer village, Nebraska",628),
("Belden village, Nebraska",117),
("Belgrade village, Nebraska",125),
("Bellevue city, Nebraska",53225),
("Bellwood village, Nebraska",351),
("Belmar CDP, Nebraska",86),
("Belvidere village, Nebraska",38),
("Benedict village, Nebraska",256),
("Benkelman city, Nebraska",1015),
("Bennet village, Nebraska",1049),
("Bennington city, Nebraska",1839),
("Berea CDP, Nebraska",16),
("Bertrand village, Nebraska",774),
("Berwyn village, Nebraska",86),
("Big Springs village, Nebraska",515),
("Bladen village, Nebraska",181),
("Blair city, Nebraska",7684),
("Bloomfield city, Nebraska",974),
("Bloomington village, Nebraska",137),
("Blue Hill city, Nebraska",1028),
("Blue Springs city, Nebraska",259),
("Bow Valley CDP, Nebraska",91),
("Boys Town village, Nebraska",900),
("Bradshaw village, Nebraska",321),
("Brady village, Nebraska",382),
("Brainard village, Nebraska",384),
("Brewster village, Nebraska",9),
("Bridgeport city, Nebraska",1700),
("Bristow village, Nebraska",86),
("Broadwater village, Nebraska",154),
("Brock village, Nebraska",116),
("Broken Bow city, Nebraska",3522),
("Brownlee CDP, Nebraska",4),
("Brownville village, Nebraska",99),
("Brule village, Nebraska",398),
("Bruning village, Nebraska",287),
("Bruno village, Nebraska",113),
("Brunswick village, Nebraska",191),
("Burchard village, Nebraska",46),
("Burr village, Nebraska",32),
("Burton village, Nebraska",0),
("Burwell city, Nebraska",1239),
("Bushnell village, Nebraska",184),
("Butte village, Nebraska",330),
("Byron village, Nebraska",95),
("Cairo village, Nebraska",980),
("Callaway village, Nebraska",746),
("Cambridge city, Nebraska",1281),
("Campbell village, Nebraska",332),
("Carleton village, Nebraska",98),
("Carroll village, Nebraska",209),
("Cedar Bluffs village, Nebraska",601),
("Cedar Creek village, Nebraska",442),
("Cedar Rapids village, Nebraska",471),
("Center village, Nebraska",85),
("Central City city, Nebraska",2893),
("Ceresco village, Nebraska",1054),
("Chadron city, Nebraska",5654),
("Chalco CDP, Nebraska",11456),
("Chambers village, Nebraska",316),
("Champion CDP, Nebraska",40),
("Chapman village, Nebraska",247),
("Chappell city, Nebraska",911),
("Chester village, Nebraska",268),
("Clarks village, Nebraska",360),
("Clarkson city, Nebraska",669),
("Clatonia village, Nebraska",389),
("Clay Center city, Nebraska",794),
("Clearwater village, Nebraska",366),
("Clinton village, Nebraska",35),
("Cody village, Nebraska",195),
("Coleridge village, Nebraska",554),
("Colon village, Nebraska",101),
("Columbus city, Nebraska",22992),
("Comstock village, Nebraska",131),
("Concord village, Nebraska",122),
("Cook village, Nebraska",387),
("Cordova village, Nebraska",103),
("Cornlea village, Nebraska",22),
("Cortland village, Nebraska",502),
("Cotesfield village, Nebraska",37),
("Cowles village, Nebraska",13),
("Cozad city, Nebraska",3832),
("Crab Orchard village, Nebraska",58),
("Craig village, Nebraska",166),
("Crawford city, Nebraska",1116),
("Creighton city, Nebraska",1194),
("Creston village, Nebraska",218),
("Crete city, Nebraska",7050),
("Crofton city, Nebraska",879),
("Crookston village, Nebraska",70),
("Culbertson village, Nebraska",562),
("Curtis city, Nebraska",848),
("Cushing village, Nebraska",46),
("Dakota City city, Nebraska",1919),
("Dalton village, Nebraska",313),
("Danbury village, Nebraska",76),
("Dannebrog village, Nebraska",336),
("Davenport village, Nebraska",358),
("Davey village, Nebraska",169),
("David City city, Nebraska",2831),
("Dawson village, Nebraska",166),
("Daykin village, Nebraska",221),
("Decatur village, Nebraska",364),
("Denton village, Nebraska",245),
("Deshler city, Nebraska",711),
("Deweese village, Nebraska",61),
("De Witt village, Nebraska",581),
("Diller village, Nebraska",265),
("Dix village, Nebraska",325),
("Dixon village, Nebraska",110),
("Dodge village, Nebraska",558),
("Doniphan village, Nebraska",1020),
("Dorchester village, Nebraska",679),
("Douglas village, Nebraska",161),
("Du Bois village, Nebraska",95),
("Dunbar village, Nebraska",152),
("Duncan village, Nebraska",491),
("Dunning village, Nebraska",109),
("Dwight village, Nebraska",189),
("Eagle village, Nebraska",988),
("Eddyville village, Nebraska",95),
("Edgar city, Nebraska",369),
("Edison village, Nebraska",204),
("Elba village, Nebraska",346),
("Elgin city, Nebraska",723),
("Elk Creek village, Nebraska",121),
("Elm Creek village, Nebraska",1160),
("Elmwood village, Nebraska",686),
("Elsie village, Nebraska",117),
("Elwood village, Nebraska",754),
("Elyria village, Nebraska",71),
("Emerson village, Nebraska",915),
("Emmet village, Nebraska",41),
("Enders CDP, Nebraska",8),
("Endicott village, Nebraska",151),
("Ericson village, Nebraska",169),
("Eustis village, Nebraska",514),
("Ewing village, Nebraska",454),
("Exeter village, Nebraska",610),
("Fairbury city, Nebraska",3711),
("Fairfield city, Nebraska",421),
("Fairmont village, Nebraska",656),
("Falls City city, Nebraska",4176),
("Farnam village, Nebraska",250),
("Farwell village, Nebraska",124),
("Filley village, Nebraska",123),
("Firth village, Nebraska",453),
("Fontanelle CDP, Nebraska",22),
("Fordyce village, Nebraska",131),
("Fort Calhoun city, Nebraska",893),
("Foster village, Nebraska",68),
("Franklin city, Nebraska",1041),
("Fremont city, Nebraska",26426),
("Friend city, Nebraska",1133),
("Fullerton city, Nebraska",1459),
("Funk village, Nebraska",253),
("Gandy village, Nebraska",52),
("Garland village, Nebraska",248),
("Garrison village, Nebraska",51),
("Geneva city, Nebraska",1997),
("Genoa city, Nebraska",1075),
("Gering city, Nebraska",8312),
("Gibbon city, Nebraska",2072),
("Gilead village, Nebraska",27),
("Giltner village, Nebraska",315),
("Glenvil village, Nebraska",306),
("Glenwood CDP, Nebraska",457),
("Goehner village, Nebraska",120),
("Gordon city, Nebraska",1726),
("Gothenburg city, Nebraska",3482),
("Grafton village, Nebraska",134),
("Grand Island city, Nebraska",51187),
("Grant city, Nebraska",1342),
("Greeley Center village, Nebraska",380),
("Greenwood village, Nebraska",546),
("Gresham village, Nebraska",244),
("Gretna city, Nebraska",5037),
("Gross village, Nebraska",0),
("Guide Rock village, Nebraska",209),
("Gurley village, Nebraska",221),
("Hadar village, Nebraska",231),
("Haigler village, Nebraska",228),
("Hallam village, Nebraska",244),
("Halsey village, Nebraska",67),
("Hamlet village, Nebraska",65),
("Hampton village, Nebraska",460),
("Harbine village, Nebraska",76),
("Hardy village, Nebraska",186),
("Harrisburg CDP, Nebraska",66),
("Harrison village, Nebraska",301),
("Hartington city, Nebraska",1611),
("Harvard city, Nebraska",1176),
("Hastings city, Nebraska",24922),
("Hayes Center village, Nebraska",323),
("Hay Springs village, Nebraska",602),
("Hazard village, Nebraska",58),
("Heartwell village, Nebraska",72),
("Hebron city, Nebraska",1694),
("Hemingford village, Nebraska",867),
("Henderson city, Nebraska",991),
("Hendley village, Nebraska",12),
("Henry village, Nebraska",81),
("Herman village, Nebraska",296),
("Hershey village, Nebraska",555),
("Hickman city, Nebraska",2146),
("Hildreth village, Nebraska",449),
("Holbrook village, Nebraska",259),
("Holdrege city, Nebraska",5476),
("Holmesville CDP, Nebraska",15),
("Holstein village, Nebraska",274),
("Homer village, Nebraska",485),
("Hooper city, Nebraska",789),
("Hordville village, Nebraska",110),
("Hoskins village, Nebraska",255),
("Howard City village, Nebraska",178),
("Howells village, Nebraska",702),
("Hubbard village, Nebraska",208),
("Hubbell village, Nebraska",80),
("Humboldt city, Nebraska",876),
("Humphrey city, Nebraska",960),
("Huntley village, Nebraska",25),
("Hyannis village, Nebraska",199),
("Imperial city, Nebraska",1857),
("Inavale CDP, Nebraska",65),
("Indianola city, Nebraska",603),
("Inglewood village, Nebraska",420),
("Inland CDP, Nebraska",7),
("Inman village, Nebraska",111),
("Ithaca village, Nebraska",148),
("Jackson village, Nebraska",151),
("Jansen village, Nebraska",140),
("Johnson village, Nebraska",320),
("Johnstown village, Nebraska",64),
("Julian village, Nebraska",53),
("Juniata village, Nebraska",711),
("Kearney city, Nebraska",33273),
("Kenesaw village, Nebraska",1132),
("Kennard village, Nebraska",383),
("Keystone CDP, Nebraska",46),
("Kilgore village, Nebraska",74),
("Kimball city, Nebraska",2762),
("King Lake CDP, Nebraska",132),
("Lakeview CDP, Nebraska",358),
("Lamar village, Nebraska",9),
("La Platte CDP, Nebraska",135),
("Laurel city, Nebraska",1047),
("La Vista city, Nebraska",17077),
("Lawrence village, Nebraska",391),
("Lebanon village, Nebraska",73),
("Leigh village, Nebraska",372),
("Lemoyne CDP, Nebraska",92),
("Leshara village, Nebraska",101),
("Lewellen village, Nebraska",198),
("Lewiston village, Nebraska",66),
("Lexington city, Nebraska",10067),
("Liberty | |
import time
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from astropy.io import fits
from gwcs import wcstools
from gwcs.utils import _toindex
from jwst import datamodels
from jwst.assign_wcs import nirspec
from . import auxiliary_functions as auxfunc
"""
This script tests the pipeline flat field step output for IFU data. It is the Python version of the IDL script
(with the same name) written by <NAME>, with later changes made by <NAME>.
"""
# HEADER
__author__ = "<NAME>"
__version__ = "2.6"
# HISTORY
# Nov 2017 - Version 1.0: initial version completed
# May 2018 - Version 2.0: Completely changed script to use the datamodel instead of the compute_world_coordinates
# script, and added new routines for statistics calculations.
# Jun 2018 - Version 2.2: Removed function reverse_cols because it was not behaving as expected.
# Feb 2019 - Version 2.3: Maria added lines to properly rotate NRS2 s- and d-flats.
# Apr 2019 - Version 2.4: Implemented logging capability.
# May 2019 - Version 2.5: Implemented plot of residuals as well as histogram.
# Jun 2019 - Version 2.6: Updated name of interpolated flat to be the default pipeline name for this file.
def mk_hist(title, delfg, delfg_mean, delfg_median, delfg_std, save_figs, show_figs, plot_name):
# create histogram
font = {#'family' : 'normal',
'weight' : 'normal',
'size' : 16}
matplotlib.rc('font', **font)
alpha = 0.2
fontsize = 15
fig = plt.figure(1, figsize=(12, 10))
plt.subplots_adjust(hspace=.4)
ax = plt.subplot(111)
plt.title(title)
if "all_slices" in title:
plt.xlabel("Median values")
else:
plt.xlabel("flat$_{pipe}$ - flat$_{calc}$")
plt.ylabel("N")
xmin = min(delfg) - (max(delfg) - min(delfg))*0.1
xmax = max(delfg) + (max(delfg) - min(delfg))*0.1
plt.xlim(xmin, xmax)
if "all_slices" in title:
#x_median = r"$\mu$(medians) = {:0.5}".format(delfg_median)
x_stddev = r"$\sigma$(medians) = {:0.5}".format(delfg_std)
else:
#x_median = "median = {:0.3}".format(delfg_median)
x_stddev = "stddev = {:0.3}".format(delfg_std)
# add vertical line at mean and median
plt.axvline(delfg_mean, label="mean = %0.3e"%(delfg_mean), color="g")
plt.axvline(delfg_median, label="median = %0.3e"%(delfg_median), linestyle="-.", color="b")
plt.legend()
# add standard deviation
ax.text(0.74, 0.86, x_stddev, transform=ax.transAxes, fontsize=fontsize)
plt.tick_params(axis='both', which='both', bottom=True, top=True, right=True, direction='in', labelbottom=True)
binwidth = (xmax-xmin)/40.
_, _, _ = ax.hist(delfg, bins=np.arange(xmin, xmax + binwidth, binwidth), histtype='bar', ec='k', facecolor="red", alpha=alpha)
if save_figs:
if plot_name is None:
t = (title, ".pdf")
plot_name = "".join(t)
plt.savefig(plot_name)
print ('\n Plot saved: ', plot_name)
if show_figs:
plt.show()
plt.close()
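# Illustrative usage sketch, not part of the original script: histogram a
# synthetic array of flat-field residuals with mk_hist. The residual values and
# the title are made-up examples.
def demo_mk_hist(show_figs=False, save_figs=False):
    rng = np.random.default_rng(seed=0)
    delfg = rng.normal(loc=0.0, scale=1.0e-7, size=500)  # synthetic residuals
    mk_hist('demo_flattest_residuals', delfg, delfg.mean(), np.median(delfg),
            delfg.std(), save_figs, show_figs, plot_name=None)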
def flattest(step_input_filename, dflatref_path=None, sfile_path=None, fflat_path=None, writefile=False,
mk_all_slices_plt=False, show_figs=True, save_figs=False, plot_name=None,
threshold_diff=1.0e-7, debug=False):
"""
This function calculates the difference between the pipeline and the calculated flat field values.
The function uses the datamodel of the assign_wcs step output instead of the compute_world_coordinates.py script.
Args:
step_input_filename: str, name of the output fits file from the 2d_extract step (with full path)
dflatref_path: str, path to the D-flat reference fits files
sfile_path: str, path to the S-flat reference fits files
fflat_path: str, path to the F-flat reference fits files
msa_conf_root: str, path to where the MSA configuration fits file lives
writefile: boolean, if True writes the fits files of the calculated flat and difference images
show_figs: boolean, whether to show plots or not
save_figs: boolean, save the plots (the 3 plots can be saved or not independently with the function call)
plot_name: string, desired name (if name is not given, the plot function will name the plot by
default)
threshold_diff: float, threshold difference between pipeline output and ESA file
debug: boolean, if true a series of print statements will show on-screen
Returns:
- 1 plot, if told to save and/or show.
- median_diff: Boolean, True if smaller or equal to 1e-14
- log_msgs: list, all print statements are captured in this variable
"""
log_msgs = []
# start the timer
flattest_start_time = time.time()
# get info from the flat field file
file_path = step_input_filename.replace(os.path.basename(step_input_filename), "")
det = fits.getval(step_input_filename, "DETECTOR", 0)
exptype = fits.getval(step_input_filename, "EXP_TYPE", 0)
grat = fits.getval(step_input_filename, "GRATING", 0)
filt = fits.getval(step_input_filename, "FILTER", 0)
file_basename = os.path.basename(step_input_filename.replace(".fits", ""))
msg1 = 'step_input_filename='+step_input_filename
msg2 = "flat_field_file --> Grating:"+grat+" Filter:"+filt+" EXP_TYPE:"+exptype
print(msg1)
print(msg2)
log_msgs.append(msg1)
log_msgs.append(msg2)
# read in the on-the-fly flat image
flatfile = step_input_filename.replace("flat_field.fits", "interpolatedflat.fits")
pipeflat = fits.getdata(flatfile, "SCI")
# get the reference files
msg = "Getting and reading the D-, S-, and F-flats for this specific IFU configuration... "
print(msg)
log_msgs.append(msg)
# D-Flat
dflat_ending = "f_01.03.fits"
dfile = dflatref_path+"_nrs1_"+dflat_ending
if det == "NRS2":
dfile = dfile.replace("nrs1", "nrs2")
msg = "Using D-flat: "+dfile
print(msg)
log_msgs.append(msg)
dfim = fits.getdata(dfile, "SCI")#1)
dfimdq = fits.getdata(dfile, "DQ")#4)
# need to flip/rotate the image into science orientation
ns = np.shape(dfim)
dfim = np.transpose(dfim, (0, 2, 1))  # keep in mind that 0,1,2 = z,y,x in Python, whereas 0,1,2 = x,y,z in IDL
dfimdq = np.transpose(dfimdq)
if det == "NRS2":
# rotate science data by 180 degrees for NRS2
dfim = dfim[..., ::-1, ::-1]
dfimdq = dfimdq[..., ::-1, ::-1]
naxis3 = fits.getval(dfile, "NAXIS3", "SCI")#1)
# get the wavelength values
dfwave = np.array([])
for i in range(naxis3):
keyword = "PFLAT_"+str(i+1)
dfwave = np.append(dfwave, fits.getval(dfile, keyword, "SCI"))#1))
dfrqe = fits.getdata(dfile, 2)
# S-flat
tsp = exptype.split("_")
mode = tsp[1]
if filt == "F070LP":
flat = "FLAT4"
elif filt == "F100LP":
flat = "FLAT1"
elif filt == "F170LP":
flat = "FLAT2"
elif filt == "F290LP":
flat = "FLAT3"
elif filt == "CLEAR":
flat = "FLAT5"
else:
msg = "No filter correspondence. Exiting the program."
print(msg)
log_msgs.append(msg)
# This is the key argument for the assert pytest function
msg = "Test skipped because there is no flat correspondence for the filter in the data: {}".format(filt)
median_diff = "skip"
return median_diff, msg, log_msgs
sflat_ending = "f_01.01.fits"
sfile = sfile_path+"_"+grat+"_OPAQUE_"+flat+"_nrs1_"+sflat_ending
if debug:
print("grat = ", grat)
print("flat = ", flat)
print("sfile used = ", sfile)
if det == "NRS2":
sfile = sfile.replace("nrs1", "nrs2")
msg = "Using S-flat: "+sfile
print(msg)
log_msgs.append(msg)
sfim = fits.getdata(sfile, "SCI")#1)
sfimdq = fits.getdata(sfile, "DQ")#3)
# need to flip/rotate image into science orientation
sfim = np.transpose(sfim)
sfimdq = np.transpose(sfimdq)
if det == "NRS2":
# rotate science data by 180 degrees for NRS2
sfim = sfim[..., ::-1, ::-1]
sfimdq = sfimdq[..., ::-1, ::-1]
sfv = fits.getdata(sfile, 5)
# F-Flat
fflat_ending = "_01.01.fits"
if mode in fflat_path:
ffile = fflat_path+"_"+filt+fflat_ending
else:
msg = "Wrong path for mode F-flat. This script handles mode "+mode+" only."
print(msg)
log_msgs.append(msg)
# This is the key argument for the assert pytest function
result_msg = "Wrong path for mode F-flat. Test skipped because mode is not IFU."
median_diff = "skip"
return median_diff, result_msg, log_msgs
msg = "Using F-flat: "+ffile
print(msg)
log_msgs.append(msg)
ffv = fits.getdata(ffile, "IFU")#1)
# now go through each pixel in the test data
if writefile:
# create the fits list to hold the calculated flat values for each slit
hdu0 = fits.PrimaryHDU()
outfile = fits.HDUList()
outfile.append(hdu0)
# create the fits list to hold the image of pipeline-calculated difference values
hdu0 = fits.PrimaryHDU()
complfile = fits.HDUList()
complfile.append(hdu0)
# get the datamodel from the assign_wcs output file
assign_wcs_file = step_input_filename.replace("_flat_field.fits", "_assign_wcs.fits")
model = datamodels.ImageModel(assign_wcs_file)
ifu_slits = nirspec.nrs_ifu_wcs(model)
# loop over the slices
all_delfg_mean, all_delfg_mean_arr, all_delfg_median, all_test_result = [], [], [], []
msg = "\n Now looping through the slices, this may take some time... "
print(msg)
log_msgs.append(msg)
for n_ext, slice in enumerate(ifu_slits):
if n_ext < 10:
pslice = "0"+repr(n_ext)
else:
pslice = repr(n_ext)
msg = "\nWorking with slice: "+pslice
print(msg)
log_msgs.append(msg)
# get the wavelength
# slice.x(y)start are 1-based, turn them to 0-based for extraction
x, y = wcstools.grid_from_bounding_box(slice.bounding_box, (1, 1), center=True)
ra, dec, wave = slice(x, y)
# get the subwindow origin (technically no subwindows for IFU, but need this for comparing to the
# full frame on-the-fly flat image).
px0 = model.meta.subarray.xstart - 1 + int(_toindex(slice.bounding_box[0][0])) + 1
py0 = model.meta.subarray.ystart - 1 + int(_toindex(slice.bounding_box[1][0])) + 1
n_p = np.shape(wave)
nx, ny = n_p[1], n_p[0]
nw = nx * ny
msg = " Subwindow origin: px0="+repr(px0)+" py0="+repr(py0)
print(msg)
log_msgs.append(msg)
if debug:
print("n_p = ", n_p)
print("nw = ", nw)
# initialize arrays of the right size
delf = np.zeros([nw]) + 999.0
flatcor = np.zeros([nw]) + 999.0
sffarr = np.zeros([nw])
calc_flat = np.zeros([2048, 2048]) + 999.0
# loop through the wavelengths
msg = " Looping through the wavelengths, this may take a little time ... "
print(msg)
| |
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, List, NewType, Optional, Tuple, Union
if TYPE_CHECKING:
from typing_extensions import Literal
_OTTypeAction = NewType('_OTTypeAction', int)
_OTTypeActionNop = _OTTypeAction(0)
_OTTypeActionSkip = _OTTypeAction(1)
_OTTypeActionInsert = _OTTypeAction(2)
_OTTypeActionDelete = _OTTypeAction(3)
_OTType = Tuple[_OTTypeAction, Union[int, str]]
_OTRawType = Union[int, str, Dict[str, str]]
def _resolve_ot(ot_raw: _OTRawType) -> _OTType:
if isinstance(ot_raw, int):
if ot_raw <= 0:
raise ValueError('invalid OTSkip')
return (_OTTypeActionSkip, ot_raw)
elif isinstance(ot_raw, str):
if ot_raw == '':
raise ValueError('invalid OTInsert')
return (_OTTypeActionInsert, ot_raw)
elif isinstance(ot_raw, dict):
s = ot_raw.get('d', '')
if not isinstance(s, str) or s == '':
raise ValueError('invalid OTDelete')
return (_OTTypeActionDelete, s)
raise ValueError('unexpected OT structure')
def _make_iter_ots(ot_raw_list: List[_OTRawType]) -> List[_OTType]:
ots = []
try:
for ot_raw in ot_raw_list:
ots.append(_resolve_ot(ot_raw))
except ValueError:
pass
return ots
def _to_ot_raw_list(ots: List[_OTType]) -> List[_OTRawType]:
ot_raw_list = []
for ot_action, ot_arg in ots:
ot_raw: _OTRawType
if ot_action == _OTTypeActionSkip:
assert isinstance(ot_arg, int)
ot_raw = ot_arg
elif ot_action == _OTTypeActionInsert:
assert isinstance(ot_arg, str)
ot_raw = ot_arg
elif ot_action == _OTTypeActionDelete:
assert isinstance(ot_arg, str)
ot_raw = {'d': ot_arg}
ot_raw_list.append(ot_raw)
return ot_raw_list
class _Appender:
def __init__(self, ots: List[_OTType]) -> None:
self.ots = ots
def append(self, ot: Optional[_OTType]) -> None:
if ot is None:
return
if not self.ots:
self.ots.append(ot)
return
last_ot_action, last_ot_arg = self.ots[-1]
ot_action, ot_arg = ot
if last_ot_action == _OTTypeActionSkip and ot_action == _OTTypeActionSkip:
assert isinstance(last_ot_arg, int)
assert isinstance(ot_arg, int)
self.ots[-1] = (_OTTypeActionSkip, last_ot_arg + ot_arg)
elif last_ot_action == _OTTypeActionInsert and ot_action == _OTTypeActionInsert:
assert isinstance(last_ot_arg, str)
assert isinstance(ot_arg, str)
self.ots[-1] = (_OTTypeActionInsert, last_ot_arg + ot_arg)
elif last_ot_action == _OTTypeActionDelete and ot_action == _OTTypeActionDelete:
assert isinstance(last_ot_arg, str)
assert isinstance(ot_arg, str)
self.ots[-1] = (_OTTypeActionDelete, last_ot_arg + ot_arg)
else:
self.ots.append(ot)
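# Minimal sketch, not part of the original module: _Appender merges consecutive
# operations of the same kind, so four appends collapse into two components
# (an insert of 'abcd' followed by a skip of 5).
def _demo_appender() -> List[_OTType]:
    ots: List[_OTType] = []
    appender = _Appender(ots)
    appender.append((_OTTypeActionInsert, 'ab'))
    appender.append((_OTTypeActionInsert, 'cd'))  # merged with the previous insert
    appender.append((_OTTypeActionSkip, 2))
    appender.append((_OTTypeActionSkip, 3))       # merged into a single skip of 5
    return ots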
class _Taker:
def __init__(self, ots: List[_OTType]) -> None:
self.ots = ots
self._idx = 0
self._offset = 0
def take(self, n: int, indivisable: Optional[Literal['d', 'i']] = None) -> Optional[_OTType]:
if self._idx == len(self.ots):
if n == -1:
return None
return (_OTTypeActionSkip, n)
ot_action, ot_arg = self.ots[self._idx]
ret_ot: Optional[_OTType] = None
if ot_action == _OTTypeActionSkip:
assert isinstance(ot_arg, int)
if n == -1 or ot_arg - self._offset <= n:
ret_ot = (_OTTypeActionSkip, ot_arg - self._offset)
self._idx += 1
self._offset = 0
else:
ret_ot = (_OTTypeActionSkip, n)
self._offset += n
elif ot_action == _OTTypeActionInsert:
assert isinstance(ot_arg, str)
if n == -1 or indivisable == 'i' or len(ot_arg) - self._offset <= n:
ret_ot = (_OTTypeActionInsert, ot_arg[self._offset:])
self._idx += 1
self._offset = 0
else:
ret_ot = (_OTTypeActionInsert, ot_arg[self._offset:self._offset + n])
self._offset += n
elif ot_action == _OTTypeActionDelete:
assert isinstance(ot_arg, str)
if n == -1 or indivisable == 'd' or len(ot_arg) - self._offset <= n:
ret_ot = (_OTTypeActionDelete, ot_arg[self._offset:])
self._idx += 1
self._offset = 0
else:
ret_ot = (_OTTypeActionDelete, ot_arg[self._offset:self._offset + n])
self._offset += n
return ret_ot
def peak_action(self) -> _OTTypeAction:
if 0 <= self._idx < len(self.ots):
return self.ots[self._idx][0]
return _OTTypeActionNop
def _trim(ots: List[_OTType]) -> None:
'''Trim ots in place
Discard the trailing skip in ots, if any.
`ots` must be normalized.
'''
if ots and ots[-1][0] == _OTTypeActionSkip:
ots.pop()
def check(ot_raw_list: List[_OTRawType], *, check_unoptimized: bool = True) -> bool:
if not isinstance(ot_raw_list, list):
raise TypeError('`ot_raw_list` must be list')
last_ot_action = _OTTypeActionNop
try:
for ot_raw in ot_raw_list:
resolved_ot = _resolve_ot(ot_raw)
if check_unoptimized and last_ot_action == resolved_ot[0]:
# un-optimized ots
return False
last_ot_action = resolved_ot[0]
except (ValueError, TypeError):
return False
if check_unoptimized and last_ot_action == _OTTypeActionSkip:
return False
return True
def apply(doc: str, ot_raw_list: List[_OTRawType], *, check_unoptimized: bool = True) -> str:
'''Apply ots to doc
'''
if not isinstance(doc, str):
raise TypeError('`doc` must be string')
if not isinstance(ot_raw_list, list):
raise TypeError('`ot_raw_list` must be list')
if not check(ot_raw_list, check_unoptimized=check_unoptimized):
raise ValueError('invalid OTs')
new_doc = []
pos = 0
for ot_action, ot_arg in _make_iter_ots(ot_raw_list):
if ot_action == _OTTypeActionSkip:
assert isinstance(ot_arg, int)
if ot_arg > len(doc) - pos:
raise ValueError('skip exceeds doc length')
new_doc.append(doc[pos:pos + ot_arg])
pos += ot_arg
elif ot_action == _OTTypeActionInsert:
assert isinstance(ot_arg, str)
new_doc.append(ot_arg)
elif ot_action == _OTTypeActionDelete:
assert isinstance(ot_arg, str)
if doc[pos:pos + len(ot_arg)] != ot_arg:
raise ValueError('inconsistent delete (doc, OT.arg)', doc[pos:pos + len(ot_arg)], ot_arg)
pos += len(ot_arg)
new_doc.append(doc[pos:])
return ''.join(new_doc)
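# Illustrative usage, not part of the original module; the expected result was
# traced from the implementation above: skip two characters, insert 'XY', then
# delete the trailing 'llo'.
def _demo_apply() -> str:
    return apply('hello', [2, 'XY', {'d': 'llo'}])  # returns 'heXY'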
def inverse_apply(doc: str, ot_raw_list: List[_OTRawType], *, check_unoptimized: bool = True) -> str:
'''Inversely apply ots to doc
'''
if not isinstance(doc, str):
raise TypeError('`doc` must be string')
if not isinstance(ot_raw_list, list):
raise TypeError('`ot_raw_list` must be list')
if not check(ot_raw_list, check_unoptimized=check_unoptimized):
raise ValueError('invalid OTs')
ots = _make_iter_ots(ot_raw_list)
last_pos = 0
for ot_action, ot_arg in ots:
if ot_action == _OTTypeActionSkip:
assert isinstance(ot_arg, int)
last_pos += ot_arg
elif ot_action == _OTTypeActionInsert:
assert isinstance(ot_arg, str)
last_pos += len(ot_arg)
elif ot_action == _OTTypeActionDelete:
pass
if last_pos > len(doc):
raise ValueError('skip exceeds doc length')
old_doc = [doc[last_pos:]]
for ot_action, ot_arg in reversed(ots):
if ot_action == _OTTypeActionSkip:
assert isinstance(ot_arg, int)
old_doc.append(doc[last_pos - ot_arg:last_pos])
last_pos -= ot_arg
elif ot_action == _OTTypeActionInsert:
assert isinstance(ot_arg, str)
if doc[last_pos - len(ot_arg):last_pos] != ot_arg:
raise ValueError('inconsistent delete (doc, OT.arg)', doc[last_pos - len(ot_arg):last_pos], ot_arg)
last_pos -= len(ot_arg)
elif ot_action == _OTTypeActionDelete:
assert isinstance(ot_arg, str)
old_doc.append(ot_arg)
old_doc.append(doc[:last_pos])
return ''.join(reversed(old_doc))
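# Illustrative usage, not part of the original module: inverse_apply undoes the
# edit from the apply example above and recovers the original document.
def _demo_inverse_apply() -> str:
    return inverse_apply('heXY', [2, 'XY', {'d': 'llo'}])  # returns 'hello'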
def normalize(ot_raw_list: List[_OTRawType]) -> List[_OTRawType]:
'''Normalize ots
Merge consecutive operations and trim the result.
'''
if not check(ot_raw_list, check_unoptimized=False):
raise ValueError('invalid OTs')
new_ots: List[_OTType] = []
appender = _Appender(new_ots)
for ot in _make_iter_ots(ot_raw_list):
appender.append(ot)
_trim(new_ots)
return _to_ot_raw_list(new_ots)
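# Illustrative usage, not part of the original module: consecutive skips and
# inserts are merged and the trailing skip is trimmed.
def _demo_normalize() -> List[_OTRawType]:
    return normalize([1, 1, 'a', 'b', 2])  # returns [2, 'ab']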
def transform(
ot_raw_list_1: List[_OTRawType],
ot_raw_list_2: List[_OTRawType],
side: Literal['left', 'right'],
) -> List[_OTRawType]:
'''Transform `ot_raw_list_1` by `ot_raw_list_2`
Transform `ot_raw_list_1` to have same meaning when `ot_raw_list_2` is applied
to the doc before `ot_raw_list_1`.
`side` is required to break ties, for example, if we assume
`ot_raw_list_1 = ['a']` and `ot_raw_list_2 = ['b']`, the result can be either
'ab' or 'ba' depending on the side.
- `transform(['a'], ['b'], 'left') == ['a']` (ot_raw_list_1's insert goes first, giving 'ab')
- `transform(['a'], ['b'], 'right') == [1, 'a']` (ot_raw_list_2's insert goes first, giving 'ba')
The result of transform satisfies that,
.. code::
apply(apply(doc, local_ots), transform(server_ots, local_ots, 'left'))
== apply(apply(doc, server_ots), transform(local_ots, server_ots, 'right'))
'''
if not isinstance(ot_raw_list_1, list):
raise TypeError('`ot_raw_list_1` must be list')
if not isinstance(ot_raw_list_2, list):
raise TypeError('`ot_raw_list_2` must be list')
if not isinstance(side, str):
raise TypeError('`side` must be str')
if not check(ot_raw_list_1) or not check(ot_raw_list_2):
raise ValueError('invalid OTs')
if side not in ['left', 'right']:
raise ValueError('invalid side')
new_ots: List[_OTType] = []
appender = _Appender(new_ots)
taker = _Taker(_make_iter_ots(ot_raw_list_1))
for ot_action, ot_arg in _make_iter_ots(ot_raw_list_2):
if ot_action == _OTTypeActionSkip:
assert isinstance(ot_arg, int)
n = ot_arg
while 0 < n:
chunk_ot = taker.take(n, 'i')
appender.append(chunk_ot)
if chunk_ot is None:
break # pragma: no cover
chunk_ot_action, chunk_ot_arg = chunk_ot
if chunk_ot_action == _OTTypeActionSkip:
assert isinstance(chunk_ot_arg, int)
n -= chunk_ot_arg
elif chunk_ot_action == _OTTypeActionInsert:
pass
elif chunk_ot_action == _OTTypeActionDelete:
assert isinstance(chunk_ot_arg, str)
n -= len(chunk_ot_arg)
elif ot_action == _OTTypeActionInsert:
assert isinstance(ot_arg, str)
n = len(ot_arg)
if (
side == 'left'
and taker.peak_action() == _OTTypeActionInsert
):
appender.append(taker.take(-1))
appender.append((_OTTypeActionSkip, n))
elif ot_action == _OTTypeActionDelete:
assert isinstance(ot_arg, str)
n = len(ot_arg)
while 0 < n:
chunk_ot = taker.take(n, 'i')
if chunk_ot is None:
break # pragma: no cover
chunk_ot_action, chunk_ot_arg = chunk_ot
if chunk_ot_action == _OTTypeActionSkip:
assert isinstance(chunk_ot_arg, int)
n -= chunk_ot_arg
elif chunk_ot_action == _OTTypeActionInsert:
appender.append(chunk_ot)
elif chunk_ot_action == _OTTypeActionDelete:
assert isinstance(chunk_ot_arg, str)
n -= len(chunk_ot_arg)
while True:
chunk_ot = taker.take(-1)
if chunk_ot is None:
break
appender.append(chunk_ot)
_trim(new_ots)
return _to_ot_raw_list(new_ots)
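# Illustrative usage, not part of the original module; the expected results were
# traced from the implementation above and mirror the docstring example.
def _demo_transform() -> Tuple[List[_OTRawType], List[_OTRawType]]:
    left = transform(['a'], ['b'], 'left')    # ['a']: this op's insert goes first
    right = transform(['a'], ['b'], 'right')  # [1, 'a']: the other insert goes first
    return left, right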
def compose(ot_raw_list_1: List[_OTRawType], ot_raw_list_2: List[_OTRawType]) -> List[_OTRawType]:
'''Compose `ot_raw_list_1` and `ot_raw_list_2`
The result of compose satisfies
.. code::
apply(apply(doc, ot_raw_list_1), ot_raw_list_2) == apply(doc, compose(ot_raw_list_1, ot_raw_list_2))
'''
if not isinstance(ot_raw_list_1, list):
raise TypeError('`ot_raw_list_1` must be list')
if not isinstance(ot_raw_list_2, list):
raise TypeError('`ot_raw_list_2` must be list')
if not check(ot_raw_list_1) or not check(ot_raw_list_2):
raise ValueError('invalid OTs')
new_ots: List[_OTType] = []
appender = _Appender(new_ots)
taker = _Taker(_make_iter_ots(ot_raw_list_1))
for ot in _make_iter_ots(ot_raw_list_2):
ot_action, ot_arg = ot
if ot_action == _OTTypeActionSkip:
assert isinstance(ot_arg, int)
n = ot_arg
while 0 < n:
chunk_ot = taker.take(n, 'd')
appender.append(chunk_ot)
if chunk_ot is None:
break # pragma: no cover
chunk_ot_action, chunk_ot_arg = chunk_ot
if chunk_ot_action == _OTTypeActionSkip:
assert isinstance(chunk_ot_arg, int)
n -= chunk_ot_arg
elif chunk_ot_action == _OTTypeActionInsert:
assert isinstance(chunk_ot_arg, str)
n -= len(chunk_ot_arg)
elif chunk_ot_action == _OTTypeActionDelete:
pass
elif ot_action == _OTTypeActionInsert:
appender.append(ot)
elif ot_action == _OTTypeActionDelete:
assert isinstance(ot_arg, str)
offset = 0
n = len(ot_arg)
while 0 < n:
chunk_ot = taker.take(n, 'd')
if chunk_ot is None:
break # | |
controllers.get_issues_by_jid() with several items must return a list
that respects the explicit value of the ``order_by`` attribute.
"""
journal = utils.makeOneJournal()
self._make_one({'_id': '1', 'journal': journal.id, 'number': '10'})
self._make_one({'_id': '2', 'journal': journal.id, 'number': '9'})
self._make_one({'_id': '3', 'journal': journal.id, 'number': '8'})
self._make_one({'_id': '4', 'journal': journal.id, 'number': '7'})
issues = [issue.id for issue in controllers.get_issues_by_jid(journal.id, order_by=['number'])]
self.assertListEqual(sorted(issues), sorted(['4', '3', '2', '1']))
def test_get_issues_by_jid_with_unknow_ids(self):
"""
Test that controllers.get_issues_by_jid() with an unknown jid returns an
empty result.
"""
issues = controllers.get_issues_by_jid('02i28wjs92u')
self.assertEqual([], list(issues))
def test_get_issue_by_journal_and_assets_code_raises_error_if_no_assets_code(self):
"""
Test that controllers.get_issue_by_journal_and_assets_code() raises an
exception when assets_code is empty.
"""
journal = utils.makeOneJournal()
with self.assertRaises(ValueError) as exc_info:
controllers.get_issue_by_journal_and_assets_code('', journal)
self.assertEqual(str(exc_info.exception), __('Obrigatório um assets_code.'))
def test_get_issue_by_journal_and_assets_code_raises_error_if_no_journal(self):
"""
Test that controllers.get_issue_by_journal_and_assets_code() raises an
exception when journal is empty.
"""
with self.assertRaises(ValueError) as exc_info:
controllers.get_issue_by_journal_and_assets_code('v1n1', {})
self.assertEqual(str(exc_info.exception), __('Obrigatório um journal.'))
@patch('webapp.controllers.Issue.objects')
def test_get_issue_by_journal_and_assets_code_returns_filter_first_result(
self, mk_issue_objects
):
"""
Test that controllers.get_issue_by_journal_and_assets_code() returns the
first result of the Issue filter for the given assets_code and journal.
"""
journal = utils.makeOneJournal()
issue = utils.makeOneIssue()
mk_issue_objects.filter.return_value.first.return_value = issue
result = controllers.get_issue_by_journal_and_assets_code('v1n1', journal)
self.assertEqual(result, issue)
def test_get_issue_by_iid(self):
"""
Test that controllers.get_issue_by_iid() returns an ``Issue`` object.
"""
issue = self._make_one()
self.assertEqual(controllers.get_issue_by_iid(issue.id), issue)
def test_get_issue_by_label(self):
"""
Test that controllers.get_issue_by_label() returns an ``Issue`` object.
"""
issue = self._make_one()
self.assertEqual(controllers.get_issue_by_label(issue.journal, issue.label), issue)
def test_get_issue_by_iid_without_id(self):
"""
Test that controllers.get_issue_by_iid() with an empty list raises a
ValueError.
"""
self.assertRaises(ValueError, controllers.get_issue_by_iid, [])
def test_get_issue_by_iid_with_some_params(self):
"""
Test that controllers.get_issue_by_iid() with extra filter params
(volume, number) returns the matching ``Issue`` object.
"""
issue = self._make_one({'volume': '10', 'number': '4'})
self._make_any(items=30)
self.assertEqual(controllers.get_issue_by_iid(issue.id, volume='10',
number='4'), issue)
def test_get_issues_by_iid(self):
"""
Test that controllers.get_issues_by_iid() returns a collection of
``Issue`` objects.
"""
self._make_any(items=5)
self._make_one(attrib={'_id': 'iid1'})
self._make_one(attrib={'_id': 'iid12'})
self._make_one(attrib={'_id': 'iid123'})
self._make_any(items=5)
issues = controllers.get_issues_by_iid(['iid1', 'iid12', 'iid123'])
expected = ['iid1', 'iid12', 'iid123']
self.assertListEqual(sorted([issue for issue in issues.keys()]),
sorted(expected))
def test_get_issues_by_iid_without_issue(self):
"""
Test that controllers.get_issues_by_iid() without any issues returns an empty dict.
"""
issues = controllers.get_issues_by_iid(['iid1', 'iid12', 'iid123'])
self.assertEqual(issues, {})
def test_set_issue_is_public_bulk(self):
"""
Test changing the public status of a set of issues.
"""
self._make_one(attrib={'_id': '012ijs9y24', 'is_public': True})
self._make_one(attrib={'_id': '2183ikos90', 'is_public': True})
self._make_one(attrib={'_id': '9298wjso89', 'is_public': True})
controllers.set_issue_is_public_bulk(
['012ijs9y24', '2183ikos90', '9298wjso89'], is_public=False)
ids = ['012ijs9y24', '2183ikos90', '9298wjso89']
issues = controllers.get_issues_by_iid(ids)
for issue in issues.values():
self.assertFalse(issue.is_public)
def test_set_issue_is_public_bulk_setting_reason(self):
"""
Test changing the public status of a set of issues while also setting
the ``reason`` param.
"""
unpublish_reason = 'plágio'
self._make_one(attrib={'_id': '012ijs9y24', 'is_public': True})
self._make_one(attrib={'_id': '2183ikos90', 'is_public': True})
self._make_one(attrib={'_id': '9298wjso89', 'is_public': True})
ids = ['012ijs9y24', '2183ikos90', '9298wjso89']
controllers.set_issue_is_public_bulk(ids, is_public=False,
reason='plágio')
issues = controllers.get_issues_by_iid(ids)
for issue in issues.values():
self.assertEqual(unpublish_reason, issue.unpublish_reason)
def test_set_issue_is_public_bulk_without_iids(self):
"""
Test changing the public status of a set of issues without passing ids.
"""
self._make_one(attrib={'_id': '0ow9sms9ms', 'is_public': True})
self._make_one(attrib={'_id': '90k2ud90ds', 'is_public': True})
self._make_one(attrib={'_id': '98jd9dhydk', 'is_public': True})
self.assertRaises(ValueError,
controllers.set_issue_is_public_bulk, [], is_public=False)
ids = ['0ow9sms9ms', '90k2ud90ds', '98jd9dhydk']
issues = controllers.get_issues_by_iid(ids)
for issue in issues.values():
self.assertTrue(issue.is_public)
class ArticleControllerTestCase(BaseTestCase):
def _make_one(self, attrib=None):
"""
Returns an ``Article`` object with the required attributes:
``_id``, ``jid``, ``is_public`` and ``issue``. The ``attrib`` param
overrides the object's attributes.
"""
return utils.makeOneArticle(attrib=attrib)
def _make_any(self, issue=None, items=3):
"""
Returns a list of ``Article`` objects with the attributes ``jid``,
``is_public`` and ``acronym``, limited in size by the ``items`` param.
"""
return utils.makeAnyArticle(issue=issue, items=items)
def _make_same_issue_articles(self, articles_attribs=None):
issue = utils.makeOneIssue()
default_attribs = [
{
"original_language": "pt",
"languages": ["pt", ],
"abstract": "resumo",
"abstracts": [{"language": "pt", "text": "Resumo"}],
"abstract_languages": ["pt"],
},
{
"original_language": "es",
"languages": ["es", ],
"abstract": "resumo",
"abstracts": [{"language": "es", "text": "Resumo"}],
"abstract_languages": ["es"],
},
]
articles_attribs = articles_attribs or default_attribs
articles = []
for article_attribs in articles_attribs:
article_attribs.update({"issue": issue})
articles.append(self._make_one(article_attribs))
return articles
def test_get_article_returns_next_article(self):
"""
Test that controllers.get_article with goto="next" returns the next
``Article`` object.
"""
articles = self._make_same_issue_articles()
article = articles[1]
lang, result = controllers.get_article(
articles[0].id,
articles[0].journal.url_segment,
articles[0].original_language,
gs_abstract=False,
goto="next",
)
self.assertEqual(article.id, result.id)
self.assertEqual(lang, "es")
def test_get_article_returns_previous_article(self):
"""
Test that controllers.get_article with goto="previous" returns the
previous ``Article`` object.
"""
articles = self._make_same_issue_articles()
article = articles[0]
lang, result = controllers.get_article(
articles[1].id,
articles[1].journal.url_segment,
articles[1].original_language,
gs_abstract=False,
goto="previous",
)
self.assertEqual(article.id, result.id)
self.assertEqual(lang, "pt")
def test_get_article_returns_article(self):
"""
Test that controllers.get_article returns the requested ``Article``
object.
"""
articles = self._make_same_issue_articles()
article = articles[0]
lang, result = controllers.get_article(
articles[0].id,
articles[0].journal.url_segment,
"pt",
gs_abstract=False,
)
self.assertEqual(article.id, result.id)
self.assertEqual(lang, "pt")
def test_get_article_returns_article_and_original_lang(self):
"""
Test that controllers.get_article returns the ``Article`` object and its
original language.
"""
articles = self._make_same_issue_articles()
article = articles[0]
lang, result = controllers.get_article(
articles[0].id,
articles[0].journal.url_segment,
None,
gs_abstract=False,
)
self.assertEqual(article.id, result.id)
self.assertEqual(lang, "pt")
def test_get_article_returns_next_article_which_has_abstract(self):
"""
Test that controllers.get_article with gs_abstract=True and goto="next"
returns the next ``Article`` that has an abstract.
"""
articles = self._make_same_issue_articles()
article = articles[1]
lang, result = controllers.get_article(
articles[0].id,
articles[0].journal.url_segment,
"en",
gs_abstract=True,
goto="next",
)
self.assertEqual(article.id, result.id)
self.assertEqual(lang, "es")
def test_get_article_returns_previous_article_which_has_abstract(self):
"""
Test that controllers.get_article with gs_abstract=True and
goto="previous" returns the previous ``Article`` that has an abstract.
"""
articles = self._make_same_issue_articles()
article = articles[0]
lang, result = controllers.get_article(
articles[1].id,
articles[1].journal.url_segment,
"en",
gs_abstract=True,
goto="previous",
)
self.assertEqual(article.id, result.id)
self.assertEqual(lang, "pt")
def test_get_article_returns_article_which_has_abstract(self):
"""
Test that controllers.get_article with gs_abstract=True returns an
``Article`` that has an abstract.
"""
articles = self._make_same_issue_articles()
article = articles[0]
lang, result = controllers.get_article(
articles[0].id,
articles[0].journal.url_segment,
"pt",
gs_abstract=True,
)
self.assertEqual(article.id, result.id)
self.assertEqual(lang, "pt")
def test_goto_article_returns_next_article(self):
articles = self._make_same_issue_articles()
self.assertEqual(
controllers.goto_article(articles[0], "next").id,
articles[1].id
)
def test_goto_article_returns_no_next(self):
articles = self._make_same_issue_articles()
with self.assertRaises(controllers.PreviousOrNextArticleNotFoundError):
controllers.goto_article(articles[-1], "next")
def test_goto_article_returns_previous_article(self):
articles = self._make_same_issue_articles()
self.assertEqual(
controllers.goto_article(articles[-1], "previous").id,
articles[-2].id
)
def test_goto_article_returns_no_previous(self):
articles = self._make_same_issue_articles()
with self.assertRaises(controllers.PreviousOrNextArticleNotFoundError):
controllers.goto_article(articles[0], "previous")
def test_goto_article_returns_next_article_with_abstract(self):
articles = self._make_same_issue_articles()
self.assertEqual(
controllers.goto_article(articles[0], "next", True).id,
articles[1].id
)
def test_goto_article_returns_no_next_because_next_has_no_abstract(self):
attribs = [
{
"abstract": "resumo",
"abstracts": [{"language": "x", "text": "Resumo"}]
},
{},
]
articles = self._make_same_issue_articles(attribs)
with self.assertRaises(controllers.PreviousOrNextArticleNotFoundError):
controllers.goto_article(articles[0], "next", True)
def test_goto_article_returns_previous_article_with_abstract(self):
articles = self._make_same_issue_articles()
self.assertEqual(
controllers.goto_article(articles[-1], "previous", True).id,
articles[-2].id
)
def test_goto_article_returns_no_previous_because_previous_has_no_abstract(self):
attribs = [
{},
{"abstract": "resumo",
"abstracts": [{"language": "x", "text": "Resumo"}]},
]
articles = self._make_same_issue_articles(attribs)
with self.assertRaises(controllers.ArticleNotFoundError):
controllers.goto_article(articles[-1], "previous", True)
def test_goto_article_raises_value_error_if_goto_is_invalid(self):
attribs = [
{},
{"abstract": "resumo",
"abstracts": [{"language": "x", "text": "Resumo"}]},
]
articles = self._make_same_issue_articles(attribs)
with self.assertRaises(ValueError) as exc:
controllers.goto_article(articles[-1], "prev", True)
self.assertIn("Expected: next or previous", str(exc.exception))
def test__articles_or_abstracts_sorted_by_order_or_date_returns_empty_list(self):
a = self._make_one({"abstracts": []})
articles = controllers._articles_or_abstracts_sorted_by_order_or_date(
a.issue.id, gs_abstract=True)
self.assertEqual(articles, [])
def test__articles_or_abstracts_sorted_by_order_or_date_returns_empty_list2(self):
a = self._make_one({"abstracts": None})
articles = controllers._articles_or_abstracts_sorted_by_order_or_date(
a.issue.id, gs_abstract=True)
self.assertEqual(articles, [])
def test__articles_or_abstracts_sorted_by_order_or_date_returns_empty_list3(self):
# the `abstracts` field does not even exist
a = self._make_one()
articles = controllers._articles_or_abstracts_sorted_by_order_or_date(
a.issue.id, gs_abstract=True)
self.assertEqual(articles, [])
def test__articles_or_abstracts_sorted_by_order_or_date_returns_one(self):
abstracts = [
{"language": "en", "text": "Texto"}
]
abstract_languages = ["en"]
a = self._make_one(
{
"abstract": "Texto",
"abstract": "resumo", "abstracts": abstracts,
"abstract_languages": abstract_languages
}
)
articles = controllers._articles_or_abstracts_sorted_by_order_or_date(
a.issue.id, gs_abstract=True)
self.assertEqual(len(articles), 1)
def test_get_articles_by_aid(self):
"""
Test that controllers.get_articles_by_aid() returns a dict of ``Article`` objects keyed by aid.
"""
self._make_any(items=5)
self._make_one(attrib={'_id': 'aid1'})
self._make_one(attrib={'_id': 'aid12'})
self._make_one(attrib={'_id': 'aid123'})
self._make_any(items=5)
articles = controllers.get_articles_by_aid(['aid1', 'aid12', 'aid123'])
expected = ['aid1', 'aid12', 'aid123']
self.assertListEqual(sorted([article for article in articles.keys()]),
sorted(expected))
def test_get_articles_by_aid_with_not_found_jids(self):
"""
Test controllers.get_articles_by_aid() with ids that do not exist; it should return an empty dict.
"""
self._make_any(items=5)
self._make_one(attrib={'_id': 'aid1'})
self._make_one(attrib={'_id': 'aid12'})
self._make_one(attrib={'_id': 'aid123'})
self._make_any(items=5)
articles = controllers.get_articles_by_aid(['k8u1jid1', '0823mgjid12',
'-012-js7jid123'])
self.assertEqual(articles, {})
def test_get_articles_by_aid_without_article(self):
"""
Test controllers.get_articles_by_aid() when no articles exist; it should return an empty dict.
"""
articles = controllers.get_articles_by_aid(['aid1', 'aid12', 'aid123'])
self.assertEqual(articles, {})
def test_set_article_is_public_bulk(self):
"""
Test changing the ``is_public`` value of a set of articles.
"""
self._make_one(attrib={'_id': '012ijs9y24', 'is_public': True})
self._make_one(attrib={'_id': '2183ikos90', 'is_public': True})
self._make_one(attrib={'_id': '9298wjso89', 'is_public': True})
controllers.set_article_is_public_bulk(
['012ijs9y24', '2183ikos90', '9298wjso89'], is_public=False)
ids = ['012ijs9y24', '2183ikos90', '9298wjso89']
articles = controllers.get_articles_by_aid(ids)
for article in articles.values():
self.assertFalse(article.is_public)
def test_set_article_is_public_bulk_without_aids(self):
"""
Test that changing the ``is_public`` value of a set of articles without passing any aids raises a ValueError.
"""
self._make_one(attrib={'_id': '9ms9kos9js', 'is_public': True})
self._make_one(attrib={'_id': 'lksnsh8snk', 'is_public': True})
self._make_one(attrib={'_id': '7153gj6ysb', 'is_public': True})
self.assertRaises(ValueError,
controllers.set_article_is_public_bulk, [], is_public=False)
ids = ['9ms9kos9js', 'lksnsh8snk', '7153gj6ysb']
articles = controllers.get_articles_by_aid(ids)
for article in articles.values():
self.assertTrue(article.is_public)
def test_get_articles_by_iid(self):
"""
Test that controllers.get_articles_by_iid() returns a list of articles.
"""
self._make_one(attrib={
'_id': '012ijs9y24',
'issue': '90210j83',
'order': '14',
'journal': 'oak,ajimn1'
})
self._make_one(attrib={
'_id': '2183ikos90',
'issue': '90210j83',
'order': '12',
'journal': 'oak,ajimn1'
})
self._make_one(attrib={
'_id': '9298wjso89',
'issue': '90210j82',
'order': '13',
'journal': 'oak,ajimn1'
})
expected = ['2183ikos90', '012ijs9y24', ]
articles = [article.id for article in controllers.get_articles_by_iid('90210j83')]
self.assertListEqual(articles, expected)
def test_get_articles_by_iid_from_aop_issue(self):
"""
Test that controllers.get_articles_by_iid() returns a list of articles from an ahead-of-print (AOP) issue.
"""
issue = utils.makeOneIssue({"_id": '90210j83', "number": "ahead"})
self._make_one(attrib={
'_id': '012ijs9y24',
'issue': issue,
'journal': 'oak,ajimn1',
'publication_date': '2018-01-01',
'is_aop': True,
})
from typing import AnyStr, Callable, Optional, List, Dict, Any
from typing_extensions import Literal
import sys
import os
import json
from functools import partial
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtGui import QIcon
from compas.data import Data
from ..views import View120
from ..views import View330
from ..objects import Object
from .controller import Controller
from .selector import Selector
from .timer import Timer
HERE = os.path.dirname(__file__)
ICONS = os.path.join(HERE, '../icons')
CONFIG = os.path.join(HERE, 'config.json')
VERSIONS = {'120': (2, 1), '330': (3, 3)}
class App:
"""Viewer app.
The app has a (main) window with a central OpenGL widget (i.e. the 'view'),
and a menubar, toolbar, and statusbar.
The menubar provides access to all supported 'actions'.
The toolbar is meant to be a 'quicknav' to a selected set of actions.
The app supports rotate/pan/zoom, and object selection via picking or box selections.
Currently the app uses OpenGL 2.1 and GLSL 120 with a 'compatibility' profile.
Support for OpenGL 3.3 and GLSL 330 with a 'core' profile is under development.
Parameters
----------
version: '120' | '330', optional
The version of the GLSL used by the shaders.
Default is ``'120'`` with a compatibility profile.
The option ``'330'`` is not yet available.
width: int, optional
The width of the app window at startup.
Default is ``800``.
height: int, optional
The height of the app window at startup.
Default is ``500``.
viewmode: 'shaded' | 'ghosted' | 'wireframe' | 'lighted', optional
The display mode of the OpenGL view.
Default is ``'shaded'``.
In ``'ghosted'`` mode, all objects have a default opacity of ``0.7``.
show_grid: bool, optional
Show the XY plane.
Default is ``True``.
controller_class: :class:`compas_view2.app.Controller`, optional
A custom controller corresponding to a custom config file.
Default is ``None``, in which case the default controller is used,
matching the default config file.
config: dict | filepath, optional
A configuration dict for the UI, or a path to a JSON file containing such a dict.
Default is ``None``, in which case the default configuration is used.
Attributes
----------
window: :class:`PySide2.QtWidgets.QMainWindow`
The main window of the application.
This window contains the view and any other UI components
such as the menu, toolbar, statusbar, ...
view: :class:`compas_view2.View`
Instance of OpenGL view.
This view is the central widget of the main window.
controller: :class:`compas_view2.app.Controller`
The action controller of the app.
Notes
-----
The app can currently only be used 'as-is'.
This means that there is no formal mechanism for adding actions to the controller
or to add functionality to the shader, other than by extending the core classes.
In the future, such a mechanism will be provided by allowing the user to overwrite
the configuration file and add actions to the controller, without having
to modify the package source code.
Currently the app has no scene graph.
All added COMPAS objects are wrapped in a viewer object and stored in a dictionary,
mapping the object's ID (``id(object)``) to the instance.
Examples
--------
>>> from compas_view2 import app
>>> viewer = app.App()
>>> viewer.show()
"""
def __init__(self,
version: Literal['120', '330'] = '120',
width: int = 800,
height: int = 500,
viewmode: Literal['wireframe', 'shaded', 'ghosted', 'lighted'] = 'shaded',
controller_class: Optional[Controller] = None,
show_grid: bool = True,
config: Optional[dict] = None,
enable_sidebar: bool = False):
if version not in VERSIONS:
raise Exception('Only these versions are currently supported: {}'.format(VERSIONS))
glFormat = QtGui.QSurfaceFormat()
glFormat.setVersion(* VERSIONS[version])
if version == '330':
View = View330
glFormat.setProfile(QtGui.QSurfaceFormat.CoreProfile)
elif version == '120':
View = View120
glFormat.setProfile(QtGui.QSurfaceFormat.CompatibilityProfile)
else:
raise NotImplementedError
QtGui.QSurfaceFormat.setDefaultFormat(glFormat)
app = QtCore.QCoreApplication.instance()
if app is None:
app = QtWidgets.QApplication(sys.argv)
app.references = set()
appIcon = QIcon(os.path.join(ICONS, "compas_icon_white.png"))
app.setWindowIcon(appIcon)
self.timer = None
self.frame_count = 0
self.record = False
self.recorded_frames = []
self.width = width
self.height = height
self.window = QtWidgets.QMainWindow()
self.view = View(self, mode=viewmode, show_grid=show_grid)
self.window.setCentralWidget(self.view)
self.window.setContentsMargins(0, 0, 0, 0)
controller_class = controller_class or Controller
self.controller = controller_class(self)
config = config or CONFIG
if not isinstance(config, dict):
with open(config) as f:
config = json.load(f)
self.config = config
self._app = app
self._app.references.add(self.window)
self.selector = Selector(self)
self.enable_sidebar = enable_sidebar
self.init()
self.resize(width, height)
def init(self):
self._init_statusbar()
self._init_menubar(self.config.get('menubar'))
self._init_toolbar(self.config.get('toolbar'))
self._init_sidebar(self.config.get('sidebar'))
def resize(self, width: int, height: int):
"""Resize the main window programmatically.
Parameters
----------
width: int
height: int
"""
self.window.resize(width, height)
desktop = self._app.desktop()
rect = desktop.availableGeometry()
x = int(0.5 * (rect.width() - width))
y = int(0.5 * (rect.height() - height))
self.window.setGeometry(x, y, width, height)
def add(self, data: Data, **kwargs) -> Object:
"""Add a COMPAS object.
Parameters
----------
data: :class:`compas.geometry.Primitive` | :class:`compas.geometry.Shape` | :class:`compas.datastructures.Datastructure`
Returns
-------
:class:`compas_view2.objects.Object`
"""
obj = Object.build(data, **kwargs)
self.view.objects[obj] = obj
self.selector.add(obj)
if self.view.isValid():
obj.init()
return obj
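# Minimal usage sketch for `add` (assumption: `compas.geometry.Box` and its
# `from_width_height_depth` constructor are available in the installed COMPAS version):
#
#     from compas.geometry import Box
#     viewer = App(width=800, height=500)
#     obj = viewer.add(Box.from_width_height_depth(1, 1, 1))
#     viewer.show()
#
# `add` wraps the COMPAS data in a viewer object, registers it with the selector,
# and initializes its GL buffers immediately if the OpenGL context is already valid.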
def add_reference(self, obj: Object, **kwargs) -> Object:
""""""
ref = obj.otype.from_other(obj, **kwargs)
self.view.objects[ref] = ref
self.selector.add(ref)
if self.view.isValid():
ref.init()
return ref
def remove(self, obj: Object):
if obj in list(self.view.objects):
del self.view.objects[obj]
for key, value in list(self.selector.instances.items()):
if obj == value:
del self.selector.instances[key]
def show(self, pause=None):
"""Show the viewer window."""
self.window.show()
self._app.exec_()
run = show
def about(self):
"""Display the about message as defined in the config file."""
QtWidgets.QMessageBox.about(self.window, 'About', self.config['messages']['about'])
def info(self, message: str):
"""Display info."""
QtWidgets.QMessageBox.information(self.window, 'Info', message)
def question(self, message: str):
"""Ask a question."""
pass
def warning(self, message: str):
"""Display a warning."""
QtWidgets.QMessageBox.warning(self.window, 'Warning', message)
def critical(self, message: str):
"""Display a critical warning."""
QtWidgets.QMessageBox.critical(self.window, 'Critical', message)
def status(self, message: str):
"""Display a message in the status bar."""
self.statusText.setText(message)
def fps(self, fps: int):
"""Update fps info in the status bar."""
self.statusFps.setText('fps: {}'.format(fps))
# ==============================================================================
# UI
# ==============================================================================
def _get_icon(self, icon: str):
return QtGui.QIcon(os.path.join(ICONS, icon))
def _init_statusbar(self):
self.statusbar = self.window.statusBar()
self.statusbar.setContentsMargins(0, 0, 0, 0)
self.statusText = QtWidgets.QLabel('Ready')
self.statusbar.addWidget(self.statusText, 1)
self.statusFps = QtWidgets.QLabel('fps: ')
self.statusbar.addWidget(self.statusFps)
def _init_menubar(self, items: List[Dict]):
if not items:
return
self.menubar = self.window.menuBar()
self.menubar.setNativeMenuBar(True)
self.menubar.setContentsMargins(0, 0, 0, 0)
self._add_menubar_items(items, self.menubar)
def _init_toolbar(self, items: List[Dict]):
if not items:
return
self.toolbar = self.window.addToolBar('Tools')
self.toolbar.setMovable(False)
self.toolbar.setObjectName('Tools')
self.toolbar.setIconSize(QtCore.QSize(16, 16))
self._add_toolbar_items(items, self.toolbar)
def _init_sidebar(self, items: List[Dict]):
if not self.enable_sidebar:
return
self.sidebar = QtWidgets.QToolBar(self.window)
self.window.addToolBar(QtCore.Qt.LeftToolBarArea, self.sidebar)
self.sidebar.setObjectName('Sidebar')
self.sidebar.setMovable(False)
self.sidebar.setIconSize(QtCore.QSize(16, 16))
self.sidebar.setMinimumWidth(240)
self._add_sidebar_items(items, self.sidebar)
def _add_menubar_items(self, items: List[Dict], parent: QtWidgets.QWidget):
if not items:
return
for item in items:
if item['type'] == 'separator':
parent.addSeparator()
elif item['type'] == 'menu':
menu = parent.addMenu(item['text'])
if 'items' in item:
self._add_menubar_items(item['items'], menu)
elif item['type'] == 'radio':
radio = QtWidgets.QActionGroup(self.window, exclusive=True)
for radio_item in item['items']:
action = self._add_action(parent, text=radio_item['text'], action=radio_item['action'])
action.setCheckable(True)
action.setChecked(radio_item['checked'])
radio.addAction(action)
elif item['type'] == 'action':
del item['type']
self._add_action(parent, text=item['text'], action=item['action'])
else:
raise NotImplementedError
def _add_toolbar_items(self, items: List[Dict], parent: QtWidgets.QWidget):
if not items:
return
for item in items:
if item['type'] == 'separator':
parent.addSeparator()
elif item['type'] == 'action':
del item['type']
self._add_action(parent, **item)
else:
raise NotImplementedError
def _add_sidebar_items(self, items: List[Dict], parent: QtWidgets.QWidget):
if not items:
return
for item in items:
if item['type'] == 'separator':
parent.addSeparator()
elif item['type'] == 'radio':
del item['type']
self.add_radio(parent, **item)
elif item['type'] == 'checkbox':
del item['type']
self.add_checkbox(parent, **item)
elif item['type'] == 'slider':
del item['type']
self.add_slider(parent, **item)
elif item['type'] == 'button':
del item['type']
self.add_button(parent, **item)
else:
raise NotImplementedError
def _add_action(self,
parent: QtWidgets.QWidget,
*,
text: str,
action: Callable,
args: Optional[List[Any]] = None,
kwargs: Optional[Dict] = None,
icon: Optional[AnyStr] = None):
action = action if callable(action) else getattr(self.controller, action)
args = args or []
kwargs = kwargs or {}
if icon:
icon = self._get_icon(icon)
action = parent.addAction(icon, text, partial(action, *args, **kwargs))
else:
action = parent.addAction(text, partial(action, *args, **kwargs))
return action
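# Illustrative sketch of the config entries `_add_action` consumes; the action names
# below are assumptions, the real ones come from the Controller and config.json:
#
#     {"type": "action", "text": "About", "action": "about"}
#     {"type": "action", "text": "Load", "action": "load", "icon": "load.png",
#      "args": [], "kwargs": {}}
#
# A string `action` is resolved to a method on the controller; a callable is used
# directly. Either way it is wrapped with functools.partial to bind args/kwargs.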
def add_button(self,
parent: QtWidgets.QWidget,
*,
text: str,
action: Callable):
box = QtWidgets.QWidget()
layout = QtWidgets.QHBoxLayout()
button = QtWidgets.QPushButton(text)
layout.addWidget(button)
box.setLayout(layout)
parent.addWidget(box)
action = action if callable(action) else getattr(self.controller, action)
button.clicked.connect(action)
# button.clicked.connect(self.view.update)
def add_radio(self,
parent: QtWidgets.QWidget,
*,
items: List[Dict]):
box = QtWidgets.QWidget()
layout = QtWidgets.QHBoxLayout()
radio = QtWidgets.QActionGroup(self.window, exclusive=True)
layout.addWidget(radio)
box.setLayout(layout)
parent.addWidget(box)
for item in items:
action = self._add_action(parent, text=item['text'], action=item['action'])
action.setCheckable(True)
action.setChecked(item['checked'])
radio.addAction(action)
# radio.toggled.connect(self.view.update)
def add_checkbox(self,
parent: QtWidgets.QWidget,
*,
text: str,
action: Callable,
checked: bool = False):
box = QtWidgets.QWidget()
layout = QtWidgets.QHBoxLayout()
checkbox = QtWidgets.QCheckBox(text)
checkbox.setCheckState(QtCore.Qt.CheckState.Checked if checked else QtCore.Qt.CheckState.Unchecked)
layout.addWidget(checkbox)
box.setLayout(layout)
parent.addWidget(box)
action = action if callable(action) else getattr(self.controller, action)
checkbox.toggled.connect(action)
checkbox.toggled.connect(self.view.update)
def add_input(self, parent: QtWidgets.QWidget):
pass
def add_colorpicker(self, parent: QtWidgets.QWidget):
pass
def add_slider(self,
parent: QtWidgets.QWidget,
*,
text: str,
action: Callable,
value: int = 0,
minval: int = | |
- 2 * self.V.T @ M @ self.B, self.V)
self.poly4 = Polynomial(self.P, self.x + self.v,
- 2 * self.V.T @ M @ self.A, self.V)
self.poly_deriv = [Polynomial(self.P, self.x + self.v, self.V,
(self.A[i,:] @ self.Z)[0] * self.V, deriv=True,
deriv_index=i) for i in self.zero_rows]
self.poly5 = Polynomial(self.c2, self.x + self.v, self.V, self.V)
d2 = np.max([i.monomial_degree(self.x) for i in self.poly_deriv]
+ [self.poly3.monomial_degree(self.x),
self.poly4.monomial_degree(self.x)]) // 2
z2 = sp.Matrix(monomial_generator(self.x, d2, 0))
self.kron2, self.Kron2 = self.kronecker(self.v, z2)
self.Q2 = MatrixVar(len(self.kron2), len(self.kron2), symmetric=True)
self.sos2 = Polynomial(self.Q2, self.x + self.v, self.Kron2,
self.Kron2)
#these are needed for post-processing
self.M = M
def kronecker(self, V, X):
''' Returns the Kronecker product of V and X
Parameters:
V (list[sympy.symbol]) : List of symbolic variables.
X (list[sympy.symbol]) : List of symbolic variables.
Returns:
kron (list[sympy.symbol]) : Kronecker product of V and X as list.
_ (sympy.Matrix) : Kronecker product as sympy matrix.
'''
kron = []
for v in V:
for x in X:
kron.append(v * x)
return kron, sp.Matrix(kron)
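# Illustrative example of the ordering produced by `kronecker` (hypothetical sympy
# symbols, not part of the object's state):
#
#     v1, v2, x1, x2 = sp.symbols('v1 v2 x1 x2')
#     kron, Kron = self.kronecker([v1, v2], [x1, x2])
#     # kron == [v1*x1, v1*x2, v2*x1, v2*x2]
#     # Kron is the same product list as a 4x1 sympy Matrix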
def generate_sdp(self):
''' Creates the semidefinite program for sos stability analysis.
'''
self.constraints = []
for mon in self.Q1.monomials:
self.constraints += [self.Q1.variables[mon] >> 0]
for mon in self.Q2.monomials:
self.constraints += [self.Q2.variables[mon] >> 0]
for monomial in self.sos1.monomials:
term = self.poly1.coefficient(monomial)
term += self.poly2.coefficient(monomial)
self.constraints += [term == self.sos1.coefficient(monomial)]
for monomial in self.sos2.monomials:
term = self.poly3.coefficient(monomial) \
+ self.poly4.coefficient(monomial) \
+ self.poly5.coefficient(monomial)
for poly in self.poly_deriv:
term += poly.coefficient(monomial)
self.constraints += [term == self.sos2.coefficient(monomial)]
def feasability_check(self, verbose=False):
''' Determines whether the learning process is feasible
Parameters:
verbose (bool) : Enable verbose optimizer output
Returns:
_ (bool) : Feasibility of the learning process
'''
obj = 0
for mon in self.P.monomials:
obj += cv.norm(self.P.variables[mon], 'fro')
for mon in self.F.monomials:
obj += cv.norm(self.F.variables[mon], 'fro')
self.prob = cv.Problem(cv.Minimize(obj), self.constraints)
self.prob.solve(verbose=verbose, solver=cv.SCS)
#self.prob.solve(verbose=verbose, solver=cv.CVXOPT)
#self.prob.solve(verbose=verbose, solver=cv.MOSEK)
print(self.prob.status)
if self.prob.status == 'optimal':
return True
else:
return False
def return_variables(self, threshold=1E-5):
''' Returns the optimization variables F(x), P(x), Q_1, and Q_2.
Parameters:
threshold (float) : coefficients below this value will be set to zero.
Returns:
F (sympy.matrix)
P (sympy.matrix)
Q1 (numpy.array)
Q2 (numpy.array)
'''
F = sp.zeros(*self.F.dimension)
for mon in self.F.monomials:
coeffs = self.F.variables[mon].value.copy()
coeffs[np.abs(coeffs) < threshold] = 0
F += mon * coeffs
P = sp.zeros(*self.P.dimension)
for mon in self.P.monomials:
coeffs = self.P.variables[mon].value.copy()
coeffs[np.abs(coeffs) < threshold] = 0
P += mon * coeffs
Q1 = self.Q1.variables[1].value
Q1[np.abs(Q1) < threshold] = 0
Q2 = self.Q2.variables[1].value
Q2[np.abs(Q2) < threshold] = 0
return F, P, Q1, Q2
def import_data(self, data):
''' Imports the training data into the imitation learning object
Parameters:
data (dict) : A dictionary containing the key-value pairs
'x' (numpy.array) : state samples
'u' (numpy.array) : control input samples
'N' (int) : number of state and control input samples
'''
self.data = data.copy()
fZ = lambdify(self.x, self.Z, 'numpy')
self.data['Z'] = fZ(*self.data['x']).squeeze(1)
for monomial in self.F.variables.keys():
if monomial == S.One:
self.data[str(monomial)] = np.ones(self.data['N'])
else:
f_mon = lambdify(self.x, monomial, 'numpy')
self.data[str(monomial)] = f_mon(*self.data['x'])
def imitate(self, algorithm, iterations=100, verbose=False, seed='zero',
step_length=1):
''' Run the imitation learning algorithm
Parameters:
algorithm (str) : choose from either 'admm' or 'pgd'
iterations (int) : number of iterations
verbose (bool) : Turn verbose output on
seed (int) : random seed for initialization ('zero' sets all arrays to
zero instead)
step_length
[for admm] (float) : value of the 'rho' parameter
[for pgd] (dict) : dictionary giving the schedule of the 'alpha' parameter,
where each key is the first iteration at which that alpha value is used
'''
if algorithm == 'admm':
self.admm_initialize(rho=step_length, seed=seed)
for iteration in range(iterations):
self.admm_step_1(verbose=False)
self.admm_step_2(verbose=False)
self.admm_step_3()
if verbose:
self.admm_print()
if iteration % 25 == 0:
print('ADMM iterations completed : ', iteration)
self.primal_residual.append(self.admm_problem2.value)
self.objective1.append(self.admm_control_eval())
F = 0
for mon in self.F.monomials:
F += mon * np.array(self.F.variables[mon].value.copy())
P = 0
for mon in self.P.monomials:
P += mon * np.array(self.P.variables[mon].value.copy())
return F, P
# Disabled 'pf' (policy fitting) branch, kept as comments for reference:
# elif algorithm == 'pf':
#     self.admm_initialize()
#     self.policy_fitting()
#     K = 0
#     for mon in self.K.monomials:
#         K += self.K.variables[mon].value * mon
#     print(expand((K @ self.Z)[0]))
#     print(self.K.variables[1].value)
elif algorithm == 'pgd':
self.pgd_initialise(seed=seed)
self.pgd_projection()
imitation_loss = self.pgd_objective(self.Fp, self.Pp)
self.objective.append(imitation_loss.item())
print(0, imitation_loss)
for iteration in range(iterations):
if iteration in step_length.keys():
print('Alpha Update')
alpha = step_length[iteration]
self.pgd_grad_step(alpha=alpha)
self.pgd_projection()
imitation_loss = self.pgd_objective(self.Fp, self.Pp)
self.objective.append(imitation_loss.item())
print(iteration + 1, imitation_loss)
F = 0
for mon in self.F.monomials:
F += mon * np.array(self.Fp[str(mon)])
P = 0
for mon in self.P.monomials:
P += mon * np.array(self.Pp[str(mon)])
return F, P
else:
raise Exception('Please choose a valid optimization algorithm.')
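# Hedged usage sketch for `imitate`; the numeric values are arbitrary examples and
# `learner` stands for an instance of this class on which `import_data` was called:
#
#     # ADMM: step_length is the scalar rho
#     F, P = learner.imitate('admm', iterations=200, step_length=1.0, seed=0)
#
#     # PGD: step_length is a schedule dict {first_iteration: alpha}, e.g. use
#     # alpha = 1e-2 from iteration 0 and drop to 1e-3 from iteration 50
#     F, P = learner.imitate('pgd', iterations=100, step_length={0: 1e-2, 50: 1e-3})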
######## Projected Gradient Descent
def pgd_initialise(self, seed):
''' Initialize the projected gradient descent algorithm
Parameters:
seed (int) : random seed for initialization
'''
self.grad = grad(self.pgd_objective, (0, 1))
if seed == 'zero':
pass
else:
self.key = jr.PRNGKey(seed)
self.key, *subkeys = jr.split(self.key, len(self.F.monomials) + 1)
self.Fp = {str(m) : jr.uniform(k, self.F.dimension, minval=-5,
maxval=5) for m, k in zip(self.F.variables.keys(), subkeys)}
self.key, *subkeys = jr.split(self.key, len(self.P.monomials) + 1)
self.Pp = {str(m) : jr.uniform(k, self.P.dimension, minval=-3,
maxval=5) for m, k in zip(self.P.variables.keys(), subkeys)}
self.Ftilde = {str(m) : cv.Parameter(self.F.dimension)
for m in self.F.variables.keys()}
self.Ptilde = {str(m) : cv.Parameter(self.P.dimension)
for m in self.P.variables.keys()}
obj = 0
for mon in self.F.monomials:
obj += cv.norm(self.F.variables[mon] - self.Ftilde[str(mon)],
'fro')
for mon in self.P.monomials:
obj += cv.norm(self.P.variables[mon] - self.Ptilde[str(mon)],
'fro')
self.projection = cv.Problem(cv.Minimize(obj), self.constraints)
self.objective = []
def pgd_control_eval(self, F, P):
''' Evaluate the control inputs for the state training data, given F
and P (implemented in Jax for autodiff)
# THIS ONLY WORKS FOR CONSTANT P - NEEDS TO BE CHANGED FOR HIGHER
# DEGREE P MATRICES
Parameters:
F (dict) : F matrix with key-value pairs
# TODO: Check this
monomial : jax.array
P (dict) : P matrix with key-value pairs
monomial : jax.array
Returns:
_ (jax.array) : control inputs
'''
Fsum = einsum1(F, self.data)
return einsum2(Fsum, jnp.linalg.inv(P['1']), self.data['Z'])
def pgd_objective(self, F, P):
''' Evaluate the imitation learning cost function, given F and P
Parameters:
F (dict) : F matrix with key-value pairs
# TODO: Check this
monomial : jax.array
P (dict) : P matrix with key-value pairs
monomial : jax.array
Returns:
_ (float) : Imitation loss
'''
u = self.pgd_control_eval(F, P)
return jnp.sum((u - self.data['u']) ** 2) / self.data['N']
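# Note on the control law being fitted (a sketch; einsum1/einsum2 are helpers defined
# elsewhere in this module): the predicted input is u(x) = F(x) P^{-1} Z(x), so for a
# constant P the objective evaluated above is
#
#     J(F, P) = (1/N) * sum_i || F(x_i) P^{-1} Z(x_i) - u_i ||^2
#
# Written in jax.numpy, it can be differentiated with jax.grad in pgd_grad_step and
# the result projected back onto the SOS constraints in pgd_projection.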
def pgd_grad_step(self, alpha=1E-3):
''' Take projected gradient step.
Parameters:
alpha (float) : step length
'''
Fgrad, Pgrad = self.grad(self.Fp, self.Pp)
for mon in Fgrad.keys():
self.Fp[mon] += - alpha * Fgrad[mon].copy()
for mon in Pgrad.keys():
self.Pp[mon] += - alpha * Pgrad[mon].copy()
self.Pp[mon] = 0.5 * (self.Pp[mon] + self.Pp[mon].T)
def pgd_projection(self):
''' Do projection step of pgd algorithm.
'''
for mon in self.Fp.keys():
self.Ftilde[mon].value = np.array(self.Fp[mon].copy())
for mon in self.Pp.keys():
self.Ptilde[mon].value = np.array(self.Pp[mon].copy())
self.projection.solve(verbose=False, solver=cv.SCS)
for mon in self.F.monomials:
self.Fp[str(mon)] = jnp.array(self.F.variables[mon].value.copy())
for mon in self.P.monomials:
self.Pp[str(mon)] = jnp.array(self.P.variables[mon].value.copy())
######## ADMM
def admm_initialize(self, rho=1, seed='zero'):
''' Initialize the ADMM algorithm.
Parameters:
rho (float) : value of rho
seed (int) : random seed for initialization
'''
self.rho = rho
self.primal_residual = []
self.objective1 = []
order_K = self.F.order - self.P.order
self.K = MatrixVar(self.m, self.p, states=self.x, order=order_K)
self.Ftilde = {str(m) : cv.Parameter(self.F.dimension)
for m in self.F.variables.keys()}
self.Ptilde = {str(m) : cv.Parameter(self.P.dimension)
for m in self.P.variables.keys()}
self.Ktilde = {str(m) : cv.Parameter(self.K.dimension)
for m in self.K.variables.keys()}
self.Y = {m : cv.Parameter(self.F.dimension)
for m in self.F.variables.keys()}
if seed == 'zero':
for m in self.F.variables.keys():
self.Ftilde[str(m)].value = np.zeros(self.F.dimension)
self.Y[m].value = np.zeros(self.F.dimension)
for m in self.K.variables.keys():
self.Ktilde[str(m)].value = np.zeros(self.K.dimension)
for m in self.P.variables.keys():
self.Ptilde[str(m)].value = np.zeros(self.P.dimension)
else:
np.random.seed(seed)
for m in self.F.variables.keys():
self.Ftilde[str(m)].value = np.random.uniform(
-5, 5, size=self.F.dimension)
self.Y[m].value = np.random.uniform(
-5, 5, size=self.F.dimension)
for m in self.K.variables.keys():
self.Ktilde[str(m)].value = np.random.uniform(
-5, 5, size=self.K.dimension)
for m in self.P.variables.keys():
self.Ptilde[str(m)].value = np.random.uniform(
-5, 5, size=self.P.dimension)
upred = cv.sum([cv.multiply(np.expand_dims(self.data[str(mon)], 0),
self.K.variables[mon] @ self.data['Z'])
for mon in self.K.monomials])
loss = cv.norm(self.data['u'] - upred, 'fro') ** 2 / self.data['N']
con1 = {mon : self.Ftilde[str(mon)] + self.Y[mon]
for mon in self.F.monomials}
for mon1 in self.K.monomials:
for mon2 in self.P.monomials:
mon = mon1 * mon2
con1[mon] += - self.K.variables[mon1] @ self.Ptilde[str(mon2)]
self.loss_function = cv.Problem(cv.Minimize(loss))
aug1 = 1 / 2 * cv.sum([rho * cv.norm(con1[mon], 'fro')**2
for mon in | |
)
# Now make the master attribute
jsonDebug( 'Creating master light data attribute' )
attr = ldAttr.create( name, shortName,
ldChildren[0], ldChildren[1], ldChildren[2],
ldChildren[3], ldChildren[4], ldChildren[5],
ldChildren[6], ldChildren[7] )
jsonDebug( 'Setting master light data defaults' )
ldAttr.default = defaultValues
return attr
#----------------------------------------------------------------------
def parseMessageAttribute(self, name, shortName, attrInfo):
"""
Given a JSON subsection describing a message attribute create the
attribute and set all of the provided flags/members for it.
name = Attribute long name
shortName = Attribute short name
attrInfo = JSON object containing the main attribute information
"""
jsonDebug( 'parseMessageAttribute(%s)' % name )
mAttr = omAPI.MFnMessageAttribute()
jsonDebug( 'Creating message attribute' )
attr = mAttr.create( name, shortName )
return attr
#----------------------------------------------------------------------
def parseStringAttribute(self, name, shortName, attrInfo):
"""
Given a JSON subsection describing a string attribute create the
attribute and set all of the provided flags/members for it.
name = Attribute long name
shortName = Attribute short name
attrInfo = JSON object containing the main attribute information
"""
jsonDebug( 'parseStringAttribute(%s)' % name )
sAttr = omAPI.MFnTypedAttribute()
if JsonKeys.kKeyDefault in attrInfo:
jsonDebug( 'Setting the string default to "%s"' % attrInfo[JsonKeys.kKeyDefault] )
sDefault = omAPI.MFnStringData()
defaultValue = sDefault.create( attrInfo[JsonKeys.kKeyDefault] )
attr = sAttr.create( name, shortName, omAPI.MFnData.kString, defaultValue )
else:
jsonDebug( 'Creating string attribute with no default' )
attr = sAttr.create( name, shortName, omAPI.MFnData.kString )
return attr
#----------------------------------------------------------------------
def parseMatrixAttribute(self, name, shortName, attrInfo):
"""
Given a JSON subsection describing a matrix attribute create the
attribute and set all of the provided flags/members for it.
name = Attribute long name
shortName = Attribute short name
attrInfo = JSON object containing the main attribute information
"""
jsonDebug( 'parseMatrixAttribute(%s)' % name )
matrixType = JsonKeys.kTypeMatrixTypes[attrInfo[JsonKeys.kKeyAttrType]]
if JsonKeys.kKeyDefault in attrInfo:
jsonDebug( 'Setting the matrix default to "%s"' % attrInfo[JsonKeys.kKeyDefault] )
mDefault = omAPI.MFnMatrixData()
defaultValue = mDefault.create( omAPI.MMatrix(attrInfo[JsonKeys.kKeyDefault]) )
mAttr = omAPI.MFnMatrixAttribute( defaultValue )
attr = mAttr.create( name, shortName, matrixType )
else:
jsonDebug( 'Creating matrix attribute with no default' )
mAttr = omAPI.MFnMatrixAttribute()
attr = mAttr.create( name, shortName, matrixType )
return attr
#----------------------------------------------------------------------
def parseNumericAttribute(self, name, shortName, numericType, attrInfo):
"""
Given a JSON subsection describing a numeric attribute create the
attribute and set all of the provided flags/members for it.
name = Attribute long name
shortName = Attribute short name
type = Numeric type
attrInfo = JSON object containing the main attribute information
"""
jsonDebug( 'parseNumericAttribute(%s, type=%s)' % (name, numericType) )
if numericType in [ 'angle', 'distance', 'time' ]:
jsonDebug( '... unit attribute type being set up' )
nAttr = omAPI.MFnUnitAttribute()
else:
jsonDebug( '... regular numeric attribute type being set up' )
nAttr = omAPI.MFnNumericAttribute()
jsonDebug( 'Creating numeric attribute' )
attr = nAttr.create( name, shortName, JsonKeys.kNumericTypes[numericType] )
jsonDebug( '...creation succeeded' )
if JsonKeys.kKeyDefault in attrInfo:
defaultValue = attrInfo[JsonKeys.kKeyDefault]
jsonDebug( '...setting numeric default to %s - it is a %s' % (str(defaultValue), type(defaultValue)) )
if type(defaultValue) == list:
# Internally the array numerics insist on tuples for defaults
jsonDebug( '...converting to tuple %s' % str(tuple(defaultValue)) )
nAttr.default = tuple(defaultValue)
else:
nAttr.default = defaultValue
jsonDebug( 'Setting range information on attribute' )
# Parse the numeric-specific attributes
if JsonKeys.kKeyMin in attrInfo:
jsonDebug( '...setting minimum' )
if type(attrInfo[JsonKeys.kKeyMin]) == list:
# Internally the array numerics insist on tuples for values
# but in the JSON it makes more sense to have them as a list
jsonDebug( '...converting list %s to tuple' % attrInfo[JsonKeys.kKeyMin] )
nAttr.setMin( tuple(attrInfo[JsonKeys.kKeyMin]) )
else:
jsonDebug( '...using %s as-is' % attrInfo[JsonKeys.kKeyMin] )
nAttr.setMin( attrInfo[JsonKeys.kKeyMin] )
#
if JsonKeys.kKeyMax in attrInfo:
jsonDebug( '...setting maximum' )
if type(attrInfo[JsonKeys.kKeyMax]) == list:
# Internally the array numerics insist on tuples for values
# but in the JSON it makes more sense to have them as a list
jsonDebug( '...converting list %s to tuple' % attrInfo[JsonKeys.kKeyMax] )
nAttr.setMax( tuple(attrInfo[JsonKeys.kKeyMax]) )
else:
jsonDebug( '...using %s as-is' % attrInfo[JsonKeys.kKeyMax] )
nAttr.setMax( attrInfo[JsonKeys.kKeyMax] )
#
if JsonKeys.kKeySoftMin in attrInfo:
jsonDebug( '...setting soft minimum to %s' % attrInfo[JsonKeys.kKeySoftMin] )
nAttr.setSoftMin( attrInfo[JsonKeys.kKeySoftMin] )
#
if JsonKeys.kKeySoftMax in attrInfo:
jsonDebug( '...setting soft maximum to %s' % attrInfo[JsonKeys.kKeySoftMax] )
nAttr.setSoftMax( attrInfo[JsonKeys.kKeySoftMax] )
jsonDebug( 'Numeric attribute creation of "%s" complete' % attr )
return attr
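# Illustrative (hypothetical) JSON fragment for a numeric attribute as consumed by
# parseNumericAttribute. The keyword strings shown are placeholders; the real ones
# are whatever JsonKeys defines:
#
#     { "name": "opacity", "shortName": "op", "attributeType": "float",
#       "defaultValue": 1.0, "min": 0.0, "max": 1.0,
#       "softMin": 0.1, "softMax": 0.9 }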
#----------------------------------------------------------------------
def parseAttribute(self, jsonInfo):
"""
Create an attribute using the JSON parameters to decode the structure
and values for the attribute. If the attribute is a compound then this
method will be recursively called so as to create the entire attribute
tree below it.
jsonInfo = JSON object containing the attribute's information
Returns the newly created attribute.
"""
if JsonKeys.kKeyName not in jsonInfo:
self.reportError( 'Missing attribute name' )
self.currentAttribute = jsonInfo[JsonKeys.kKeyName]
jsonDebug( 'parseAttribute(%s)' % str(jsonInfo) )
attr = None
# Short name must always be present so find or generate one now
if JsonKeys.kKeyShortName in jsonInfo:
shortName = jsonInfo[JsonKeys.kKeyShortName]
else:
shortName = self.currentAttribute
jsonDebug( '...got short name %s' % shortName )
#----------------------------------------
# Create the specific type of attribute requested and handle the
# type-specific parameters.
#
if JsonKeys.kKeyAttrType not in jsonInfo:
self.reportError('Required keyword "%s" missing' % JsonKeys.kKeyAttrType)
elif jsonInfo[JsonKeys.kKeyAttrType] in JsonKeys.kNumericTypes:
attr = self.parseNumericAttribute( self.currentAttribute, shortName, jsonInfo[JsonKeys.kKeyAttrType], jsonInfo )
elif jsonInfo[JsonKeys.kKeyAttrType] == JsonKeys.kTypeCompound:
attr = self.parseCompoundAttribute( self.currentAttribute, shortName, jsonInfo )
elif jsonInfo[JsonKeys.kKeyAttrType] == JsonKeys.kTypeEnum:
attr = self.parseEnumAttribute( self.currentAttribute, shortName, jsonInfo )
elif jsonInfo[JsonKeys.kKeyAttrType] == JsonKeys.kTypeString:
attr = self.parseStringAttribute( self.currentAttribute, shortName, jsonInfo )
elif jsonInfo[JsonKeys.kKeyAttrType] in JsonKeys.kTypeMatrix:
attr = self.parseMatrixAttribute( self.currentAttribute, shortName, jsonInfo )
elif jsonInfo[JsonKeys.kKeyAttrType] == JsonKeys.kTypeTyped:
attr = self.parseTypedAttribute( self.currentAttribute, shortName, jsonInfo )
elif jsonInfo[JsonKeys.kKeyAttrType] == JsonKeys.kTypeLightData:
attr = self.parseLightDataAttribute( self.currentAttribute, shortName, jsonInfo )
elif jsonInfo[JsonKeys.kKeyAttrType] == JsonKeys.kTypeMessage:
attr = self.parseMessageAttribute( self.currentAttribute, shortName, jsonInfo )
else:
self.reportError( 'Unknown attribute type "%s"' % jsonInfo[JsonKeys.kKeyAttrType] )
return None
jsonDebug( 'Done creating attribute "%s", now setting shared parameters' % str(attr) )
#----------------------------------------
# Handle the parameters common to all attribute types
#
aBase = omAPI.MFnAttribute( attr )
jsonDebug( '...handling common attribute flags for "%s"' % str(aBase) )
# Handle the standard flags
if JsonKeys.kKeyFlags in jsonInfo:
self.parseStandardFlags( aBase, jsonInfo[JsonKeys.kKeyFlags] )
# Look for a nice name override
if JsonKeys.kKeyNiceName in jsonInfo:
jsonDebug( '...Overriding nice name with "%s"' % jsonInfo[JsonKeys.kKeyNiceName] )
aBase.setNiceNameOverride( jsonInfo[JsonKeys.kKeyNiceName] )
# See if the attribute has been added to any categories
if JsonKeys.kKeyCategories in jsonInfo:
for category in jsonInfo[JsonKeys.kKeyCategories]:
jsonDebug( '...Adding category "%s"' % category )
aBase.addToCategory( category )
jsonDebug( '...Done on category "%s"' % category )
jsonDebug( '...Done the categories' )
# See if there is any special disconnection behaviour
if JsonKeys.kKeyDisconnect in jsonInfo:
behavior = jsonInfo[JsonKeys.kKeyDisconnect]
jsonDebug( '...Setting disconnect behaviour to "%s"' % behavior )
if behavior in JsonKeys.kDisconnectBehaviors:
aBase.disconnectBehavior = JsonKeys.kDisconnectBehaviors[behavior]
else:
self.reportError( 'Unknown behavior type "%s"' % behavior )
return attr
#----------------------------------------------------------------------
def parseJsonPatterns(self, jsonObj):
"""
The workhorse method. Takes the JSON Python object and deconstructs it
into one or more pattern descriptions and returns them.
If any of the patterns fails to create, an exception is raised.
ValueError : When the JSON text is not valid
ValueError : When the pattern name already exists
"""
patternList = []
try:
jsonDebug( 'parseJsonPatterns(%d patterns)' % len(jsonObj) )
for thisPattern in jsonObj:
jsonDebug( '...Pattern is %s' % thisPattern )
if JsonKeys.kKeyName not in thisPattern:
self.reportError( 'Missing pattern name' )
continue
self.currentPattern = thisPattern[JsonKeys.kKeyName]
newPattern = omAPI.MAttributePattern( self.currentPattern )
if "attributes" not in thisPattern:
self.reportError( 'Empty attribute list' )
continue
jsonDebug( '...Pattern %s has %d attributes' % (self.currentPattern, len(thisPattern["attributes"])) )
for thisAttribute in thisPattern["attributes"]:
jsonDebug('Started parsing attribute "%s"' % str(thisAttribute))
attr = self.parseAttribute( thisAttribute )
jsonDebug('Completed parsing of attribute "%s"' % str(attr))
# If the attribute creation succeeded add it to the pattern.
# If it failed the creation code will have already reported
# the problem with the attribute's description.
newPattern.addRootAttr(attr)
jsonDebug( 'Done adding the attribute to the pattern' )
patternList.append( newPattern )
except Exception as e:
self.reportError( e )
return patternList
#----------------------------------------------------------------------
def createPatternsFromString(self, definition):
"""
Decode the input string from JSON format and create a set of
patterns each containing the set of root attributes described in
that pattern's data.
"""
jsonDebug( 'createPatternsFromString(%d chars, %d lines)' % (len(definition), definition.count('\n')) )
parsedPattern = None
try:
jsonPattern = json.loads( definition )
jsonDebug( 'Created pattern %s' % str(jsonPattern) )
parsedPattern = self.parseJsonPatterns( jsonPattern )
except Exception as e:
self.reportError( e )
return parsedPattern
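# Hedged end-to-end sketch (the JSON keyword strings are illustrative; the real ones
# are defined in JsonKeys, and `parser` stands for an instance of this class):
#
#     patterns = parser.createPatternsFromString(
#         '[{"name": "examplePattern", "attributes": ['
#         '{"name": "weight", "attributeType": "float", "defaultValue": 0.5}]}]')
#
# Each entry of the returned list is an MAttributePattern whose root attributes were
# built by parseAttribute.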
#----------------------------------------------------------------------
def createPatternsFromFile(self, fileName):
"""
Decode the input file contents from JSON format and create a set of
patterns each containing the set of root attributes described in
that pattern's data.
"""
jsonDebug( 'createPatternsFromFile(%s)' % fileName )
parsedPattern = None
try:
with open(fileName, 'r') as fd:
definition = fd.read()
parsedPattern = self.createPatternsFromString( definition )
except Exception as e:
self.reportError( e )
return parsedPattern
#----------------------------------------------------------------------
def name(self):
"""
Get the name of this pattern type.
"""
return JsonKeys.kPluginPatternFactoryName
#======================================================================
# Initialize the plug-in
def initializePlugin(plugin):
import_helpers()
pluginFn = omAPI.MFnPlugin(plugin)
try:
pluginFn.registerAttributePatternFactory(
JsonKeys.kPluginPatternFactoryName, PyJsonAttrPatternFactory.patternFactoryCreator
)
except:
sys.stderr.write(
"Failed to register attribute pattern factory: %s\n" % JsonKeys.kPluginPatternFactoryName
)
raise
#======================================================================
# Uninitialize the plug-in
def uninitializePlugin(plugin):
pluginFn = omAPI.MFnPlugin(plugin)
try:
pluginFn.deregisterAttributePatternFactory(JsonKeys.kPluginPatternFactoryName)
except:
sys.stderr.write(
"Failed to unregister command: %s\n" % JsonKeys.kPluginPatternFactoryName
)
raise
#-
# ==========================================================================
# Copyright (C) 2011 Autodesk, Inc. and/or its licensors. All
# rights reserved.
#
# The coded instructions, statements, computer programs, and/or related
# material (collectively the "Data") in these files contain unpublished
# information proprietary to Autodesk, Inc. ("Autodesk") and/or its
# licensors, which is protected by U.S. and Canadian federal copyright
# law and by international treaties.
#
# The Data is provided for use exclusively by You. You have the right
# to use, modify, and incorporate this Data into other products for
# purposes authorized by the Autodesk software license agreement,
# without fee.
#
# The copyright notices in the Software and this entire statement,
# including the above license grant, this restriction and the
# following disclaimer, must be included in all copies of the
# Software, in whole or in part, | |
is None:
print ("Mask file compulsory. Please provide mask='maskfilepath.ply'")
else:
# rweights = np.array([])
datR, rweights = randcatprep(datfile, randcatsize, maskfile, cosmology)
# randfile='./randcat.dat'
# datR, rweights=datprep(randfile,'random',cosmology)
else:
datR, rweights = datprep(randfile, 'random', cosmology)
# if len(weights)!=0:
# rfdat=readinfile(randfile,ftype='internal')
# rweights=1.0/(1.0+4.0*np.array(rfdat['nz']))
# rweights=rweights/np.mean(rweights)
# print (rweights)
# Nr=len(datR)
global Nr
Nr = len(datR)
fact = (1.0*Nr)/Nd
# Creating module-wise global balltrees so that they don't have to be created many times.
global dbt
global rbt
print ("Creating BallTree for data points using metric=")
print (metric)
dbt = BallTree(dat, metric='pyfunc', func=metric)
print ("Creating BallTree for random points using metric=")
print (metric)
rbt = BallTree(datR, metric='pyfunc', func=metric)
print ("Calculating 2pCF...")
# f=(1.0*Nrd)/N
# print (weights)
# Reference: arXiv: 1211.6211
if estimator == 'dp':
if weightsflag is False: # or len(weights) != Nd:
# print (weightsflag)
# print(len(weights))
# print(len(datR))
DD = DDcalc(dat, binsq)
DR = DRcalc(dat, binsq)
RD = RDcalc(datR, binsq)
else:
if useones is True or len(weights) != Nd:
weights = np.ones(Nd)
rweights = np.ones(Nr)
# if len(rweights)!=len(datR):
DD = DDwcalc(dat, binsq, metric, weights)
DR = DRwcalc(dat, datR, binsq, metric, rweights)
RD = RDwcalc(dat, datR, binsq, metric, weights)
# else:
# DD=DDwcalc(dat,binsq,metric,weights)
# DR=DRwcalc(dat,datR,binsq,metric,rweights)
print ("Using Davis-Peebles estimator")
correl = fact*(DD*2.0/(DR+RD))-1.0
elif estimator == 'ph':
if weightsflag is False: # or len(weights) != Nd:
DD = DDcalc(dat, binsq)
RR = RRcalc(datR, binsq)
else:
if useones is True or len(weights) != Nd:
weights = np.ones(Nd)
rweights = np.ones(Nr)
DD = DDwcalc(dat, binsq, metric, weights)
if len(rweights) != Nr:
RR = RRcalc(datR, binsq)
else:
RR = RRwcalc(datR, binsq, metric, rweights)
print ("Using Peebles-Hauser estimator")
correl = fact**2*(DD/RR)-1.0
else:
if weightsflag is False: # or len(weights) != Nd:
DD = DDcalc(dat, binsq)
RR = RRcalc(datR, binsq)
DR = DRcalc(dat, binsq)
RD = RDcalc(datR, binsq)
else:
if useones is True or len(weights) != Nd:
weights = np.ones(Nd)
rweights = np.ones(Nr)
DD = DDwcalc(dat, binsq, metric, weights)
DR = DRwcalc(dat, datR, binsq, metric, rweights)
RD = RDwcalc(dat, datR, binsq, metric, weights)
RR = RRwcalc(datR, binsq, metric, rweights)
if estimator == 'ls':
print ("Using Landy-Szalay estimator")
# correl = (DD-2.0*DR+RR)/RR
correl = fact**2*(DD/RR)-fact*(DR+RD)/RR+1.0
elif estimator == 'hew':
print ("Using Hewett estimator")
correl = fact**2*(DD/RR)-fact*0.5*(DR+RD)/RR
elif estimator == 'h':
print ("Using Hamilton estimator")
correl = (4.0*DD*RR)/(DR+RD)**2 - 1.0
correlerr = poserr(correl, DD)
print("Two-point correlation=")
np.savetxt("DD_"+str(cosmology)+"_"+str(geometry)+"_"+str(estimator)+".txt", DD)
np.savetxt("DR_"+str(cosmology)+"_"+str(geometry)+"_"+str(estimator)+".txt", DR)
np.savetxt("RD_"+str(cosmology)+"_"+str(geometry)+"_"+str(estimator)+".txt", RD)
np.savetxt("RR_"+str(cosmology)+"_"+str(geometry)+"_"+str(estimator)+".txt", RR)
np.savetxt("bins_"+str(cosmology)+"_"+str(geometry)+"_"+str(estimator)+".txt", bins)
np.savetxt("tpcf_"+str(cosmology)+"_"+str(geometry)+"_"+str(estimator)+".txt", (correl, correlerr))
print (correl, correlerr)
return correl, correlerr
def DDcalc(dat, bins):
print ("Calculating DD...\n DD=")
DD = autocorr(dat, bins)
DD[DD == 0] = 1.0
# Nd = len(dat)
# DD = DD/(Nd*(Nd-1.0))
print (DD)
return DD
def RRcalc(datR, bins):
print ("Calculating RR...\n RR=")
RR = rautocorr(datR, bins)
RR[RR == 0] = 1.0
# Nr = len(datR)
# RR = RR/(Nr*(Nr-1.0))
print (RR)
return RR
def DRcalc(dat, bins):
print ("Calculating DR...\n DR=")
DR = crosscorr(dat, bins)
DR[DR == 0] = 1.0
# Nd = len(dat)
# Nr = len(datR)
# DR = DR/(Nd*Nr)
print (DR/2.0)
return DR/2.0
def RDcalc(datR, bins):
print ("Calculating RD...\n RD=")
RD = crosscorrd(datR, bins)
RD[RD == 0] = 1.0
# Nd = len(dat)
# Nr = len(datR)
# DR = DR/(Nd*Nr)
print (RD/2.0)
return RD/2.0
def autocorr(dat, bins):
counts_DD = dbt.two_point_correlation(dat, bins)
DD = np.diff(counts_DD)
return DD
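# Counting convention: BallTree.two_point_correlation returns cumulative pair counts
# up to each radius in `bins`, so np.diff converts them to per-bin counts.
# Tiny illustrative example (made-up numbers):
#
#     counts_DD = [10, 25, 60]   # pairs with separation < bins[0], bins[1], bins[2]
#     np.diff(counts_DD)         # -> array([15, 35]), pairs inside each bin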
# def autocorrp(dat, bins):
# pool = Pool(processes=pcpus)
# counts_DD = pool.map(dbt.two_point_correlation, (dat, bins))
# DD = np.diff(counts_DD)
# return DD
def rautocorr(datR, bins):
counts_RR = rbt.two_point_correlation(datR, bins)
RR = np.diff(counts_RR)
return RR
def crosscorr(dat, bins):
counts_DR = rbt.two_point_correlation(dat, bins)
DR = np.diff(counts_DR)
return 2.0*DR
def crosscorrd(datR, bins):
counts_RD = dbt.two_point_correlation(datR, bins)
RD = np.diff(counts_RD)
return 2.0*RD
def poserr(xi, DD):
print ("Calculating Poisson error")
return (1.0+xi)/np.sqrt(DD)
# alternatively
# rbt=BallTree(dat,metric='pyfunc',func=metric)
# counts_RD=rbt.two_point_correlation(dat,bins)
def DDwcalc(dat, bins, metric, weights):
print ("Calculating DD with weights (parallelized)...\n DD=")
# DD = autocorrw(dat, bins, metric, weights)
# Nd = len(dat)
DD = multi_autocp(dat, bins, metric, weights, Nd, pcpus)
DD[DD == 0] = 1.0
# DD = DD/(Nd*(Nd-1.0)) # factor of 2 cancels with 1/2 that needs to be done to remove double counting of pairs
print (DD)
return DD
def RRwcalc(datR, bins, metric, rweights):
print ("Calculating RR with weights (parallelized)...\n RR=")
# RR = autocorrw(datR, bins, metric, weights)
# Nr = len(datR)
RR = multi_autocpr(datR, bins, metric, rweights, Nr, pcpus)
RR[RR == 0] = 1.0
# RR = RR/(Nr*(Nr-1.0))
print (RR)
return RR
def DRwcalc(dat, datR, bins, metric, rweights):
print ("Calculating DR with weights (parallelized)...\n DR=")
# DR = crosscorrw(dat, datR, bins, metric, rweights)
# Nd = len(dat)
# Nr = len(datR)
# DR = multi_crosscp(dat, datR, bins, metric, rweights, Nd, pcpus)
DR = multi_crosscpdr(dat, datR, bins, metric, rweights, Nd, pcpus)
DR[DR == 0] = 1.0
# DR = DR/(Nd*Nr)
print (DR/2.0)
return DR/2.0
def RDwcalc(dat, datR, bins, metric, weights):
print ("Calculating RD with weights...\n RD=")
# DR = crosscorrwrd(dat, datR, bins, metric, weights)
# Nd = len(dat)
# Nr = len(datR)
DR = multi_crosscp(dat, datR, bins, metric, weights, Nr, pcpus)
DR[DR == 0] = 1.0
# DR = DR/(Nd*Nr)
print (DR/2.0)
return DR/2.0
def autocorrw(dat, bins, metric, weights):
# dbt = BallTree(dat, metric='pyfunc', func=metric)
DD = np.zeros(len(bins)-1)
binmax = max(bins)
for i in tqdm(range(len(dat))):
ind = dbt.query_radius(dat[i].reshape(1, -1), binmax)
# wts=np.array([])
for j in ind:
# print ("i j")
# print (i, j)
# print ("ind[ind>i]")
# print (ind[ind>i])
dist0 = dist.cdist([dat[i], ], dat[j[j>i]], metric)[0]
DD += np.histogram(dist0, bins=bins, weights=weights[j[j>i]])[0]
# print (dist0,weights[j])
print(DD)
return DD
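# Sketch of the weighted pair-counting pattern used above and in the other weighted
# helpers: query_radius finds all candidate neighbours within the largest bin edge,
# cdist recomputes their exact metric separations, and np.histogram accumulates the
# neighbour weights into the bins; the j[j > i] mask counts each pair only once.
#
#     ind = dbt.query_radius(dat[i].reshape(1, -1), binmax)
#     dist0 = dist.cdist([dat[i]], dat[j[j > i]], metric)[0]
#     DD += np.histogram(dist0, bins=bins, weights=weights[j[j > i]])[0]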
def crosscorrw(dat, datR, bins, metric, rweights):
# rbt = BallTree(datR, metric='pyfunc', func=metric)
DR = np.zeros(len(bins)-1)
binmax = max(bins)
for i in tqdm(range(len(dat))):
ind = rbt.query_radius(dat[i].reshape(1, -1), binmax)
# wts=np.array([])
for j in ind:
dist0 = dist.cdist([dat[i], ], datR[j], metric)[0]
DR += np.histogram(dist0, bins=bins, weights=rweights[j])[0]
# print (dist0,weights[j])
return DR
def crosscorrwrd(dat, datR, bins, metric, weights):
# dbt = BallTree(dat, metric='pyfunc', func=metric)
RD = np.zeros(len(bins)-1)
# p=multiprocessing.Pool(processes=multiprocessing.cpu_count())
# RD=p.map(rdcalc, range(len(datR)))
binmax = max(bins)
for i in tqdm(range(len(datR))):
# def rdcalc():
ind = dbt.query_radius(datR[i].reshape(1, -1), binmax)
# wts=np.array([])
for j in ind:
dist0 = dist.cdist([datR[i], ], dat[j], metric)[0]
RD += np.histogram(dist0, bins=bins, weights=weights[j])[0]
# print (dist0,weights[j])
# return RD
print(RD)
return RD
def autocorrwp(dat, bins, metric, weights, rNd, multi=False, queue=0):
# dbt = BallTree(dat, metric='pyfunc', func=metric)
DD = np.zeros(len(bins)-1)
binmax = max(bins)
for i in tqdm(rNd):
ind = dbt.query_radius(dat[i].reshape(1, -1), binmax)
# wts=np.array([])
for j in ind:
# print ("i j")
# print (i, j)
# print ("ind[ind>i]")
# print (ind)
# print (ind[ind>i])
dist0 = dist.cdist([dat[i], ], dat[j[j>i]], metric)[0]
DD += np.histogram(dist0, bins=bins, weights=weights[j[j>i]])[0]
# print (dist0,weights[j])
if multi:
queue.put(DD)
else:
return DD
# print (DD)
return DD
def crosscorrwrdp(dat, datR, bins, metric, weights, rNr, multi=False, queue=0):
# dbt = BallTree(dat, metric='pyfunc', func=metric)
RD = np.zeros(len(bins)-1)
binmax = max(bins)
# p=multiprocessing.Pool(processes=multiprocessing.cpu_count())
# RD=p.map(rdcalc, range(len(datR)))
for i in tqdm(rNr):
# def rdcalc():
ind = dbt.query_radius(datR[i].reshape(1, -1), binmax)
# wts=np.array([])
for j in ind:
dist0 = dist.cdist([datR[i], ], dat[j], metric)[0]
RD += np.histogram(dist0, bins=bins, weights=weights[j])[0]
if multi:
queue.put(RD)
else:
return RD
# print(RD)
return RD
def crosscorrwdrp(dat, datR, bins, metric, rweights, rNd, multi=False, queue=0):
# dbt = BallTree(dat, metric='pyfunc', func=metric)
DR = np.zeros(len(bins)-1)
binmax = max(bins)
# p=multiprocessing.Pool(processes=multiprocessing.cpu_count())
# RD=p.map(rdcalc, range(len(datR)))
for i in tqdm(rNd):
# def rdcalc():
ind = rbt.query_radius(dat[i].reshape(1, -1), binmax)
# wts=np.array([])
for j in ind:
dist0 = dist.cdist([dat[i], ], datR[j], metric)[0]
DR += np.histogram(dist0, bins=bins, weights=rweights[j])[0]
if multi:
queue.put(DR)
else:
return DR
# print(RD)
return DR
def autocorrwpr(datR, bins, metric, rweights, rNr, multi=False, queue=0):
# dbt = BallTree(dat, metric='pyfunc', func=metric)
RR = np.zeros(len(bins)-1)
binmax = max(bins)
for i in tqdm(rNr):
ind = rbt.query_radius(datR[i].reshape(1, -1), binmax)
# print (ind)
# wts=np.array([])
for j in ind:
# print("i")
# print (i)
# print ("j")
# print (j)
# print ("j[j>i]")
# print (j[j>i])
dist0 = dist.cdist([datR[i], ], datR[j[j>i]], metric)[0]
RR += np.histogram(dist0, bins=bins, weights=rweights[j[j>i]])[0]
# print (dist0,weights[j])
if multi:
queue.put(RR)
else:
return RR
# print (RR)
return RR
def multi_autocp(dat, bins, metric, weights, Nd, CORES=pcpus):
DD = np.zeros(len(bins)-1)
queues = [RetryQueue() for i in range(CORES)]
args = [(dat, bins, metric, weights, range(int(Nd*i/CORES), int(Nd*(i+1)/CORES)), True, queues[i]) for i in range(CORES)]
jobs = [Process(target=autocorrwp, | |
def clearCache(group, x = 0, y = 0):
if confirm("Reset the Autoscript Tag cache?"):
global savedtags
savedtags = { }
notify("{} reset the global tag cache.".format(me))
if confirm("Reset the Attachment Dictionary?\nNote: Cards will no longer be attached."):
setGlobalVariable('cattach', "{ }")
notify("{} reset the global attachment dictionary.".format(me))
def autoscriptCheck():
return getSetting("autoscripts", True)
def alignCheck():
return getSetting("alignment", True)
def anchorCheck():
return getSetting("anchor", True)
def debugCheck():
return getSetting("debugTimer", False)
def autoscriptMenu(group, x = 0, y = 0):
mute()
options = {1: ("autoscripts", "Autoscripts", autoscriptCheck()),
2: ("alignment", "Automatic Alignment", alignCheck()),
3: ("anchor", "Alignment Anchoring", anchorCheck()),
4: ("debugTimer", "Debug Message", debugCheck()) }
ret = 1
while ret > 0:
names = []
colors = []
for x in options:
names.append(options[x][1])
colors.append("#666666" if options[x][2] == False else "#000077")
ret = askChoice("Toggle Automation Settings:", names, colors)
if ret > 0:
options[ret] = (options[ret][0], options[ret][1], not options[ret][2])
if options[1][2] != autoscriptCheck():
setSetting(options[1][0], options[1][2])
if options[2][2] != alignCheck():
setSetting(options[2][0], options[2][2])
if options[2][2]:
cardalign()
if options[3][2] != anchorCheck():
setSetting(options[3][0], options[3][2])
if not options[3][2]:
global alignIgnore
alignIgnore = []
if options[4][2] != debugCheck():
setSetting(options[4][0], options[4][2])
def debugWhisper(text, timer):
if debugCheck():
elapsedTime = time.clock() - timer
whisper("DEBUG({}: {})".format(text, elapsedTime))
return time.clock()
def getTags(card, key = None):
mute()
timer = time.clock()
global savedtags, offlinedisable
cardname = card.Name
encodedcardname = Convert.ToBase64String(Text.Encoding.UTF8.GetBytes(cardname)) #encodes the card name so the website can parse it safely
if not cardname in savedtags:
#### Create a bunch of identifier tags for the card's rules, so I can sort the tag requests online
rules = card.Rules
if re.search(r'token', rules):
encodedcardname += '&token'
if re.search(r'counter', rules):
encodedcardname += "&counter"
if re.search(r'{} enters the battlefield'.format(card.name), rules):
encodedcardname += '&etb'
if re.search(r'{} dies'.format(card.name), rules):
encodedcardname += '&destroy'
if re.search(r'{} attacks'.format(card.name), rules):
encodedcardname += '&attack'
if re.search(r'{} blocks'.format(card.name), rules):
encodedcardname += '&block'
#### Fetch card tags from the online database
if offlinedisable == False:
(fulltag, code) = webRead('http://www.octgngames.com/forum/tags.php?id={}'.format(encodedcardname), 7000)
if code == 204: ## if the card tag doesn't exist on the site.
fulltag = ""
elif code != 200: ## Handles cases where the site is unavailable
whisper('tag database is currently unavailable, using offline tag cache')
offlinedisable = True
if offlinedisable == True: ## Access the tags cache in the game def if the website can't be accessed
fulltag = offlineTags.get(cardname, "")
#### Parse the raw tags into an autoscripts-readable format
tagdict = { }
classpieces = fulltag.split('; ')
for classes in classpieces:
if classes != "":
actionlist = classes.split('.')
actiondict = { }
for actionpieces in actionlist[1:]:
actionname = actionpieces[:actionpieces.find("[")]
actionparam = actionpieces[actionpieces.find("[")+1:actionpieces.find("]")]
actionparam = [x.strip() for x in actionparam.split(',')]
## add autotoken tags
if actionname == 'token':
tokenname = actionparam[0]
if not 'autotoken' in tagdict:
tagdict['autotoken'] = [ ]
if not tokenname in tagdict['autotoken']:
tagdict['autotoken'].append(tokenname)
## add automarker tags
if actionname == 'marker':
markername = actionparam[0]
if not 'automarker' in tagdict:
tagdict['automarker'] = [ ]
if not markername in tagdict['automarker']:
tagdict['automarker'].append(markername)
if not actionname in actiondict:
actiondict[actionname] = [ ]
actiondict[actionname].append(actionparam)
tagdict[actionlist[0]] = actiondict
savedtags[cardname] = tagdict
debugWhisper("getTags {} {}".format(card, key), timer)
#### Fetch and return the card tags to previous functions
if key in savedtags[cardname]:
returnTags = savedtags[cardname][key]
if returnTags == None:
return ""
else:
return returnTags
return None
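# Hypothetical example of the raw tag format parsed above (this exact string is
# illustrative, not a real database entry):
#
#     fulltag = "etb.token[Soldier, 1]; destroy.marker[Bounty, 1]"
#
# would be split on '; ' into classes and on '.' into actions, producing
#
#     {'autotoken': ['Soldier'], 'automarker': ['Bounty'],
#      'etb': {'token': [['Soldier', '1']]},
#      'destroy': {'marker': [['Bounty', '1']]}}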
def tagConstructor(card, key, modeModifier = ''):
#### this function modifies a card's base tags depending on its particular game-state or modal choices
mute()
returnTags = []
returnActiChoice = (0, '')
returnModeChoice = (0, '')
if key != 'morph' and not card.isFaceUp: ## Skip fetching tags for facedown/morph cards
return ([None, None, None, None, None, None], returnActiChoice, returnModeChoice)
#### look to see if an attachment is adding effects to the card tag
cattach = eval(getGlobalVariable('cattach'))
attachments = [Card(k) for (k,v) in cattach.iteritems() if v == card._id]
#### activated abilities require a specific case because of cards having more than one potential ability to choose from
#### this will split apart the card's rules text to predict potential abilities and ask the player to choose one
if key == 'acti':
count = 0
rulesList = card.Rules.splitlines() ## splits up the rules text
tagsList = []
colorList = []
for lines in rulesList:
count += 1
color = '#545454' ## grey, default color for no ability
activateList = []
for tagPrefix in ['init', '', 'cost', 'initres', 'res', 'costres']:
tags = getTags(card, modeModifier + tagPrefix + key + str(count)) ## ('1' + 'init' + 'acti' + '3')
if tags != None: ## change the color to blue if the card has a autoscript tag for this ability
color = '#0000ff'
activateList.append(tags)
colorList.append(color)
tagsList.append(activateList)
## check to see if any attachments will grant additional abilities to the card
for attachment in attachments:
attachRulesList = attachment.Rules.splitlines()
attachCount = 0
for attachLines in attachRulesList:
attachCount += 1
color = '#545454'
activateList = []
for tagPrefix in ['init', '', 'cost', 'initres', 'res', 'costres']:
tags = getTags(attachment, 'attach' + tagPrefix + key + str(attachCount))
if tags != None:
color = '#0000ff'
activateList.append(tags)
if color == '#0000ff': ## we need to filter out rules text lines that weren't involved in the granted ability
colorList.append(color)
tagsList.append(activateList)
rulesList.append(attachLines)
if len(rulesList) == 0:
return ([None, None, None, None, None, None], (1, ''), returnModeChoice)
else:
actiChoice = askChoice("{}\nActivate which ability?".format(card.Name), rulesList, colorList)
if actiChoice == 0: ## exiting the window
return "BREAK" ## cancels the autoscript resolution if the player closes the window without selecting a mode
returnActiChoice = (actiChoice, rulesList[actiChoice - 1])
returnTags = tagsList[actiChoice - 1]
else:
for tagPrefix in ['init', '', 'cost', 'initres', 'res', 'costres']:
tags = getTags(card, modeModifier + tagPrefix + key)
for attachment in attachments:
attachTags = getTags(attachment, 'attach' + tagPrefix + key)
if attachTags != None:
if tags == None:
tags = attachTags
else:
tags += attachTags
returnTags.append(tags)
### Handle the 'choose one' style abilities, these will add the modal tags to the main ones.
if returnTags[0] != None:
for choice in returnTags[0].get('choice', []):
min = int(choice[0])
max = int(choice[1])
modeList = [x.split(u'\u2022 ')[1] for x in card.Rules.splitlines() if u"\u2022" in x] ## split the rules text into lines and keep the ones with the modal bullet point
## fix out of range issues
if max > len(modeList):
max = len(modeList)
if min > max:
min = max
choiceList = []
while len(choiceList) < int(max):
if len(choiceList) >= min:
choicesRemaining = max - len(choiceList)
else:
choicesRemaining = min - len(choiceList)
text = "Choose{} {}{} mode{} for {}:".format(
" up to" if len(choiceList) >= min else "",
choicesRemaining,
"" if len(choiceList) == 0 else " more",
"s" if choicesRemaining > 1 else "",
card.Name)
modeChoice = askChoice(text, modeList, ['#ffaa00' if modeList.index(x) + 1 in choiceList else '#999999' for x in modeList], customButtons = [] if len(choiceList) == 0 else ["OK"] )
if modeChoice <= 0:
if len(choiceList) >= min:
break
else:
continue
if modeChoice in choiceList:
choiceList.remove(modeChoice)
continue
choiceList.append(modeChoice)
choiceList.sort()
for mode in choiceList:
if returnActiChoice != (0, ''): ## if the mode choice was from an activated ability
newTags = tagConstructor(card, key + str(returnActiChoice[0]), str(mode))[0]
else:
newTags = tagConstructor(card, key, str(mode))[0]
for tagIndex in [1,2,3,4,5]:
if returnTags[tagIndex] == None:
returnTags[tagIndex] = newTags[tagIndex]
else:
if newTags[tagIndex] != None:
returnTags[tagIndex] += newTags[tagIndex]
returnModeChoice = (int("".join([str(x) for x in choiceList])), ", ".join([modeList[x - 1] for x in choiceList]))
return (returnTags, returnActiChoice, returnModeChoice)
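## Note on tagConstructor's return value (based on the tagPrefix loops above): returnTags is a
## 6-element list ordered [init, <base>, cost, initres, res, costres] for the requested key,
## returnActiChoice is (chosen ability number, its rules-text line), and returnModeChoice is
## (concatenated mode numbers, comma-separated mode text) for 'choose one' style cards.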
def submitTags(card, x = 0, y = 0):
mute()
encodedcardname = Convert.ToBase64String(Text.Encoding.UTF8.GetBytes(card.Name))
(url, code) = webRead('http://www.octgngames.com/forum/tags.php?id={}'.format(encodedcardname))
if code == 200 or code == 204:
if code == 200:
if not confirm("Submit an edit?\n{}".format(url)):
return
openUrl('http://www.octgngames.com/forum/submit.php?id={}'.format(encodedcardname))
else:
whisper("cannot connect to online database.")
def cardcount(card, stackData, search):
multiplier = 1
if re.search(r'-', search):
search = search.replace('-', '')
multiplier = multiplier * (0 - 1)
if re.search(r'\*', search):
intval = int(search[:search.find("*")])
search = search[search.find("*")+1:]
multiplier = multiplier * intval
if search == "x":
qty = stackData['x']
elif search == "cost":
qty = stackData['cost']
elif re.search(r'marker', search):
marker = search[search.find("marker")+7:]
addmarker = counters[marker]
qty = card.markers[addmarker]
elif search == "ask":
qty = askInteger("What is X?", | |
# Repository: ChristopherMayes/PyCSR2D
from csr2d.deposit import histogram_cic_2d
from csr2d.central_difference import central_difference_z
from csr2d.core2 import psi_sx, psi_s, psi_x0, psi_x0_hat, Es_case_B0, Es_case_A, Fx_case_A, Es_case_C, Fx_case_C, Es_case_D
from csr2d.core2 import psi_s_SC, psi_x0_SC
from csr2d.convolution import fftconvolve2
import numpy as np
from scipy.signal import savgol_filter
from scipy.interpolate import RectBivariateSpline
#from scipy.signal import convolve2d, fftconvolve, oaconvolve
from scipy.ndimage import map_coordinates
from numba import njit
import scipy.constants
mec2 = scipy.constants.value("electron mass energy equivalent in MeV") * 1e6
c_light = scipy.constants.c
e_charge = scipy.constants.e
r_e = scipy.constants.value("classical electron radius")
import time
def csr2d_kick_calc(
z_b,
x_b,
weight,
*,
gamma=None,
rho=None,
nz=100,
nx=100,
xlim=None,
zlim=None,
reuse_psi_grids=False,
psi_s_grid_old=None,
psi_x_grid_old=None,
map_f=map,
species="electron",
imethod='map_coordinates',
debug=False,
):
"""
    Calculates the 2D CSR kick on a set of particles with positions `z_b`, `x_b` and per-particle charges given by `weight`.
Parameters
----------
z_b : np.array
Bunch z coordinates in [m]
x_b : np.array
Bunch x coordinates in [m]
weight : np.array
weight array (positive only) in [C]
This should sum to the total charge in the bunch
gamma : float
Relativistic gamma
rho : float
bending radius in [m]
        if negative, particles with a positive x coordinate are on the inner side of the magnet
nz : int
number of z grid points
nx : int
number of x grid points
zlim : floats (min, max) or None
z grid limits in [m]
xlim : floats (min, max) or None
x grid limits in [m]
map_f : map function for creating potential grids.
Examples:
map (default)
executor.map
species : str
Particle species. Currently required to be 'electron'
imethod : str
Interpolation method for kicks. Must be one of:
'map_coordinates' (default): uses scipy.ndimage.map_coordinates
'spline': uses: scipy.interpolate.RectBivariateSpline
debug: bool
If True, returns the computational grids.
Default: False
Returns
-------
dict with:
ddelta_ds : np.array
relative z momentum kick [1/m]
dxp_ds : np.array
relative x momentum kick [1/m]
"""
assert species == "electron", "TODO: support species {species}"
# assert np.sign(rho) == 1, 'TODO: negative rho'
# Grid setup
if zlim:
zmin = zlim[0]
zmax = zlim[1]
else:
zmin = z_b.min()
zmax = z_b.max()
if xlim:
xmin = xlim[0]
xmax = xlim[1]
else:
xmin = x_b.min()
xmax = x_b.max()
dz = (zmax - zmin) / (nz - 1)
dx = (xmax - xmin) / (nx - 1)
# Charge deposition
t1 = time.time()
charge_grid = histogram_cic_2d(z_b, x_b, weight, nz, zmin, zmax, nx, xmin, xmax)
if debug:
t2 = time.time()
print("Depositing particles takes:", t2 - t1, "s")
# Normalize the grid so its integral is unity
norm = np.sum(charge_grid) * dz * dx
lambda_grid = charge_grid / norm
# Apply savgol filter
lambda_grid_filtered = np.array([savgol_filter(lambda_grid[:, i], 13, 2) for i in np.arange(nx)]).T
# Differentiation in z
lambda_grid_filtered_prime = central_difference_z(lambda_grid_filtered, nz, nx, dz, order=1)
# Grid axis vectors
zvec = np.linspace(zmin, zmax, nz)
xvec = np.linspace(xmin, xmax, nx)
beta = np.sqrt(1 - 1 / gamma ** 2)
t3 = time.time()
if reuse_psi_grids == True:
psi_s_grid = psi_s_grid_old
psi_x_grid = psi_x_grid_old
else:
# Creating the potential grids
psi_s_grid, psi_x_grid, zvec2, xvec2 = green_meshes(nz, nx, dz, dx, rho=rho, beta=beta)
if debug:
t4 = time.time()
print("Computing potential grids take:", t4 - t3, "s")
# Compute the wake via 2d convolution
conv_s, conv_x = fftconvolve2(lambda_grid_filtered_prime, psi_s_grid, psi_x_grid)
if debug:
t5 = time.time()
print("Convolution takes:", t5 - t4, "s")
Ws_grid = (beta ** 2 / abs(rho)) * (conv_s) * (dz * dx)
Wx_grid = (beta ** 2 / abs(rho)) * (conv_x) * (dz * dx)
# Calculate the kicks at the particle locations
# Overall factor
Nb = np.sum(weight) / e_charge
kick_factor = r_e * Nb / gamma # m
# Interpolate Ws and Wx everywhere within the grid
if imethod == 'spline':
# RectBivariateSpline method
Ws_interp = RectBivariateSpline(zvec, xvec, Ws_grid)
Wx_interp = RectBivariateSpline(zvec, xvec, Wx_grid)
delta_kick = kick_factor * Ws_interp.ev(z_b, x_b)
xp_kick = kick_factor * Wx_interp.ev(z_b, x_b)
elif imethod == 'map_coordinates':
# map_coordinates method. Should match above fairly well. order=1 is even faster.
zcoord = (z_b-zmin)/dz
xcoord = (x_b-xmin)/dx
delta_kick = kick_factor * map_coordinates(Ws_grid, np.array([zcoord, xcoord]), order=2)
xp_kick = kick_factor * map_coordinates(Wx_grid, np.array([zcoord, xcoord]), order=2)
else:
raise ValueError(f'Unknown interpolation method: {imethod}')
if debug:
t6 = time.time()
print(f'Interpolation with {imethod} takes:', t6 - t5, "s")
result = {"ddelta_ds": delta_kick, "dxp_ds": xp_kick}
if debug:
timing = np.array([t2-t1, t4-t3, t5-t4, t6-t5])
result.update(
{
"zvec": zvec,
"xvec": xvec,
"zvec2": zvec2,
"xvec2": xvec2,
"Ws_grid": Ws_grid,
"Wx_grid": Wx_grid,
"psi_s_grid": psi_s_grid,
"psi_x_grid": psi_x_grid,
"charge_grid": charge_grid,
"lambda_grid_filtered_prime": lambda_grid_filtered_prime,
"timing": timing
}
)
return result
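# Minimal usage sketch (illustrative only; the bunch arrays and beamline parameters below are made up):
#
#   import numpy as np
#   rng = np.random.default_rng(0)
#   z = rng.normal(0, 10e-6, 10_000)          # longitudinal positions [m]
#   x = rng.normal(0, 50e-6, 10_000)          # horizontal positions [m]
#   w = np.full(10_000, 250e-12 / 10_000)     # weights summing to 250 pC
#   res = csr2d_kick_calc(z, x, w, gamma=1000.0, rho=1.0, nz=100, nx=100)
#   ddelta_ds, dxp_ds = res['ddelta_ds'], res['dxp_ds']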
def green_meshes(nz, nx, dz, dx, rho=None, beta=None):
"""
    Computes Green function meshes for psi_s and psi_x simultaneously.
These meshes are in real space (not scaled space).
Parameters
----------
nz, nx : int
Size of the density mesh in z and x
dz, dx : float
Grid spacing of the density mesh in z and x [m]
rho : float
        bending radius (must be positive)
beta : float
relativistic beta
Returns:
tuple of:
psi_s_grid : np.array
Double-sized array for the psi_s Green function
psi_x_grid :
Double-sized array for the psi_x Green function
zvec2 : array-like
Coordinate vector in z (real space) [m]
xvec2 : array-like
Coordinate vector in x (real space) [m]
"""
rho_sign = 1 if rho>=0 else -1
# Change to internal coordinates
dx = dx/rho
dz = dz/(2*abs(rho))
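    # In these internal (scaled) coordinates z is measured in units of 2*|rho| and x in units of rho,
    # which is why the returned axis vectors are rescaled back via zvec2*2*rho and xvec2*rho below.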
# Double-sized array for convolution with the density
zvec2 = np.arange(-nz+1,nz+1,1)*dz # center = 0 is at [nz-1]
xvec2 = np.arange(-nx+1,nx+1,1)*dx # center = 0 is at [nx-1]
zm2, xm2 = np.meshgrid(zvec2, xvec2, indexing="ij")
# Evaluate
#psi_s_grid, psi_x_grid = psi_sx(zm2, xm2, beta)
psi_s_grid = psi_s(zm2, xm2, beta) # Numba routines!
psi_x_grid = rho_sign * psi_x0(zm2, xm2, beta, abs(dx)) # Will average around 0
return psi_s_grid, psi_x_grid, zvec2*2*rho, xvec2*rho
def green_meshes_hat(nz, nx, dz, dx, rho=None, beta=None):
"""
    Computes Green function meshes for psi_s and psi_x0_hat simultaneously.
These meshes are in real space (not scaled space).
Parameters
----------
nz, nx : int
Size of the density mesh in z and x
dz, dx : float
Grid spacing of the density mesh in z and x [m]
rho : float
        bending radius (must be positive)
beta : float
relativistic beta
Returns:
tuple of:
psi_s_grid : np.array
Double-sized array for the psi_s Green function
psi_x_grid :
Double-sized array for the psi_x Green function
zvec2 : array-like
Coordinate vector in z (real space) [m]
xvec2 : array-like
Coordinate vector in x (real space) [m]
"""
# Change to internal coordinates
dx = dx/rho
dz = dz/(2*abs(rho))
# Double-sized array for convolution with the density
zvec2 = np.arange(-nz+1,nz+1,1)*dz # center = 0 is at [nz-1]
xvec2 = np.arange(-nx+1,nx+1,1)*dx # center = 0 is at [nx-1]
zm2, xm2 = np.meshgrid(zvec2, xvec2, indexing="ij")
# Evaluate
psi_s_grid = psi_s(zm2, xm2, beta) # Numba routines!
psi_x_grid = psi_x0_hat(zm2, xm2, beta, abs(dx)) # Will average around 0
return psi_s_grid, psi_x_grid, zvec2*2*rho, xvec2*rho
def green_meshes_with_SC(nz, nx, dz, dx, rho=None, beta=None):
"""
    Computes Green function meshes for psi_s and psi_x with SC terms included.
These meshes are in real space (not scaled space).
Parameters
----------
nz, nx : int
Size of the density mesh in z and x
dz, dx : float
Grid spacing of the density mesh in z and x [m]
rho : float
        bending radius (must be positive)
beta : float
relativistic beta
Returns:
tuple of:
psi_s_grid : np.array
Double-sized array for the psi_s Green function
psi_x_grid :
Double-sized array for the psi_x Green function
zvec2 : array-like
Coordinate vector in z (real space) [m]
xvec2 : array-like
Coordinate vector in x (real space) [m]
"""
rho_sign = 1 if rho>=0 else -1
# Change to internal coordinates
dx = dx/rho
dz = dz/(2*abs(rho))
# Double-sized array for convolution with the density
zvec2 = | |
# File: src/lib/Bcfg2/Server/Core.py
"""Bcfg2.Server.Core provides the runtime support for Bcfg2 modules."""
import os
import atexit
import logging
import select
import sys
import threading
import time
import inspect
import lxml.etree
from traceback import format_exc
import Bcfg2.settings
import Bcfg2.Server
import Bcfg2.Logger
import Bcfg2.Server.FileMonitor
from Bcfg2.Bcfg2Py3k import xmlrpclib, reduce
from Bcfg2.Server.Plugin import PluginInitError, PluginExecutionError
try:
import psyco
psyco.full()
except:
pass
os.environ['DJANGO_SETTINGS_MODULE'] = 'Bcfg2.settings'
def exposed(func):
func.exposed = True
return func
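# Marks a method as callable through the server's RPC interface; dispatch code elsewhere in Bcfg2
# is expected to check this `exposed` attribute before invoking a method (common XML-RPC convention).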
def sort_xml(node, key=None):
for child in node:
sort_xml(child, key)
try:
sorted_children = sorted(node, key=key)
except TypeError:
sorted_children = node
node[:] = sorted_children
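# sort_xml recursively sorts element children in place; BuildConfiguration below calls it as
# sort_xml(config, key=lambda e: e.get('name')) so generated configurations come out in a stable order.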
class CoreInitError(Exception):
"""This error is raised when the core cannot be initialized."""
pass
class BaseCore(object):
"""The Core object is the container for all
Bcfg2 Server logic and modules.
"""
def __init__(self, setup, start_fam_thread=False):
self.datastore = setup['repo']
if setup['debug']:
level = logging.DEBUG
elif setup['verbose']:
level = logging.INFO
else:
level = logging.WARNING
# we set a higher log level for the console by default. we
# assume that if someone is running bcfg2-server in such a way
# that it _can_ log to console, they want more output. if
# level is set to DEBUG, that will get handled by
# setup_logging and the console will get DEBUG output.
Bcfg2.Logger.setup_logging('bcfg2-server',
to_console=logging.INFO,
to_syslog=setup['syslog'],
to_file=setup['logging'],
level=level)
self.logger = logging.getLogger('bcfg2-server')
try:
fm = Bcfg2.Server.FileMonitor.available[setup['filemonitor']]
except KeyError:
self.logger.error("File monitor driver %s not available; "
"forcing to default" % filemonitor)
fm = Bcfg2.Server.FileMonitor.available['default']
famargs = dict(ignore=[], debug=False)
if 'ignore' in setup:
famargs['ignore'] = setup['ignore']
if 'debug' in setup:
famargs['debug'] = setup['debug']
try:
self.fam = fm(**famargs)
except IOError:
msg = "Failed to instantiate fam driver %s" % setup['filemonitor']
self.logger.error(msg, exc_info=1)
raise CoreInitError(msg)
self.pubspace = {}
self.cfile = setup['configfile']
self.cron = {}
self.plugins = {}
self.plugin_blacklist = {}
self.revision = '-1'
self.password = setup['password']
self.encoding = setup['encoding']
self.setup = setup
atexit.register(self.shutdown)
# Create an event to signal worker threads to shutdown
self.terminate = threading.Event()
# generate Django ORM settings. this must be done _before_ we
# load plugins
Bcfg2.settings.read_config(cfile=self.setup['web_configfile'],
repo=self.datastore)
self._database_available = False
# verify our database schema
try:
from Bcfg2.Server.SchemaUpdater import update_database, UpdaterError
try:
update_database()
self._database_available = True
except UpdaterError:
err = sys.exc_info()[1]
self.logger.error("Failed to update database schema: %s" % err)
except ImportError:
# assume django is not installed
pass
except Exception:
inst = sys.exc_info()[1]
self.logger.error("Failed to update database schema")
self.logger.error(str(inst))
self.logger.error(str(type(inst)))
raise CoreInitError
if '' in setup['plugins']:
setup['plugins'].remove('')
for plugin in setup['plugins']:
if not plugin in self.plugins:
self.init_plugins(plugin)
# Remove blacklisted plugins
for p, bl in list(self.plugin_blacklist.items()):
if len(bl) > 0:
self.logger.error("The following plugins conflict with %s;"
"Unloading %s" % (p, bl))
for plug in bl:
del self.plugins[plug]
# This section logs the experimental plugins
expl = [plug for (name, plug) in list(self.plugins.items())
if plug.experimental]
if expl:
self.logger.info("Loading experimental plugin(s): %s" %
(" ".join([x.name for x in expl])))
self.logger.info("NOTE: Interfaces subject to change")
# This section logs the deprecated plugins
depr = [plug for (name, plug) in list(self.plugins.items())
if plug.deprecated]
if depr:
self.logger.info("Loading deprecated plugin(s): %s" %
(" ".join([x.name for x in depr])))
mlist = self.plugins_by_type(Bcfg2.Server.Plugin.Metadata)
if len(mlist) == 1:
self.metadata = mlist[0]
else:
self.logger.error("No Metadata Plugin loaded; "
"failed to instantiate Core")
raise CoreInitError("No Metadata Plugin")
self.statistics = self.plugins_by_type(Bcfg2.Server.Plugin.Statistics)
self.pull_sources = self.plugins_by_type(Bcfg2.Server.Plugin.PullSource)
self.generators = self.plugins_by_type(Bcfg2.Server.Plugin.Generator)
self.structures = self.plugins_by_type(Bcfg2.Server.Plugin.Structure)
self.connectors = self.plugins_by_type(Bcfg2.Server.Plugin.Connector)
self.ca = setup['ca']
self.fam_thread = \
threading.Thread(name="%sFAMThread" % setup['filemonitor'],
target=self._file_monitor_thread)
self.lock = threading.Lock()
if start_fam_thread:
self.fam_thread.start()
self.fam.AddMonitor(self.cfile, self.setup)
def plugins_by_type(self, base_cls):
"""Return a list of loaded plugins that match the passed type.
The returned list is sorted in ascending order by the Plugins'
sort_order value. The sort_order defaults to 500 in Plugin.py,
but can be overridden by individual plugins. Plugins with the
same numerical sort_order value are sorted in alphabetical
order by their name.
"""
return sorted([plugin for plugin in self.plugins.values()
if isinstance(plugin, base_cls)],
key=lambda p: (p.sort_order, p.name))
def _file_monitor_thread(self):
"""The thread for monitor the files."""
famfd = self.fam.fileno()
terminate = self.terminate
while not terminate.isSet():
try:
if famfd:
select.select([famfd], [], [], 2)
else:
if not self.fam.pending():
terminate.wait(15)
self.fam.handle_event_set(self.lock)
except:
continue
# VCS plugin periodic updates
for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.Version):
self.revision = plugin.get_revision()
def init_plugins(self, plugin):
"""Handling for the plugins."""
self.logger.debug("Loading plugin %s" % plugin)
try:
mod = getattr(__import__("Bcfg2.Server.Plugins.%s" %
(plugin)).Server.Plugins, plugin)
except ImportError:
try:
mod = __import__(plugin, globals(), locals(), [plugin.split('.')[-1]])
except:
self.logger.error("Failed to load plugin %s" % plugin)
return
try:
plug = getattr(mod, plugin.split('.')[-1])
except AttributeError:
self.logger.error("Failed to load plugin %s (AttributeError)" % plugin)
return
# Blacklist conflicting plugins
cplugs = [conflict for conflict in plug.conflicts
if conflict in self.plugins]
self.plugin_blacklist[plug.name] = cplugs
try:
self.plugins[plugin] = plug(self, self.datastore)
except PluginInitError:
self.logger.error("Failed to instantiate plugin %s" % plugin,
exc_info=1)
except:
self.logger.error("Unexpected instantiation failure for plugin %s" %
plugin, exc_info=1)
def shutdown(self):
"""Shutting down the plugins."""
if not self.terminate.isSet():
self.terminate.set()
self.fam.shutdown()
for plugin in list(self.plugins.values()):
plugin.shutdown()
def client_run_hook(self, hook, metadata):
"""Checks the data structure."""
for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.ClientRunHooks):
try:
getattr(plugin, hook)(metadata)
except AttributeError:
err = sys.exc_info()[1]
self.logger.error("Unknown attribute: %s" % err)
raise
except:
err = sys.exc_info()[1]
self.logger.error("%s: Error invoking hook %s: %s" % (plugin,
hook,
err))
def validate_structures(self, metadata, data):
"""Checks the data structure."""
for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.StructureValidator):
try:
plugin.validate_structures(metadata, data)
except Bcfg2.Server.Plugin.ValidationError:
err = sys.exc_info()[1]
self.logger.error("Plugin %s structure validation failed: %s" %
(plugin.name, err))
raise
except:
self.logger.error("Plugin %s: unexpected structure validation "
"failure" % plugin.name, exc_info=1)
def validate_goals(self, metadata, data):
"""Checks that the config matches the goals enforced by the plugins."""
for plugin in self.plugins_by_type(Bcfg2.Server.Plugin.GoalValidator):
try:
plugin.validate_goals(metadata, data)
except Bcfg2.Server.Plugin.ValidationError:
err = sys.exc_info()[1]
self.logger.error("Plugin %s goal validation failed: %s" %
(plugin.name, err.message))
raise
except:
self.logger.error("Plugin %s: unexpected goal validation "
"failure" % plugin.name, exc_info=1)
def GetStructures(self, metadata):
"""Get all structures for client specified by metadata."""
structures = reduce(lambda x, y: x + y,
[struct.BuildStructures(metadata)
for struct in self.structures], [])
sbundles = [b.get('name') for b in structures if b.tag == 'Bundle']
missing = [b for b in metadata.bundles if b not in sbundles]
if missing:
self.logger.error("Client %s configuration missing bundles: %s" %
(metadata.hostname, ':'.join(missing)))
return structures
def BindStructure(self, structure, metadata):
"""Bind a complete structure."""
for entry in structure.getchildren():
if entry.tag.startswith("Bound"):
entry.tag = entry.tag[5:]
continue
try:
self.Bind(entry, metadata)
except PluginExecutionError:
exc = sys.exc_info()[1]
if 'failure' not in entry.attrib:
entry.set('failure', 'bind error: %s' % format_exc())
self.logger.error("Failed to bind entry %s:%s: %s" %
(entry.tag, entry.get('name'), exc))
except Exception:
exc = sys.exc_info()[1]
if 'failure' not in entry.attrib:
entry.set('failure', 'bind error: %s' % format_exc())
self.logger.error("Unexpected failure in BindStructure: %s %s" %
(entry.tag, entry.get('name')), exc_info=1)
def Bind(self, entry, metadata):
"""Bind an entry using the appropriate generator."""
if 'altsrc' in entry.attrib:
oldname = entry.get('name')
entry.set('name', entry.get('altsrc'))
entry.set('realname', oldname)
del entry.attrib['altsrc']
try:
ret = self.Bind(entry, metadata)
entry.set('name', oldname)
del entry.attrib['realname']
return ret
except:
entry.set('name', oldname)
self.logger.error("Failed binding entry %s:%s with altsrc %s" %
(entry.tag, entry.get('name'),
entry.get('altsrc')))
self.logger.error("Falling back to %s:%s" % (entry.tag,
entry.get('name')))
glist = [gen for gen in self.generators if
entry.get('name') in gen.Entries.get(entry.tag, {})]
if len(glist) == 1:
return glist[0].Entries[entry.tag][entry.get('name')](entry,
metadata)
elif len(glist) > 1:
generators = ", ".join([gen.name for gen in glist])
self.logger.error("%s %s served by multiple generators: %s" %
(entry.tag, entry.get('name'), generators))
g2list = [gen for gen in self.generators if
gen.HandlesEntry(entry, metadata)]
if len(g2list) == 1:
return g2list[0].HandleEntry(entry, metadata)
entry.set('failure', 'no matching generator')
raise PluginExecutionError("No matching generator: %s:%s" %
(entry.tag, entry.get('name')))
def BuildConfiguration(self, client):
"""Build configuration for clients."""
start = time.time()
config = lxml.etree.Element("Configuration", version='2.0',
revision=self.revision)
try:
meta = self.build_metadata(client)
except Bcfg2.Server.Plugin.MetadataConsistencyError:
self.logger.error("Metadata consistency error for client %s" %
client)
return lxml.etree.Element("error", type='metadata error')
self.client_run_hook("start_client_run", meta)
try:
structures = self.GetStructures(meta)
except:
self.logger.error("error in GetStructures", exc_info=1)
return lxml.etree.Element("error", type='structure error')
self.validate_structures(meta, structures)
# Perform altsrc consistency checking
esrcs = {}
for struct in structures:
for entry in struct:
key = (entry.tag, entry.get('name'))
if key in esrcs:
if esrcs[key] != entry.get('altsrc'):
self.logger.error("Found inconsistent altsrc mapping "
"for entry %s:%s" % key)
else:
esrcs[key] = entry.get('altsrc', None)
del esrcs
for astruct in structures:
try:
self.BindStructure(astruct, meta)
config.append(astruct)
except:
self.logger.error("error in BindStructure", exc_info=1)
self.validate_goals(meta, config)
self.client_run_hook("end_client_run", meta)
sort_xml(config, key=lambda e: e.get('name'))
self.logger.info("Generated config for %s in %.03f seconds" %
(client, time.time() - start))
return config
def run(self, **kwargs):
""" run the server core """
raise NotImplementedError
def _daemonize(self):
child_pid = os.fork()
if child_pid | |
% ('nan', x[1])
elif ~pd.isnull(x[0]) and pd.isnull(x[1]):
return u'<table style="background-color:#0000ff;font-weight:bold;">'+\
'<tr><td>%s</td></tr><tr><td>%s</td></tr></table>' % (x[0],'nan')
else:
return u'<table style="background-color:#ff0000;font-weight:bold;">'+\
'<tr><td>%s</td></tr><tr><td>%s</td></tr></table>' % (x[0], x[1])
def compare_dataframes_html(df1, df2, **kwargs):
"""
From http://stackoverflow.com/questions/17095101/outputting-difference-in-two-pandas-dataframes-side-by-side-highlighting-the-d
"""
panel = pd.Panel(dict(df1=df1, df2=df2))
if pd.options.display.max_colwidth < 500:
pd.options.display.max_colwidth = 500 # You need this, otherwise pandas will limit your HTML strings to 50 characters
return HTML(panel.apply(lambda x: report_diff_html(x, **kwargs), axis=0).to_html(escape=False))
#
# File I/O
#
def move_data_files(src, dst):
if not os.path.isdir(dst):
os.mkdir(dst)
for filename in glob.glob(src):
print('Moving ' + filename + ' -> ' + dst)
shutil.move(filename, dst)
def text_files_iterator(src, verbose=False):
"""Iterate over lines in text files. src can contain wildcards. Files may be compressed with bzip2."""
for filename in glob.glob(src):
if verbose: print('Reading %s ' % filename)
ext = os.path.splitext(filename)[1]
if ext == '.bz2':
with closing(bz2.BZ2File(filename, 'rb')) as data_file:
reader = codecs.getreader("utf-8")
for line in reader(data_file):
yield line
else:
with open(filename, 'r') as file:
for line in file:
yield line
def load_json_from_file(filename):
ext = os.path.splitext(filename)[1]
print('Reading %s ext: %s' % (filename, ext))
if ext == '.bz2':
with closing(bz2.BZ2File(filename, 'rb')) as data_file:
reader = codecs.getreader("utf-8")
data = json.load(reader(data_file))
else:
with open(filename) as data_file:
data = json.load(data_file)
return data
def glob_file_list(filespecs):
if not isinstance(filespecs,list):
filespecs = [filespecs]
return sum(map(glob.glob, filespecs), [])
def load_json_records(src, verbose=False, n_jobs=-1):
recs = []
filenames = glob_file_list(src)
print('Loading records from %d files...' % len(filenames))
pjobs = [delayed(load_json_from_file)(filename) for filename in filenames]
file_record_list = Parallel(n_jobs=n_jobs)(pjobs) # list of lists of records
recs = flatten_list(file_record_list)
return recs
def load_json_from_file_as_dataframe(filename, filename_column_name='loaded_filename', ignore_error=False):
try:
df = pd.DataFrame(load_json_from_file(filename))
df[filename_column_name] = filename
return df
except Exception:
print('EXCEPTION while loading JSON file %s: %s' % (filename, traceback.format_exc()), file=sys.stderr)
if ignore_error:
return pd.DataFrame()
else:
raise
def load_json_records_as_dataframe(src, verbose=False, n_jobs=-1, ignore_error=False):
recs = []
filenames = glob_file_list(src)
print('Loading records from %d files...' % len(filenames))
pjobs = [delayed(load_json_from_file_as_dataframe)(filename, ignore_error=ignore_error) for filename in filenames]
df_list = Parallel(n_jobs=n_jobs)(pjobs)
return pd.concat(df_list, ignore_index=True, copy=False, sort=False)
def save_json_to_file(data, filename, sort_keys=False, indent=None, ensure_ascii=False):
ext = os.path.splitext(filename)[1]
temp_file_name = '%s.tmp%s' % os.path.splitext(filename)
if ext == '.bz2':
with closing(bz2.BZ2File(temp_file_name, 'wb')) as data_file:
json.dump(data, data_file, sort_keys=sort_keys, indent=indent, ensure_ascii=ensure_ascii)
else:
with open(temp_file_name, 'w') as data_file:
json.dump(data, data_file, sort_keys=sort_keys, indent=indent, ensure_ascii=ensure_ascii)
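    # Writing to a temp file and renaming it into place makes the update effectively atomic on POSIX;
    # note os.rename() raises on Windows if `filename` already exists (os.replace() is the portable variant).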
os.rename(temp_file_name, filename)
#
# Plotting
#
def plot_groups(df, x_col, y_col, group_by_columns=None, title=None, xlabel=None, ylabel=None, max_legend_items=10,
sort_columns=[0], semilogx=False, semilogy=False, agg=None, xlim=None, ylim=None, xticks=None):
# Cyclers for different plot styles
lines = ['-','--','-.',':']
markers = ['o','s','*','x','D','v','^','<','>','8','p','|']
colors = ['r','b','g','c','m','y','k']
lineCycler = itertools.cycle(lines)
markerCycler = itertools.cycle(markers)
colorCycler = itertools.cycle(colors)
fig = plt.figure(title, figsize=(20,10))
fig.clf()
if title:
fig.suptitle(title, fontsize=12)
num_groups = 0
if group_by_columns is None:
num_groups = 1
df.plot(x=x_col, y=y_col, style=next(markerCycler) + next(colorCycler) + next(lineCycler))
else:
group_by_columns = list(set(group_by_columns) - {x_col})
if sort_columns == [0]:
sort_columns = [x_col]
for name, group in df.groupby(group_by_columns):
num_groups += 1
nameStr = format_group_name(group_by_columns, name)
if len(group) > 100:
style = next(colorCycler) + next(lineCycler)
else:
style = next(markerCycler) + next(colorCycler) + next(lineCycler)
if sort_columns is None:
sorted_group = group
else:
sorted_group = group.sort_values(sort_columns)
plt.plot(sorted_group[x_col].values, sorted_group[y_col].values, style, label=nameStr)
if agg is not None:
agg_df = df.groupby(by=[x_col], as_index=False).agg({y_col: agg})
plt.plot(agg_df[x_col].values, agg_df[y_col].values, 'xk-', label=agg)
axes = plt.gca()
if xlabel is None:
xlabel = x_col
if ylabel is None:
ylabel = y_col
if num_groups <= max_legend_items:
axes.legend(loc='best')
else:
print('plot_groups: not showing legend because num_groups=%d' % num_groups)
if semilogx:
axes.semilogx()
fmt = matplotlib.ticker.ScalarFormatter(useOffset=False)
fmt.set_scientific(False)
axes.xaxis.set_major_formatter(fmt)
if semilogy:
axes.semilogy()
fmt = matplotlib.ticker.ScalarFormatter(useOffset=False)
fmt.set_scientific(False)
axes.yaxis.set_major_formatter(fmt)
if xlim:
axes.set_xlim(xlim)
if ylim:
axes.set_ylim(ylim)
if xticks:
axes.xaxis.set_ticks(xticks)
fig.autofmt_xdate()
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
#plt.show()
return fig
def show_dataframe_mpl(df, axes, col_labels=True):
"""See also pd.DataFrame.plot(table=True)."""
cellText = list(df.values)
if col_labels:
colLabels=list(df.columns.values)
else:
colLabels = None
tbl = axes.table(cellText=cellText, colLabels=colLabels, loc='center')
axes.axis('off')
# tbl.set_fontsize(5)
# for cell in tbl.get_child_artists():
# cell.set_height(0.04)
# cell.set_linewidth(0.001)
def show_series_mpl(series, axes):
# df = pd.DataFrame(series).reset_index()
df = pd.DataFrame({'index': series.index.values, 'value': series.values})
return show_dataframe_mpl(df, axes, col_labels=False)
def expand_xlim(x, axes, margin=0.0):
"""If necessary, expand x limit to that x-margin and x+margin are visible."""
a, b = axes.get_xlim()
a = min(a, x - margin)
b = max(b, x + margin)
axes.set_xlim(a, b)
def expand_ylim(y, axes, margin=0.0):
"""If necessary, expand y limit to that y-margin and y+margin are visible."""
a, b = axes.get_ylim()
a = min(a, y - margin)
b = max(b, y + margin)
axes.set_ylim(a, b)
#
# Optimization
#
class CachedFunction(object):
"""Caches function calls with the same arguments."""
def __init__(self, fun, record_history=False):
self.fun = fun
self.cached_points = {}
self.record_history = record_history
self.history = [] # ordered history of uncached function evaluations
self.uncached_fev = 0 # number of actual uncached function evaluations (cache misses)
self.cached_fev = 0 # number of cached function calls (cache hits)
def __call__(self, *args, **kwargs):
cache_key = make_hashable((args, kwargs))
# logging.info('cache_key=%s' % str(cache_key))
try:
y = self.cached_points[cache_key]
self.cached_fev += 1
return y
except KeyError:
# logging.info('Calling function to evaluate cache_key=%s' % str(cache_key))
self.uncached_fev += 1
y = self.fun(*args, **kwargs)
self.cached_points[cache_key] = y
if self.record_history:
self.history.append(args + (kwargs, y,))
return y
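# Illustrative use of CachedFunction (names are examples only):
#   expensive = CachedFunction(my_simulation, record_history=True)
#   y1 = expensive(3, mode='fast')   # cache miss -> uncached_fev == 1
#   y2 = expensive(3, mode='fast')   # cache hit  -> cached_fev == 1, same stored result returned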
class SmoothedDiscreteFunction(object):
"""Smoothes a scalar function of a single discrete variable by linear interpolation between points."""
def __init__(self, fun, x_domain):
"""
Args:
x_domain (np.ndarray): Array of values that represent the discrete domain of the function.
Values can have type int or float.
"""
self.fun = fun
self.x_domain = np.sort(x_domain)
def __call__(self, x):
if x < self.x_domain[0] or x > self.x_domain[-1]:
raise ValueError('x=%s is outside the domain [%s,%s]' % (x, self.x_domain[0], self.x_domain[-1]))
x0_index = np.searchsorted(self.x_domain, x, side='right') - 1
if self.x_domain[x0_index] == x:
y = self.fun(x)
logging.info('SmoothedDiscreteFunction(%f) = fun(%f) = %f' % (x, x, y))
return y
X = self.x_domain[x0_index:x0_index+2]
Y = np.array([self.fun(xx) for xx in X])
ifun = scipy.interpolate.interp1d(X, Y, assume_sorted=True, copy=False)
y = ifun([x])[0]
logging.info('SmoothedDiscreteFunction(%f) ~ fun(%s) = %f' % (x, X, y))
return y
class SteppedDiscreteFunction(object):
"""Provided with a scalar function of multiple discrete variables, this will extend the domain
to all real numbers by rounding down to the nearest value in the domain. This is performed for each
dimension separately. This will create multi-dimensional "step" functions that are flat (zero gradient)
except at the points in the original domain, where the gradients may be undefined.
This can be used with `CachedFunction` to round down to the nearest point and cache that point."""
def __init__(self, fun, x_domain):
"""
Args:
x_domain (list(np.ndarray)): Array of values that represent the discrete domain of the function.
Values can have type int or float.
"""
self.fun = fun
self.x_domain = [np.sort(xi_domain) for xi_domain in x_domain]
def convert_x(self, x):
x = np.atleast_1d(x)
assert(len(x) == len(self.x_domain))
x_nearest = np.zeros(len(self.x_domain))
for i in range(len(self.x_domain)):
if x[i] <= self.x_domain[i][0]:
x_nearest[i] = self.x_domain[i][0]
elif x[i] >= self.x_domain[i][-1]:
x_nearest[i] = self.x_domain[i][-1]
else:
xi0_index = np.searchsorted(self.x_domain[i], x[i], side='right') - 1
x_nearest[i] = self.x_domain[i][xi0_index]
return x_nearest
def __call__(self, x):
x_nearest = self.convert_x(x)
y = self.fun(x_nearest)
# logging.info('SteppedDiscreteFunction(%s) ~ fun(%s) = %f' % (x, x_nearest, y))
return y
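# Sketch of the combination suggested in the docstring above (illustrative; `expensive_fun` and the
# domains are made up, and it assumes make_hashable accepts the rounded array): wrapping a cached
# function lets every off-grid x reuse the evaluation of its rounded-down grid point.
#   cached  = CachedFunction(expensive_fun)
#   stepped = SteppedDiscreteFunction(cached, x_domain=[np.array([1, 2, 4, 8]), np.array([16, 32, 64])])
#   stepped([3.7, 40.0])   # evaluates cached(np.array([2., 32.])) and caches that grid point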
class PandasSeriesFunction(object):
"""Make a function out of a Pandas Series object."""
def __init__(self, series):
self.series = series
def __call__(self, x):
return self.series.ix[tuple(np.atleast_1d(x))]
class LoggingFunction(object):
"""This function wrapper will log all function calls."""
def __init__(self, fun=None, name=None):
self.fun = fun
if name is None:
try:
name = fun.__name__
except:
name = 'LoggingFunction'
self.name = name
def __call__(self, *args, **kwargs):
arg_str = [repr(a) for a in args]
        kwarg_str = ['%s=%s' % (k, repr(v)) for k, v in kwargs.items()]
both_str = arg_str + kwarg_str
joined_str = ', '.join(both_str)
if self.fun is None:
logging.info('%s(%s)' % (self.name, joined_str))
else:
result = self.fun(*args, **kwargs)
logging.info('%s(%s) -> %s' % (self.name, joined_str, result))
return result
class defaultlist(list):
"""Based on http://stackoverflow.com/questions/869778/populating-a-list-array-by-index-in-python."""
def __init__(self, iterable=None, default_factory=None):
args = []
if iterable:
args = [iterable]
super(defaultlist, self).__init__(*args)
if default_factory is None:
default_factory = lambda: None
self.default_factory = default_factory
def __setitem__(self, index, value):
size = len(self)
if index >= size:
self.extend(self.default_factory() for _ in range(size, index + 1))
list.__setitem__(self, index, value)
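# Illustrative use of defaultlist: assigning past the end grows the list with default values.
#   dl = defaultlist(default_factory=lambda: 0)
#   dl[3] = 7        # dl is now [0, 0, 0, 7]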
class ArgsToArrayMap(object):
def __init__(self, arg_map):
"""
Args:
arg_map (list): List of dict with the following keys:
kwarg_name: Name of keyword argument.
arg_number: Position of argument. Must use exactly one of kwarg or arg_number.
array_size: If argument should be a list, specify the size.
value_if_missing: If present, this value will be used by args_to_array if the argument is missing.
If this key is not present, a missing argument will produce an AttributeError exception.
fixed_arg_value: If present, this | |
# -*- coding: utf-8 -*-
"""
This module contains methods for generating statistics about the alignments generated by amplimap.
This includes the number of reads and read families per probe and per sample, as well as data on off-target
alignments.
"""
# python 3 compat
# http://python-future.org/compatible_idioms.html
from __future__ import print_function
import sys
import os
import re
import pprint
from .common import parse_extended_read_name, find_umi_groups
"""
NOTES:
The BAM contains alignments of reads pairs. We can have these situations:
- Multiple possible alignments with different properties, each can be one of:
- Alignment of both mates (both have is_unmapped & mate_is_unmapped set to False)
- Alignment of one mate, other unmapped (one has is_unmapped set, one has mate_is_unmapped set)
- Or: Both unmapped (both have is_unmapped and mate_is_unmapped set) - if this is the case, it should be the only entry for this read pair
APPROACH:
- The BAM is coordinate sorted, so we need to read through it and cache, keeping track of mate positions to find the right mates for each pair.
- For each new mate, we can either:
- If self is mapped:
- If mate is mapped:
- Already have the mate -> process and remove mate from cache
- Not have the mate -> add to cache and wait for mate
- NB TODO: actually this will fail with bowtie2 because one read alignment can have multiple mate alignments.
- should keep all reads in cache, ideally forever or at least per chr in case there are more alignments
- at the moment we are just ignoring these and outputting these to the log as orphaned reads at the end
- If not:
- Count as alignment with unmapped mate [but maybe only if no better alignment]
- If not:
- If mate1, count unmapped pair, else ignore (should never see 2x)
- By the end of the file we should have resolved all mates. Anything left is suspicious!
But the question remains how we deal with multiple alignments per pair. In practice we are only interested in the top alignment, so there is no need to record every single bad secondary alignment.
"""
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
log.addHandler(sh)
import time
# use pandas to read the CSV file and write output files
import pandas as pd
# for defaultdict + sorting
import collections
import operator
# use python's argumentparser to support command line parameters like --probe=62
import argparse
# for pileup
import pysam
# for getting ref base
import pyfaidx
# for debugging umi distance
import distance
# import reader script for reading probe design
from .reader import read_new_probe_design
def aggregate(folder):
log.info('Aggregating tables in %s', folder)
agg = None
files = [file for file in os.listdir(folder) if file.endswith('.stats_alignment.csv')]
files.sort()
for file in files:
sample = file
sample = sample.replace('.stats_alignment.csv', '')
sample = sample.replace('_L001_R1_001', '')
sample = sample.replace('__MIP_TRIMMED_', '')
try:
df = pd.read_csv(os.path.join(folder, file))
log.info('Found %d rows in %s -> %s', len(df), file, sample)
df['sample'] = sample
if agg is None:
agg = df
else:
agg = agg.append(df)
except pd.errors.EmptyDataError:
log.exception("No data found: %s", file)
assert agg is not None and len(agg) > 0, \
'\n\nABORTED: Did not find any valid read alignments for any sample. Please check configuration, probe design table and samples!\n\n'
agg.sort_values(['sample', 'probe'], inplace=True)
agg = agg[ ['sample', 'probe'] + [c for c in agg.columns if not c in ['sample', 'probe']] ]
log.info('Found %d rows total, %d columns', len(agg), agg.shape[1])
outname = os.path.join(folder, 'stats_alignment.csv')
agg.to_csv(outname, index=False)
print(outname)
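# Illustrative call: aggregate('analysis/stats_alignment/') scans that folder (path is an example) for
# '*.stats_alignment.csv' files, concatenates them with a 'sample' column derived from each filename,
# and writes the combined table back to '<folder>/stats_alignment.csv', printing the output path.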
def process_file(
probes_path: str,
input_path: str,
output_path: str,
min_mapq: int,
min_consensus_count: int,
include_primers: bool,
use_naive_groups: bool,
ignore_groups: bool,
ignore_group_mismatch: bool = False,
debug: bool = False,
targets_path: str = None,
):
"""
Loop through coordinate-sorted BAM file and collect alignment statistics.
Args:
probes_path (str): Path to probes design CSV file
input_path (str): Path to input BAM file
output_path (str): Prefix of output CSV file ('.stats_alignment.csv' will be appended)
Output file columns:
- read_pairs_total: Total number of read pairs (one pair may have multiple alignments)
- alignments_total: Total number of read pair alignments (one pair may have multiple alignments)
- alignments_good: Total number of alignments that were on target (coordinates match target coordinates) and fully covered the target region
- umis_good: Total number of UMI groups (read families) that were on target and fully covered the target region
- umis_good_coverage_ge_NN: Total number of UMI groups (read families) that were on target, fully covered the target region and also had at least NN supporting read pairs
- alignments_partial: Total number of alignments that were on target but did not fully cover the target region
- alignments_off_target: Total number of alignments with coordinates different from the target coordinates
- alignments_unmapped_single: Total number of alignments where one mate was unmapped
- alignments_unmapped_both: Total number of alignments where both mates were unmapped
- alignments_flagged: Total number of alignments where at least one mate was flagged as QC fail or supplementary alignment
- alignments_invalid_pair: Total number of alignments where mates were on different chromosomes or not in the expected orientation (one forward, one reverse)
- alignments_mapq_lt_NN: Total number of alignments with at least one mate with mapping quality under NN
- multimapping_alignments: Total number of read pairs that had multiple possible alignments
- offtarget_locations: Locations and counts for the three most common off-target alignment locations
Will only process a read pair once both alignments have been found, which means that
the first mate has to be kept in memory until the second mate has been reached.
This may lead to high memory usage if mates are far away and there are many alignments
but seems to work fine in practice.
However, reads with multiple alternative mate alignments may not be handled
properly since an alignment is only kept in memory until the first matching mate has been found.
These will be reported as "orphaned mates" at the end of the run.
"""
assert input_path is not None, 'input file path missing'
assert output_path is not None, 'output file path missing'
# if we did not get a design path we can only look at on/off target
if probes_path is None or len(probes_path) == 0:
        assert targets_path is not None, 'targets file path is missing'
# TODO
design = read_new_probe_design(probes_path)
if include_primers:
probe_column_start = 'probe_start'
probe_column_end = 'probe_end'
else:
probe_column_start = 'target_start'
probe_column_end = 'target_end'
log.info('Using columns %s / %s to determine target regions', probe_column_start, probe_column_end)
log.info('Processing file %s', input_path)
if not os.path.isfile(input_path):
sys.exit('input does not exist: %s' % input_path)
with pysam.AlignmentFile(input_path, "rb") as samfile:
t_shown = t_start = time.time()
reads = collections.defaultdict(dict)
reads_processed = set()
reads_umi_checked = set()
n_rows = 0
n_pairs = collections.defaultdict(int)
n_alignments = collections.defaultdict(int)
n_alignments_good = collections.defaultdict(int)
n_alignments_partial = collections.defaultdict(int)
n_alignments_off_target = collections.defaultdict(int)
n_alignments_unmapped_single = collections.defaultdict(int)
n_alignments_unmapped_both = collections.defaultdict(int)
n_alignments_flagged = collections.defaultdict(int)
n_alignments_invalid = collections.defaultdict(int)
n_alignments_low_mapq = collections.defaultdict(int)
n_alignments_multimappers = collections.defaultdict(int)
probe_umi_alignments = collections.defaultdict(collections.Counter)
n_umis_good = collections.defaultdict(int)
n_umis_good_coverage_ge_MIN = collections.defaultdict(int)
probe_offtarget_locations = collections.defaultdict(collections.Counter)
# make sure we have zeros for each probe id
for pname in design['id']:
for counter in [
n_pairs,
n_alignments,
n_alignments_good,
n_alignments_partial,
n_alignments_off_target,
n_alignments_unmapped_single,
n_alignments_unmapped_both,
n_alignments_flagged,
n_alignments_invalid,
n_alignments_low_mapq,
n_alignments_multimappers,
n_umis_good,
n_umis_good_coverage_ge_MIN,
]:
counter[pname] = 0
iter = samfile.fetch(until_eof = True)
for x in iter:
qname = x.query_name
assert qname is not None
rindex = 1 if x.is_read1 else 2 if x.is_read2 else None
assert rindex is not None
other_rindex = 3 - rindex
# supplementary alignments mess up the assumption that each mate has one other mate
# so we ignore them for now
if x.is_supplementary:
            # log.warn('Ignoring supplementary alignment for %s', x.query_name)
continue
# only count rows after this to fulfil assumption later
n_rows += 1
# parse info from read name (should be bowtie2 compatible)
read_name, read_probe, read_umi = parse_extended_read_name(qname)
rdata = {}
rdata['index'] = rindex
rdata['mapq'] = x.mapping_quality
rdata['has_bad_flags'] = (x.is_qcfail) or \
(x.is_supplementary)
# (x.is_secondary) or \ #TODO: removed this to allow secondary hits from bowtie2, not sure if good idea or not. this column is often (always?) zero for bwa runs though...
rdata['unmapped'] = x.is_unmapped
if x.is_unmapped:
rdata['chr'] = None
rdata['start'] = None
rdata['end'] | |
5.725E-03 3.450E-01 2.000E+05 0.000E+00 0.000E+00 8.359E+05
3.58483819E-03 5653.1 1.133E+01 1.011E+11 6.184E-03 3.443E-01 2.000E+05 0.000E+00 0.000E+00 8.135E+05
4.50339792E-03 5687.5 1.424E+01 1.236E+11 6.727E-03 3.462E-01 2.000E+05 0.000E+00 0.000E+00 7.959E+05
5.62492776E-03 5722.0 1.779E+01 1.504E+11 7.376E-03 3.503E-01 2.000E+05 0.000E+00 0.000E+00 7.820E+05
6.98316992E-03 5757.3 2.208E+01 1.827E+11 8.154E-03 3.562E-01 2.000E+05 0.000E+00 0.000E+00 7.712E+05
8.61488728E-03 5793.5 2.724E+01 2.214E+11 9.085E-03 3.641E-01 2.000E+05 0.000E+00 0.000E+00 7.629E+05
1.05604921E-02 5830.6 3.339E+01 2.678E+11 1.019E-02 3.738E-01 2.000E+05 0.000E+00 0.000E+00 7.565E+05
1.28650764E-02 5868.8 4.068E+01 3.232E+11 1.151E-02 3.857E-01 2.000E+05 0.000E+00 0.000E+00 7.518E+05
1.55793533E-02 5908.0 4.926E+01 3.895E+11 1.306E-02 3.990E-01 2.000E+05 0.000E+00 0.000E+00 7.483E+05
1.87619355E-02 5947.9 5.932E+01 4.683E+11 1.488E-02 4.137E-01 2.000E+05 0.000E+00 0.000E+00 7.459E+05
2.24813570E-02 5988.4 7.108E+01 5.617E+11 1.701E-02 4.300E-01 2.000E+05 0.000E+00 0.000E+00 7.443E+05
2.68196283E-02 6028.9 8.480E+01 6.714E+11 1.945E-02 4.476E-01 2.000E+05 0.000E+00 0.000E+00 7.435E+05
3.18783251E-02 6069.0 1.008E+02 7.993E+11 2.224E-02 4.668E-01 2.000E+05 0.000E+00 0.000E+00 7.432E+05
3.77839331E-02 6107.9 1.195E+02 9.469E+11 2.537E-02 4.885E-01 2.000E+05 0.000E+00 0.000E+00 7.434E+05
4.46941510E-02 6145.5 1.413E+02 1.116E+12 2.888E-02 5.125E-01 2.000E+05 0.000E+00 0.000E+00 7.440E+05
5.28032997E-02 6181.3 1.670E+02 1.309E+12 3.277E-02 5.396E-01 2.000E+05 0.000E+00 0.000E+00 7.450E+05
6.23478003E-02 6215.6 1.971E+02 1.528E+12 3.708E-02 5.704E-01 2.000E+05 0.000E+00 0.000E+00 7.462E+05
7.36033675E-02 6248.9 2.327E+02 1.779E+12 4.191E-02 6.054E-01 2.000E+05 0.000E+00 0.000E+00 7.477E+05
8.68909008E-02 6281.6 2.747E+02 2.067E+12 4.732E-02 6.449E-01 2.000E+05 0.000E+00 0.000E+00 7.495E+05
1.02585697E-01 6314.0 3.244E+02 2.399E+12 5.342E-02 6.897E-01 2.000E+05 0.000E+00 0.000E+00 7.514E+05
1.21119138E-01 6346.4 3.829E+02 2.783E+12 6.035E-02 7.414E-01 2.000E+05 0.000E+00 0.000E+00 7.534E+05
1.42990421E-01 6379.1 4.521E+02 3.227E+12 6.822E-02 8.003E-01 2.000E+05 0.000E+00 0.000E+00 7.556E+05
1.68774482E-01 6412.5 5.336E+02 3.744E+12 7.722E-02 8.682E-01 2.000E+05 0.000E+00 0.000E+00 7.579E+05
1.99125600E-01 6446.8 6.296E+02 4.348E+12 8.757E-02 9.470E-01 2.000E+05 0.000E+00 0.000E+00 7.603E+05
2.34766159E-01 6482.8 7.422E+02 5.062E+12 9.959E-02 1.040E+00 2.000E+05 0.000E+00 0.000E+00 7.628E+05
2.76462116E-01 6521.5 8.740E+02 5.913E+12 1.138E-01 1.151E+00 2.000E+05 0.000E+00 0.000E+00 7.653E+05
3.24993333E-01 6564.0 1.027E+03 6.944E+12 1.308E-01 1.287E+00 2.000E+05 0.000E+00 0.000E+00 7.678E+05
3.81060755E-01 6611.9 1.205E+03 8.217E+12 1.516E-01 1.457E+00 2.000E+05 0.000E+00 0.000E+00 7.703E+05
4.45183274E-01 6666.8 1.407E+03 9.819E+12 1.778E-01 1.673E+00 2.000E+05 0.000E+00 0.000E+00 7.728E+05
5.17609057E-01 6730.9 1.636E+03 1.187E+13 2.114E-01 1.955E+00 2.000E+05 0.000E+00 0.000E+00 7.753E+05
5.98168368E-01 6806.5 1.891E+03 1.457E+13 2.557E-01 2.330E+00 2.000E+05 0.000E+00 0.000E+00 7.777E+05
6.86077214E-01 6896.7 2.169E+03 1.820E+13 3.158E-01 2.846E+00 2.000E+05 0.000E+00 0.000E+00 7.800E+05
7.79804519E-01 7004.7 2.465E+03 2.321E+13 4.002E-01 3.577E+00 2.000E+05 0.000E+00 0.000E+00 7.825E+05
8.76955172E-01 7134.7 2.771E+03 3.031E+13 5.231E-01 4.651E+00 2.000E+05 0.000E+00 0.000E+00 7.851E+05
9.74254094E-01 7291.1 3.079E+03 4.061E+13 7.100E-01 6.288E+00 2.000E+05 2.052E-12 1.632E+02 7.884E+05
1.06788585E+00 7478.5 3.374E+03 5.587E+13 1.005E+00 8.883E+00 2.000E+05 6.196E-11 4.820E+02 7.928E+05
1.15394113E+00 7702.6 3.645E+03 7.901E+13 1.496E+00 1.317E+01 2.000E+05 1.247E-09 1.244E+03 7.994E+05
1.22900061E+00 7968.9 3.881E+03 1.146E+14 2.350E+00 2.056E+01 2.000E+05 3.743E-08 3.674E+03 8.094E+05
1.29094001E+00 8282.6 4.076E+03 1.696E+14 3.898E+00 3.374E+01 2.000E+05 1.712E-06 1.255E+04 8.248E+05
1.33964504E+00 8644.2 4.227E+03 2.528E+14 6.737E+00 5.745E+01 2.000E+05 9.952E-05 4.699E+04 8.479E+05
1.37656533E+00 9065.5 4.341E+03 3.776E+14 1.201E+01 1.007E+02 2.000E+05 2.124E-03 1.281E+05 8.826E+05
1.40397828E+00 9556.8 4.424E+03 5.559E+14 2.140E+01 1.719E+02 2.000E+05 1.042E-02 2.180E+05 9.346E+05
1.42537452E+00 10060.0 4.488E+03 7.572E+14 3.404E+01 2.430E+02 2.000E+05 9.593E-02 4.734E+05 1.001E+06
1.44527994E+00 10511.6 4.545E+03 9.302E+14 4.529E+01 2.741E+02 2.000E+05 2.128E-01 6.511E+05 1.070E+06
1.46643393E+00 10957.9 4.606E+03 1.072E+15 5.370E+01 3.137E+02 2.000E+05 2.539E-01 7.357E+05 1.142E+06
1.49168056E+00 11544.6 4.677E+03 1.191E+15 5.647E+01 3.781E+02 2.000E+05 1.287E-01 6.272E+05 1.233E+06
1.52753544E+00 12364.5 4.777E+03 1.252E+15 4.815E+01 3.663E+02 2.000E+05 4.307E-03 2.298E+05 1.348E+06
1.58797643E+00 13286.6 4.949E+03 1.258E+15 3.489E+01 2.730E+02 2.000E+05 2.279E-05 4.724E+04 1.474E+06
1.70300903E+00 14280.1 5.287E+03 1.271E+15 2.440E+01 1.897E+02 2.000E+05 1.161E-07 8.917E+03 1.591E+06
1.91821547E+00 15333.2 5.933E+03 1.340E+15 1.786E+01 1.382E+02 2.000E+05 6.560E-09 3.337E+03 1.644E+06
2.29276399E+00 16472.6 7.071E+03 1.503E+15 1.434E+01 1.108E+02 2.000E+05 3.260E-09 2.429E+03 1.677E+06
2.88627458E+00 17689.3 8.887E+03 1.781E+15 1.257E+01 9.689E+01 2.000E+05 1.052E-09 1.591E+03 1.752E+06
3.76439036E+00 19007.4 1.158E+04 2.181E+15 1.159E+01 8.925E+01 2.000E+05 7.313E-12 3.035E+02 1.873E+06
5.01139804E+00 20414.8 1.542E+04 2.716E+15 1.105E+01 8.497E+01 2.000E+05 0.000E+00 0.000E+00 2.016E+06
6.73085776E+00 21936.8 2.071E+04 3.404E+15 1.082E+01 8.309E+01 2.000E+05 0.000E+00 0.000E+00 2.149E+06
9.04140309E+00 23564.6 2.783E+04 4.262E+15 1.083E+01 8.077E+01 2.000E+05 0.000E+00 0.000E+00 2.262E+06
PRADK 5.4848E+00
BEGIN ITERATION 15 COMPLETED
TEFF 8000. GRAVITY 4.00000 LTE
TITLE [-1.0] VTURB=2 L/H=1.25 NOVER NEW ODF
OPACITY IFOP 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 0 0 0 0 0
CONVECTION ON 1.25 TURBULENCE OFF 0.00 0.00 0.00 0.00
ABUNDANCE SCALE 0.10000 ABUNDANCE CHANGE 1 0.92140 2 0.07843
ABUNDANCE CHANGE 3 -10.94 4 -10.64 5 -9.49 6 -3.52 7 -4.12 8 -3.21
ABUNDANCE CHANGE 9 -7.48 10 -3.96 11 -5.71 12 -4.46 13 -5.57 14 -4.49
ABUNDANCE CHANGE 15 -6.59 16 -4.71 17 -6.54 18 -5.64 19 -6.92 20 -5.68
ABUNDANCE CHANGE 21 -8.87 22 -7.02 23 -8.04 24 -6.37 25 -6.65 26 -4.54
ABUNDANCE CHANGE 27 -7.12 28 -5.79 29 -7.83 30 -7.44 31 -9.16 32 -8.63
ABUNDANCE CHANGE 33 -9.67 34 -8.63 35 -9.41 36 -8.73 37 -9.44 38 -9.07
ABUNDANCE CHANGE 39 -9.80 40 -9.44 41 -10.62 42 -10.12 43 -20.00 44 -10.20
ABUNDANCE CHANGE 45 -10.92 46 -10.35 47 -11.10 48 -10.27 49 -10.38 50 -10.04
ABUNDANCE CHANGE 51 -11.04 52 -9.80 53 -10.53 54 -9.87 55 -10.91 56 -9.91
ABUNDANCE CHANGE 57 -10.87 58 -10.46 59 -11.33 60 -10.54 61 -20.00 62 -11.03
ABUNDANCE CHANGE 63 -11.53 64 -10.92 65 -11.69 66 -10.90 67 -11.78 68 -11.11
ABUNDANCE CHANGE 69 -12.04 70 -10.96 71 -11.98 72 -11.16 73 -12.17 74 -10.93
ABUNDANCE CHANGE 75 -11.76 76 -10.59 77 -10.69 78 -10.24 79 -11.03 80 -10.91
ABUNDANCE CHANGE 81 -11.14 82 -10.09 83 -11.33 84 -20.00 85 -20.00 86 -20.00
ABUNDANCE CHANGE 87 -20.00 88 -20.00 89 -20.00 90 -11.95 91 -20.00 92 -12.54
ABUNDANCE CHANGE 93 -20.00 94 -20.00 95 -20.00 96 -20.00 97 -20.00 98 -20.00
ABUNDANCE CHANGE 99 -20.00
READ DECK6 72 RHOX,T,P,XNE,ABROSS,ACCRAD,VTURB, FLXCNV,VCONV,VELSND
4.22792970E-05 5117.1 4.228E-01 4.416E+09 3.154E-03 5.727E-01 2.000E+05 0.000E+00 0.000E+00 2.042E+06
5.65534123E-05 5146.1 5.655E-01 5.580E+09 3.078E-03 5.661E-01 2.000E+05 0.000E+00 0.000E+00 1.831E+06
7.60929585E-05 5173.6 7.609E-01 7.034E+09 2.993E-03 5.561E-01 2.000E+05 0.000E+00 0.000E+00 1.647E+06
1.02653912E-04 5205.6 1.026E+00 8.993E+09 2.962E-03 5.442E-01 2.000E+05 0.000E+00 0.000E+00 1.483E+06
1.38177805E-04 5241.2 1.382E+00 1.159E+10 2.982E-03 5.309E-01 2.000E+05 0.000E+00 0.000E+00 1.342E+06
1.84887528E-04 5279.0 1.849E+00 1.496E+10 3.045E-03 5.164E-01 2.000E+05 0.000E+00 0.000E+00 1.223E+06
2.45465895E-04 5318.1 2.454E+00 1.927E+10 3.150E-03 5.014E-01 2.000E+05 0.000E+00 0.000E+00 1.125E+06
3.23072220E-04 5357.8 3.230E+00 2.473E+10 3.298E-03 4.868E-01 2.000E+05 0.000E+00 0.000E+00 1.045E+06
4.21374803E-04 5397.6 4.213E+00 3.154E+10 3.488E-03 4.738E-01 2.000E+05 0.000E+00 0.000E+00 9.808E+05
5.44688397E-04 5437.1 5.446E+00 3.996E+10 3.725E-03 4.618E-01 2.000E+05 0.000E+00 0.000E+00 9.290E+05
6.98005536E-04 5476.2 6.979E+00 5.027E+10 4.011E-03 4.509E-01 2.000E+05 0.000E+00 0.000E+00 8.877E+05
8.87104585E-04 5514.7 8.870E+00 6.278E+10 4.352E-03 4.408E-01 2.000E+05 0.000E+00 0.000E+00 8.550E+05
1.11877133E-03 5552.3 1.119E+01 7.781E+10 4.750E-03 4.318E-01 2.000E+05 0.000E+00 0.000E+00 8.291E+05
1.40113777E-03 5588.5 1.401E+01 9.565E+10 5.208E-03 4.242E-01 2.000E+05 0.000E+00 0.000E+00 8.089E+05
1.74394720E-03 5623.5 1.744E+01 1.167E+11 5.729E-03 4.184E-01 2.000E+05 0.000E+00 0.000E+00 7.930E+05
2.15889466E-03 5657.0 2.159E+01 1.413E+11 6.320E-03 4.146E-01 2.000E+05 0.000E+00 0.000E+00 7.807E+05
2.65986407E-03 5689.2 2.660E+01 1.700E+11 6.989E-03 4.133E-01 2.000E+05 0.000E+00 0.000E+00 7.712E+05
3.26303554E-03 5720.6 3.263E+01 2.035E+11 7.752E-03 4.147E-01 2.000E+05 0.000E+00 0.000E+00 7.640E+05
3.98698117E-03 5751.6 3.987E+01 2.427E+11 8.628E-03 4.193E-01 2.000E+05 0.000E+00 0.000E+00 7.585E+05
4.85269393E-03 5782.6 4.852E+01 2.886E+11 9.640E-03 4.266E-01 2.000E+05 0.000E+00 0.000E+00 7.545E+05
5.88363882E-03 5814.1 5.883E+01 3.426E+11 1.082E-02 4.361E-01 2.000E+05 0.000E+00 0.000E+00 7.516E+05
7.10614684E-03 5846.3 7.106E+01 4.064E+11 1.219E-02 4.476E-01 2.000E+05 0.000E+00 0.000E+00 7.497E+05
8.54977314E-03 5879.5 8.549E+01 4.817E+11 1.379E-02 4.611E-01 2.000E+05 0.000E+00 0.000E+00 7.485E+05
1.02481325E-02 5913.7 1.025E+02 5.707E+11 1.566E-02 4.765E-01 2.000E+05 0.000E+00 0.000E+00 7.479E+05
1.22397131E-02 5948.7 1.224E+02 6.758E+11 1.783E-02 4.948E-01 2.000E+05 0.000E+00 0.000E+00 7.478E+05
1.45687407E-02 5984.7 1.457E+02 7.997E+11 2.036E-02 5.150E-01 2.000E+05 0.000E+00 0.000E+00 7.482E+05
1.72873866E-02 6021.3 1.729E+02 9.451E+11 2.327E-02 5.371E-01 2.000E+05 0.000E+00 0.000E+00 7.489E+05
2.04576885E-02 6058.2 2.046E+02 1.115E+12 2.662E-02 5.615E-01 2.000E+05 0.000E+00 0.000E+00 7.499E+05
2.41538376E-02 6095.0 2.415E+02 1.313E+12 3.044E-02 5.880E-01 2.000E+05 0.000E+00 0.000E+00 7.511E+05
2.84680270E-02 6131.2 2.847E+02 1.541E+12 3.474E-02 6.170E-01 2.000E+05 0.000E+00 0.000E+00 7.525E+05
3.35129002E-02 6166.5 3.351E+02 1.802E+12 3.958E-02 6.495E-01 2.000E+05 0.000E+00 0.000E+00 7.542E+05
3.94295844E-02 6199.9 3.943E+02 2.098E+12 4.492E-02 6.867E-01 2.000E+05 0.000E+00 0.000E+00 7.561E+05
4.63866877E-02 6232.6 4.638E+02 2.436E+12 5.091E-02 7.289E-01 2.000E+05 0.000E+00 0.000E+00 7.581E+05
5.45805218E-02 6264.4 5.458E+02 2.821E+12 5.759E-02 7.761E-01 2.000E+05 0.000E+00 0.000E+00 7.602E+05
6.42438097E-02 6295.7 6.424E+02 3.263E+12 6.510E-02 8.293E-01 2.000E+05 0.000E+00 0.000E+00 7.625E+05
7.56492172E-02 6326.6 7.564E+02 3.769E+12 7.352E-02 8.891E-01 2.000E+05 0.000E+00 0.000E+00 7.649E+05
8.91174326E-02 6357.4 8.911E+02 4.351E+12 8.302E-02 9.567E-01 2.000E+05 0.000E+00 0.000E+00 7.663E+05
1.05022348E-01 6388.3 1.050E+03 5.023E+12 9.377E-02 1.034E+00 2.000E+05 0.000E+00 0.000E+00 7.699E+05
1.23790503E-01 6419.9 1.238E+03 5.802E+12 1.060E-01 1.124E+00 2.000E+05 0.000E+00 0.000E+00 7.726E+05
1.45910400E-01 6452.3 1.459E+03 6.710E+12 1.201E-01 1.227E+00 2.000E+05 0.000E+00 0.000E+00 7.752E+05
1.71932641E-01 6486.4 1.719E+03 7.778E+12 1.363E-01 1.348E+00 2.000E+05 0.000E+00 0.000E+00 7.780E+05
2.02439158E-01 6523.0 2.024E+03 9.050E+12 1.553E-01 1.493E+00 2.000E+05 0.000E+00 0.000E+00 7.807E+05
2.38042964E-01 6563.3 2.380E+03 1.059E+13 1.780E-01 1.668E+00 2.000E+05 0.000E+00 0.000E+00 7.834E+05
 2.79322447E-01   6608.7 2.793E+03 1.248E+13 2.055E-01 1.883E+00 2.000E+05
import tensorflow as tf
import numpy as np
try:
    from models.read_util.read_train_data import get_all_bearings
except ImportError:
    # Fall back to a package-relative import when the absolute path is unavailable.
    from ..read_util.read_train_data import get_all_bearings
# from read_train_data import get_all_bearings
VIB_SIZE = 2000
step = 20
IMAGE_SIZE = VIB_SIZE//step
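# Each CSV recording is cropped to VIB_SIZE raw samples; after the FFT the spectrum is
# max-pooled in windows of `step` bins, so the network input has IMAGE_SIZE (2000/20 = 100) values.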
# Global constants describing the bearing-vibration data set (names kept from the CIFAR-10 example).
NUM_JUDGE = 2
NUM_CLASSES = 4
NUM_DEGREES = 5
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 1000
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 1000
def read_cifar10(data_queue, fix_size=True):
"""Reads and parses examples from CIFAR10 data files.
Recommendation: if you want N-way read parallelism, call this function
N times. This will give you N independent Readers reading different
files & positions within those files, which will give better mixing of
examples.
Args:
filename_queue: A queue of strings with the filenames to read from.
Returns:
An object representing a single example, with the following fields:
height: number of rows in the result (32)
width: number of columns in the result (32)
depth: number of color channels in the result (3)
key: a scalar string Tensor describing the filename & record number
for this example.
label: an int32 Tensor with the label in the range 0..9.
uint8image: a [height, width, depth] uint8 Tensor with the image data
"""
class CIFAR10Record(object):
pass
result = CIFAR10Record()
# image_string = tf.read_file(data_queue[0])
image_string = data_queue[0]
# image_tensor = tf.image.decode_jpeg(image_string, channels=1, name='image_tensor')
image_tensor = image_string
# label_tensor = tf.constant(label,name='label_tensor')
label_tensor = tf.cast(data_queue[1], dtype=tf.int32)
# label_tensor = tf.cast(data_queue[1], dtype=tf.float32)
# Dimensions of the images in the CIFAR-10 dataset.
# input format.
label_bytes = 1 # 2 for CIFAR-100
result.depth = 1
# Read a record, getting filenames from the filename_queue. No
# header or footer in the CIFAR-10 format, so we leave header_bytes
# and footer_bytes at their default of 0.
# The first bytes represent the label, which we convert from uint8->int32.
# result.label = label_tensor[0]
result.label = label_tensor
# Convert from [depth, height, width] to [height, width, depth].
# result.uint8image = tf.transpose(depth_major, [1, 2, 0])
result.uint8image = image_tensor
result.filename = tf.cast(data_queue[0], dtype=tf.string)
return result
def _generate_image_and_label_batch(image, label1, min_queue_examples,
batch_size, shuffle=True):
"""Construct a queued batch of images and labels.
Args:
image: 3-D Tensor of [height, width, 3] of type.float32.
label: 1-D Tensor of type.int32
min_queue_examples: int32, minimum number of samples to retain
in the queue that provides of batches of examples.
batch_size: Number of images per batch.
shuffle: boolean indicating whether to use a shuffling queue.
Returns:
images: Images. 4D tensor of [batch_size, height, width, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
# Create a queue that shuffles the examples, and then
# read 'batch_size' images + labels from the example queue.
num_preprocess_threads = 256
if shuffle:
# Imagine inputs is a list or tuple of tensors representing single training example.
# In my case, inputs is a tuple (features, label) obtained by reading TFRecords.
# dtypes = list(map(lambda x: x.dtype, inputs))
# shapes = list(map(lambda x: x.get_shape(), inputs))
# queue = tf.RandomShuffleQueue(CAPACITY, MIN_AFTER_DEQUEUE, dtypes)
# enqueue_op = queue.enqueue(inputs)
# qr = tf.train.QueueRunner(queue, [enqueue_op] * NUM_THREADS)
# tf.add_to_collection(tf.GraphKeys.QUEUE_RUNNERS, qr)
# inputs = queue.dequeue()
# for tensor, shape in zip(inputs, shapes):
# tensor.set_shape(shape)
# Now you can use tf.train.batch with dynamic_pad=True, and the order in which
# it enqueues elements will be permuted because of RandomShuffleQueue.
# inputs_batch = tf.train.batch(inputs, batch_size, capacity=min_queue_examples + 3 * batch_size,
# dynamic_pad=True, name=name)
# images, label_batch1, label_batch2, label_batch3 = tf.train.batch(
# [image, label1, label2, label3], batch_size,
# num_threads=num_preprocess_threads,
# capacity=min_queue_examples + 3 * batch_size,
# dynamic_pad=True, name='batch')
images, label_batch1 = tf.train.shuffle_batch(
[image, label1],
batch_size=batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples)
else:
images, label_batch1 = tf.train.batch(
[image, label1],
batch_size=batch_size,
num_threads=num_preprocess_threads//2,
capacity=min_queue_examples + 3 * batch_size)
# Display the training images in the visualizer.
# tf.summary.image('images', images)
return images, tf.reshape(label_batch1, [batch_size, 1])
import csv
import random
# FFT: magnitude spectrum of the vibration window, used as the frequency-domain feature
def fft(x_vibration):
x_freq = np.fft.fft(x_vibration)
x_freqabs = abs(x_freq)
return x_freqabs
# Additive white Gaussian noise, used for data augmentation
def wgn(length, mu=0, sigma=0.1):
noise = [random.gauss(mu, sigma) for i in range(length)]
return np.array(noise)
def read_from_string_func(name):
    # `name` is the CSV filename handed over by tf.py_func; wrap it so it can be indexed
name = np.reshape(name, [1,])
# print('name',name)
csv_reader = csv.reader(open(name[0]))
vib = []
for row in csv_reader:
vib.append(float(row[4]))
# print('vib:',vib)
max_size = 2560 - VIB_SIZE
first = random.randint(0,max_size)
vib = np.array(vib)
vib = vib[first:first+VIB_SIZE]
noise = wgn(VIB_SIZE)
vib += noise
# vib = vib.reshape([VIB_SIZE//step,step])
# vib = np.max(vib,axis=1)
freq = fft(vib)
freq = freq.reshape([VIB_SIZE//step,step])
freq = np.max(freq,axis=1)
freq = np.expand_dims(freq, axis=1)
# print(vib.dtype.name)
freq = freq.astype(np.float32)
# print(vib.dtype.name)
return freq
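# read_from_string_func summary: pick a random VIB_SIZE-sample window from the 2560-sample
# recording, add white Gaussian noise, take the FFT magnitude, max-pool every `step` bins,
# and return a float32 feature of shape [IMAGE_SIZE, 1] for tf.py_func to consume.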
def distorted_inputs(data_dir, batch_size=32, fix_size=True):
"""Construct distorted input for CIFAR training using the Reader ops.
Args:
data_dir: Path to the CIFAR-10 data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
filename_list, label_list = get_all_bearings(data_dir)
print('the number of train data is:', len(filename_list))
# (full_name, 1, class_dict[label], degree_dict[degree])
filename_list = np.expand_dims(filename_list,axis=1)
label_list = np.expand_dims(label_list,axis=1)
data = np.concatenate([filename_list, label_list], axis=1)
np.random.shuffle(data)
# print(data)
filenames = data[:, 0]
labels = data[:, 1:]
# labels = data[:, 1]
labels = labels.astype(np.int32)
# print(filenames)
# print(labels)
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
# Create a queue that produces the filenames to read.
# filename_queue = tf.train.string_input_producer(filenames)
# input_queue = tf.train.slice_input_producer([filenames,labels])
input_queue = tf.train.slice_input_producer([filenames, labels[:, 0:]])
with tf.name_scope('data_augmentation'):
# Read examples from files in the filename queue.
read_input = read_cifar10(input_queue, fix_size=fix_size)
vibration = tf.py_func(read_from_string_func, [read_input.uint8image], tf.float32)
# image_rotate = tf.py_func(random_rotate_90_image_func, [read_input.uint8image], tf.uint8)
# image_rotate = tf.py_func(random_rotate_image_func, [image_rotate], tf.uint8)
# reshaped_image = tf.cast(image_rotate, tf.float32)
# height = IMAGE_SIZE - 24
# width = IMAGE_SIZE - 24
# Randomly flip the image horizontally.
# Because these operations are not commutative, consider randomizing
# the order their operation.
# NOTE: since per_image_standardization zeros the mean and makes
# the stddev unit, this likely has no effect see tensorflow#1458.
if fix_size:
# Set the shapes of tensors.
vibration.set_shape([IMAGE_SIZE,1])
else:
picture = read_input.uint8image
shape = picture.shape
# print(shape)
vibration.set_shape(shape)
read_input.label.set_shape([1,])
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
min_fraction_of_examples_in_queue)
print('Filling queue with %d CIFAR images before starting to train. '
'This will take a few minutes.' % min_queue_examples)
if fix_size:
# Generate a batch of images and labels by building up a queue of examples.
return _generate_image_and_label_batch(vibration, read_input.label,
min_queue_examples, batch_size,
shuffle=True)
else:
return vibration, read_input.label
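# Hypothetical usage sketch (not part of the original pipeline): the tensors returned by
# distorted_inputs() are queue-backed, so a consumer has to start the queue runners before
# evaluating them. The function name, directory path, and step count below are illustrative
# assumptions, not names from this project.
def _example_consume_batches(data_dir='/path/to/bearing/csvs', num_steps=10):
    features, labels = distorted_inputs(data_dir, batch_size=32)
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            for _ in range(num_steps):
                batch_x, batch_y = sess.run([features, labels])
                print(batch_x.shape, batch_y.shape)  # (32, IMAGE_SIZE, 1) (32, 1)
        finally:
            coord.request_stop()
            coord.join(threads)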
def test_read_from_string_func(name):
    # `name` is the CSV filename handed over by tf.py_func; wrap it so it can be indexed
name = np.reshape(name, [1,])
# print('name',name)
csv_reader = csv.reader(open(name[0]))
vib = []
for row in csv_reader:
vib.append(float(row[4]))
# print('vib:',vib)
max_size = 2560 - IMAGE_SIZE
first = random.randint(0,max_size)
vib = np.array(vib)
vib = vib[first:first+IMAGE_SIZE]
noise = wgn(IMAGE_SIZE)
vib += noise
freq = fft(vib)
freq = np.expand_dims(freq, axis=1)
# print(vib.dtype.name)
freq = freq.astype(np.float32)
# print(vib.dtype.name)
return freq
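# Unlike the training reader above, this evaluation variant crops only IMAGE_SIZE raw samples
# and uses the full FFT magnitude directly (no max-pooling over `step`-sized windows).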
def test_inputs(data_dir, batch_size=32, fix_size=True):
"""Construct distorted input for CIFAR training using the Reader ops.
Args:
data_dir: Path to the CIFAR-10 data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
filename_list, label_list = get_all_bearings(data_dir)
print('the number of test data is:', len(filename_list))
filename_list = np.expand_dims(filename_list,axis=1)
label_list = np.expand_dims(label_list,axis=1)
data = np.concatenate([filename_list, label_list], axis=1)
np.random.shuffle(data)
# print(data)
filenames = data[:, 0]
labels = data[:, 1:]
# labels = data[:, 1]
labels = labels.astype(np.int32)
# print(filenames)
# print(labels)
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
# Create a queue that produces the filenames to read.
# filename_queue = tf.train.string_input_producer(filenames)
# input_queue = tf.train.slice_input_producer([filenames,labels])
input_queue = tf.train.slice_input_producer([filenames, labels[:, 0:]])
with tf.name_scope('data_augmentation_test'):
# Read examples from files in the filename queue.
read_input = read_cifar10(input_queue, fix_size=fix_size)
vibration = tf.py_func(test_read_from_string_func, [read_input.uint8image], tf.float32)
# Randomly flip the image horizontally.
# Because these operations are not commutative, consider randomizing
# the order their operation.
# NOTE: since per_image_standardization zeros the mean and makes
# the stddev unit, this likely has no effect see tensorflow#1458.
if fix_size:
# Set the shapes of tensors.
vibration.set_shape([IMAGE_SIZE,1])
else:
picture = read_input.uint8image
shape = picture.shape
# print(shape)
vibration.set_shape(shape)
read_input.label.set_shape([1,])
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
min_fraction_of_examples_in_queue)
        print('Filling queue with %d CIFAR images before starting to evaluate. '
              'This will take a few minutes.' % min_queue_examples)
driver = config_json.get('driver')
self.assertEqual(driver, {})
###
# Commands
###
cmds = config_json.get('commands')
self.assertIsNotNone(cmds)
self.assertIsNotNone(cmds.get(DriverEvent.START_AUTOSAMPLE))
self.assertIsNotNone(cmds.get(DriverEvent.STOP_AUTOSAMPLE))
###
# Parameters
###
params = config_json.get('parameters')
self.assertIsNotNone(params)
self.assertIsNotNone(params.get(DriverParameter.RECORDS_PER_SECOND))
self.assertIsNotNone(params.get(DriverParameter.PUBLISHER_POLLING_INTERVAL))
self.assertIsNotNone(params.get(DriverParameter.BATCHED_PARTICLE_COUNT))
class DataSetAgentTestCase(DataSetTestCase):
"""
Base class for dataset driver unit tests
"""
def setUp(self):
"""
Startup the container and start the agent.
"""
super(DataSetAgentTestCase, self).setUp()
self.instrument_agent_manager = InstrumentAgentClient()
self.instrument_agent_manager.start_container(deploy_file=self.test_config.container_deploy_file)
self.container = self.instrument_agent_manager.container
log.debug("Packet Config: %s", self.test_config.agent_packet_config)
self.data_subscribers = InstrumentAgentDataSubscribers(
packet_config=self.test_config.agent_packet_config,
)
self.event_subscribers = InstrumentAgentEventSubscribers(instrument_agent_resource_id=self.test_config.agent_resource_id)
self.init_dataset_agent_client()
self.event_subscribers.events_received = []
self.data_subscribers.start_data_subscribers()
log.debug("********* setUp complete. Begin Testing *********")
self.addCleanup(self._end_test)
def _end_test(self):
"""
Cleanup after the test completes or fails
"""
log.debug("Starting test cleanup")
#self.assert_reset()
self.event_subscribers.stop()
self.data_subscribers.stop_data_subscribers()
self.instrument_agent_manager.stop_container()
log.debug("Test complete and all cleaned up.")
def init_dataset_agent_client(self, bootmode=None):
self.set_dsa_client(self.get_dataset_agent_client(bootmode))
log.debug("DSA Client. Result: %s", self.dataset_agent_client)
def get_dataset_agent_client(self, bootmode=None, config=None, resource_id=None, agent_name=None):
log.info("Start Dataset Agent Client")
if config is None:
config = self._agent_config()
if resource_id is None:
resource_id = self.test_config.agent_resource_id
if agent_name is None:
agent_name = self.test_config.agent_name
# Start instrument agent client.
result = self.instrument_agent_manager.start_client(
name=agent_name,
module=self.test_config.agent_module,
cls=self.test_config.agent_class,
config=config,
resource_id=resource_id,
deploy_file=self.test_config.container_deploy_file,
bootmode=bootmode
)
log.debug("DSA Initialized. Result: %s", result)
return self.instrument_agent_manager.instrument_agent_client
def set_dsa_client(self, client):
self.dataset_agent_client = client
def get_dsa_client(self):
return self.dataset_agent_client
def stop_dataset_agent_client(self):
log.debug("Stopping dataset agent. ff")
self.instrument_agent_manager.stop_client()
def get_samples(self, stream_name, sample_count = 1, timeout = 10):
"""
listen on a stream until 'sample_count' samples are read and return
a list of all samples read. If the required number of samples aren't
read then throw an exception.
Note that this method does not clear the sample queue for the stream.
This should be done explicitly by the caller. However, samples that
are consumed by this method are removed.
@raise SampleTimeout - if the required number of samples aren't read
"""
result = []
start_time = time.time()
i = 1
log.debug("Fetch %d sample(s) from stream '%s'" % (sample_count, stream_name))
while(len(result) < sample_count):
if(self.data_subscribers.samples_received.has_key(stream_name) and
len(self.data_subscribers.samples_received.get(stream_name))):
log.trace("get_samples() received sample #%d!", i)
result.append(self.data_subscribers.samples_received[stream_name].pop(0))
log.debug('Popping received sample')
i += 1
# Check for timeout
if(start_time + timeout < time.time()):
raise SampleTimeout("DataSetQualificationTestCase.get_samples")
if(not self.data_subscribers.samples_received.has_key(stream_name) or
len(self.data_subscribers.samples_received.get(stream_name)) == 0):
log.debug("No samples in queue, sleep for a bit")
gevent.sleep(.2)
log.debug("get_samples() complete. returning %d records", sample_count)
return result
def assert_sample_queue_size(self, stream_name, size):
"""
Verify that a queue has size samples in it.
"""
if(not self.data_subscribers.samples_received.has_key(stream_name) and size == 0):
return
self.assertTrue(self.data_subscribers.samples_received.has_key(stream_name), msg="Sample queue does not exists")
self.assertEqual(len(self.data_subscribers.samples_received.get(stream_name)), size)
def assert_data_values(self, particles, dataset_definition_file):
"""
Verify particles match the particles defined in the definition file
"""
rs_file = self._get_source_data_file(dataset_definition_file)
rs = ResultSet(rs_file)
self.assertTrue(rs.verify(particles))
def assert_initialize(self, final_state = ResourceAgentState.STREAMING):
'''
Walk through DSA states to get to streaming mode from uninitialized
'''
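        # Expected state path: UNINITIALIZED -> INACTIVE (INITIALIZE) -> IDLE (GO_ACTIVE)
        # -> COMMAND (RUN) -> STREAMING (START_AUTOSAMPLE, when requested).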
log.debug("Initialize DataSet agent, %s", self.dataset_agent_client)
cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
retval = self.dataset_agent_client.execute_agent(cmd)
state = self.dataset_agent_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.INACTIVE)
log.info("Sent INITIALIZE; DSA state = %s", state)
log.debug("DataSet agent go active")
cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
retval = self.dataset_agent_client.execute_agent(cmd)
state = self.dataset_agent_client.get_agent_state()
log.info("Sent GO_ACTIVE; DSA state = %s", state)
self.assertEqual(state, ResourceAgentState.IDLE)
log.debug("DataSet agent run")
cmd = AgentCommand(command=ResourceAgentEvent.RUN)
retval = self.dataset_agent_client.execute_agent(cmd)
state = self.dataset_agent_client.get_agent_state()
log.info("Sent RUN; DSA state = %s", state)
self.assertEqual(state, ResourceAgentState.COMMAND)
if final_state == ResourceAgentState.STREAMING:
self.assert_start_sampling()
def assert_stop_sampling(self):
'''
transition to command. Must be called from streaming
'''
state = self.dataset_agent_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.STREAMING)
log.debug("DataSet agent stop sampling")
cmd = AgentCommand(command=DriverEvent.STOP_AUTOSAMPLE)
retval = self.dataset_agent_client.execute_resource(cmd)
state = self.dataset_agent_client.get_agent_state()
log.info("Sent STOP SAMPLING; DSA state = %s", state)
self.assertEqual(state, ResourceAgentState.COMMAND)
def assert_start_sampling(self):
'''
transition to sampling. Must be called from command
'''
state = self.dataset_agent_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.COMMAND)
log.debug("DataSet agent start sampling")
cmd = AgentCommand(command=DriverEvent.START_AUTOSAMPLE)
retval = self.dataset_agent_client.execute_resource(cmd)
state = self.dataset_agent_client.get_agent_state()
log.info("Sent START SAMPLING; DSA state = %s", state)
self.assertEqual(state, ResourceAgentState.STREAMING)
def assert_reset(self):
'''
Put the instrument back in uninitialized
'''
agent_state = self.dataset_agent_client.get_agent_state()
log.debug("Resetting agent: current state: %s", agent_state)
if agent_state != ResourceAgentState.UNINITIALIZED:
cmd = AgentCommand(command=ResourceAgentEvent.RESET)
retval = self.dataset_agent_client.execute_agent(cmd)
state = self.dataset_agent_client.get_agent_state()
log.debug("Resetting agent: final state: %s", state)
def assert_agent_state(self, target_state):
"""
Verify the current agent state
@param target_state: What we expect the agent state to be
"""
state = self.dataset_agent_client.get_agent_state()
self.assertEqual(state, target_state)
def assert_agent_command(self, command, args=None, timeout=None, client=None):
"""
Verify an agent command
@param command: driver command to execute
@param args: kwargs to pass to the agent command object
"""
if client is None:
client = self.dataset_agent_client
cmd = AgentCommand(command=command, kwargs=args)
retval = client.execute_agent(cmd, timeout=timeout)
def assert_resource_command(self, command, args=None, timeout=None):
"""
Verify a resource command
@param command: driver command to execute
@param args: kwargs to pass to the agent command object
"""
cmd = AgentCommand(command=command, kwargs=args)
retval = self.dataset_agent_client.execute_resource(cmd)
def assert_state_change(self, target_agent_state, timeout=10):
"""
Verify the agent and resource states change as expected within the timeout
Fail if the state doesn't change to the expected state.
@param target_agent_state: State we expect the agent to be in
@param timeout: how long to wait for the driver to change states
"""
to = gevent.Timeout(timeout)
to.start()
done = False
agent_state = None
try:
while(not done):
agent_state = self.dataset_agent_client.get_agent_state()
log.error("Current agent state: %s", agent_state)
if(agent_state == target_agent_state):
log.debug("Current state match: %s", agent_state)
done = True
if not done:
log.debug("state mismatch, waiting for state to transition.")
gevent.sleep(1)
except gevent.Timeout:
log.error("Failed to transition agent state to %s, current state: %s", target_agent_state, agent_state)
self.fail("Failed to transition state.")
finally:
to.cancel()
def assert_event_received(self, event_object_type, timeout=10):
"""
        Verify that an event of a specific type has been received
@param event_object_type: Event object we are looking for
@param timeout: how long to wait
"""
to = gevent.Timeout(timeout)
to.start()
done = False
try:
while(not done):
for event in self.event_subscribers.events_received:
log.debug("Event: %s", event)
if isinstance(event, event_object_type):
done = True
if not done:
log.debug("target event not detected, sleep a bit to let events happen")
gevent.sleep(1)
except gevent.Timeout:
log.error("Failed to find event in queue: %s", event_object_type)
log.error("Current event queue: %s", self.event_subscribers.events_received)
self.fail("%s event not detected")
finally:
to.cancel()
log.info("Expected event detected: %s", event)
class DataSetQualificationTestCase(DataSetAgentTestCase):
"""
Base class for dataset driver unit tests
"""
def test_initialize(self):
"""
Test that we can start the container and initialize the dataset agent.
"""
self.assert_initialize()
self.assert_stop_sampling()
self.assert_reset()
def test_resource_parameters(self):
"""
verify we can get a resource parameter lists and get/set parameters.
"""
def sort_capabilities(caps_list):
'''
sort a return value into capability buckets.
@retval agt_cmds, agt_pars, res_cmds, res_iface, res_pars
'''
agt_cmds = []
agt_pars = []
res_cmds = []
res_iface = []
res_pars = []
if len(caps_list)>0 and isinstance(caps_list[0], AgentCapability):
agt_cmds = [x.name for x in caps_list if x.cap_type==CapabilityType.AGT_CMD]
agt_pars = [x.name for x in caps_list if x.cap_type==CapabilityType.AGT_PAR]
res_cmds = [x.name for x in caps_list if x.cap_type==CapabilityType.RES_CMD]
#res_iface = [x.name for x in caps_list if x.cap_type==CapabilityType.RES_IFACE]
res_pars = [x.name for x in caps_list if x.cap_type==CapabilityType.RES_PAR]
elif len(caps_list)>0 and isinstance(caps_list[0], dict):
agt_cmds = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.AGT_CMD]
agt_pars = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.AGT_PAR]
res_cmds = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.RES_CMD]
#res_iface = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.RES_IFACE]
res_pars = [x['name'] for x in caps_list if x['cap_type']==CapabilityType.RES_PAR]
agt_cmds.sort()
agt_pars.sort()
res_cmds.sort()
res_iface.sort()
res_pars.sort()
return agt_cmds, agt_pars, res_cmds, res_iface, res_pars
log.debug("Initialize the agent")
expected_params = [DriverParameter.BATCHED_PARTICLE_COUNT,
DriverParameter.PUBLISHER_POLLING_INTERVAL,
DriverParameter.RECORDS_PER_SECOND]
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
log.debug("Call get capabilities")
retval = self.dataset_agent_client.get_capabilities()
log.debug("Capabilities: %s", retval)
agt_cmds, agt_pars, res_cmds, res_iface, res_pars = sort_capabilities(retval)
self.assertEqual(sorted(res_pars), sorted(expected_params))
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 20})
reply = self.dataset_agent_client.get_resource(DriverParameter.ALL)
log.debug("Get Resource Result: %s", reply)
def test_capabilities(self):
"""
Verify capabilities throughout the agent lifecycle
"""
capabilities = {
AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.UNINITIALIZED),
AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
AgentCapabilityType.RESOURCE_COMMAND: None,
AgentCapabilityType.RESOURCE_INTERFACE: None,
AgentCapabilityType.RESOURCE_PARAMETER: None,
}
###
# DSA State INACTIVE
###
log.debug("Initialize DataSet agent")
self.assert_agent_command(ResourceAgentEvent.INITIALIZE)
self.assert_state_change(ResourceAgentState.INACTIVE)
self.assert_capabilities(capabilities)
###
# DSA State IDLE
###
log.debug("DataSet agent go active")
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.IDLE)
self.assert_agent_command(ResourceAgentEvent.GO_ACTIVE)
self.assert_state_change(ResourceAgentState.IDLE)
self.assert_capabilities(capabilities)
###
# DSA State COMMAND
###
log.debug("DataSet agent run")
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.COMMAND)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [DriverEvent.START_AUTOSAMPLE]
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = self._common_resource_parameters()
self.assert_agent_command(ResourceAgentEvent.RUN)
self.assert_state_change(ResourceAgentState.COMMAND)
self.assert_capabilities(capabilities)
###
# DSA State STREAMING
###
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [DriverEvent.STOP_AUTOSAMPLE]
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = self._common_resource_parameters()
self.assert_start_sampling()
self.assert_capabilities(capabilities)
###
# DSA State COMMAND Revisited
###
log.debug("DataSet agent run")
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.COMMAND)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [DriverEvent.START_AUTOSAMPLE]
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = self._common_resource_parameters()
self.assert_stop_sampling()
self.assert_capabilities(capabilities)
###
# DSA State INACTIVE
###
log.debug("DataSet agent run")
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.INACTIVE)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = None
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = None
self.assert_agent_command(ResourceAgentEvent.GO_INACTIVE)
self.assert_state_change(ResourceAgentState.INACTIVE)
self.assert_capabilities(capabilities)
###
# DSA State LOST_CONNECTION
###
capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.LOST_CONNECTION)
capabilities[AgentCapabilityType.RESOURCE_COMMAND] = None
capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = None
self.assert_agent_command(ResourceAgentEvent.RESET)
# Original Version: <NAME> (http://carpedm20.github.io)
# + Source: https://github.com/carpedm20/DCGAN-tensorflow/blob/e30539fb5e20d5a0fed40935853da97e9e55eee8/model.py
# + License: MIT
# [2016-08-05] Modifications for Completion: <NAME> (http://bamos.github.io)
# + License: MIT
from __future__ import division
import os
import time
import math
import itertools
from glob import glob
import tensorflow as tf
from six.moves import xrange
from ops import *
from utils import *
SUPPORTED_EXTENSIONS = ["png", "jpg", "jpeg"]
def dataset_files(root):
"""Returns a list of all image files in the given directory"""
return list(itertools.chain.from_iterable(
glob(os.path.join(root, "*.{}".format(ext))) for ext in SUPPORTED_EXTENSIONS))
class DCGAN(object):
def __init__(self, sess, image_size=128, is_crop=False,
batch_size=16, sample_size=128, lowres=8,
z_dim=100, gf_dim=64, df_dim=64,
gfc_dim=1024, dfc_dim=1024, c_dim=3,
checkpoint_dir=None, lam=0.1):
"""
Args:
sess: TensorFlow session
batch_size: The size of batch. Should be specified before training.
lowres: (optional) Low resolution image/mask shrink factor. [8]
z_dim: (optional) Dimension of dim for Z. [100]
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
            gfc_dim: (optional) Dimension of gen units for fully connected layer. [1024]
dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
c_dim: (optional) Dimension of image color. [3]
"""
# Currently, image size must be a (power of 2) and (8 or higher).
assert(image_size & (image_size - 1) == 0 and image_size >= 8)
self.sess = sess
self.is_crop = is_crop
self.batch_size = batch_size
self.image_size = image_size
self.sample_size = sample_size
self.image_shape = [image_size, image_size, c_dim]
self.lowres = lowres
self.lowres_size = image_size // lowres
self.lowres_shape = [self.lowres_size, self.lowres_size, c_dim]
self.z_dim = z_dim
self.gf_dim = gf_dim
self.df_dim = df_dim
self.gfc_dim = gfc_dim
self.dfc_dim = dfc_dim
self.lam = lam
self.c_dim = c_dim
        # batch normalization: deals with poor initialization and helps gradient flow
self.d_bns = [
batch_norm(name='d_bn{}'.format(i,)) for i in range(5)]
log_size = int(math.log(image_size) / math.log(2))
self.g_bns = [
batch_norm(name='g_bn{}'.format(i,)) for i in range(log_size)]
self.checkpoint_dir = checkpoint_dir
self.build_model()
self.model_name = "DCGAN.model"
def build_model(self):
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.images = tf.placeholder(
tf.float32, [None] + self.image_shape, name='real_images')
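        # Low-resolution views are produced by average pooling: the image is reshaped into
        # (lowres_size x lowres) blocks per spatial axis and the mean is taken over each block.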
self.lowres_images = tf.reduce_mean(tf.reshape(self.images,
[self.batch_size, self.lowres_size, self.lowres,
self.lowres_size, self.lowres, self.c_dim]), [2, 4])
self.z = tf.placeholder(tf.float32, [None, self.z_dim], name='z')
self.z_sum = tf.summary.histogram("z", self.z)
self.G = self.generator(self.z)
self.lowres_G = tf.reduce_mean(tf.reshape(self.G,
[self.batch_size, self.lowres_size, self.lowres,
self.lowres_size, self.lowres, self.c_dim]), [2, 4])
self.D, self.D_logits = self.discriminator(self.images)
self.D_, self.D_logits_ = self.discriminator(self.G, reuse=True)
self.d_sum = tf.summary.histogram("d", self.D)
self.d__sum = tf.summary.histogram("d_", self.D_)
self.G_sum = tf.summary.image("G", self.G)
self.d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits,
labels=tf.ones_like(self.D)))
self.d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
labels=tf.zeros_like(self.D_)))
self.g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
labels=tf.ones_like(self.D_)))
self.d_loss_real_sum = tf.summary.scalar("d_loss_real", self.d_loss_real)
self.d_loss_fake_sum = tf.summary.scalar("d_loss_fake", self.d_loss_fake)
self.d_loss = self.d_loss_real + self.d_loss_fake
self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
t_vars = tf.trainable_variables()
self.d_vars = [var for var in t_vars if 'd_' in var.name]
self.g_vars = [var for var in t_vars if 'g_' in var.name]
self.saver = tf.train.Saver(max_to_keep=1)
# Completion.
self.mask = tf.placeholder(tf.float32, self.image_shape, name='mask')
self.lowres_mask = tf.placeholder(tf.float32, self.lowres_shape, name='lowres_mask')
self.contextual_loss = tf.reduce_sum(
tf.contrib.layers.flatten(
tf.abs(tf.multiply(self.mask, self.G) - tf.multiply(self.mask, self.images))), 1)
self.contextual_loss += tf.reduce_sum(
tf.contrib.layers.flatten(
tf.abs(tf.multiply(self.lowres_mask, self.lowres_G) - tf.multiply(self.lowres_mask, self.lowres_images))), 1)
self.perceptual_loss = self.g_loss
self.complete_loss = self.contextual_loss + self.lam*self.perceptual_loss
self.grad_complete_loss = tf.gradients(self.complete_loss, self.z)
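        # For completion, the generator and discriminator weights stay fixed; only z is optimized.
        # grad_complete_loss is d(complete_loss)/dz, consumed by the Adam / HMC updates in complete().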
def train(self, config):
data = dataset_files(config.dataset)
np.random.shuffle(data)
assert(len(data) > 0)
d_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
.minimize(self.d_loss, var_list=self.d_vars)
g_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
.minimize(self.g_loss, var_list=self.g_vars)
        try:
            tf.global_variables_initializer().run()
        except AttributeError:
            # Older TensorFlow releases (< 0.12) only provide initialize_all_variables.
            tf.initialize_all_variables().run()
self.g_sum = tf.summary.merge(
[self.z_sum, self.d__sum, self.G_sum, self.d_loss_fake_sum, self.g_loss_sum])
self.d_sum = tf.summary.merge(
[self.z_sum, self.d_sum, self.d_loss_real_sum, self.d_loss_sum])
self.writer = tf.summary.FileWriter("./logs", self.sess.graph)
sample_z = np.random.uniform(-1, 1, size=(self.sample_size , self.z_dim))
sample_files = data[0:self.sample_size]
sample = [get_image(sample_file, self.image_size, is_crop=self.is_crop) for sample_file in sample_files]
sample_images = np.array(sample).astype(np.float32)
counter = 1
start_time = time.time()
if self.load(self.checkpoint_dir):
print("""
======
An existing model was found in the checkpoint directory.
If you just cloned this repository, it's a model for faces
trained on the CelebA dataset for 20 epochs.
If you want to train a new model from scratch,
delete the checkpoint directory or specify a different
--checkpoint_dir argument.
======
""")
else:
print("""
======
An existing model was not found in the checkpoint directory.
Initializing a new one.
======
""")
for epoch in xrange(config.epoch):
data = dataset_files(config.dataset)
batch_idxs = min(len(data), config.train_size) // self.batch_size
for idx in xrange(0, batch_idxs):
batch_files = data[idx*config.batch_size:(idx+1)*config.batch_size]
batch = [get_image(batch_file, self.image_size, is_crop=self.is_crop)
for batch_file in batch_files]
batch_images = np.array(batch).astype(np.float32)
batch_z = np.random.uniform(-1, 1, [config.batch_size, self.z_dim]) \
.astype(np.float32)
# Update D network
_, summary_str = self.sess.run([d_optim, self.d_sum],
feed_dict={ self.images: batch_images, self.z: batch_z, self.is_training: True })
self.writer.add_summary(summary_str, counter)
# Update G network
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={ self.z: batch_z, self.is_training: True })
self.writer.add_summary(summary_str, counter)
# Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={ self.z: batch_z, self.is_training: True })
self.writer.add_summary(summary_str, counter)
errD_fake = self.d_loss_fake.eval({self.z: batch_z, self.is_training: False})
errD_real = self.d_loss_real.eval({self.images: batch_images, self.is_training: False})
errG = self.g_loss.eval({self.z: batch_z, self.is_training: False})
counter += 1
print("Epoch: [{:2d}] [{:4d}/{:4d}] time: {:4.4f}, d_loss: {:.8f}, g_loss: {:.8f}".format(
epoch, idx, batch_idxs, time.time() - start_time, errD_fake+errD_real, errG))
if np.mod(counter, 100) == 1:
samples, d_loss, g_loss = self.sess.run(
[self.G, self.d_loss, self.g_loss],
feed_dict={self.z: sample_z, self.images: sample_images, self.is_training: False}
)
save_images(samples, [8, 8],
'./samples/train_{:02d}_{:04d}.png'.format(epoch, idx))
print("[Sample] d_loss: {:.8f}, g_loss: {:.8f}".format(d_loss, g_loss))
if np.mod(counter, 500) == 2:
self.save(config.checkpoint_dir, counter)
def complete(self, config):
def make_dir(name):
# Works on python 2.7, where exist_ok arg to makedirs isn't available.
p = os.path.join(config.outDir, name)
if not os.path.exists(p):
os.makedirs(p)
make_dir('hats_imgs')
make_dir('completed')
make_dir('logs')
        try:
            tf.global_variables_initializer().run()
        except AttributeError:
            # Older TensorFlow releases (< 0.12) only provide initialize_all_variables.
            tf.initialize_all_variables().run()
isLoaded = self.load(self.checkpoint_dir)
assert(isLoaded)
nImgs = len(config.imgs)
batch_idxs = int(np.ceil(nImgs/self.batch_size))
lowres_mask = np.zeros(self.lowres_shape)
if config.maskType == 'random':
fraction_masked = 0.2
mask = np.ones(self.image_shape)
mask[np.random.random(self.image_shape[:2]) < fraction_masked] = 0.0
elif config.maskType == 'center':
assert(config.centerScale <= 0.5)
mask = np.ones(self.image_shape)
sz = self.image_size
l = int(self.image_size*config.centerScale)
u = int(self.image_size*(1.0-config.centerScale))
mask[l:u, l:u, :] = 0.0
elif config.maskType == 'left':
mask = np.ones(self.image_shape)
c = self.image_size // 2
mask[:,:c,:] = 0.0
elif config.maskType == 'full':
mask = np.ones(self.image_shape)
elif config.maskType == 'grid':
mask = np.zeros(self.image_shape)
mask[::4,::4,:] = 1.0
elif config.maskType == 'lowres':
lowres_mask = np.ones(self.lowres_shape)
mask = np.zeros(self.image_shape)
else:
            assert False, 'Unknown maskType: {}'.format(config.maskType)
for idx in xrange(0, batch_idxs):
l = idx*self.batch_size
u = min((idx+1)*self.batch_size, nImgs)
batchSz = u-l
batch_files = config.imgs[l:u]
batch = [get_image(batch_file, self.image_size, is_crop=self.is_crop)
for batch_file in batch_files]
batch_images = np.array(batch).astype(np.float32)
if batchSz < self.batch_size:
print(batchSz)
padSz = ((0, int(self.batch_size-batchSz)), (0,0), (0,0), (0,0))
batch_images = np.pad(batch_images, padSz, 'constant')
batch_images = batch_images.astype(np.float32)
zhats = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))
m = 0
v = 0
nRows = np.ceil(batchSz/8)
nCols = min(8, batchSz)
save_images(batch_images[:batchSz,:,:,:], [nRows,nCols],
os.path.join(config.outDir, 'before.png'))
masked_images = np.multiply(batch_images, mask)
save_images(masked_images[:batchSz,:,:,:], [nRows,nCols],
os.path.join(config.outDir, 'masked.png'))
if lowres_mask.any():
lowres_images = np.reshape(batch_images, [self.batch_size, self.lowres_size, self.lowres,
self.lowres_size, self.lowres, self.c_dim]).mean(4).mean(2)
lowres_images = np.multiply(lowres_images, lowres_mask)
lowres_images = np.repeat(np.repeat(lowres_images, self.lowres, 1), self.lowres, 2)
save_images(lowres_images[:batchSz,:,:,:], [nRows,nCols],
os.path.join(config.outDir, 'lowres.png'))
for img in range(batchSz):
with open(os.path.join(config.outDir, 'logs/hats_{:02d}.log'.format(img)), 'a') as f:
f.write('iter loss ' +
' '.join(['z{}'.format(zi) for zi in range(self.z_dim)]) +
'\n')
for i in xrange(config.nIter):
fd = {
self.z: zhats,
self.mask: mask,
self.lowres_mask: lowres_mask,
self.images: batch_images,
self.is_training: False
}
run = [self.complete_loss, self.grad_complete_loss, self.G, self.lowres_G]
loss, g, G_imgs, lowres_G_imgs = self.sess.run(run, feed_dict=fd)
for img in range(batchSz):
with open(os.path.join(config.outDir, 'logs/hats_{:02d}.log'.format(img)), 'ab') as f:
f.write('{} {} '.format(i, loss[img]).encode())
np.savetxt(f, zhats[img:img+1])
if i % config.outInterval == 0:
print(i, np.mean(loss[0:batchSz]))
imgName = os.path.join(config.outDir,
'hats_imgs/{:04d}.png'.format(i))
nRows = np.ceil(batchSz/8)
nCols = min(8, batchSz)
save_images(G_imgs[:batchSz,:,:,:], [nRows,nCols], imgName)
if lowres_mask.any():
imgName = imgName[:-4] + '.lowres.png'
save_images(np.repeat(np.repeat(lowres_G_imgs[:batchSz,:,:,:],
self.lowres, 1), self.lowres, 2),
[nRows,nCols], imgName)
inv_masked_hat_images = np.multiply(G_imgs, 1.0-mask)
completed = masked_images + inv_masked_hat_images
imgName = os.path.join(config.outDir,
'completed/{:04d}.png'.format(i))
save_images(completed[:batchSz,:,:,:], [nRows,nCols], imgName)
if config.approach == 'adam':
# Optimize single completion with Adam
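                    # Adam applied directly to the latent vector: m and v are running estimates of the
                    # gradient's first and second moments, bias-corrected before the update, and zhats
                    # is clipped back into the generator's [-1, 1] prior range.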
m_prev = np.copy(m)
v_prev = np.copy(v)
m = config.beta1 * m_prev + (1 - config.beta1) * g[0]
v = config.beta2 * v_prev + (1 - config.beta2) * np.multiply(g[0], g[0])
m_hat = m / (1 - config.beta1 ** (i + 1))
v_hat = v / (1 - config.beta2 ** (i + 1))
zhats += - np.true_divide(config.lr * m_hat, (np.sqrt(v_hat) + config.eps))
zhats = np.clip(zhats, -1, 1)
elif config.approach == 'hmc':
# Sample example completions with HMC (not in paper)
zhats_old = np.copy(zhats)
loss_old = np.copy(loss)
v = np.random.randn(self.batch_size, self.z_dim)
v_old = np.copy(v)
for steps in range(config.hmcL):
v -= config.hmcEps/2 * config.hmcBeta * g[0]
zhats += config.hmcEps * v
np.copyto(zhats, np.clip(zhats, -1, 1))
loss, g, _, _ = self.sess.run(run, feed_dict=fd)
v -= config.hmcEps/2 * config.hmcBeta * g[0]
                    # Metropolis-Hastings accept/reject for each completion in the batch
                    for img in range(batchSz):
                        logprob_old = config.hmcBeta * loss_old[img] + np.sum(v_old[img] ** 2) / 2
                        logprob = config.hmcBeta * loss[img] + np.sum(v[img] ** 2) / 2
                        accept = np.exp(logprob_old - logprob)
                        if accept < 1 and np.random.uniform() > accept:
                            np.copyto(zhats[img], zhats_old[img])
# Repository: jsmith00/contrail-controller
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import gevent
import gevent.queue
import gevent.wsgi
import os
import sys
import logging
import pdb
import json
from pprint import pprint
import functools
import socket
import time
import errno
import re
import copy
from lxml import etree
from xml.sax.saxutils import escape, unescape
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import pycassa
import Queue
from collections import deque
import kombu
import kazoo
from kazoo.client import KazooState
from copy import deepcopy
from datetime import datetime
from pycassa.util import *
from vnc_api import vnc_api
from novaclient import exceptions as nc_exc
from cfgm_common.exceptions import ResourceExistsError
def stub(*args, **kwargs):
pass
class FakeApiConfigLog(object):
_all_logs = []
send = stub
def __init__(self, *args, **kwargs):
FakeApiConfigLog._all_logs.append(kwargs['api_log'])
@classmethod
def _print(cls):
for log in cls._all_logs:
x = copy.deepcopy(log.__dict__)
#body = x.pop('body')
#pprint(json.loads(body))
pprint(x)
print "\n"
# class FakeApiConfigLog
class FakeWSGIHandler(gevent.wsgi.WSGIHandler):
logger = logging.getLogger('FakeWSGIHandler')
logger.addHandler(logging.FileHandler('api_server.log'))
def __init__(self, socket, address, server):
super(FakeWSGIHandler, self).__init__(socket, address, server)
#server.log = open('api_server.log', 'a')
class LoggerWriter(object):
def write(self, message):
FakeWSGIHandler.logger.log(logging.INFO, message)
server.log = LoggerWriter()
class CassandraCFs(object):
_all_cfs = {}
@classmethod
def add_cf(cls, name, cf):
CassandraCFs._all_cfs[name] = cf
# end add_cf
@classmethod
def get_cf(cls, name):
return CassandraCFs._all_cfs[name]
# end get_cf
@classmethod
def reset(cls):
cls._all_cfs = {}
# end CassandraCFs
class FakeCF(object):
def __init__(*args, **kwargs):
self = args[0]
self._name = args[3]
try:
old_cf = CassandraCFs.get_cf(self._name)
self._rows = old_cf._rows
except KeyError:
self._rows = OrderedDict({})
self.column_validators = {}
CassandraCFs.add_cf(self._name, self)
# end __init__
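    # Rows are registered per column-family name in CassandraCFs, so a re-created FakeCF with
    # the same name sees data written by earlier instances (mimicking a persistent Cassandra CF).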
def get_range(self, *args, **kwargs):
for key in self._rows:
yield (key, self.get(key))
# end get_range
def _column_within_range(self, column_name, column_start, column_finish):
if column_start and column_name < column_start:
return False
if column_finish and column_name > column_finish:
return False
return True
# end _column_within_range
def get(
self, key, columns=None, column_start=None, column_finish=None,
column_count=0, include_timestamp=False):
if not key in self._rows:
raise pycassa.NotFoundException
if columns:
col_dict = {}
for col_name in columns:
col_value = self._rows[key][col_name][0]
if include_timestamp:
col_tstamp = self._rows[key][col_name][1]
col_dict[col_name] = (col_value, col_tstamp)
else:
col_dict[col_name] = col_value
else:
col_dict = {}
for col_name in self._rows[key].keys():
if not self._column_within_range(col_name,
column_start, column_finish):
continue
col_value = self._rows[key][col_name][0]
if include_timestamp:
col_tstamp = self._rows[key][col_name][1]
col_dict[col_name] = (col_value, col_tstamp)
else:
col_dict[col_name] = col_value
return col_dict
# end get
def multiget(
self, keys, columns=None, column_start=None, column_finish=None,
column_count=0, include_timestamp=False):
result = {}
for key in keys:
try:
result[key] = {}
for col_name in self._rows[key]:
if not self._column_within_range(col_name,
column_start, column_finish):
continue
result[key][col_name] = copy.deepcopy(self._rows[key][col_name])
except KeyError:
pass
return result
# end multiget
def insert(self, key, col_dict):
if key not in self._rows:
self._rows[key] = {}
tstamp = datetime.now()
for col_name in col_dict.keys():
self._rows[key][col_name] = (col_dict[col_name], tstamp)
# end insert
def remove(self, key, columns=None):
try:
if columns:
# for each entry in col_name delete each that element
for col_name in columns:
del self._rows[key][col_name]
else:
del self._rows[key]
except KeyError:
# pycassa remove ignores non-existing keys
pass
# end remove
def xget(self, key, column_start=None, column_finish=None,
include_timestamp=False):
col_names = []
if key in self._rows:
col_names = self._rows[key].keys()
for col_name in col_names:
if not self._column_within_range(col_name,
column_start, column_finish):
continue
col_value = self._rows[key][col_name][0]
if include_timestamp:
col_tstamp = self._rows[key][col_name][1]
yield (col_name, (col_value, col_tstamp))
else:
yield (col_name, col_value)
# end xget
def batch(self):
return self
# end batch
def send(self):
pass
# end send
# end class FakeCF
class FakeNovaClient(object):
@staticmethod
def initialize(*args, **kwargs):
return FakeNovaClient
class flavors:
@staticmethod
def find(*args, **kwargs):
return 1
# end class flavors
class images:
@staticmethod
def find(name):
return 1
# end class images
class servers:
@staticmethod
def create(name, image, flavor, nics, *args, **kwargs):
vm = vnc_api.VirtualMachine(name)
FakeNovaClient.vnc_lib.virtual_machine_create(vm)
for network in nics:
if 'nic-id' in network:
vn = FakeNovaClient.vnc_lib.virtual_network_read(
id=network['net-id'])
vmi = vnc_api.VirtualMachineInterface(vn.name, parent_obj=vm)
vmi.set_virtual_network(vn)
FakeNovaClient.vnc_lib.virtual_machine_interface_create(vmi)
ip_address = FakeNovaClient.vnc_lib.virtual_network_ip_alloc(
vn, count=1)[0]
ip_obj = vnc_api.InstanceIp(ip_address, ip_address)
ip_obj.add_virtual_network(vn)
ip_obj.add_virtual_machine_interface(vmi)
FakeNovaClient.vnc_lib.instance_ip_create(ip_obj)
elif 'port-id' in network:
vmi = FakeNovaClient.vnc_lib.virtual_machine_interface_read(id=network['port-id'])
vmi.add_virtual_machine(vm)
FakeNovaClient.vnc_lib.virtual_machine_interface_update(vmi)
# end for network
vm.id = vm.uuid
vm.delete = FakeNovaClient.delete_vm.__get__(
vm, vnc_api.VirtualMachine)
vm.get = stub
return vm
# end create
@staticmethod
def find(id):
try:
vm = FakeNovaClient.vnc_lib.virtual_machine_read(id=id)
except vnc_api.NoIdError:
raise nc_exc.NotFound(404, "")
vm.delete = FakeNovaClient.delete_vm.__get__(
vm, vnc_api.VirtualMachine)
vm.status = 'OK'
return vm
# end find
get = find
# end class servers
@staticmethod
def delete_vm(vm):
for if_ref in vm.get_virtual_machine_interfaces() or vm.get_virtual_machine_interface_back_refs():
intf = FakeNovaClient.vnc_lib.virtual_machine_interface_read(
id=if_ref['uuid'])
for ip_ref in intf.get_instance_ip_back_refs() or []:
FakeNovaClient.vnc_lib.instance_ip_delete(id=ip_ref['uuid'])
FakeNovaClient.vnc_lib.virtual_machine_interface_delete(
id=if_ref['uuid'])
FakeNovaClient.vnc_lib.virtual_machine_delete(id=vm.uuid)
# end delete_vm
# end class FakeNovaClient
class FakeIfmapClient(object):
# _graph is dict of ident_names where val for each key is
# dict with keys 'ident' and 'links'
# 'ident' has ident xml element
# 'links' is a dict with keys of concat(<meta-name>' '<ident-name>')
# and vals of dict with 'meta' which has meta xml element and
# 'other' which has other ident xml element
# eg. cls._graph['contrail:network-ipam:default-domain:default-project:
# ipam2'] =
# 'ident': <Element identity at 0x2b3e280>,
# 'links': {'contrail:id-perms': {'meta': <Element metadata at 0x2b3eb40>},
# 'contrail:project-network-ipam
# contrail:project:default-domain:default-project':
# {'other': <Element identity at 0x2b3eaa0>,
# 'meta': <Element metadata at 0x2b3ea50>},
# 'contrail:virtual-network-network-ipam contrail:
# virtual-network:default-domain:default-project:vn2':
# {'other': <Element identity at 0x2b3ee10>,
# 'meta': <Element metadata at 0x2b3e410>}}}
_graph = {}
_published_messages = [] # all messages published so far
_subscribe_lists = [] # list of all subscribers indexed by session-id
_PUBLISH_ENVELOPE = \
"""<?xml version="1.0" encoding="UTF-8"?> """\
"""<env:Envelope xmlns:"""\
"""env="http://www.w3.org/2003/05/soap-envelope" xmlns:"""\
"""ifmap="http://www.trustedcomputinggroup.org/2010/IFMAP/2" """\
"""xmlns:contrail="http://www.contrailsystems.com/"""\
"""vnc_cfg.xsd" """\
"""xmlns:meta="http://www.trustedcomputinggroup.org"""\
"""/2010/IFMAP-METADATA/2"> """\
"""<env:Body> %(body)s </env:Body> </env:Envelope>"""
_RSP_ENVELOPE = \
"""<?xml version="1.0" encoding="UTF-8" standalone="yes"?> """\
"""<env:Envelope xmlns:ifmap="http://www.trustedcomputinggroup.org"""\
"""/2010/IFMAP/2" """\
"""xmlns:env="http://www.w3.org/2003/05/soap-envelope" """\
"""xmlns:meta="http://www.trustedcomputinggroup.org"""\
"""/2010/IFMAP-METADATA/2" """\
"""xmlns:contrail="http://www.contrailsystems.com/vnc_cfg.xsd"> """\
"""<env:Body><ifmap:response> %(result)s """\
"""</ifmap:response></env:Body></env:Envelope>"""
@classmethod
def reset(cls):
cls._graph = {}
cls._published_messages = [] # all messages published so far
cls._subscribe_lists = [] # list of all subscribers indexed by session-id
# end reset
@staticmethod
def initialize(*args, **kwargs):
pass
# end initialize
@classmethod
def _update_publish(cls, upd_root):
subscribe_item = etree.Element('resultItem')
subscribe_item.extend(deepcopy(upd_root))
from_name = escape(upd_root[0].attrib['name'])
if not from_name in cls._graph:
cls._graph[from_name] = {'ident': upd_root[0], 'links': {}}
if len(upd_root) == 2:
meta_name = re.sub("{.*}", "contrail:", upd_root[1][0].tag)
link_key = meta_name
link_info = {'meta': upd_root[1]}
cls._graph[from_name]['links'][link_key] = link_info
elif len(upd_root) == 3:
meta_name = re.sub("{.*}", "contrail:", upd_root[2][0].tag)
to_name = escape(upd_root[1].attrib['name'])
link_key = '%s %s' % (meta_name, to_name)
link_info = {'meta': upd_root[2], 'other': upd_root[1]}
cls._graph[from_name]['links'][link_key] = link_info
# reverse mapping only for strong refs
# currently refs from same type to each other is weak ref
from_type = from_name.split(':')[1]
to_type = to_name.split(':')[1]
if not to_name in cls._graph:
cls._graph[to_name] = {'ident': upd_root[1], 'links': {}}
link_key = '%s %s' % (meta_name, from_name)
link_info = {'meta': upd_root[2], 'other': upd_root[0]}
cls._graph[to_name]['links'][link_key] = link_info
else:
raise Exception("Unknown ifmap update: %s" %
(etree.tostring(upd_root)))
subscribe_result = etree.Element('updateResult')
subscribe_result.append(subscribe_item)
return subscribe_result
# end _update_publish
@classmethod
def _delete_publish(cls, del_root):
from_name = escape(del_root[0].attrib['name'])
if 'filter' in del_root.attrib:
meta_name = del_root.attrib['filter']
if len(del_root) == 1:
link_key = meta_name
elif len(del_root) == 2:
to_name = escape(del_root[1].attrib['name'])
link_key = '%s %s' % (meta_name, to_name)
else:
raise Exception("Unknown ifmap delete: %s" %
(etree.tostring(del_root)))
link_keys = [link_key]
else: # delete all metadata on this ident or between pair of idents
if len(del_root) == 1:
link_keys = cls._graph[from_name]['links'].keys()
elif len(del_root) == 2:
to_name = escape(del_root[1].attrib['name'])
link_keys = []
if from_name in cls._graph:
all_link_keys = cls._graph[from_name]['links'].keys()
for link_key in all_link_keys:
link_info = cls._graph[from_name]['links'][link_key]
if 'other' in link_info:
if link_key.split()[1] == to_name:
link_keys.append(link_key)
else:
raise Exception("Unknown ifmap delete: %s" %
(etree.tostring(del_root)))
subscribe_result = etree.Element('deleteResult')
for link_key in link_keys:
subscribe_item = etree.Element('resultItem')
subscribe_item.extend(deepcopy(del_root))
link_info = cls._graph[from_name]['links'][link_key]
# generate id1, id2, meta for poll for the case where
# del of ident for all metas requested but we have a
# ref meta to another ident
if len(del_root) == 1 and 'other' in link_info:
to_ident_elem = link_info['other']
subscribe_item.append(to_ident_elem)
subscribe_item.append(deepcopy(link_info['meta']))
subscribe_result.append(subscribe_item)
if 'other' in link_info:
other_name = escape(link_info['other'].attrib['name'])
meta_name = re.sub(
"{.*}", "contrail:", link_info['meta'][0].tag)
rev_link_key = '%s %s' % (meta_name, from_name)
from_type = from_name.split(':')[1]
other_type = other_name.split(':')[1]
if other_name in cls._graph:
del cls._graph[other_name]['links'][rev_link_key]
if not cls._graph[other_name]['links']:
del cls._graph[other_name]
del cls._graph[from_name]['links'][link_key]
# delete ident if no links left
if from_name in cls._graph and not cls._graph[from_name]['links']:
del cls._graph[from_name]
if len(subscribe_result) == 0:
subscribe_item = etree.Element('resultItem')
subscribe_item.extend(deepcopy(del_root))
subscribe_result.append(subscribe_item)
return subscribe_result
# end _delete_publish
@staticmethod
def call(method, body):
cls = FakeIfmapClient
if method == 'publish':
pub_env = cls._PUBLISH_ENVELOPE % {
'body': body._PublishRequest__operations}
pub_env = pub_env.encode('utf-8')
env_root = etree.fromstring(pub_env)
poll_result = etree.Element('pollResult')
for pub_root in env_root[0]:
# pub_root = env_root[0][0]
if pub_root.tag == 'update':
subscribe_result = cls._update_publish(pub_root)
elif pub_root.tag == 'delete':
subscribe_result = cls._delete_publish(pub_root)
                else:
                    raise Exception("Unknown ifmap publish: %s" %
                                    (etree.tostring(pub_root)))
from app.utilities import utils
from app.data.database import DB
from app.data.item_components import ItemComponent
from app.data.components import Type
from app.engine import action, combat_calcs, equations, banner
from app.engine import item_system, skill_system, item_funcs
from app.engine.game_state import game
class PermanentStatChange(ItemComponent):
nid = 'permanent_stat_change'
desc = "Item changes target's stats on hit."
tag = 'special'
expose = (Type.Dict, Type.Stat)
def target_restrict(self, unit, item, def_pos, splash) -> bool:
        # Ignores splash
defender = game.board.get_unit(def_pos)
if not defender:
return False
klass = DB.classes.get(defender.klass)
for stat, inc in self.value:
if inc <= 0 or defender.stats[stat] < klass.max_stats.get(stat, 30):
return True
return False
def on_hit(self, actions, playback, unit, item, target, target_pos, mode=None):
stat_changes = {k: v for (k, v) in self.value}
klass = DB.classes.get(target.klass)
# clamp stat changes
        stat_changes = {k: utils.clamp(v, -target.stats[k], klass.max_stats.get(k, 30) - target.stats[k]) for k, v in stat_changes.items()}
actions.append(action.ApplyStatChanges(target, stat_changes))
playback.append(('stat_hit', unit, item, target))
def end_combat(self, playback, unit, item, target, mode):
# Count number of stat hits
count = 0
for p in playback:
if p[0] == 'stat_hit':
count += 1
if count > 0:
stat_changes = {k: v*count for (k, v) in self.value}
klass = DB.classes.get(target.klass)
# clamp stat changes
stat_changes = {k: utils.clamp(v, -target.stats[k], klass.max_stats.get(k, 30) - target.stats[k]) for k, v in stat_changes.items()}
game.memory['stat_changes'] = stat_changes
game.exp_instance.append((target, 0, None, 'stat_booster'))
game.state.change('exp')
class PermanentGrowthChange(ItemComponent):
nid = 'permanent_growth_change'
desc = "Item changes target's growths on hit"
tag = 'special'
expose = (Type.Dict, Type.Stat)
def on_hit(self, actions, playback, unit, item, target, target_pos, mode=None):
growth_changes = {k: v for (k, v) in self.value}
actions.append(action.ApplyGrowthChanges(target, growth_changes))
playback.append(('stat_hit', unit, item, target))
class WexpChange(ItemComponent):
nid = 'wexp_change'
desc = "Item changes target's wexp on hit"
tag = 'special'
expose = (Type.Dict, Type.WeaponType)
def on_hit(self, actions, playback, unit, item, target, target_pos, mode=None):
actions.append(action.WexpChange(target, self.value))
playback.append(('hit', unit, item, target))
class FatigueOnHit(ItemComponent):
nid = 'fatigue_on_hit'
desc = "Item changes target's fatigue on hit"
tag = 'special'
expose = Type.Int
value = 1
def on_hit(self, actions, playback, unit, item, target, target_pos, mode=None):
actions.append(action.ChangeFatigue(target, self.value))
playback.append(('hit', unit, item, target))
class StatusOnHit(ItemComponent):
nid = 'status_on_hit'
desc = "Item gives status to target when it hits"
tag = 'special'
expose = Type.Skill # Nid
def on_hit(self, actions, playback, unit, item, target, target_pos, mode):
act = action.AddSkill(target, self.value, unit)
actions.append(act)
playback.append(('status_hit', unit, item, target, self.value))
def ai_priority(self, unit, item, target, move):
# Do I add a new status to the target
if target and self.value not in [skill.nid for skill in target.skills]:
accuracy_term = utils.clamp(combat_calcs.compute_hit(unit, target, item, target.get_weapon(), "attack")/100., 0, 1)
num_attacks = combat_calcs.outspeed(unit, target, item, target.get_weapon(), "attack")
accuracy_term *= num_attacks
# Tries to maximize distance from target
distance_term = 0.01 * utils.calculate_distance(move, target.position)
if skill_system.check_enemy(unit, target):
return 0.5 * accuracy_term + distance_term
else:
return -0.5 * accuracy_term
return 0
class StatusAfterCombatOnHit(StatusOnHit, ItemComponent):
nid = 'status_after_combat_on_hit'
desc = "Item gives status to target after it hits"
tag = 'special'
expose = Type.Skill # Nid
_did_hit = set()
def on_hit(self, actions, playback, unit, item, target, target_pos, mode):
self._did_hit.add(target)
def end_combat(self, playback, unit, item, target, mode):
for target in self._did_hit:
act = action.AddSkill(target, self.value, unit)
action.do(act)
self._did_hit.clear()
class Shove(ItemComponent):
nid = 'shove'
desc = "Item shoves target on hit"
tag = 'special'
expose = Type.Int
value = 1
def _check_shove(self, unit_to_move, anchor_pos, magnitude):
offset_x = utils.clamp(unit_to_move.position[0] - anchor_pos[0], -1, 1)
offset_y = utils.clamp(unit_to_move.position[1] - anchor_pos[1], -1, 1)
new_position = (unit_to_move.position[0] + offset_x * magnitude,
unit_to_move.position[1] + offset_y * magnitude)
mcost = game.movement.get_mcost(unit_to_move, new_position)
if game.tilemap.check_bounds(new_position) and \
not game.board.get_unit(new_position) and \
mcost <= equations.parser.movement(unit_to_move):
return new_position
return False
def on_hit(self, actions, playback, unit, item, target, target_pos, mode):
if not skill_system.ignore_forced_movement(target):
new_position = self._check_shove(target, unit.position, self.value)
if new_position:
actions.append(action.ForcedMovement(target, new_position))
playback.append(('shove_hit', unit, item, target))
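# Illustrative sketch (not part of the engine): the displacement rule used by
# Shove._check_shove above. The offset is the per-axis sign of (unit - anchor),
# scaled by the shove magnitude; the helper below is a hypothetical, standalone
# re-statement of that arithmetic for clarity.
def _shove_destination(unit_pos, anchor_pos, magnitude):
    offset_x = max(-1, min(1, unit_pos[0] - anchor_pos[0]))
    offset_y = max(-1, min(1, unit_pos[1] - anchor_pos[1]))
    return (unit_pos[0] + offset_x * magnitude, unit_pos[1] + offset_y * magnitude)
# Example: an attacker at (2, 2) shoving a target at (3, 2) with magnitude 1
# pushes the target to (4, 2); _check_shove additionally verifies bounds,
# occupancy and movement cost before accepting that square.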
class ShoveOnEndCombat(Shove):
nid = 'shove_on_end_combat'
desc = "Item shoves target at the end of combat"
tag = 'special'
expose = Type.Int
value = 1
def end_combat(self, playback, unit, item, target, mode):
if not skill_system.ignore_forced_movement(target):
new_position = self._check_shove(target, unit.position, self.value)
if new_position:
action.do(action.ForcedMovement(target, new_position))
class ShoveTargetRestrict(Shove, ItemComponent):
nid = 'shove_target_restrict'
desc = "Target restriction for Shove"
tag = 'special'
expose = Type.Int
value = 1
def target_restrict(self, unit, item, def_pos, splash) -> bool:
defender = game.board.get_unit(def_pos)
if defender and self._check_shove(defender, unit.position, self.value) and \
not skill_system.ignore_forced_movement(defender):
return True
for s_pos in splash:
s = game.board.get_unit(s_pos)
if self._check_shove(s, unit.position, self.value) and \
not skill_system.ignore_forced_movement(s):
return True
return False
def on_hit(self, actions, playback, unit, item, target, target_pos, mode):
pass
def end_combat(self, playback, unit, item, target, mode):
pass
class Swap(ItemComponent):
nid = 'swap'
desc = "Item swaps user with target on hit"
tag = 'special'
def on_hit(self, actions, playback, unit, item, target, target_pos, mode):
if not skill_system.ignore_forced_movement(unit) and not skill_system.ignore_forced_movement(target):
actions.append(action.Swap(unit, target))
playback.append(('swap_hit', unit, item, target))
class Pivot(ItemComponent):
nid = 'pivot'
desc = "User moves to other side of target on hit."
tag = 'special'
author = "<NAME>"
expose = Type.Int
value = 1
def _check_pivot(self, unit_to_move, anchor_pos, magnitude):
offset_x = utils.clamp(unit_to_move.position[0] - anchor_pos[0], -1, 1)
offset_y = utils.clamp(unit_to_move.position[1] - anchor_pos[1], -1, 1)
new_position = (anchor_pos[0] + offset_x * -magnitude,
anchor_pos[1] + offset_y * -magnitude)
mcost = game.movement.get_mcost(unit_to_move, new_position)
if game.tilemap.check_bounds(new_position) and \
not game.board.get_unit(new_position) and \
mcost <= equations.parser.movement(unit_to_move):
return new_position
return False
def on_hit(self, actions, playback, unit, item, target, target_pos, mode):
if not skill_system.ignore_forced_movement(unit):
new_position = self._check_pivot(unit, target.position, self.value)
if new_position:
actions.append(action.ForcedMovement(unit, new_position))
playback.append(('shove_hit', unit, item, unit))
class PivotTargetRestrict(Pivot, ItemComponent):
nid = 'pivot_target_restrict'
desc = "Suppresses the Pivot command when it would be invalid."
tag = 'special'
author = "<NAME>"
expose = Type.Int
value = 1
def target_restrict(self, unit, item, def_pos, splash) -> bool:
defender = game.board.get_unit(def_pos)
if defender and self._check_pivot(unit, defender.position, self.value) and \
not skill_system.ignore_forced_movement(unit):
return True
for s_pos in splash:
s = game.board.get_unit(s_pos)
if self._check_pivot(unit, s.position, self.value) and \
not skill_system.ignore_forced_movement(unit):
return True
return False
def on_hit(self, actions, playback, unit, item, target, target_pos, mode):
pass
def end_combat(self, playback, unit, item, target, mode):
pass
class DrawBack(ItemComponent):
nid = 'draw_back'
desc = "Item moves both user and target back on hit."
tag = 'special'
author = "<NAME>"
expose = Type.Int
value = 1
def _check_draw_back(self, target, user, magnitude):
offset_x = utils.clamp(target.position[0] - user.position[0], -1, 1)
offset_y = utils.clamp(target.position[1] - user.position[1], -1, 1)
new_position_user = (user.position[0] - offset_x * magnitude,
user.position[1] - offset_y * magnitude)
new_position_target = (target.position[0] - offset_x * magnitude,
target.position[1] - offset_y * magnitude)
mcost_user = game.movement.get_mcost(user, new_position_user)
mcost_target = game.movement.get_mcost(target, new_position_target)
if game.tilemap.check_bounds(new_position_user) and \
not game.board.get_unit(new_position_user) and \
mcost_user <= equations.parser.movement(user) and mcost_target <= equations.parser.movement(target):
return new_position_user, new_position_target
return None, None
def on_hit(self, actions, playback, unit, item, target, target_pos, mode):
if not skill_system.ignore_forced_movement(target):
new_position_user, new_position_target = self._check_draw_back(target, unit, self.value)
if new_position_user and new_position_target:
actions.append(action.ForcedMovement(unit, new_position_user))
playback.append(('shove_hit', unit, item, unit))
actions.append(action.ForcedMovement(target, new_position_target))
playback.append(('shove_hit', unit, item, target))
class DrawBackTargetRestrict(DrawBack, ItemComponent):
nid = 'draw_back_target_restrict'
desc = "Suppresses the Draw Back command when it would be invalid."
tag = 'special'
author = "<NAME>"
expose = Type.Int
value = 1
def target_restrict(self, unit, item, def_pos, splash) -> bool:
defender = game.board.get_unit(def_pos)
positions = [result for result in self._check_draw_back(defender, unit, self.value)]
if defender and all(positions) and \
not skill_system.ignore_forced_movement(defender):
return True
for s_pos in splash:
s = game.board.get_unit(s_pos)
splash_positions = [result for result in self._check_draw_back(s, unit, self.value)]
if all(splash_positions) and not skill_system.ignore_forced_movement(s):
return True
return False
def on_hit(self, actions, playback, unit, item, target, target_pos, mode):
pass
def end_combat(self, playback, unit, item, target, mode):
pass
class Steal(ItemComponent):
nid = 'steal'
desc = "Steal any unequipped item from target on hit"
tag = 'special'
_did_steal = False
def init(self, item):
item.data['target_item'] = None
def target_restrict(self, unit, item, def_pos, splash) -> bool:
# Unit has item that can be stolen
attack = equations.parser.steal_atk(unit)
defender = game.board.get_unit(def_pos)
defense = equations.parser.steal_def(defender)
if attack >= defense:
for def_item in defender.items:
if self.item_restrict(unit, item, defender, def_item):
return True
return False
def ai_targets(self, unit, item):
positions = set()
for other in game.units:
if other.position and skill_system.check_enemy(unit, other):
for def_item in other.items:
if self.item_restrict(unit, item, other, def_item):
positions.add(other.position)
break
return positions
def targets_items(self, unit, item) -> bool:
return True
def item_restrict(self, unit, item, defender, def_item) -> bool:
# Assumed restriction (the original check is cut off here): the item must not
# be the defender's currently equipped weapon.
if def_item is defender.get_weapon():
    return False
return True
import numpy as np
import torch as th
from problem2 import random_policy
# Note: please don't import any new package. You should solve this problem using only the package(s) above.
#-------------------------------------------------------------------------
'''
Problem 3: Q Network (35 points)
In this problem, you will implement a neural network (with one fully-connected layer only) to estimate Q values in a game
A list of all variables being used in this problem is provided at the end of this file.
'''
#----------------------------------------------------
'''
(Training: estimate Q values using the Q network) Given a Q network with parameters (W, b) and a mini-batch of sampled game states S, compute the predicted Q values on the mini-batch of samples.
---- Inputs: --------
* S: the current states for a mini-batch of sampled game steps, a torch tensor of shape (n,p), where S[i] is the current game state in the i-th sample in the mini-batch.
* W: the weights of fully connected layer of Q network, which is used to predict the Q values of each game state, a float torch matrix of shape (p,c).
* b: the biases of fully connected layer of Q network, a float torch vector of length c.
---- Outputs: --------
* Q: the predicted Q values by the Q network on all actions for a mini-batch of game state samples, a pytorch matrix of shape (n, c). Q[i,j] represents the Q value on the j-th action for the i-th sample in the mini-batch.
---- Hints: --------
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def compute_Q(S, W, b):
#########################################
## INSERT YOUR CODE HERE (2 points)
Q = S@W + b
#########################################
return Q
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_compute_Q
--- OR ----
python3 -m nose -v test3.py:test_compute_Q
--- OR ----
python -m nose -v test3.py:test_compute_Q
---------------------------------------------------
'''
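# Illustrative sketch (not part of the assignment): a tiny sanity check for
# compute_Q with n=2 samples, p=3 state features and c=4 actions. The tensor
# values below are arbitrary and chosen only for this demo.
def _demo_compute_Q():
    S = th.tensor([[1., 0., 0.], [0., 1., 0.]])   # (n, p) batch of states
    W = th.zeros(3, 4)                            # (p, c) weights
    b = th.tensor([0., 1., 2., 3.])               # (c,) biases
    Q = compute_Q(S, W, b)                        # (n, c); here every row equals b
    assert Q.shape == (2, 4)
    return Q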
#----------------------------------------------------
'''
(Training: compute target Q values using the Bellman Optimality Equation) Suppose we have a mini-batch of training samples, including the new/next game states S_new and the immediate rewards R of the sampled game steps. Compute the target Q values (Qt) for the mini-batch using the Bellman Optimality Equation. Note that gradients must not flow through Qt, i.e., the Qt tensor should not be connected to the parameters W and b in the computation graph.
---- Inputs: --------
* S_new: the new/next game states for a mini-batch of sampled game steps after state transition, a torch tensor of shape (n,p). S_new[i] is the next/new game state in the i-th sample of the mini-batch.
* R: a mini-batch of the immediate rewards returned after the transition, a float vector of length (n). R[i] is the received immediate reward of the i-th sampled game step in the mini-batch.
* T: whether or not the new/next game state is a terminal state in a mini-batch of sampled game steps, a boolean torch tensor of length n. T[i] = True if S_new[i] is a terminal state in the game (where the game ends).
* W: the weights of fully connected layer of Q network, which is used to predict the Q values of each game state, a float torch matrix of shape (p,c).
* b: the biases of fully connected layer of Q network, a float torch vector of length c.
* gamma: the discount factor, a float scalar between 0 and 1.
---- Outputs: --------
* Qt: the target Q values (estimated by Bellman Optimality Equation with the target Q network) for a mini-batch of samples, a pytorch vector of length (n). Qt[i] represents the target Q value for the i-th sample in the mini-batch.
---- Hints: --------
* (Step 1) compute Q values on the new/next game states.
* (Step 2.1) If S_new[i] is a terminal state (i.e., T[i] = True), use the immediate reward R[i] as the target reward.
* (Step 2.2) Otherwise, use Bellman Optimality Equation to estimate the target Q value.
* You could re-use compute_Q() function.
* To detach the gradients of a torch tensor x, you could use x.detach(), so that gradient will not flow through x.
* To negate the boolean values in a tensor x, you could use ~x.
* To convert a boolean-valued tensor x into an integer tensor, you could use x.int().
* To compute the max value of a tensor, you could use th.max() function.
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def compute_Qt(S_new, R, T, W, b, gamma=0.95):
#########################################
## INSERT YOUR CODE HERE (5 points)
new_Q = compute_Q(S_new, W, b).detach()  # detach: no gradient flows back into W, b
Qt = R + gamma * (~T).int() * th.max(new_Q, dim=1)[0]
#########################################
return Qt
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_compute_Qt
--- OR ----
python3 -m nose -v test3.py:test_compute_Qt
--- OR ----
python -m nose -v test3.py:test_compute_Qt
---------------------------------------------------
'''
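# Illustrative sketch (not part of the assignment): target Q values for a batch
# in which the second sample reaches a terminal state, so its target is just
# the immediate reward. The numbers are arbitrary demo values.
def _demo_compute_Qt():
    S_new = th.zeros(2, 3)
    W = th.zeros(3, 4)
    b = th.tensor([0., 1., 2., 3.])
    R = th.tensor([1., 5.])
    T = th.tensor([False, True])
    Qt = compute_Qt(S_new, R, T, W, b, gamma=0.9)
    # non-terminal sample: 1 + 0.9 * max(b) = 3.7 ; terminal sample: 5
    assert th.allclose(Qt, th.tensor([3.7, 5.0]))
    return Qt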
#----------------------------------------------------
'''
(Training: Loss function) Given the Q values estimated by the Q network, the chosen actions, and the target Q values on a mini-batch of sampled game steps, compute the mean-squared-error loss on the mini-batch of samples.
---- Inputs: --------
* Q: the predicted Q values by the Q network on all actions for a mini-batch of game state samples, a pytorch matrix of shape (n, c). Q[i,j] represents the Q value on the j-th action for the i-th sample in the mini-batch.
* A: a mini-batch of the actions chosen by the player, an integer vector of length (n).
* Qt: the target Q values (estimated by Bellman Optimality Equation with the target Q network) for a mini-batch of samples, a pytorch vector of length (n). Qt[i] represents the target Q value for the i-th sample in the mini-batch.
---- Outputs: --------
* L: the average of the squared-error losses on a mini-batch of training samples, a torch float scalar.
---- Hints: --------
* You could use arange(n) function in Pytorch to create an index list of [0,1,2,...,n-1].
* You could use y = X[list1,list2] to select elements of matrix X into a vector. For example if list1=[1,3,5], list2=[2,4,6], then y will be a list of [ X[1,2], X[3,4], X[5,6] ].
* You could use MSELoss in Pytorch to compute the mean squared error.
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def compute_L(Q, A, Qt):
#########################################
## INSERT YOUR CODE HERE (5 points)
n = Q.shape[0]
L = th.nn.MSELoss()(Q[th.arange(n), A], Qt)
#########################################
return L
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_compute_L
--- OR ----
python3 -m nose -v test3.py:test_compute_L
--- OR ----
python -m nose -v test3.py:test_compute_L
---------------------------------------------------
'''
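# Illustrative sketch (not part of the assignment): the loss compares only the
# Q values of the chosen actions against their targets. Demo numbers only.
def _demo_compute_L():
    Q = th.tensor([[1., 2.], [3., 4.]])
    A = th.tensor([0, 1])            # chosen actions -> Q[0, 0] = 1, Q[1, 1] = 4
    Qt = th.tensor([2., 2.])
    L = compute_L(Q, A, Qt)
    assert th.isclose(L, th.tensor(2.5))   # ((1-2)^2 + (4-2)^2) / 2
    return L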
#----------------------------------------------------
'''
(Training: Gradient Descent) Suppose we are given a Q network with parameters (W, b) and a mini-batch of training samples (S, A, S_new, R), and that we have already computed the gradients of the loss L w.r.t. the weights W and biases b on that mini-batch. Assume an optimizer has already been created for the parameters W and b. Update the weights W and biases b using gradient descent; after the update, the gradients of W and b should be reset to all zeros.
---- Inputs: --------
* optimizer: a PyTorch optimizer (such as SGD, ADAM, RMSProp) to handle the gradient descent for all the parameters in the model (W and b).
---- Hints: --------
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def update_parameters(optimizer):
#########################################
## INSERT YOUR CODE HERE (2 points)
optimizer.step()
optimizer.zero_grad()
#########################################
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test3.py:test_update_parameters
--- OR ----
python3 -m nose -v test3.py:test_update_parameters
--- OR ----
python -m nose -v test3.py:test_update_parameters
---------------------------------------------------
'''
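# Illustrative sketch (not part of the assignment): one full training step that
# wires the four functions above together. The optimizer choice (SGD), learning
# rate and tensor sizes are assumptions made only for this demo.
def _demo_training_step(n=4, p=3, c=2, gamma=0.95, lr=0.1):
    W = th.zeros(p, c, requires_grad=True)
    b = th.zeros(c, requires_grad=True)
    optimizer = th.optim.SGD([W, b], lr=lr)
    S, S_new = th.rand(n, p), th.rand(n, p)          # sampled current / next states
    A = th.randint(0, c, (n,))                       # sampled actions
    R = th.rand(n)                                   # sampled rewards
    T = th.zeros(n, dtype=th.bool)                   # no terminal states in this demo
    Q = compute_Q(S, W, b)
    Qt = compute_Qt(S_new, R, T, W, b, gamma)
    L = compute_L(Q, A, Qt)
    L.backward()
    update_parameters(optimizer)
    return float(L)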
# Repository: uhlerlab/graphical_models
# Author: <NAME>
"""Base class for causal DAGs
"""
from collections import defaultdict
import numpy as np
import itertools as itr
from graphical_models.utils import core_utils
import operator as op
from graphical_models.custom_types import Node, DirectedEdge, NodeSet, warn_untested
from typing import Set, Union, Tuple, Any, Iterable, Dict, FrozenSet, List
import networkx as nx
from networkx.utils import UnionFind
import random
import csv
import ipdb
from scipy.special import comb
class CycleError(Exception):
def __init__(self, cycle):
self.cycle = cycle
message = 'Adding arc(s) causes the cycle ' + path2str(cycle)
super().__init__(message)
def path2str(path):
return '->'.join(map(str, path))
class DAG:
"""
Base class for causal DAGs.
"""
def __init__(self, nodes: Set = frozenset(), arcs: Set = frozenset(), dag=None):
if dag is not None:
self._nodes = set(dag._nodes)
self._arcs = set(dag._arcs)
self._neighbors = defaultdict(set)
for node, nbrs in dag._neighbors.items():
self._neighbors[node] = set(nbrs)
self._parents = defaultdict(set)
for node, par in dag._parents.items():
self._parents[node] = set(par)
self._children = defaultdict(set)
for node, ch in dag._children.items():
self._children[node] = set(ch)
else:
self._nodes = set(nodes)
self._arcs = set()
self._neighbors = defaultdict(set)
self._parents = defaultdict(set)
self._children = defaultdict(set)
# print('before call to add arcs from')
self.add_arcs_from(arcs, check_acyclic=True)
def __eq__(self, other):
if not isinstance(other, DAG):
return False
return self._nodes == other._nodes and self._arcs == other._arcs
def __str__(self):
t = self.topological_sort()
substrings = []
for node in t:
if self._parents[node]:
parents_str = ','.join(map(str, self._parents[node]))
substrings.append('[%s|%s]' % (node, parents_str))
else:
substrings.append('[%s]' % node)
return ''.join(substrings)
def __repr__(self):
return str(self)
def copy(self):
"""
Return a copy of the current DAG.
"""
# return DAG(nodes=self._nodes, arcs=self._arcs)
return DAG(dag=self)
def rename_nodes(self, name_map: Dict):
"""
Rename the nodes in this graph according to ``name_map``.
Parameters
----------
name_map:
A dictionary from the current name of each node to the desired name of each node.
Examples
--------
>>> from graphical_models import DAG
>>> g = DAG(arcs={('a', 'b'), ('b', 'c')})
>>> g2 = g.rename_nodes({'a': 1, 'b': 2, 'c': 3})
>>> g2.arcs
{(1, 2), (2, 3)}
"""
return DAG(
nodes={name_map[n] for n in self._nodes},
arcs={(name_map[i], name_map[j]) for i, j in self._arcs}
)
def induced_subgraph(self, nodes: Set[Node]):
"""
Return the induced subgraph over only ``nodes``
Parameters
----------
nodes:
Set of nodes for the induced subgraph.
Returns
-------
DAG:
Induced subgraph over ``nodes``.
Examples
--------
>>> from graphical_models import DAG
>>> d = DAG(arcs={(1, 2), (2, 3), (1, 4)})
>>> d_induced = d.induced_subgraph({1, 2, 3})
>>> d_induced.arcs
{(1, 2), (2, 3)}
"""
return DAG(nodes, {(i, j) for i, j in self._arcs if i in nodes and j in nodes})
# === PROPERTIES
@property
def nodes(self) -> Set[Node]:
return set(self._nodes)
@property
def nnodes(self) -> int:
return len(self._nodes)
@property
def arcs(self) -> Set[DirectedEdge]:
return set(self._arcs)
@property
def num_arcs(self) -> int:
return len(self._arcs)
@property
def neighbors(self) -> Dict[Node, Set[Node]]:
return core_utils.defdict2dict(self._neighbors, self._nodes)
@property
def parents(self) -> Dict[Node, Set[Node]]:
return core_utils.defdict2dict(self._parents, self._nodes)
@property
def children(self) -> Dict[Node, Set[Node]]:
return core_utils.defdict2dict(self._children, self._nodes)
@property
def skeleton(self) -> Set[FrozenSet]:
return {frozenset({i, j}) for i, j in self._arcs}
@property
def in_degrees(self) -> Dict[Node, int]:
return {node: len(self._parents[node]) for node in self._nodes}
@property
def out_degrees(self) -> Dict[Node, int]:
return {node: len(self._children[node]) for node in self._nodes}
@property
def max_in_degree(self) -> int:
return max(len(self._parents[node]) for node in self._nodes)
@property
def max_out_degree(self) -> int:
return max(len(self._children[node]) for node in self._nodes)
@property
def sparsity(self) -> float:
p = len(self._nodes)
return len(self._arcs) / p / (p - 1) * 2
# === NODE PROPERTIES
def parents_of(self, nodes: NodeSet) -> Set[Node]:
"""
Return all nodes that are parents of the node or set of nodes ``nodes``.
Parameters
----------
nodes
A node or set of nodes.
See Also
--------
children_of, neighbors_of, markov_blanket_of
Examples
--------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (2, 3)})
>>> g.parents_of(2)
{1}
>>> g.parents_of({2, 3})
{1, 2}
"""
if isinstance(nodes, set):
return set.union(*(self._parents[n] for n in nodes))
else:
return self._parents[nodes].copy()
def children_of(self, nodes: NodeSet) -> Set[Node]:
"""
Return all nodes that are children of the node or set of nodes ``nodes``.
Parameters
----------
nodes
A node or set of nodes.
See Also
--------
parents_of, neighbors_of, markov_blanket_of
Examples
--------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (2, 3)})
>>> g.children_of(1)
{2}
>>> g.children_of({1, 2})
{2, 3}
"""
if isinstance(nodes, set):
return set.union(*(self._children[n] for n in nodes))
else:
return self._children[nodes].copy()
def neighbors_of(self, nodes: NodeSet) -> Set[Node]:
"""
Return all nodes that are adjacent to the node or set of nodes ``node``.
Parameters
----------
nodes
A node or set of nodes.
See Also
--------
parents_of, children_of, markov_blanket_of
Examples
--------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(0,1), (0,2)})
>>> g.neighbors_of(0)
{1, 2}
>>> g.neighbors_of(2)
{0}
"""
if isinstance(nodes, set):
return set.union(*(self._neighbors[n] for n in nodes))
else:
return self._neighbors[nodes].copy()
def markov_blanket_of(self, node: Node) -> set:
"""
Return the Markov blanket of ``node``, i.e., the parents of the node, its children, and the parents of its children.
Parameters
----------
node:
Node whose Markov blanket to return.
See Also
--------
parents_of, children_of, neighbors_of
Returns
-------
set:
the Markov blanket of ``node``.
Example
-------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(0, 1), (1, 3), (2, 3), (3, 4)})
>>> g.markov_blanket_of(1)
{0, 2, 3}
"""
parents_of_children = set.union(*(self._parents[c] for c in self._children[node])) if self._children[
node] else set()
return self._parents[node] | self._children[node] | parents_of_children - {node}
def is_ancestor_of(self, anc: Node, desc: Node) -> bool:
"""
Check if ``anc`` is an ancestor of ``desc``
Return
------
bool
True if ``anc`` is an ancestor of ``desc``
Example
-------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (1, 3), (2, 3)})
>>> g.is_ancestor_of(1, 3)
True
>>> g.is_ancestor_of(3, 1)
False
"""
return desc in self._children[anc] or desc in self.descendants_of(anc)
def _add_descendants(self, descendants, node):
for child in self._children[node]:
if child not in descendants:
descendants.add(child)
self._add_descendants(descendants, child)
def descendants_of(self, nodes: NodeSet) -> Set[Node]:
"""
Return the descendants of ``node``.
Parameters
----------
nodes:
The node.
See Also
--------
ancestors_of
Return
------
Set[node]
Return all nodes j such that there is a directed path from ``node`` to j.
Example
-------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (2, 3)})
>>> g.descendants_of(1)
{2, 3}
"""
descendants = set()
if not isinstance(nodes, set):
self._add_descendants(descendants, nodes)
else:
return set.union(*(self.descendants_of(node) for node in nodes))
return descendants
def _add_ancestors(self, ancestors, node):
for parent in self._parents[node]:
if parent not in ancestors:
ancestors.add(parent)
self._add_ancestors(ancestors, parent)
def ancestors_of(self, nodes: Node) -> Set[Node]:
"""
Return the ancestors of ``nodes``.
Parameters
----------
nodes:
The node.
See Also
--------
descendants_of
Return
------
Set[node]
Return all nodes j such that there is a directed path from j to ``node``.
Example
-------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (2, 3)})
>>> g.ancestors_of(3)
{1, 2, 3}
"""
ancestors = set()
if not isinstance(nodes, set):
self._add_ancestors(ancestors, nodes)
else:
return set.union(*(self.ancestors_of(node) for node in nodes))
return ancestors
def ancestor_dict(self) -> dict:
"""
Return a dictionary from each node to its ancestors.
See Also
--------
ancestors_of
Return
------
Dict[node,Set]
Mapping node to ancestors
Example
-------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (2, 3)})
>>> g.ancestor_dict()[3]
{1, 2}
"""
top_sort = self.topological_sort()
node2ancestors_plus_self = defaultdict(set)
for node in top_sort:
node2ancestors_plus_self[node].add(node)
for child in self._children[node]:
node2ancestors_plus_self[child].update(node2ancestors_plus_self[node])
for node in self._nodes:
node2ancestors_plus_self[node] -= {node}
return core_utils.defdict2dict(node2ancestors_plus_self, self._nodes)
def descendant_dict(self) -> dict:
"""
Return a dictionary from each node to its descendants.
See Also
--------
descendants_of
Return
------
Dict[node,Set]
Mapping node to descendants
Example
-------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (2, 3)})
>>> g.descendant_dict()[1]
{2, 3}
"""
top_sort = self.topological_sort()
node2descendants_plus_self = defaultdict(set)
for node in reversed(top_sort):
node2descendants_plus_self[node].add(node)
for parent in self._parents[node]:
node2descendants_plus_self[parent].update(node2descendants_plus_self[node])
for node in self._nodes:
node2descendants_plus_self[node] -= {node}
return core_utils.defdict2dict(node2descendants_plus_self, self._nodes)
def nodes_between(self, source: Node, target: Node):
between = self.ancestors_of(target) & self.descendants_of(source)
return between | {source, target} if len(between) != 0 else between
def edges_between(self, source: Node, target: Node):
nodes_between = self.nodes_between(source, target)
return {(i, j) for i, j in itr.combinations(nodes_between, 2) if (i, j) in self._arcs}
def incident_arcs(self, node: Node) -> Set[DirectedEdge]:
"""
Return all arcs with ``node`` as either source or target.
Parameters
----------
node:
The node.
See Also
--------
incoming_arcs, outgoing_arcs
Return
------
Set[arc]
Return all arcs i->j such that either i=``node`` of j=``node``.
Example
-------
>>> from graphical_models import DAG
>>> g = DAG(arcs={(1, 2), (1, 3), | |
# GitHub stars: 1-10
from __future__ import absolute_import
import bisect
import functools
import itertools
import logging
import math
import operator
import zlib
from calendar import Calendar
from collections import OrderedDict, namedtuple
from datetime import datetime, timedelta
import pytz
from django.utils import dateformat, timezone
from sentry.app import tsdb
from sentry.models import (
Activity, GroupStatus, Organization, OrganizationStatus, Project, Team,
User, UserOption
)
from sentry.tasks.base import instrumented_task
from sentry.utils import json, redis
from sentry.utils.dates import floor_to_utc_day, to_datetime, to_timestamp
from sentry.utils.email import MessageBuilder
from sentry.utils.math import mean
from six.moves import reduce
date_format = functools.partial(
dateformat.format,
format_string="F jS, Y",
)
logger = logging.getLogger(__name__)
def _get_organization_queryset():
return Organization.objects.filter(
status=OrganizationStatus.VISIBLE,
)
def _fill_default_parameters(timestamp=None, rollup=None):
if timestamp is None:
timestamp = to_timestamp(floor_to_utc_day(timezone.now()))
if rollup is None:
rollup = 60 * 60 * 24 * 7
return (timestamp, rollup)
def _to_interval(timestamp, duration):
return (
to_datetime(timestamp - duration),
to_datetime(timestamp),
)
def change(value, reference):
"""
Calculate the relative change between a value and a reference point.
"""
if not reference: # handle both None and divide by zero case
return None
return ((value or 0) - reference) / float(reference)
def safe_add(x, y):
"""
Adds two values which are either numeric types or None.
- If both values are numeric, the result is the sum of those values.
- If only one numeric value is provided, that value is returned.
- If both values are None, then None is returned.
"""
if x is not None and y is not None:
return x + y
elif x is not None:
return x
elif y is not None:
return y
else:
return None
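# Illustrative sketch (not part of the original module): the edge cases described
# in the docstrings of change() and safe_add() above.
def _demo_change_and_safe_add():
    assert change(15, 10) == 0.5     # +50% relative to the reference point
    assert change(5, 0) is None      # falsy reference (None or 0) -> None
    assert safe_add(1, 2) == 3
    assert safe_add(None, 2) == 2
    assert safe_add(None, None) is None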
def month_to_index(year, month):
"""
Convert a year and month to a single value: the number of months between
this month and 1 AD.
This mainly exists to simplify doing month-based arithmetic (e.g. "three
months ago") without having to manually handle wrapping around years, since
timedelta doesn't accept a "months" parameter.
"""
assert 12 >= month >= 1
return (year - 1) * 12 + month - 1
def index_to_month(index):
"""
The opposite companion to ``month_to_index``. Returns a (year, month)
tuple.
"""
return (index // 12) + 1, index % 12 + 1
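# Illustrative sketch (not part of the original module): month arithmetic with the
# two helpers above, e.g. stepping back three months across a year boundary.
def _demo_month_index():
    index = month_to_index(2016, 2)                   # February 2016
    assert index_to_month(index) == (2016, 2)         # round trip
    assert index_to_month(index - 3) == (2015, 11)    # three months earlier
    return index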
def clean_series(start, stop, rollup, series):
"""
Validate a series, ensuring that it follows the specified rollup and
boundaries. The start bound is inclusive, while the stop bound is
exclusive (similar to the slice operation.)
"""
start_timestamp = to_timestamp(start)
stop_timestamp = to_timestamp(stop)
result = []
for i, (timestamp, value) in enumerate(series):
assert timestamp == start_timestamp + rollup * i
if timestamp >= stop_timestamp:
break
result.append((timestamp, value))
return result
def merge_sequences(target, other, function=operator.add):
"""
Merge two sequences into a single sequence. The length of the two
sequences must be equal.
"""
assert len(target) == len(other), 'sequence lengths must match'
return type(target)([function(x, y) for x, y in zip(target, other)])
def merge_mappings(target, other, function=lambda x, y: x + y):
"""
Merge two mappings into a single mapping. The set of keys in both
mappings must be equal.
"""
assert set(target) == set(other), 'keys must match'
return {k: function(v, other[k]) for k, v in target.items()}
def merge_series(target, other, function=operator.add):
"""
Merge two series into a single series. Both series must have the same
start and end points as well as the same resolution.
"""
missing = object()
results = []
for x, y in itertools.izip_longest(target, other, fillvalue=missing):
assert x is not missing and y is not missing, 'series must be same length'
assert x[0] == y[0], 'series timestamps must match'
results.append((x[0], function(x[1], y[1])))
return results
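# Illustrative sketch (not part of the original module): merging two aligned time
# series point-wise; merge_sequences works the same way on plain tuples/lists.
def _demo_merge():
    a = [(0, 1), (60, 2)]
    b = [(0, 10), (60, 20)]
    assert merge_series(a, b) == [(0, 11), (60, 22)]
    assert merge_sequences((1, 2), (3, 4)) == (4, 6)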
def prepare_project_series((start, stop), project, rollup=60 * 60 * 24):
resolution, series = tsdb.get_optimal_rollup_series(start, stop, rollup)
assert resolution == rollup, 'resolution does not match requested value'
clean = functools.partial(clean_series, start, stop, rollup)
return merge_series(
reduce(
merge_series,
map(
clean,
tsdb.get_range(
tsdb.models.group,
project.group_set.filter(
status=GroupStatus.RESOLVED,
resolved_at__gte=start,
resolved_at__lt=stop,
).values_list('id', flat=True),
start,
stop,
rollup=rollup,
).values(),
),
clean([(timestamp, 0) for timestamp in series]),
),
clean(
tsdb.get_range(
tsdb.models.project,
[project.id],
start,
stop,
rollup=rollup,
)[project.id],
),
lambda resolved, total: (
resolved,
total - resolved, # unresolved
),
)
def prepare_project_aggregates((_, stop), project):
# TODO: This needs to return ``None`` for periods that don't have any data
# (because the project is not old enough) and possibly extrapolate for
# periods that only have partial periods.
segments = 4
period = timedelta(days=7)
start = stop - (period * segments)
def get_aggregate_value(start, stop):
return tsdb.get_sums(
tsdb.models.project,
(project.id,),
start,
stop,
rollup=60 * 60 * 24,
)[project.id]
return [
get_aggregate_value(
start + (period * i),
start + (period * (i + 1) - timedelta(seconds=1)),
) for i in range(segments)
]
def prepare_project_issue_summaries(interval, project):
start, stop = interval
queryset = project.group_set.exclude(status=GroupStatus.IGNORED)
# Fetch all new issues.
new_issue_ids = set(
queryset.filter(
first_seen__gte=start,
first_seen__lt=stop,
).values_list('id', flat=True)
)
# Fetch all regressions. This is a little weird, since there's no way to
# tell *when* a group regressed using the Group model. Instead, we query
# all groups that have been seen in the last week and have ever regressed
# and query the Activity model to find out if they regressed within the
# past week. (In theory, the activity table *could* be used to answer this
# query without the subselect, but there are no suitable indexes to make its
# performance predictable.)
reopened_issue_ids = set(
Activity.objects.filter(
group__in=queryset.filter(
last_seen__gte=start,
last_seen__lt=stop,
resolved_at__isnull=False, # signals this has *ever* been resolved
),
type__in=(
Activity.SET_REGRESSION,
Activity.SET_UNRESOLVED,
),
datetime__gte=start,
datetime__lt=stop,
).distinct().values_list('group_id', flat=True)
)
rollup = 60 * 60 * 24
event_counts = tsdb.get_sums(
tsdb.models.group,
new_issue_ids | reopened_issue_ids,
start,
stop,
rollup=rollup,
)
new_issue_count = sum(event_counts[id] for id in new_issue_ids)
reopened_issue_count = sum(event_counts[id] for id in reopened_issue_ids)
existing_issue_count = max(
tsdb.get_sums(
tsdb.models.project,
[project.id],
start,
stop,
rollup=rollup,
)[project.id] - new_issue_count - reopened_issue_count,
0,
)
return [
new_issue_count,
reopened_issue_count,
existing_issue_count,
]
def prepare_project_usage_summary((start, stop), project):
return (
tsdb.get_sums(
tsdb.models.project_total_blacklisted,
[project.id],
start,
stop,
rollup=60 * 60 * 24,
)[project.id],
tsdb.get_sums(
tsdb.models.project_total_rejected,
[project.id],
start,
stop,
rollup=60 * 60 * 24,
)[project.id],
)
def get_calendar_range((_, stop_time), months):
assert (
stop_time.hour,
stop_time.minute,
stop_time.second,
stop_time.microsecond,
stop_time.tzinfo,
) == (0, 0, 0, 0, pytz.utc)
last_day = stop_time - timedelta(days=1)
stop_month_index = month_to_index(
last_day.year,
last_day.month,
)
start_month_index = stop_month_index - months + 1
return start_month_index, stop_month_index
def get_calendar_query_range(interval, months):
start_month_index, _ = get_calendar_range(interval, months)
start_time = datetime(
day=1,
tzinfo=pytz.utc,
*index_to_month(start_month_index)
)
return start_time, interval[1]
def clean_calendar_data(project, series, start, stop, rollup, timestamp=None):
earliest = tsdb.get_earliest_timestamp(rollup, timestamp=timestamp)
def remove_invalid_values(item):
timestamp, value = item
if timestamp < earliest:
value = None
elif to_datetime(timestamp) < project.date_added:
value = None
return (timestamp, value)
return map(
remove_invalid_values,
clean_series(
start,
stop,
rollup,
series,
),
)
def prepare_project_calendar_series(interval, project):
start, stop = get_calendar_query_range(interval, 3)
rollup = 60 * 60 * 24
series = tsdb.get_range(
tsdb.models.project,
[project.id],
start,
stop,
rollup=rollup,
)[project.id]
return clean_calendar_data(
project,
series,
start,
stop,
rollup,
)
def build(name, fields):
names, prepare_fields, merge_fields = zip(*fields)
cls = namedtuple(name, names)
def prepare(*args):
return cls(*[f(*args) for f in prepare_fields])
def merge(target, other):
return cls(*[f(target[i], other[i]) for i, f in enumerate(merge_fields)])
return cls, prepare, merge
Report, prepare_project_report, merge_reports = build(
'Report',
[
(
'series',
prepare_project_series,
functools.partial(
merge_series,
function=merge_sequences,
),
),
(
'aggregates',
prepare_project_aggregates,
functools.partial(
merge_sequences,
function=safe_add,
),
),
(
'issue_summaries',
prepare_project_issue_summaries,
merge_sequences,
),
(
'usage_summary',
prepare_project_usage_summary,
merge_sequences,
),
(
'calendar_series',
prepare_project_calendar_series,
functools.partial(
merge_series,
function=safe_add,
),
),
],
)
class ReportBackend(object):
def build(self, timestamp, duration, project):
return prepare_project_report(
_to_interval(timestamp, duration),
project,
)
def prepare(self, timestamp, duration, organization):
"""
Build and store reports for all projects in the organization.
"""
raise NotImplementedError
def fetch(self, timestamp, duration, organization, projects):
"""
Fetch reports for a set of projects in the organization, returning
reports in the order that they were requested.
"""
raise NotImplementedError
class DummyReportBackend(ReportBackend):
def prepare(self, timestamp, duration, organization):
pass
def fetch(self, timestamp, duration, organization, projects):
assert all(project.organization_id == organization.id for project in projects)
return map(
functools.partial(
self.build,
timestamp,
duration,
),
projects,
)
class RedisReportBackend(ReportBackend):
version = 1
def __init__(self, cluster, ttl, namespace='r'):
self.cluster = cluster
self.ttl = ttl
self.namespace = namespace
def __make_key(self, timestamp, duration, organization):
return '{}:{}:{}:{}:{}'.format(
self.namespace,
self.version,
organization.id,
int(timestamp),
int(duration),
)
def __encode(self, report):
return zlib.compress(json.dumps(list(report)))
def __decode(self, value):
if value is None:
return None
return Report(*json.loads(zlib.decompress(value)))
def prepare(self, timestamp, duration, organization):
reports = {}
for project in organization.project_set.all():
reports[project.id] = self.__encode(
self.build(timestamp, duration, project),
)
if not reports:
# XXX: HMSET requires | |
!= '' and economy.is_living(x)
and x != spouse
and (economy.people[x].supported is None or
economy.people[x].supported == p.id)
and economy.people[x].age >= 18]
if l1:
u = max(l1, key=lambda x: x[1])[1]
l2 = [x for x, y in l1 if y == u]
fst_heir = max(l2, key=lambda x:
economy.people[x].asset_value())
if (fst_heir is None
or fst_heir not in [ch.id for ch in p.children]) \
and spouse is not None and spouse in p.supporting:
if spouse == '':
fst_heir = ''
p.remove_supporting_nil()
else:
s = economy.people[spouse]
if s.age >= 18 and s.age < 70:
fst_heir = spouse
s.remove_supported()
if fst_heir is not None and fst_heir != '' \
and fst_heir in p.supporting:
fh = economy.people[fst_heir]
fh.remove_supported()
if p.supporting:
if p.supported is not None \
and economy.is_living(p.supported):
p.die_supporting(p.supported)
elif fst_heir is None or p.death.inheritance_share is None:
p.die_supporting(None)
else:
p.die_supporting(fst_heir)
if p.supported is not None:
p.remove_supported()
if fst_heir is not None and fst_heir != '':
fh = economy.people[fst_heir]
fh.add_supporting(p)
for p in persons:
p.do_inheritance()
class EconomyMA (Economy0):
def marry (self, male, female):
economy = self
m = male
f = female
assert m.marriage is None and f.marriage is None
assert m.sex == 'M' and f.sex == 'F'
f.married = True
m.married = True
fm = Marriage()
mm = Marriage()
f.marriage = fm
m.marriage = mm
fm.spouse = m.id
mm.spouse = f.id
fm.begin = economy.term
mm.begin = economy.term
fm.init_favor = f.marriage_favor(m)
mm.init_favor = m.marriage_favor(f)
for p in [m, f]:
pf = None
pm = None
sup = False
if p.father != '' and economy.is_living(p.father):
pf = economy.people[p.father]
if p.supported is not None and p.age < 18 \
and p.supported == pf.id:
sup = True
if p.mother != '' and economy.is_living(p.mother):
pm = economy.people[p.mother]
if p.supported is not None and p.age < 18 \
and p.supported == pm.id:
sup = True
mag = 0
if not p.married:
if sup:
mag = 2
else:
mag = 1
ex = False
if pf is not None:
for c in pf.children:
if c.id == p.id:
ex = True
break
if ex:
ch = [c.id for c in pf.children
if c.id in economy.people
and not economy.people[c.id].married]
r = mag * 0.5 / (len(ch) + 1 +
(0 if pm is None else 1))
a1 = pf.asset_value() * r
al = math.floor(pf.land * r)
ap = a1 - al * ARGS.prop_value_of_land
pf.land -= al
pf.prop -= ap
p.land += al
p.prop += ap
ex = False
if pm is not None:
for c in pm.children:
if c.id == p.id:
ex = True
break
if ex:
ch = [c.id for c in pm.children
if c.id in economy.people
and not economy.people[c.id].married]
r = mag * 0.5 / (len(ch) + 1 +
(0 if pf is None else 1))
a1 = pm.asset_value() * r
al = math.floor(pm.land * r)
ap = a1 - al * ARGS.prop_value_of_land
pm.land -= al
pm.prop -= ap
p.land += al
p.prop += ap
for p in [m, f]:
l = []
for a in p.adulteries:
if a.spouse == m.id or a.spouse == f.id:
l.append(a)
elif random.random() < 0.7:
l.append(a)
for a in l:
a.end = economy.term
p.adulteries.remove(a)
p.trash.append(a)
if a.spouse != m.id and a.spouse != f.id:
update_adultery_hating(economy, p, a)
if a.spouse != '' and economy.is_living(a.spouse):
s = economy.people[a.spouse]
sa = [a for a in s.adulteries if a.spouse == p.id][0]
sa.end = economy.term
s.adulteries.remove(sa)
s.trash.append(sa)
if sa.spouse != m.id and sa.spouse != f.id:
update_adultery_hating(economy, s, sa)
if m.supported is not None and m.age < 70:
m.remove_supported()
if m.supported is not None:
if m.supported == f.id:
m.remove_supported()
if f.supported is not None:
f.remove_supported()
m.add_supporting(f)
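# Illustrative sketch (not part of the simulation): the asset fraction a parent
# hands over in EconomyMA.marry above, re-stated as a standalone, hypothetical
# helper. "supported" means the marrying child was still supported by that
# parent, which doubles the share taken from the parent's assets.
def _dowry_fraction(n_unmarried_children, other_parent_alive, supported):
    mag = 2 if supported else 1
    return mag * 0.5 / (n_unmarried_children + 1 + (1 if other_parent_alive else 0))
# Example: two unmarried children, both parents alive, child not supported ->
# 1/8 of that parent's assets goes to the marrying child.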
class Economy (EconomyDT, EconomyMA):
pass
class EconomyPlot0 (Frozen):
def __init__ (self):
#plt.style.use('bmh')
fig = plt.figure(figsize=(6, 4))
#plt.tight_layout()
self.ax1 = fig.add_subplot(2, 2, 1)
self.ax2 = fig.add_subplot(2, 2, 2)
self.ax3 = fig.add_subplot(2, 2, 3)
self.ax4 = fig.add_subplot(2, 2, 4)
self.options = {}
def plot (self, economy):
ax = self.ax1
ax.clear()
view = ARGS.view_1
if view is not None and view != 'none':
t, f = self.options[view]
ax.set_title('%s: %s' % (term_to_year_month(economy.term), t))
f(ax, economy)
ax = self.ax2
ax.clear()
view = ARGS.view_2
if view is not None and view != 'none':
t, f = self.options[view]
ax.set_title(t)
f(ax, economy)
ax = self.ax3
ax.clear()
view = ARGS.view_3
if view is not None and view != 'none':
t, f = self.options[view]
ax.set_xlabel(t)
f(ax, economy)
ax = self.ax4
ax.clear()
view = ARGS.view_4
if view is not None and view != 'none':
t, f = self.options[view]
ax.set_xlabel(t)
f(ax, economy)
class EconomyPlotEC (EconomyPlot0):
def __init__ (self):
super().__init__()
self.options.update({
'asset': ('Asset', self.view_asset),
'prop': ('Prop', self.view_prop),
'land': ('Land', self.view_land),
'land-vs-prop': ('Land vs Prop', self.view_land_vs_prop),
})
def view_asset (self, ax, economy):
ax.hist(list(map(lambda x: x.asset_value(),
economy.people.values())), bins=ARGS.bins)
def view_prop (self, ax, economy):
ax.hist(list(map(lambda x: x.prop,
economy.people.values())), bins=ARGS.bins)
def view_land (self, ax, economy):
ax.hist(list(map(lambda x: x.land,
economy.people.values())), bins=ARGS.bins)
def view_land_vs_prop (self, ax, economy):
ax.scatter(list(map(lambda x: x.land, economy.people.values())),
list(map(lambda x: x.prop, economy.people.values())),
c="pink", alpha=0.5)
class EconomyPlotBT (EconomyPlot0):
def __init__ (self):
super().__init__()
self.options.update({
'population': ('Population', self.view_population),
'children': ('Children', self.view_children),
'children_wanting': ('Ch Want', self.view_children_wanting),
'male-fertility': ('M Fertility', self.view_male_fertility),
'female-fertility': ('F Fertility', self.view_female_fertility)
})
def view_population (self, ax, economy):
ax.hist([x.age for x in economy.people.values() if x.death is None],
bins=ARGS.bins)
def view_children (self, ax, economy):
x = []
y = []
for p in economy.people.values():
if p.age < 12 or p.death is not None:
continue
x.append(p.age)
y.append(len(p.children))
ax.scatter(x, y, c="pink", alpha=0.5)
def view_children_wanting (self, ax, economy):
x = []
y = []
for p in economy.people.values():
if p.age < 12 or p.death is not None:
continue
x.append(p.age)
y.append(p.children_wanting())
ax.hist(y, bins=ARGS.bins)
#ax.scatter(x, y, c="pink", alpha=0.5)
def view_male_fertility (self, ax, economy):
l = [x.fertility for x in economy.people.values()
if x.sex == 'M' and x.death is None]
n0 = len([True for x in l if x == 0])
l2 = [x for x in l if x != 0]
ax.hist(l2, bins=ARGS.bins)
print("Fertility 0:", n0, "/", len(l), "Other Mean:", np.mean(l2))
def view_female_fertility (self, ax, economy):
l = [x.fertility for x in economy.people.values()
if x.sex == 'F' and x.death is None]
n0 = len([True for x in l if x == 0])
l2 = [x for x in l if x != 0]
ax.hist(l2, bins=ARGS.bins)
print("Fertility 0:", n0, "/", len(l), "Other Mean:", np.mean(l2))
class EconomyPlotAD (EconomyPlot0):
def __init__ (self):
super().__init__()
self.options.update({
'adulteries': ('Adulteries', self.view_adulteries),
'adultery-separability':
('Ad Separability', self.view_adultery_separability),
'adultery-age-vs-years':
('Adultery age vs years', self.view_adultery_age_vs_years)
})
def view_adultery_age_vs_years (self, ax, economy):
m1 = []
m2 = []
for p in economy.people.values():
for a in p.adulteries:
m1.append(p.age - ((economy.term
- (a.true_begin or a.begin)) / 12))
m2.append((economy.term - (a.true_begin or a.begin)) / 12)
ax.scatter(m1, m2, c="pink", alpha=0.5)
def view_adulteries (self, ax, economy):
m = []
f = []
for p in economy.people.values():
if p.adulteries:
m.append(len(p.adulteries))
if p.sex == 'F':
f.append(len(p.adulteries))
ax.hist(m, bins=ARGS.bins)
print("Adulteries: %d %d" % (len(m), sum(m)))
#print("Female Adulteries: %d %d" % (len(f), sum(f)))
def view_adultery_separability (self, ax, economy):
x = []
l = []
for p in economy.people.values():
for a in p.adulteries:
x.append((economy.term - (a.true_begin or a.begin)) / 12)
l.append(p.adultery_separability(a))
ax.scatter(x, l, c="pink", alpha=0.5)
class EconomyPlotMA (EconomyPlot0):
def __init__ (self):
super().__init__()
self.options.update({
'pregnancy': ('Pregnancy', self.view_pregnancy),
'married': ('Married', self.view_married),
'marriage-separability':
('Ma Separability', self.view_marriage_separability),
'marriage-age-vs-years':
('Marriage age vs years', self.view_marriage_age_vs_years)
})
def view_pregnancy (self, ax, economy):
m = []
mm = 0
ma = 0
m0 = 0
m0a = 0
m0m = 0
m10 = 0
for p in economy.people.values():
if p.pregnancy is not None \
and economy.term - p.pregnancy.begin <= 10:
terms = economy.term - p.pregnancy.begin
m.append(terms)
if isinstance(p.pregnancy.relation, Marriage):
mm += 1
if terms == 0:
m0m += 1
elif isinstance(p.pregnancy.relation, Adultery):
ma += 1
if terms == 0:
m0a += 1
if terms == 0:
m0 += 1
elif terms == 10:
m10 += 1
ax.hist(m, bins=ARGS.bins)
print("Pregnancy:", len(m), "0mon:", m0, "10mon:", m10)
print("Pregnancy Marriage:", mm, "0mon:", m0m)
print("Pregnancy Adultery:", ma, "0mon:", m0a)
def view_married (self, ax, economy):
m = []
m2 = []
for p in economy.people.values():
if p.death is None and p.marriage is not None:
# Assumed completion (the original line is cut off here): record and plot marriage length in months.
x = economy.term - p.marriage.begin
m.append(x)
ax.hist(m, bins=ARGS.bins)
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import scipy.linalg
import scipy.special
from . import thops
def nan_throw(tensor, name="tensor"):
stop = False
if ((tensor!=tensor).any()):
print(name + " has nans")
stop = True
if (torch.isinf(tensor).any()):
print(name + " has infs")
stop = True
if stop:
print(name + ": " + str(tensor))
#raise ValueError(name + ' contains nans or infs')
class _ActNorm(nn.Module):
"""
Activation Normalization
Initialize the bias and scale with a given minibatch,
so that the per-channel output has zero mean and unit variance for that minibatch.
After initialization, `bias` and `logs` will be trained as parameters.
"""
def __init__(self, num_features, scale=1.):
super().__init__()
# register mean and scale
size = [1, num_features, 1]
self.register_parameter("bias", nn.Parameter(torch.zeros(*size)))
self.register_parameter("logs", nn.Parameter(torch.zeros(*size)))
self.num_features = num_features
self.scale = float(scale)
# self.inited = False
self.register_buffer('is_initialized', torch.zeros(1))
def _check_input_dim(self, input):
return NotImplemented
def initialize_parameters(self, input):
self._check_input_dim(input)
if not self.training:
return
assert input.device == self.bias.device
with torch.no_grad():
bias = thops.mean(input.clone(), dim=[0, 2], keepdim=True) * -1.0
vars = thops.mean((input.clone() + bias) ** 2, dim=[0, 2], keepdim=True)
logs = torch.log(self.scale/(torch.sqrt(vars)+1e-6))
self.bias.data.copy_(bias.data)
self.logs.data.copy_(logs.data)
# self.inited = True
self.is_initialized += 1.
def _center(self, input, reverse=False):
if not reverse:
return input + self.bias
else:
return input - self.bias
def _scale(self, input, logdet=None, reverse=False):
logs = self.logs
if not reverse:
input = input * torch.exp(logs)
else:
input = input * torch.exp(-logs)
if logdet is not None:
"""
logs is log_std of `mean of channels`
so we need to multiply timesteps
"""
dlogdet = thops.sum(logs) * thops.timesteps(input)
if reverse:
dlogdet *= -1
logdet = logdet + dlogdet
return input, logdet
def forward(self, input, logdet=None, reverse=False):
if not self.is_initialized:
self.initialize_parameters(input)
self._check_input_dim(input)
# no need to permute dims as old version
if not reverse:
# center and scale
input = self._center(input, reverse)
input, logdet = self._scale(input, logdet, reverse)
else:
# scale and center
input, logdet = self._scale(input, logdet, reverse)
input = self._center(input, reverse)
return input, logdet
class ActNorm2d(_ActNorm):
def __init__(self, num_features, scale=1.):
super().__init__(num_features, scale)
def _check_input_dim(self, input):
assert len(input.size()) == 3
assert input.size(1) == self.num_features, (
"[ActNorm]: input should be in shape as `BCT`,"
" channels should be {} rather than {}".format(
self.num_features, input.size()))
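# Illustrative sketch (not part of the module): the data-dependent initialisation
# performed by _ActNorm.initialize_parameters, written out for a single (B, C, T)
# batch. After (x + bias) * exp(logs), each channel of that batch has roughly
# zero mean and unit variance (up to the 1e-6 stabiliser).
def _demo_actnorm_init(x, scale=1.0):
    bias = -x.mean(dim=[0, 2], keepdim=True)
    var = ((x + bias) ** 2).mean(dim=[0, 2], keepdim=True)
    logs = torch.log(scale / (torch.sqrt(var) + 1e-6))
    return (x + bias) * torch.exp(logs)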
class LinearZeros(nn.Linear):
def __init__(self, in_channels, out_channels, logscale_factor=3):
super().__init__(in_channels, out_channels)
self.logscale_factor = logscale_factor
# set logs parameter
self.register_parameter("logs", nn.Parameter(torch.zeros(out_channels)))
# init
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input):
output = super().forward(input)
return output * torch.exp(self.logs * self.logscale_factor)
class Conv2d(nn.Conv2d):
pad_dict = {
"same": lambda kernel, stride: [((k - 1) * s + 1) // 2 for k, s in zip(kernel, stride)],
"valid": lambda kernel, stride: [0 for _ in kernel]
}
@staticmethod
def get_padding(padding, kernel_size, stride):
# make paddding
if isinstance(padding, str):
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
if isinstance(stride, int):
stride = [stride, stride]
padding = padding.lower()
try:
padding = Conv2d.pad_dict[padding](kernel_size, stride)
except KeyError:
raise ValueError("{} is not supported".format(padding))
return padding
def __init__(self, in_channels, out_channels,
kernel_size=[3, 3], stride=[1, 1],
padding="same", do_actnorm=True, weight_std=0.05):
padding = Conv2d.get_padding(padding, kernel_size, stride)
super().__init__(in_channels, out_channels, kernel_size, stride,
padding, bias=(not do_actnorm))
# init weight with std
self.weight.data.normal_(mean=0.0, std=weight_std)
if not do_actnorm:
self.bias.data.zero_()
else:
self.actnorm = ActNorm2d(out_channels)
self.do_actnorm = do_actnorm
def forward(self, input):
x = super().forward(input)
if self.do_actnorm:
x, _ = self.actnorm(x)
return x
class Conv2dZeros(nn.Conv2d):
def __init__(self, in_channels, out_channels,
kernel_size=[3, 3], stride=[1, 1],
padding="same", logscale_factor=3):
padding = Conv2d.get_padding(padding, kernel_size, stride)
super().__init__(in_channels, out_channels, kernel_size, stride, padding)
# logscale_factor
self.logscale_factor = logscale_factor
self.register_parameter("logs", nn.Parameter(torch.zeros(out_channels, 1, 1)))
# init
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input):
output = super().forward(input)
return output * torch.exp(self.logs * self.logscale_factor)
class LinearNormInit(nn.Linear):
def __init__(self, in_channels, out_channels, weight_std=0.05):
super().__init__(in_channels, out_channels)
# init
self.weight.data.normal_(mean=0.0, std=weight_std)
self.bias.data.zero_()
class LinearZeroInit(nn.Linear):
def __init__(self, in_channels, out_channels):
super().__init__(in_channels, out_channels)
# init
self.weight.data.zero_()
self.bias.data.zero_()
class Permute2d(nn.Module):
def __init__(self, num_channels, shuffle):
super().__init__()
self.num_channels = num_channels
print(num_channels)
self.indices = np.arange(self.num_channels - 1, -1,-1).astype(np.long)
self.indices_inverse = np.zeros((self.num_channels), dtype=np.long)
print(self.indices_inverse.shape)
for i in range(self.num_channels):
self.indices_inverse[self.indices[i]] = i
if shuffle:
self.reset_indices()
def reset_indices(self):
np.random.shuffle(self.indices)
for i in range(self.num_channels):
self.indices_inverse[self.indices[i]] = i
def forward(self, input, reverse=False):
assert len(input.size()) == 3
if not reverse:
return input[:, self.indices, :]
else:
return input[:, self.indices_inverse, :]
class InvertibleConv1x1(nn.Module):
def __init__(self, num_channels, LU_decomposed=False):
super().__init__()
w_shape = [num_channels, num_channels]
w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype(np.float32)
if not LU_decomposed:
# Sample a random orthogonal matrix:
self.register_parameter("weight", nn.Parameter(torch.Tensor(w_init)))
else:
np_p, np_l, np_u = scipy.linalg.lu(w_init)
np_s = np.diag(np_u)
np_sign_s = np.sign(np_s)
np_log_s = np.log(np.abs(np_s))
np_u = np.triu(np_u, k=1)
l_mask = np.tril(np.ones(w_shape, dtype=np.float32), -1)
eye = np.eye(*w_shape, dtype=np.float32)
#self.p = torch.Tensor(np_p.astype(np.float32))
#self.sign_s = torch.Tensor(np_sign_s.astype(np.float32))
self.register_buffer('p', torch.Tensor(np_p.astype(np.float32)))
self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(np.float32)))
self.l = nn.Parameter(torch.Tensor(np_l.astype(np.float32)))
self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(np.float32)))
self.u = nn.Parameter(torch.Tensor(np_u.astype(np.float32)))
self.l_mask = torch.Tensor(l_mask)
self.eye = torch.Tensor(eye)
self.w_shape = w_shape
self.LU = LU_decomposed
self.first_pass = True
self.saved_weight = None
self.saved_dlogdet = None
def get_weight(self, input, reverse):
w_shape = self.w_shape
if not self.LU:
timesteps = thops.timesteps(input)
dlogdet = torch.slogdet(self.weight)[1] * timesteps
if not reverse:
weight = self.weight.view(w_shape[0], w_shape[1], 1)
else:
weight = torch.inverse(self.weight.double()).float()\
.view(w_shape[0], w_shape[1], 1)
return weight, dlogdet
else:
self.p = self.p.to(input.device)
self.sign_s = self.sign_s.to(input.device)
self.l_mask = self.l_mask.to(input.device)
self.eye = self.eye.to(input.device)
l = self.l * self.l_mask + self.eye
u = self.u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(self.log_s))
dlogdet = thops.sum(self.log_s) * thops.timesteps(input)
if not reverse:
w = torch.matmul(self.p, torch.matmul(l, u))
else:
l = torch.inverse(l.double()).float()
u = torch.inverse(u.double()).float()
w = torch.matmul(u, torch.matmul(l, self.p.inverse()))
return w.view(w_shape[0], w_shape[1], 1), dlogdet
def forward(self, input, logdet=None, reverse=False):
"""
log-det = log|abs(|W|)| * timesteps
"""
# weight, dlogdet = self.get_weight(input, reverse)
if not reverse:
weight, dlogdet = self.get_weight(input, reverse)
else:
if self.first_pass:
weight, dlogdet = self.get_weight(input, reverse)
self.saved_weight = weight
if logdet is not None:
self.saved_dlogdet = dlogdet
self.first_pass = False
else:
weight = self.saved_weight
if logdet is not None:
dlogdet = self.saved_dlogdet
nan_throw(weight, "weight")
nan_throw(dlogdet, "dlogdet")
if not reverse:
z = F.conv1d(input, weight)
if logdet is not None:
logdet = logdet + dlogdet
return z, logdet
else:
nan_throw(input, "InConv input")
z = F.conv1d(input, weight)
nan_throw(z, "InConv z")
nan_throw(logdet, "InConv logdet")
if logdet is not None:
logdet = logdet - dlogdet
return z, logdet
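# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Round-trip check for InvertibleConv1x1: running forward and then reverse should
# recover the input and cancel the log-determinant contribution. Shapes follow the
# (batch, channels, timesteps) convention expected by F.conv1d above; torch, thops
# and nan_throw are assumed to be imported/defined earlier in this file.
def _check_invertible_conv1x1(num_channels=8, timesteps=16, batch=4):
    conv = InvertibleConv1x1(num_channels, LU_decomposed=True)
    x = torch.randn(batch, num_channels, timesteps)
    logdet = torch.zeros(batch)
    z, logdet_fwd = conv(x, logdet, reverse=False)
    x_rec, logdet_rev = conv(z, logdet_fwd, reverse=True)
    # x_rec should match x up to numerical error, and logdet_rev should return to ~0
    return (x - x_rec).abs().max(), logdet_rev.abs().max()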
# Here we define our model as a class
class LSTM(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim=1, num_layers=2, dropout=0.0):
super(LSTM, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.num_layers = num_layers
# Define the LSTM layer
self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True)
# Define the output layer
self.linear = LinearZeroInit(self.hidden_dim, output_dim)
# do_init
self.do_init = True
def init_hidden(self):
# This is what we'll initialise our hidden state as
self.do_init = True
def forward(self, input):
# Forward pass through LSTM layer
# shape of lstm_out: [batch_size, input_size, hidden_dim]
# shape of self.hidden: (a, b), where a and b both
# have shape (batch_size, num_layers, hidden_dim).
if self.do_init:
lstm_out, self.hidden = self.lstm(input)
self.do_init = False
else:
lstm_out, self.hidden = self.lstm(input, self.hidden)
#self.hidden = hidden[0].to(input.device), hidden[1].to(input.device)
# Final layer
y_pred = self.linear(lstm_out)
return y_pred
# Here we define our model as a class
class GRU(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim=1, num_layers=2, dropout=0.0):
super(GRU, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.num_layers = num_layers
# Define the LSTM layer
self.gru = nn.GRU(self.input_dim, self.hidden_dim, self.num_layers, batch_first=True)
# Define the output layer
self.linear = LinearZeroInit(self.hidden_dim, output_dim)
# do_init
self.do_init = True
def init_hidden(self):
# This is what we'll initialise our hidden state as
self.do_init = True
def forward(self, input):
# Forward pass through LSTM layer
# shape of lstm_out: [batch_size, input_size, hidden_dim]
# shape of self.hidden: (a, b), where a and b both
# have shape (batch_size, num_layers, hidden_dim).
if self.do_init:
gru_out, self.hidden = self.gru(input)
self.do_init = False
else:
gru_out, self.hidden = self.gru(input, self.hidden)
#self.hidden = hidden[0].to(input.device), hidden[1].to(input.device)
# Final layer
y_pred = self.linear(gru_out)
return y_pred
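# --- Hedged usage sketch (added; not from the original source) ---
# The LSTM/GRU wrappers above are stateful: call init_hidden() once per new sequence,
# then feed (batch, seq_len, input_dim) chunks; the hidden state is carried across
# calls until init_hidden() is called again.
def _run_recurrent_example(input_dim=4, hidden_dim=32, output_dim=2):
    net = GRU(input_dim, hidden_dim, output_dim)
    net.init_hidden()                        # start a fresh sequence
    chunk_a = torch.randn(3, 10, input_dim)  # (batch, time, features)
    chunk_b = torch.randn(3, 10, input_dim)
    y_a = net(chunk_a)                       # first call initializes the hidden state
    y_b = net(chunk_b)                       # continues from the stored hidden state
    return y_a.shape, y_b.shape              # both: (3, 10, output_dim)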
class GaussianDiag:
Log2PI = float(np.log(2 * np.pi))
@staticmethod
def likelihood(x):
"""
lnL = -1/2 * { ln|Var| + ((X - Mu)^T)(Var^-1)(X - Mu) + kln(2*PI) }
k = 1 (Independent)
Var = logs ** 2
"""
return -0.5 * (((x) ** 2) + GaussianDiag.Log2PI)
@staticmethod
def logp(x):
likelihood = GaussianDiag.likelihood(x)
return thops.sum(likelihood, dim=[1, 2])
@staticmethod
def sample(z_shape, eps_std=None, device=None):
eps_std = eps_std or 1
eps = torch.normal(mean=torch.zeros(z_shape),
std=torch.ones(z_shape) * eps_std)
eps = eps.to(device)
return eps
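# --- Hedged example (added for illustration) ---
# GaussianDiag above is a standard-normal prior: logp sums the per-element log-density
# over dims 1 and 2, so for a latent z of shape (batch, channels, timesteps) it returns
# one value per batch element. thops.sum is assumed to behave like torch.sum over the
# given dims.
def _gaussian_prior_example(batch=2, channels=8, timesteps=16):
    z = GaussianDiag.sample((batch, channels, timesteps), eps_std=1.0, device='cpu')
    log_prob = GaussianDiag.logp(z)   # shape: (batch,)
    return log_prob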
class StudentT:
def __init__(self, df, d):
self.df=df
+ 1;
// Auto-padding if requested
if (padH_l == -1 || padH_r == -1) { // vertical half padding
padH_l = padH_r = dil_kH / 2;
}
else if (padH_l == -2 || padH_r == -2) { // vertical full padding
padH_l = padH_r = dil_kH - 1;
}
else if (padH_l < 0 || padH_r < 0) {
PyErr_SetString(PyExc_ValueError, "BaseGpuCorrMM: padH must be >= -2");
%(fail)s
}
if (padW_l == -1 || padW_r == -1) { // horizontal half padding
padW_l = padW_r = dil_kW / 2;
}
else if (padW_l == -2 || padW_r == -2) { // horizontal full padding
padW_l = padW_r = dil_kW - 1;
}
else if (padW_l < 0 || padW_r < 0) {
PyErr_SetString(PyExc_ValueError, "BaseGpuCorrMM: padW must be >= -2");
%(fail)s
}
// Infer output shape and type
// The inferred shape can be negative.
long long out_dim[6];
size_t out_dim_size[6];
out_dim[4] = out_dim[5] = 0; //Only used for unshared backprop wrt weights
out_dim_size[4] = out_dim_size[5] = 0; //Same
int out_typecode;
PyGpuContextObject *out_context;
switch(direction) {
case 0: // forward pass
// output is top: (batchsize, num_filters, height, width)
// height and width: top = (bottom + pad_l + pad_r - ((weight-1)*dil + 1)) / sample + 1
out_dim[0] = PyGpuArray_DIMS(bottom)[0];
out_dim[1] = PyGpuArray_DIMS(weights)[0];
out_dim[2] = (PyGpuArray_DIMS(bottom)[2] + padH_l + padH_r - ((PyGpuArray_DIMS(weights)[wdim-2]-1)*dilH + 1)) / dH + 1;
out_dim[3] = (PyGpuArray_DIMS(bottom)[3] + padW_l + padW_r - ((PyGpuArray_DIMS(weights)[wdim-1]-1)*dilW + 1)) / dW + 1;
out_typecode = bottom->ga.typecode;
out_context = bottom->context;
if (out_dim[0] < 0 || out_dim[1] < 0 || out_dim[2] <= 0 || out_dim[3] <= 0)
{
if (unshared) {
PyErr_Format(PyExc_ValueError,
"GpuCorrMM: impossible output shape\\n"
" bottom shape: %%ld x %%ld x %%ld x %%ld\\n"
" weights shape: %%ld x %%ld x %%ld x %%ld x %%ld x %%ld\\n"
" top shape: %%ld x %%ld x %%ld x %%ld\\n",
PyGpuArray_DIMS(bottom)[0], PyGpuArray_DIMS(bottom)[1],
PyGpuArray_DIMS(bottom)[2], PyGpuArray_DIMS(bottom)[3],
PyGpuArray_DIMS(weights)[0], PyGpuArray_DIMS(weights)[1],
PyGpuArray_DIMS(weights)[2], PyGpuArray_DIMS(weights)[3],
PyGpuArray_DIMS(weights)[4], PyGpuArray_DIMS(weights)[5],
out_dim[0], out_dim[1], out_dim[2], out_dim[3]);
%(fail)s
}
else {
PyErr_Format(PyExc_ValueError,
"GpuCorrMM: impossible output shape\\n"
" bottom shape: %%ld x %%ld x %%ld x %%ld\\n"
" weights shape: %%ld x %%ld x %%ld x %%ld\\n"
" top shape: %%ld x %%ld x %%ld x %%ld\\n",
PyGpuArray_DIMS(bottom)[0], PyGpuArray_DIMS(bottom)[1],
PyGpuArray_DIMS(bottom)[2], PyGpuArray_DIMS(bottom)[3],
PyGpuArray_DIMS(weights)[0], PyGpuArray_DIMS(weights)[1],
PyGpuArray_DIMS(weights)[2], PyGpuArray_DIMS(weights)[3],
out_dim[0], out_dim[1], out_dim[2], out_dim[3]);
%(fail)s
}
}
break;
case 1: // backprop wrt. weights
// output is weights: (num_filters, num_channels, height, width) or
// (num_filters, top_height, top_width, num_channels, height, width) -> for unshared
// height and width: weights = (bottom + 2*pad - (top - 1) * sample - 1) / dil + 1
out_dim[0] = PyGpuArray_DIMS(top)[1];
if (unshared){
odim = 6;
out_dim[1] = PyGpuArray_DIMS(top)[2];
out_dim[2] = PyGpuArray_DIMS(top)[3];
}
out_dim[wdim-3] = PyGpuArray_DIMS(bottom)[1] / numgroups;
out_dim[wdim-2] = kH; // already inferred further above
out_dim[wdim-1] = kW; // how convenient
out_typecode = top->ga.typecode;
out_context = top->context;
if (unshared) {
if (out_dim[0] < 0 || out_dim[1] <= 0 || out_dim[2] <= 0 || out_dim[3] < 0
|| out_dim[4] <= 0 || out_dim[5] <= 0){
PyErr_Format(PyExc_ValueError,
"GpuCorrMM backprop wrt. weights: impossible output shape\\n"
" bottom shape: %%ld x %%ld x %%ld x %%ld\\n"
" weights shape: %%ld x %%ld x %%ld x %%ld x %%ld x %%ld\\n"
" top shape: %%ld x %%ld x %%ld x %%ld\\n",
PyGpuArray_DIMS(bottom)[0], PyGpuArray_DIMS(bottom)[1],
PyGpuArray_DIMS(bottom)[2], PyGpuArray_DIMS(bottom)[3],
out_dim[0], out_dim[1], out_dim[2], out_dim[3],
out_dim[4], out_dim[5],
PyGpuArray_DIMS(top)[0], PyGpuArray_DIMS(top)[1],
PyGpuArray_DIMS(top)[2], PyGpuArray_DIMS(top)[3]);
%(fail)s
}
}
else {
if (out_dim[0] < 0 || out_dim[1] < 0 || out_dim[2] <= 0 || out_dim[3] <= 0)
{
PyErr_Format(PyExc_ValueError,
"GpuCorrMM backprop wrt. weights: impossible output shape\\n"
" bottom shape: %%ld x %%ld x %%ld x %%ld\\n"
" weights shape: %%ld x %%ld x %%ld x %%ld\\n"
" top shape: %%ld x %%ld x %%ld x %%ld\\n",
PyGpuArray_DIMS(bottom)[0], PyGpuArray_DIMS(bottom)[1],
PyGpuArray_DIMS(bottom)[2], PyGpuArray_DIMS(bottom)[3],
out_dim[0], out_dim[1], out_dim[2], out_dim[3],
PyGpuArray_DIMS(top)[0], PyGpuArray_DIMS(top)[1],
PyGpuArray_DIMS(top)[2], PyGpuArray_DIMS(top)[3]);
%(fail)s
}
}
break;
case 2: // backprop wrt. inputs
// output is bottom: (batchsize, num_channels, height, width)
// height and width: bottom = (top - 1) * sample + (weights-1)*dil + 1 - 2*pad
out_dim[0] = PyGpuArray_DIMS(top)[0];
out_dim[1] = PyGpuArray_DIMS(weights)[wdim-3] * numgroups;
out_dim[2] = (%(height)s != -1) ? %(height)s : (PyGpuArray_DIMS(top)[2] - 1) * dH + (PyGpuArray_DIMS(weights)[wdim-2]-1)*dilH + 1 - padH_l - padH_r;
out_dim[3] = (%(width)s != -1) ? %(width)s : (PyGpuArray_DIMS(top)[3] - 1) * dW + (PyGpuArray_DIMS(weights)[wdim-1]-1)*dilW + 1 - padW_l - padW_r;
out_typecode = top->ga.typecode;
out_context = top->context;
if (unshared) {
if (out_dim[0] < 0 || out_dim[1] < 0 || out_dim[2] <= 0 || out_dim[3] <= 0)
{
PyErr_Format(PyExc_ValueError,
"GpuCorrMM backprop wrt. inputs: impossible output shape\\n"
" bottom shape: %%ld x %%ld x %%ld x %%ld\\n"
" weight shape: %%ld x %%ld x %%ld x %%ld x %%ld x %%ld\\n"
" top shape: %%ld x %%ld x %%ld x %%ld\\n",
out_dim[0], out_dim[1], out_dim[2], out_dim[3],
PyGpuArray_DIMS(weights)[0], PyGpuArray_DIMS(weights)[1],
PyGpuArray_DIMS(weights)[2], PyGpuArray_DIMS(weights)[3],
PyGpuArray_DIMS(weights)[4], PyGpuArray_DIMS(weights)[5],
PyGpuArray_DIMS(top)[0], PyGpuArray_DIMS(top)[1],
PyGpuArray_DIMS(top)[2], PyGpuArray_DIMS(top)[3]);
%(fail)s
}
}
else {
if (out_dim[0] < 0 || out_dim[1] < 0 || out_dim[2] <= 0 || out_dim[3] <= 0)
{
PyErr_Format(PyExc_ValueError,
"GpuCorrMM backprop wrt. inputs: impossible output shape\\n"
" bottom shape: %%ld x %%ld x %%ld x %%ld\\n"
" weight shape: %%ld x %%ld x %%ld x %%ld\\n"
" top shape: %%ld x %%ld x %%ld x %%ld\\n",
out_dim[0], out_dim[1], out_dim[2], out_dim[3],
PyGpuArray_DIMS(weights)[0], PyGpuArray_DIMS(weights)[1],
PyGpuArray_DIMS(weights)[2], PyGpuArray_DIMS(weights)[3],
PyGpuArray_DIMS(top)[0], PyGpuArray_DIMS(top)[1],
PyGpuArray_DIMS(top)[2], PyGpuArray_DIMS(top)[3]);
%(fail)s
}
}
break;
default:
PyErr_SetString(PyExc_ValueError, "BaseGpuCorrMM: direction must be 0, 1, or 2\\n");
%(fail)s
}
out_dim_size[0] = (size_t)out_dim[0];
out_dim_size[1] = (size_t)out_dim[1];
out_dim_size[2] = (size_t)out_dim[2];
out_dim_size[3] = (size_t)out_dim[3];
if (odim == 6) {
out_dim_size[4] = (size_t)out_dim[4];
out_dim_size[5] = (size_t)out_dim[5];
}
// Prepare output array
if (aesara_prep_output(&%(out)s, odim, out_dim_size, out_typecode, GA_C_ORDER, out_context) != 0)
{
if (odim == 4) {
PyErr_Format(PyExc_RuntimeError,
"BaseGpuCorrMM: Failed to allocate output of %%lld x %%lld x %%lld x %%lld",
out_dim[0], out_dim[1], out_dim[2], out_dim[3]);
}
if (odim == 6) {
PyErr_Format(PyExc_RuntimeError,
"BaseGpuCorrMM: Failed to allocate output of %%lld x %%lld x %%lld x %%lld %%lld %%lld",
out_dim[0], out_dim[1], out_dim[2], out_dim[3], out_dim[4], out_dim[5]);
}
%(fail)s
}
if (!GpuArray_IS_C_CONTIGUOUS(&%(out)s->ga)) {
PyErr_SetString(PyExc_ValueError, "Only contiguous outputs are supported.");
%(fail)s
}
// Call GPU code
out2 = corrMM(%(bottom)s, %(weights)s, %(top)s, direction, dH, dW, dilH, dilW,
padH_l, padH_r, padW_l, padW_r, numgroups, unshared);
if (out2==NULL){
%(fail)s
}
assert (out2 == %(out)s);
"""
% sub
)
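# --- Hedged helper (added for illustration; not part of Aesara) ---
# Pure-Python mirror of the forward-pass shape rule used in the C template above:
# top = (bottom + pad_l + pad_r - ((kernel - 1) * dilation + 1)) // stride + 1.
# Handy for sanity-checking the "impossible output shape" branches.
def _corrmm_forward_out_size(bottom, kernel, stride=1, dilation=1, pad_l=0, pad_r=0):
    return (bottom + pad_l + pad_r - ((kernel - 1) * dilation + 1)) // stride + 1
# Example: a 32-pixel input with a 3-wide kernel and half padding keeps its size:
# _corrmm_forward_out_size(32, 3, pad_l=1, pad_r=1) == 32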
class GpuCorrMM(BaseGpuCorrMM):
"""
GPU correlation implementation using Matrix Multiplication.
Parameters
----------
border_mode
The width of a border of implicit zeros to pad the
input with. Must be a tuple with 2 elements giving the numbers of rows
and columns to pad on each side, or a single integer to pad the same
on all sides, or a string shortcut setting the padding at runtime:
``'valid'`` for ``(0, 0)`` (valid convolution, no padding), ``'full'``
for ``(kernel_rows - 1, kernel_columns - 1)`` (full convolution),
``'half'`` for ``(kernel_rows // 2, kernel_columns // 2)`` (same
convolution for odd-sized kernels).
If it is a tuple containing 2 pairs of integers, then these specify
the padding to be applied on each side ((left, right), (top, bottom)).
Otherwise, each width is applied twice, once per side (left and right,
top and bottom).
subsample
The subsample operation applied to each output image.
Should be a tuple with 2 elements.
`(sv, sh)` is equivalent to `GpuCorrMM(...)(...)[:,:,::sv, ::sh]`,
but faster.
Set to `(1, 1)` to disable subsampling.
filter_dilation
The filter dilation operation applied to each input image.
Should be a tuple with 2 elements.
Set to `(1, 1)` to disable filter dilation.
num_groups
The number of distinct groups the image and kernel must be
divided into.
        Should be an int. Set to 1 to disable grouped convolution.
unshared
Perform unshared correlation (default: False)
Notes
-----
Currently, the Op requires the inputs, filters and outputs to be
C-contiguous. Use :func:`gpu_contiguous
<aesara.gpuarray.basic_ops.gpu_contiguous>` on these arguments
if needed.
You can either enable the Aesara flag `optimizer_including=conv_gemm`
to automatically replace all convolution operations with `GpuCorrMM`
or one of its gradients, or you can use it as a replacement for
    :func:`conv2d <aesara.tensor.nnet.conv.conv2d>`,
# repo: xuhao1/taichi_three
import taichi as ti
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
ti.init(arch=ti.gpu)
@ti.data_oriented
class lbm_solver:
def __init__(self,
nx, # domain size
ny,
niu, # viscosity of fluid
# [left,top,right,bottom] boundary conditions: 0 -> Dirichlet ; 1 -> Neumann
bc_type,
bc_value, # if bc_type = 0, we need to specify the velocity in bc_value
cy=0, # whether to place a cylindrical obstacle
# location and radius of the cylinder
cy_para=[0.0, 0.0, 0.0],
steps=600000): # total steps to run
self.nx = nx # by convention, dx = dy = dt = 1.0 (lattice units)
self.ny = ny
self.niu = niu
self.tau = 3.0 * niu + 0.5
self.inv_tau = 1.0 / self.tau
self.rho = ti.var(dt=ti.f32, shape=(nx, ny))
self.vel = ti.Vector(2, dt=ti.f32, shape=(nx, ny))
self.mask = ti.var(dt=ti.f32, shape=(nx, ny))
self.display_var = ti.var(dt=ti.f32, shape=(nx, ny))
self.f_old = ti.Vector(9, dt=ti.f32, shape=(nx, ny))
self.f_new = ti.Vector(9, dt=ti.f32, shape=(nx, ny))
self.w = ti.var(dt=ti.f32, shape=9)
self.e = ti.var(dt=ti.i32, shape=(9, 2))
self.bc_type = ti.var(dt=ti.i32, shape=4)
self.bc_value = ti.var(dt=ti.f32, shape=(4, 2))
self.cy = cy
self.cy_para = ti.var(dt=ti.f32, shape=3)
self.bc_type.from_numpy(np.array(bc_type, dtype=np.int32))
self.bc_value.from_numpy(np.array(bc_value, dtype=np.float32))
self.cy_para.from_numpy(np.array(cy_para, dtype=np.float32))
self.steps = steps
arr = np.array([4.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 36.0,
1.0 / 36.0, 1.0 / 36.0, 1.0 / 36.0], dtype=np.float32)
self.w.from_numpy(arr)
arr = np.array([[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1], [1, 1],
[-1, 1], [-1, -1], [1, -1]], dtype=np.int32)
self.e.from_numpy(arr)
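        # D2Q9 lattice: w holds the nine equilibrium weights (4/9 for the rest particle,
        # 1/9 for the axis directions, 1/36 for the diagonals) and e holds the matching
        # discrete velocity vectors in the same order.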
@ti.func # compute equilibrium distribution function
def f_eq(self, i, j, k):
eu = ti.cast(self.e[k, 0], ti.f32) * self.vel[i, j][0] + ti.cast(self.e[k, 1],
ti.f32) * self.vel[i, j][1]
uv = self.vel[i, j][0]**2.0 + self.vel[i, j][1]**2.0
return self.w[k] * self.rho[i, j] * (1.0 + 3.0 * eu + 4.5 * eu**2 - 1.5 * uv)
@ti.kernel
def init(self):
for i, j in self.rho:
self.vel[i, j][0] = 0.0
self.vel[i, j][1] = 0.0
self.rho[i, j] = 1.0
self.mask[i, j] = 0.0
for k in ti.static(range(9)):
self.f_new[i, j][k] = self.f_eq(i, j, k)
self.f_old[i, j][k] = self.f_new[i, j][k]
# half way bounce back for no-slip boundary
# borders
            if (i == self.nx - 1 or j == 0 or j == self.ny - 1):  # grid indices run 0..n-1
self.mask[i, j] = 1.0
# cylinder
if(self.cy == 1):
if ((ti.cast(i, ti.f32) - self.cy_para[0])**2.0 + (ti.cast(j, ti.f32)
- self.cy_para[1])**2.0 <= self.cy_para[2]**2.0):
self.mask[i, j] = 1.0
@ti.func
def to_moment(self, f):
M = ti.Matrix([
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[-4.0, -1.0, -1.0, -1.0, -1.0, 2.0, 2.0, 2.0, 2.0],
[4.0, -2.0, -2.0, -2.0, -2.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 1.0, 0.0, -1.0, 0.0, 1.0, -1.0, -1.0, 1.0],
[0.0, -2.0, 0.0, 2.0, 0.0, 1.0, -1.0, -1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, -1.0, 1.0, 1.0, -1.0, -1.0],
[0.0, 0.0, -2.0, 0.0, 2.0, 1.0, 1.0, -1.0, -1.0],
[0.0, 1.0, -1.0, 1.0, -1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.0, 1.0, -1.0]
])
return M @ f
@ti.func
def from_moment(self, m):
d = ti.Vector([1./9, 1./36, 1./36, 1./6,
1./12, 1./6, 1./12, 1./4, 1./4])
M = ti.Matrix([
[1, -4, 4, 0, 0, 0, 0, 0, 0],
[1, -1, -2, 1, -2, 0, 0, 1, 0],
[1, -1, -2, 0, 0, 1, -2, -1, 0],
[1, -1, -2, -1, 2, 0, 0, 1, 0],
[1, -1, -2, 0, 0, -1, 2, -1, 0],
[1, 2, 1, 1, 1, 1, 1, 0, 1],
[1, 2, 1, -1, -1, 1, 1, 0, -1],
[1, 2, 1, -1, -1, -1, -1, 0, 1],
[1, 2, 1, 1, 1, -1, -1, 0, -1],
])
return M @ (d * m)
@ti.kernel
def do_collide(self, flg: ti.template()): # lbm core equation
for i, j in ti.ndrange((1, self.nx - 1), (1, self.ny - 1)):
feq = ti.Vector(
[self.f_eq(i, j, 0),
self.f_eq(i, j, 1),
self.f_eq(i, j, 2),
self.f_eq(i, j, 3),
self.f_eq(i, j, 4),
self.f_eq(i, j, 5),
self.f_eq(i, j, 6),
self.f_eq(i, j, 7),
self.f_eq(i, j, 8)])
f = self.f_old[i, j]
meq = self.to_moment(feq)
m = self.to_moment(f)
s = ti.Vector([1.0, 1.63, 1.14, 1.0, 1.92, 0.0, 1.92,
self.inv_tau, self.inv_tau])
# MRT
if ti.static(flg == 0):
m += (meq - m) * s
# BGK
else:
m += (meq - m) * self.inv_tau
self.f_old[i, j] = self.from_moment(m)
@ti.kernel
def do_stream(self): # lbm core equation
for i, j in ti.ndrange((1, self.nx - 1), (1, self.ny - 1)):
# inverse index for halfway bounce back
bi = ti.static([0, 3, 4, 1, 2, 7, 8, 5, 6])
for k in ti.static(range(9)):
ip = i - self.e[k, 0]
jp = j - self.e[k, 1]
kk = ti.static(bi[k])
if self.mask[ip, jp] == 0.0:
self.f_new[i, j][k] = self.f_old[ip, jp][k]
else:
self.f_new[i, j][k] = self.f_old[i, j][kk]
@ti.kernel
def update_macro_var(self): # compute rho u v
for i, j in ti.ndrange((1, self.nx - 1), (1, self.ny - 1)):
self.rho[i, j] = 0.0
self.vel[i, j][0] = 0.0
self.vel[i, j][1] = 0.0
for k in ti.static(range(9)):
self.f_old[i, j][k] = self.f_new[i, j][k]
self.rho[i, j] += self.f_new[i, j][k]
self.vel[i, j][0] += (ti.cast(self.e[k, 0], ti.f32) *
self.f_new[i, j][k])
self.vel[i, j][1] += (ti.cast(self.e[k, 1], ti.f32) *
self.f_new[i, j][k])
self.vel[i, j][0] /= self.rho[i, j]
self.vel[i, j][1] /= self.rho[i, j]
@ti.kernel
def apply_bc(self): # impose boundary conditions
# left and right
for j in ti.ndrange(1, self.ny - 1):
# left: dr = 0; ibc = 0; jbc = j; inb = 1; jnb = j
self.apply_bc_core(1, 0, 0, j, 1, j)
# right: dr = 2; ibc = nx-1; jbc = j; inb = nx-2; jnb = j
self.apply_bc_core(1, 2, self.nx - 1, j, self.nx - 2, j)
# top and bottom
for i in ti.ndrange(self.nx):
# top: dr = 1; ibc = i; jbc = ny-1; inb = i; jnb = ny-2
self.apply_bc_core(1, 1, i, self.ny - 1, i, self.ny - 2)
# bottom: dr = 3; ibc = i; jbc = 0; inb = i; jnb = 1
self.apply_bc_core(1, 3, i, 0, i, 1)
# cylindrical obstacle
# Note: for cuda backend, putting 'if statement' inside loops can be much faster!
for i, j in ti.ndrange(self.nx, self.ny):
if (self.cy == 1 and self.mask[i, j] == 1):
self.vel[i, j][0] = 0.0 # velocity is zero at solid boundary
self.vel[i, j][1] = 0.0
inb = 0
jnb = 0
if (ti.cast(i, ti.f32) >= self.cy_para[0]):
inb = i + 1
else:
inb = i - 1
if (ti.cast(j, ti.f32) >= self.cy_para[1]):
jnb = j + 1
else:
jnb = j - 1
self.apply_bc_core(0, 0, i, j, inb, jnb)
@ti.func
def apply_bc_core(self, outer, dr, ibc, jbc, inb, jnb):
if (outer == 1): # handle outer boundary
if (self.bc_type[dr] == 0):
self.vel[ibc, jbc][0] = self.bc_value[dr, 0]
self.vel[ibc, jbc][1] = self.bc_value[dr, 1]
elif (self.bc_type[dr] == 1):
self.vel[ibc, jbc][0] = self.vel[inb, jnb][0]
self.vel[ibc, jbc][1] = self.vel[inb, jnb][1]
self.rho[ibc, jbc] = self.rho[inb, jnb]
for k in ti.static(range(9)):
# self.f_old[ibc,jbc][k] = self.f_eq(ibc,jbc,k) - self.f_eq(inb,jnb,k) + self.f_old[inb,jnb][k]
self.f_old[ibc, jbc][k] = self.f_eq(ibc, jbc, k)
@ti.kernel
def get_display_var(self, flg: ti.template()):
# if (flg == 0): # get velocity magnitude
if ti.static(flg == 0): # get velocity magnitude
for i, j in ti.ndrange(self.nx, self.ny):
self.display_var[i, j] = ti.sqrt(self.vel[i, j][0]**2.0 +
self.vel[i, j][1]**2.0)
# elif (flg == 1): # get x-direction component only
else: # get x-direction component only
for i, j in ti.ndrange(self.nx, self.ny):
self.display_var[i, j] = self.vel[i, j][0]
def solve(self):
gui = ti.GUI('lbm solver', (self.nx, self.ny))
self.init()
for i in range(self.steps):
self.do_collide(0) # 0 for MRT, else for BGK
self.do_stream()
self.update_macro_var()
self.apply_bc()
if (i % 50 == 0):
self.get_display_var(0)
img = cm.plasma(self.display_var.to_numpy() / 0.15 * 2)
gui.set_image(img)
gui.show()
if (i % 1000 == 0):
print('Step: {:}'.format(i))
# ti.imwrite((img[:,:,0:3]*255).astype(np.uint8), 'fig/karman_'+str(i).zfill(6)+'.png')
def pass_to_py(self):
self.get_display_var(1)
return self.display_var.to_numpy()
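# --- Hedged usage sketch (added; not part of the original solver) ---
# Post-processing outside the GUI loop: pass_to_py() returns the x-velocity field as a
# NumPy array, which can be plotted with matplotlib (imported above as plt). The
# mid-channel slice index is an illustrative choice, not something the solver requires.
def plot_centerline_velocity(solver):
    u = solver.pass_to_py()            # shape (nx, ny), x-velocity component
    plt.plot(u[solver.nx // 2, :])     # profile across the mid-channel
    plt.xlabel('y (lattice units)')
    plt.ylabel('u_x')
    plt.show()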
if __name__ == '__main__':
flow_case = 0
if (flow_case == 0): # von Karman vortex street: Re = U*D/niu = 200
lbm = lbm_solver(401, 101, 0.0001, [0, 0, 1, 0],
[[0.05, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
1, [80.0, 50.0, 10.0])
lbm.solve()
elif (flow_case == 1): # lid-driven cavity flow: Re = U*L/niu = 1000
lbm = lbm_solver(256, 256, 0.0255, [0, 0, 0, 0],
                         [[0.0,
# ******************************************************************************
# pysimm.cassandra module
# ******************************************************************************
#
# ******************************************************************************
# License
# ******************************************************************************
# The MIT License (MIT)
#
# Copyright (c) 2017 <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from StringIO import StringIO
from subprocess import call, Popen, PIPE
import os
import re
import numpy as np
import random
import logging
import types
from collections import Iterable, OrderedDict
from pysimm import system
from string import ascii_uppercase
from pydoc import locate
DATA_PATH = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../dat/csndra_data'))
KCALMOL_2_K = 503.22271716452
CASSANDRA_EXEC = os.environ.get('CASSANDRA_EXEC')
# Create a logger instance and send its output to the console by default
logging.basicConfig(level=logging.INFO, datefmt='%H:%M:%S',
format='%(asctime)s [%(levelname)s]: %(message)s')
DEFAULT_PARAMS = {
'Temperature_Info': 300,
'Pressure_Info': 1,
'Rcutoff_Low': 0.1
}
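# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Rough call pattern for the MCSimulation class defined below. The McSystem class, the
# reader functions, the file names and the keyword values are assumptions used only to
# illustrate how the documented kwargs fit together; adapt them to a real pysimm setup.
def _example_gcmc_setup():
    frame = system.read_lammps('frame.lmps')                     # hypothetical fixed framework
    gas = McSystem(system.read_mol('gas.mol'), chem_pot=-30.0)   # hypothetical gas species
    sim = MCSimulation(mc_sst=gas, init_sst=frame,
                       out_folder='gcmc_results',
                       props_file='gcmc.inp',
                       Run_Name='gcmc_test',
                       Temperature_Info=300)
    sim.write()
    return sim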
class MCSimulation(object):
"""pysimm.cassandra.MCSimulation
    Object containing the settings and the logic necessary to partially set up an abstract Monte Carlo simulation
    to be submitted to the CASSANDRA software. The object will also hold the simulation results once the simulations
    are finished.
Attributes:
mc_sst (:class:`~pysimm.cassandra.McSystem`) : describes all molecules to be inserted by CASSANDRA
init_sst (:class:`~pysimm.system.System`) : describes the optional initial fixed molecular configuration for MC
simulations (default: empty cubic box with 1 nm side length). If the particles in the system are not
attributed with the flag `is_fixed` all of them are considered to be fixed, and will be marked with this
flag, otherwise all particles with is_fixed=False will be removed.
Keyword Args:
out_folder (str) : the relative path of the simulation results (all .dat, .mcf, as well as .chk, ... files will
go there). If the folder does not exist it will be created with 0755 permissions.
props_file (str) : the name of the .inp file.
Note:
Other keyword arguments that are accepted are the GCMC simulation settings. The keywords of the settings
are the same as they are described in CASSANDRA specification but without # symbol.
**For example**: the keyword argument `Run_Name='my_simulation'` will set `#Run_Name` setting in CASSANDRA
input file to `my_simulation` value
Parameters:
props (dictionary) : include all simulation settings to be written to the CASSANDRA .inp file
input (str) : text stream that will be written to the CASSANDRA .inp file
tot_sst (:class:`~pysimm.system.System`) : object containing the results of CASSANDRA simulations
"""
def __init__(self, mc_sst=None, init_sst=None, **kwargs):
global DATA_PATH
# Initializing CASSANDRA input stream, empty at the beginning
self.input = ''
# Initializing dictionary that contains records that directly will be sent to the .inp file
self.props = OrderedDict()
self.logger = logging.getLogger('MC Simulation')
# Reading default properties of the GCMC simulations
def_dat = Cassandra(system.System()).read_input(os.path.join(DATA_PATH, 'mc_default.inp'))
tmp = kwargs.get('out_folder') # Folder for the results and temporary files
if tmp:
self.out_folder = tmp
if os.path.isabs(tmp):
self.out_folder = os.path.relpath(tmp)
else:
self.out_folder = os.getcwd()
if not os.path.exists(self.out_folder):
os.makedirs(self.out_folder, mode=0755)
prefix = kwargs.get('Run_Name', def_dat['Run_Name'])
self.props['Run_Name'] = InpSpec('Run_Name', os.path.join(self.out_folder, prefix), '')
self.props_file = os.path.join(self.out_folder, kwargs.get('props_file', ''))
# Simple (one-value) dynamic properties
self.props['Temperature_Info'] = InpSpec('Temperature_Info',
kwargs.get('Temperature_Info'), DEFAULT_PARAMS['Temperature_Info'])
self.props['Pair_Energy'] = InpSpec('Pair_Energy', kwargs.get('Pair_Energy'), def_dat['Pair_Energy'])
self.props['Rcutoff_Low'] = InpSpec('Rcutoff_Low', kwargs.get('Rcutoff_Low'), def_dat['Rcutoff_Low'])
self.props['Mixing_Rule'] = InpSpec('Mixing_Rule', kwargs.get('Mixing_Rule'), def_dat['Mixing_Rule'])
self.props['Seed_Info'] = InpSpec('Seed_Info', kwargs.get('Seed_Info'),
[random.randint(int(1e+7), int(1e+8 - 1)),
random.randint(int(1e+7), int(1e+8 - 1))])
# Multiple-value one/many line dynamic properties
self.props['Run_Type'] = InpSpec('Run_Type', kwargs.get('Run_Type'), def_dat['Run_Type'])
self.props['Charge_Style'] = InpSpec('Charge_Style', kwargs.get('Charge_Style'), def_dat['Charge_Style'])
self.props['VDW_Style'] = InpSpec('VDW_Style', kwargs.get('VDW_Style'), def_dat['VDW_Style'])
self.props['Simulation_Length_Info'] = InpSpec('Simulation_Length_Info', kwargs.get('Simulation_Length_Info'),
def_dat['Simulation_Length_Info'],
**{'write_headers': True, 'new_line': True})
self.props['CBMC_Info'] = InpSpec('CBMC_Info', kwargs.get('CBMC_Info'), def_dat['CBMC_Info'],
**{'write_headers': True, 'new_line': True})
self.props['Box_Info'] = InpSpec('Box_Info', kwargs.get('Box_Info'), def_dat['Box_Info'], **{'new_line': True})
self.props['Property_Info 1'] = InpSpec('Property_Info 1', kwargs.get('Property_Info'), None, **{'new_line': True})
# Setting the simulation total system
if init_sst:
self.tot_sst = init_sst.copy()
self.tot_sst.center('box', [0, 0, 0], True) # the center of the calculation box should be at origin
else:
self.logger.warning('The frame generating system for Monte-Carlo simulations is not set. '
'Creating empty cubic box of 1 nm size')
self.tot_sst = system.System()
self.tot_sst.forcefield = 'trappe/amber'
self.tot_sst.dim = system.Dimension(dx=10, dy=10, dz=10)
# Molecule configuration files describing all species of the system.
# They are **absolutely** needed to start calculation
mol_files = OrderedDict()
# Some necessary verification of obtained system
        # TODO: check the forcefield to be sure that it is class 1
if False:
self.logger.error('CASSANDRA supports only 1-st class force fields')
exit(1)
self.tot_sst.zero_charge() # the sum of the charges should necessary be 0
# Creating the system of fixed molecules
self.fxd_sst_mcfile = None
self.fxd_sst = kwargs.get('fixed_sst')
if self.tot_sst.particles:
tmp = self.tot_sst.copy()
for p in tmp.particles:
if not p.is_fixed:
tmp.particles.remove(p.tag)
tmp.remove_spare_bonding()
self.fxd_sst = tmp
self.fxd_sst_mcfile = os.path.join(self.out_folder, 'fixed_syst.mcf')
mol_files['file1'] = [self.fxd_sst_mcfile, 1]
# Setting up the Monte Carlo system
self.mc_sst = mc_sst
if mc_sst:
mc_sst.file_store = self.out_folder
mol_files = mc_sst.update_props(mol_files)
if kwargs.get('Molecule_Files'):
mol_files = OrderedDict(sorted(kwargs.get('Molecule_Files').items()))
# Raising an error and stop execution if no MCF information in one or another way is provided
if (mc_sst is None) and (not kwargs.get('Molecule_Files')):
self.logger.error('The molecular configuration files of gas molecules for simulation are not set. '
'Nothing to simulate. Exiting...')
exit(0)
self._n_spec = len(mol_files)
self.props['Nbr_Species'] = InpSpec('Nbr_Species', self._n_spec, self._n_spec)
self.props['Molecule_Files'] = InpSpec('Molecule_Files', mol_files, None, **{'new_line': True})
        # Synchronizing the "start type" .inp record
self.fxd_sst_xyz = ''
pops_list = [0] * self._n_spec
start_type = 'make_config'
if self.fxd_sst:
pops_list[0] = 1
self.fxd_sst_xyz = os.path.join(self.out_folder, 'fixed_syst.xyz')
start_type = 'read_config'
start_conf_dict = OrderedDict([('start_type', start_type), ('species', pops_list),
('file_name', self.fxd_sst_xyz)])
self.props['Start_Type'] = InpSpec('Start_Type', kwargs.get('Start_Type'), start_conf_dict)
        # Synchronizing Fragment Files:
frag_files = OrderedDict()
if mc_sst:
mc_sst.temperature = self.props['Temperature_Info'].value
frag_files = mc_sst.update_frag_record(frag_files)
if kwargs.get('Fragment_Files'):
frag_files = OrderedDict(sorted(kwargs.get('Fragment_Files').items()))
if (mc_sst is None) and (not kwargs.get('Fragment_Files')):
self.logger.error('Cannot set the fragment files of gas molecules for simulation')
exit(1)
self.props['Fragment_Files'] = InpSpec('Fragment_Files', frag_files, None, **{'new_line': True})
def write(self):
"""pysimm.cassandra.MCSimulation.write
Iterates through the :class:`~MCSimulation.props` dictionary creating the text for correct CASSANDRA input
"""
for key in self.props.keys():
if self.props[key].value is not None:
self.input += '{:}\n'.format(self.props[key].to_string())
self.input += '\nEND'
# Initializing output stream
self.logger.info('Writing CASSANDRA .inp file to "{:}"...'.format(self.props_file))
out_stream = open(self.props_file, 'w')
out_stream.write('{:}'.format(self.input))
out_stream.close()
        self.logger.info('File: "{:}" was created successfully'.format(self.props_file))
def group_by_id(self, group_key='matrix'):
"""pysimm.cassandra.MCSimulation.group_by_id
        Groups the atoms of the system :class:`~MCSimulation.tot_sst` by a certain property. Iterates through
        all atoms in the system and returns the indexes of only those atoms that match the property. Currently supports 3
        properties defined by the input keyword argument.
Keyword Args:
group_key (str): text constant defines the property to match. Possible keywords are:
(1) `matrix` -- (default) indexes of the atoms in :obj:`~MCSimulation.fxd_sst`
(2) `rigid` -- indexes of all atoms that have rigid atomic bonds. It is assumed here that rigid and
nonrigid atoms can interact only through intermolecular forces
(3) `nonrigid` -- opposite of previous, indexes of all atoms that have nonrigid atomic bonds
Returns:
str:
string in format `a1:b1 a2:b2 ...` where all indexes inside `[ak, bk]` belongs to the selected group
and array of the form `[[a1, b1], [a2, b2], ...]`
"""
fxd_sst_idxs = []
if self.fxd_sst:
fxd_sst_idxs = range(1, len(self.fxd_sst.particles) + 1)
# Behaviour depending on type of particles to check
check = lambda x: x
if group_key.lower() == 'nonrigid':
check = lambda x: not x.is_rigid
elif group_key.lower() == 'rigid':
check = lambda x: x.is_rigid
elif group_key.lower() == 'matrix':
check = lambda x: x.tag in fxd_sst_idxs
idx_array = [[-1, -1]]
for p in self.tot_sst.particles:
if check(p):
if idx_array[-1][0] > 0:
if abs(p.tag - idx_array[-1][1]) > 1:
idx_array.append([p.tag, p.tag])
else:
idx_array[-1][1] = p.tag
else:
idx_array[-1] = [p.tag, p.tag]
        idx_string =
# noinspection SpellCheckingInspection
icons_smileys_people = [
['😀', ' Grinning Face'],
['😃', ' Grinning Face with Big Eyes'],
['😄', ' Grinning Face with Smiling Eyes'],
['😁', ' Beaming Face with Smiling Eyes'],
['😆', ' Grinning Squinting Face'],
['😅', ' Grinning Face with Sweat'],
['🤣', ' Rolling on the Floor Laughing'],
['😂', ' Face with Tears of Joy'],
['🙂', ' Slightly Smiling Face'],
['🙃', ' Upside-Down Face'],
['😉', ' Winking Face'],
['😊', ' Smiling Face with Smiling Eyes'],
['😇', ' Smiling Face with Halo'],
['🥰', ' Smiling Face with Hearts'],
['😍', ' Smiling Face with Heart-Eyes'],
['🤩', ' Star-Struck'],
['😘', ' Face Blowing a Kiss'],
['😗', ' Kissing Face'],
['☺️', ' Smiling Face'],
['😚', ' Kissing Face with Closed Eyes'],
['😙', ' Kissing Face with Smiling Eyes'],
['🥲', ' Smiling Face with Tear'],
['😋', ' Face Savoring Food'],
['😛', ' Face with Tongue'],
['😜', ' Winking Face with Tongue'],
['🤪', ' Zany Face'],
['😝', ' Squinting Face with Tongue'],
['🤑', ' Money-Mouth Face'],
['🤗', ' Hugging Face'],
['🤭', ' Face with Hand Over Mouth'],
['🤫', ' Shushing Face'],
['🤔', ' Thinking Face'],
['🤐', ' Zipper-Mouth Face'],
['🤨', ' Face with Raised Eyebrow'],
['😐', ' Neutral Face'],
['😑', ' Expressionless Face'],
['😶', ' Face Without Mouth'],
['😶‍🌫️', ' Face in Clouds'],
['😏', ' Smirking Face'],
['😒', ' Unamused Face'],
['🙄', ' Face with Rolling Eyes'],
['😬', ' Grimacing Face'],
['😮‍💨', ' Face Exhaling'],
['🤥', ' Lying Face'],
['😌', ' Relieved Face'],
['😔', ' Pensive Face'],
['😪', ' Sleepy Face'],
['🤤', ' Drooling Face'],
['😴', ' Sleeping Face'],
['😷', ' Face with Medical Mask'],
['🤒', ' Face with Thermometer'],
['🤕', ' Face with Head-Bandage'],
['🤢', ' Nauseated Face'],
['🤮', ' Face Vomiting'],
['🤧', ' Sneezing Face'],
['🥵', ' Hot Face'],
['🥶', ' Cold Face'],
['🥴', ' Woozy Face'],
['😵', ' Dizzy Face'],
['😵‍💫', ' Face with Spiral Eyes'],
['🤯', ' Exploding Head'],
['🤠', ' Cowboy Hat Face'],
['🥳', ' Partying Face'],
['🥸', ' Disguised Face'],
['😎', ' Smiling Face with Sunglasses'],
['🤓', ' Nerd Face'],
['🧐', ' Face with Monocle'],
['😕', ' Confused Face'],
['😟', ' Worried Face'],
['🙁', ' Slightly Frowning Face'],
['☹️', ' Frowning Face'],
['😮', ' Face with Open Mouth'],
['😯', ' Hushed Face'],
['😲', ' Astonished Face'],
['😳', ' Flushed Face'],
['🥺', ' Pleading Face'],
['😦', ' Frowning Face with Open Mouth'],
['😧', ' Anguished Face'],
['😨', ' Fearful Face'],
['😰', ' Anxious Face with Sweat'],
['😥', ' Sad but Relieved Face'],
['😢', ' Crying Face'],
['😭', ' Loudly Crying Face'],
['😱', ' Face Screaming in Fear'],
['😖', ' Confounded Face'],
['😣', ' Persevering Face'],
['😞', ' Disappointed Face'],
['😓', ' Downcast Face with Sweat'],
['😩', ' Weary Face'],
['😫', ' Tired Face'],
['🥱', ' Yawning Face'],
['😤', ' Face with Steam From Nose'],
['😡', ' Pouting Face'],
['😠', ' Angry Face'],
['🤬', ' Face with Symbols on Mouth'],
['😈', ' Smiling Face with Horns'],
['👿', ' Angry Face with Horns'],
['💀', ' Skull'],
['☠️', ' Skull and Crossbones'],
['💩', ' Pile of Poo'],
['🤡', ' Clown Face'],
['👹', ' Ogre'],
['👺', ' Goblin'],
['👻', ' Ghost'],
['👽', ' Alien'],
['👾', ' Alien Monster'],
['🤖', ' Robot'],
['😺', ' Grinning Cat'],
['😸', ' Grinning Cat with Smiling Eyes'],
['😹', ' Cat with Tears of Joy'],
['😻', ' Smiling Cat with Heart-Eyes'],
['😼', ' Cat with Wry Smile'],
['😽', ' Kissing Cat'],
['🙀', ' Weary Cat'],
['😿', ' Crying Cat'],
['😾', ' Pouting Cat'],
['💋', ' Kiss Mark'],
['👋', ' Waving Hand'],
['🤚', ' Raised Back of Hand'],
['🖐️', ' Hand with Fingers Splayed'],
['✋', ' Raised Hand'],
['🖖', ' Vulcan Salute'],
['👌', ' OK Hand'],
['🤌', ' Pinched Fingers'],
['🤏', ' Pinching Hand'],
['✌️', ' Victory Hand'],
['🤞', ' Crossed Fingers'],
['🤟', ' Love-You Gesture'],
['🤘', ' Sign of the Horns'],
['🤙', ' Call Me Hand'],
['👈', ' Backhand Index Pointing Left'],
['👉', ' Backhand Index Pointing Right'],
['👆', ' Backhand Index Pointing Up'],
['🖕', ' Middle Finger'],
['👇', ' Backhand Index Pointing Down'],
['☝️', ' Index Pointing Up'],
['👍', ' Thumbs Up'],
['👎', ' Thumbs Down'],
['✊', ' Raised Fist'],
['👊', ' Oncoming Fist'],
['🤛', ' Left-Facing Fist'],
['🤜', ' Right-Facing Fist'],
['👏', ' Clapping Hands'],
['🙌', ' Raising Hands'],
['👐', ' Open Hands'],
['🤲', ' Palms Up Together'],
['🤝', ' Handshake'],
['🙏', ' Folded Hands'],
['✍️', ' Writing Hand'],
['💅', ' Nail Polish'],
['🤳', ' Selfie'],
['💪', ' Flexed Biceps'],
['🦾', ' Mechanical Arm'],
['🦿', ' Mechanical Leg'],
['🦵', ' Leg'],
['🦶', ' Foot'],
['👂', ' Ear'],
['🦻', ' Ear with Hearing Aid'],
['👃', ' Nose'],
['🧠', ' Brain'],
['🫀', ' Anatomical Heart'],
['🫁', ' Lungs'],
['🦷', ' Tooth'],
['🦴', ' Bone'],
['👀', ' Eyes'],
['👁️', ' Eye'],
['👅', ' Tongue'],
['👄', ' Mouth'],
['👶', ' Baby'],
['🧒', ' Child'],
['👦', ' Boy'],
['👧', ' Girl'],
['🧑', ' Person'],
['👱', ' Person: Blond Hair'],
['👨', ' Man'],
['🧔', ' Person: Beard'],
['👨‍🦰', ' Man: Red Hair'],
['👨‍🦱', ' Man: Curly Hair'],
['👨‍🦳', ' Man: White Hair'],
['👨‍🦲', ' Man: Bald'],
['👩', ' Woman'],
['👩‍🦰', ' Woman: Red Hair'],
['🧑‍🦰', ' Person: Red Hair'],
['👩‍🦱', ' Woman: Curly Hair'],
['🧑‍🦱', ' Person: Curly Hair'],
['👩‍🦳', ' Woman: White Hair'],
['🧑‍🦳', ' Person: White Hair'],
['👩‍🦲', ' Woman: Bald'],
['🧑‍🦲', ' Person: Bald'],
['👱‍♀️', ' Woman: Blond Hair'],
['👱‍♂️', ' Man: Blond Hair'],
['🧓', ' Older Person'],
['👴', ' Old Man'],
['👵', ' Old Woman'],
['🙍', ' Person Frowning'],
['🙍‍♂️', ' Man Frowning'],
['🙍‍♀️', ' Woman Frowning'],
['🙎', ' Person Pouting'],
['🙎‍♂️', ' Man Pouting'],
['🙎‍♀️', ' Woman Pouting'],
['🙅', ' Person Gesturing No'],
['🙅‍♂️', ' Man Gesturing No'],
['🙅‍♀️', ' Woman Gesturing No'],
['🙆', ' Person Gesturing OK'],
['🙆‍♂️', ' Man Gesturing OK'],
['🙆‍♀️', ' Woman Gesturing OK'],
['💁', ' Person Tipping Hand'],
['💁‍♂️', ' Man Tipping Hand'],
['💁‍♀️', ' Woman Tipping Hand'],
['🙋', ' Person Raising Hand'],
['🙋‍♂️', ' Man Raising Hand'],
['🙋‍♀️', ' Woman Raising Hand'],
['🧏', ' Deaf Person'],
['🧏‍♂️', ' Deaf Man'],
['🧏‍♀️', ' Deaf Woman'],
['🙇', ' Person Bowing'],
['🙇‍♂️', ' Man Bowing'],
['🙇‍♀️', ' Woman Bowing'],
['🤦', ' Person Facepalming'],
['🤦‍♂️', ' Man Facepalming'],
['🤦‍♀️', ' Woman Facepalming'],
['🤷', ' Person Shrugging'],
['🤷‍♂️', ' Man Shrugging'],
['🤷‍♀️', ' Woman Shrugging'],
['🧑‍⚕️', ' Health Worker'],
['👨‍⚕️', ' Man Health Worker'],
['👩‍⚕️', ' Woman Health Worker'],
['🧑‍🎓', ' Student'],
['👨‍🎓', ' Man Student'],
['👩‍🎓', ' Woman Student'],
['🧑‍🏫', ' Teacher'],
['👨‍🏫', ' Man Teacher'],
['👩‍🏫', ' Woman Teacher'],
['🧑‍⚖️', ' Judge'],
['👨‍⚖️', ' Man Judge'],
['👩‍⚖️', ' Woman Judge'],
['🧑‍🌾', ' Farmer'],
['👨‍🌾', ' Man Farmer'],
['👩‍🌾', ' Woman Farmer'],
['🧑‍🍳', ' Cook'],
['👨‍🍳', ' Man Cook'],
['👩‍🍳', ' Woman Cook'],
['🧑‍🔧', ' Mechanic'],
['👨‍🔧', ' Man Mechanic'],
['👩‍🔧', ' Woman Mechanic'],
['🧑‍🏭', ' Factory Worker'],
['👨‍🏭', ' Man Factory Worker'],
['👩‍🏭', ' Woman Factory Worker'],
['🧑‍💼', ' Office Worker'],
['👨‍💼', ' Man Office Worker'],
['👩‍💼', ' Woman Office Worker'],
['🧑‍🔬', ' Scientist'],
['👨‍🔬', ' Man Scientist'],
['👩‍🔬', ' Woman Scientist'],
['🧑‍💻', ' Technologist'],
['👨‍💻', ' Man Technologist'],
['👩‍💻', ' Woman Technologist'],
['🧑‍🎤', ' Singer'],
['👨‍🎤', ' Man Singer'],
['👩‍🎤', ' Woman Singer'],
['🧑‍🎨', ' Artist'],
['👨‍🎨', ' Man Artist'],
['👩‍🎨', ' Woman Artist'],
['🧑‍✈️', ' Pilot'],
['👨‍✈️', ' Man Pilot'],
['👩‍✈️', ' Woman Pilot'],
['🧑‍🚀', ' Astronaut'],
['👨‍🚀', ' Man Astronaut'],
['👩‍🚀', ' Woman Astronaut'],
['🧑‍🚒', ' Firefighter'],
['👨‍🚒', ' Man Firefighter'],
['👩‍🚒', ' Woman Firefighter'],
['👮', ' Police Officer'],
['👮‍♂️', ' Man Police Officer'],
['👮‍♀️', ' Woman Police Officer'],
['🕵️', ' Detective'],
['🕵️‍♂️', ' Man Detective'],
['🕵️‍♀️', ' Woman Detective'],
['💂', ' Guard'],
['💂‍♂️', ' Man Guard'],
['💂‍♀️', ' Woman Guard'],
['🥷', ' Ninja'],
['👷', ' Construction Worker'],
['👷‍♂️', ' Man Construction Worker'],
['👷‍♀️', ' Woman Construction Worker'],
['🤴', ' Prince'],
['👸', ' Princess'],
['👳', ' Person Wearing Turban'],
['👳‍♂️', ' Man Wearing Turban'],
['👳‍♀️', ' Woman Wearing Turban'],
['👲', ' Person With Skullcap'],
= ccols = 0
pos = []
pos2 = []
for x in i:
#if x == ';':
if x is Ellipsis:
rows.append([])
if final_cols > 0 and final_cols != ccols:
error("Incompatible shapes!")
else:
final_cols = ccols
final_rows += crows
ccols = 0
pos.append(Ellipsis)
elif isinstance(x, mvar):
shp = x.msize
if len(shp) < 1: shp = [0]
if len(shp) < 2: shp += [0]
rows[-1].append(shp[0])
pos.append( (slice(final_rows, final_rows+shp[0]),
slice(ccols, ccols+shp[1])) )
crows = shp[0] # FIXME
ccols += shp[1]
elif _isscalar(x):
rows[-1].append(1)
pos.append( (final_rows, ccols) )
crows = 1
ccols += 1
else:
raise OMPCException("Unsupported type: %s!"%type(x))
if final_cols > 0 and final_cols != ccols:
error("Incompatible shapes!")
else:
final_cols = ccols
final_rows += crows
out = empty((final_rows, final_cols), 'double')
for sl, x in _izip(pos, i):
if x is not Ellipsis:
if isinstance(x, mvar): x = x._a
out._a.__setitem__(sl[::-1], x)
#out._a.reshape(final_cols, final_rows).T.__setitem__(sl, x)
return out
def who(*args,**kwargs):
nargin, nargout = _get_narginout(0)
import __main__
ns = __main__.__dict__
vars = [ x for x in ns \
if isinstance(ns[x], mvar) and x[0] != '_' ]
if args:
vars = [ x for x in vars if x in args ]
vars.sort()
if nargout == 0:
print 'Your variables are:'
print ' '.join(vars)
else:
return mcellarray(vars)
@_ompc_base
def whos(*args, **kwargs):
"""Return list of variables in the current workspace."""
nargin, nargout = _get_narginout(0)
import __main__
ns = __main__.__dict__
vars = [ x for x in ns \
if isinstance(ns[x], mvar) and x[0] != '_' ]
if args:
vars = [ x for x in vars if x in args ]
vars.sort()
if nargout == 0:
cols = ['Name', 'Size', 'Bytes', 'Class', 'Attributes']
print ' %10s %15s %15s %10s %10s '%tuple(cols)
for xname in vars:
x = ns[xname]
print ' %10s %15r %15r %10s '%(xname, x.msize, x._a.nbytes, x.dtype)
print
else:
raise NotImplementedError()
@_ompc_base
def size(X):
return X.msize
@_ompc_base
def rand(*args):
if isinstance(args[0], str):
        raise NotImplementedError
if len(args) == 1:
args = (args[0], args[0])
return _marray('double', args, np.random.rand(*args[::-1]))
@_ompc_base
def randn(*args):
if isinstance(args[0], str):
        raise NotImplementedError
if len(args) == 1:
args = (args[0], args[0])
return _marray('double', args, np.random.randn(*args[::-1]))
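# Hedged usage note (added; not from the original OMPC sources): rand and randn follow
# MATLAB semantics, so a single size argument produces a square matrix.
def _rand_demo():
    A = rand(3)         # 3x3 matrix of uniform samples
    B = randn(2, 4)     # 2x4 matrix of standard-normal samples
    return size(A), size(B)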
@_ompc_base
def reshape(A, *newsize):
if len(newsize) == 0:
raise OMPCException('??? Error using ==> reshape\n'
'Not enough input arguments.')
if len(newsize) == 1 and hasattr(newsize, '__len__'):
newsize = newsize[0]
if not np.prod(A.msize) == np.prod(newsize):
raise OMPCException('??? Error using ==> reshape\n'
'To RESHAPE the number of elements must not change.')
out = A.__copy__()
out.msize = newsize
out._a = out._a.reshape(newsize[::-1])
return out
@_ompc_base
def fliplr(X):
if X._a.ndim != 2:
error('X must be a 2-D matrix.')
return _marray(X.dtype, X.msize, np.flipud(X._a))
@_ompc_base
def flipud(X):
if X._a.ndim != 2:
error('X must be a 2-D matrix.')
return _marray(X.dtype, X.msize, np.fliplr(X._a))
@_ompc_base
def sum(A, *dimtype):
restype = 'double'
dim = 1
if len(dimtype) == 2:
dim = dimtype[0]
dimtype = dimtype[1]
elif len(dimtype) == 1:
dimtype = dimtype[0]
if isinstance(dimtype, str):
if dimtype == 'native':
restype = A.dtype
else:
restype = dimtype
else:
dim = dimtype
msize = A.msize
if A.msize[dim-1] == 1:
return A.__copy__()
nshp = list(msize)
nshp[dim-1] = 1
if len(nshp) > 2 and nshp[-1] == 1: nshp = nshp[:-1]
# use numpy's sum
a = np.sum(A._a, len(msize)-dim)
return _marray(A.dtype, nshp, a.reshape(nshp[::-1]))
@_ompc_base
def find(cond):
a = mpl.find(cond._a.reshape(-1)) + 1
msize = (len(a), 1)
if len(cond.msize) == 2 and cond.msize[0] == 1:
msize = (1, len(a))
return _marray('double', msize, a.astype('f8').reshape(msize[::-1]))
try: _inv = np.oldnumeric.linear_algebra.inverse
except: _inv = np.linalg.inv
@_ompc_base
def inv(X):
assert len(X.msize) == 2 and X.msize[0] == X.msize[1]
return _marray('double', X.msize, _inv(X._a.T).T)
_eig = np.linalg.eig
@_ompc_base
def eig(X):
assert len(X.msize) == 2 and X.msize[0] == X.msize[1]
nargin, nargout = _get_narginout(1)
[V, D] = _eig(X._a.T)
if nargout == 1:
return _marray('double', (len(V), 1), V.reshape(1, -1))
elif nargout == 2:
V = np.diag(V.reshape(-1))
return _marray('double', D.shape[::-1], D.T), \
_marray('double', V.shape, V)
else:
raise OMPCException('Too many output arguments.')
_svd = np.linalg.svd
@_ompc_base
def svd(X, *args):
if len(args) > 0:
raise NotImplementedError()
assert len(X.msize) == 2 and X.msize[0] == X.msize[1]
nargin, nargout = _get_narginout(1)
[U, S, V] = _svd(X._a.T)
# V is transposed already
if nargout == 1:
return _marray('double', (len(S), 1), S.reshape(1, -1))
elif nargout == 3:
S = np.diag(S.reshape(-1))
return _marray('double', U.shape[::-1], U.T), \
_marray('double', S.shape[::-1], S), \
_marray('double', V.shape, V)
else:
raise OMPCException('Incorrect number of output arguments.')
@_ompc_base
def poly(X):
na = np.poly(X._a.T)
return _marray('double', (1, len(na)), na.reshape(-1, 1))
@_ompc_base
def roots(X):
assert len(X.msize) == 2 and (X.msize[0] == 1 or X.msize[1] == 1)
na = np.roots(X._a.reshape(-1))
return _marray('double', (len(na), 1), na.reshape(1, -1))
@_ompc_base
def conv(X, Y):
assert len(X.msize) == 2 and (X.msize[0] == 1 or X.msize[1] == 1)
assert len(Y.msize) == 2 and (Y.msize[0] == 1 or Y.msize[1] == 1)
na = np.convolve(X._a.reshape(-1), Y._a.reshape(-1))
msize = (1, len(na))
if Y.msize[1] == 1:
msize = (len(na), 1)
return _marray('double', msize, na.reshape(msize[::-1]))
@_ompc_base
def round(X):
return _marray('double', X.msize, np.around(X._a))
@_ompc_base
def floor(X):
return _marray('double', X.msize, np.floor(X._a))
@_ompc_base
def ceil(X):
return _marray('double', X.msize, np.ceil(X._a))
@_ompc_base
def fix(X):
return _marray('double', X.msize, np.fix(X._a))
def _what(X):
if isinstance(X, mvar):
return X.dtype, X.msize
elif isinstance(X, int):
return 'int32', (1, 1)
elif isinstance(X, float):
return 'double', (1, 1)
else:
raise NotImplementedError()
@_ompc_base
def mod(X, i):
dtype, msize = _what(X)
if isinstance(X, mvar): X = X._a
if isinstance(i, mvar):
if i.msize != msize:
raise OMPCException("Matrix dimensions must agree!")
i = i._a
if i == 0:
return _marray(dtype, msize, X)
elif np.all(X == i):
return zeros(msize, dtype)
na = np.mod(X, i)
return _marray(_numpy2dtype[na.dtype], msize, na)
@_ompc_base
def sqrt(X):
if _isscalar(X):
X = _marray('double', (1,1), [X])
if np.any(X._a < 0):
return _marray('complex', X.msize, np.sqrt(X._a.astype('complex128')))
else:
return _marray('double', X.msize, np.sqrt(X._a))
@_ompc_base
def magic(n):
# from Octave's magic.m
A = empty((n, n), 'double')
if n == 0:
return marray([])
elif mod (n, 2) == 1:
shift = floor ((mslice[0:n*n-1])/n)
c = mod(mslice[1:n*n] - shift + (n-3)/2, n)
r = mod(mslice[n*n:-1:1] + 2*shift, n)
A(c*n+r+1).lvalue = mslice[1:n*n]
A = reshape(A, n, n);
elif mod(n, 4) == 0:
A = reshape(mslice[1:n*n], n, n).cT;
I = mcat([mslice[1:4:n], mslice[4:4:n]])
J = fliplr(I);
A(I,I).lvalue = A(J,J)
I = mcat([mslice[2:4:n], mslice[3:4:n]]);
J = fliplr(I);
A(I,I).lvalue = A(J,J);
elif mod(n, 4) == 2:
m = n/2
A = magic(m)
A = mcat([A, A+2*m*m, OMPCSEMI, A+3*m*m, A+m*m])
k = (m-1)/2
if k > 1:
I = mslice[1:m]
J = mcat([mslice[2:k], mslice[n-k+2:n]])
A([I,I+m],J).lvalue = A([I+m,I],J)
        I = mcat([mslice[1:k], mslice[k+2:m]])
A([I,I+m],1).lvalue = A([I+m,I],1);
I = k + 1
A([I,I+m],I).lvalue = A([I+m,I],I)
return A
from os.path import normpath as _normpath
import scipy.io
@_ompc_base
def load(*X):
X = list(X)
format = None
re = []
vars = []
if X[0].strip()[0] == '-':
op = X.pop(0).strip()
if op.lower() == '-ascii': format = 'a'
elif op.lower() == '-mat': format = 'm'
else: raise OMPCException('Unknown option "%s".'%op)
# next must be filename
fname = X.pop(0)
base, ext = os.path.splitext(fname)
if not ext:
if os.path.exists(fname):
format = 'a'
else:
ext = '.mat'
fname += ext
format = 'm'
elif ext == '.mat':
format = 'm'
if not os.path.exists(fname):
raise OMPCException('Cannot find file "%s"!'%fname)
# variables
if len(X) > 0:
if X[0].strip()[0] == '-':
# regexp
op = X.pop(0).strip().lower()
if not op == '-regexp':
raise OMPCException('Unknown option "%s".'%op)
re = X
else:
vars = X
fname = _normpath(fname)
# load
if format == 'm':
# scipy makes imports really slow
_loadmat = scipy.io.loadmat
try: d = _loadmat(fname, matlab_compatible=True)
except: raise OMPCException('Cannot open "%s" as an M-file!!'%fname)
data = []
if vars:
data = [ (k, v) for k, v in d.items() if k in vars ]
elif re:
raise NotImplementedError()
else:
data = [ (k, v) for k, v in d.items() if k[:2] != '__' ]
# populate the workspace
import inspect
cf = inspect.currentframe()
for var, val in data:
na = np.asfortranarray(val).T
cf.f_back.f_globals[var] = \
_marray(_numpy2dtype[str(na.dtype)], na.shape[::-1], na)
else:
# ASCII
try: f = file(fname, 'rU')
except: raise OMPCException('Cannot open "%s"!'%fname)
data = []
for x in f:
x = x.strip()
if x.startswith('%'): continue
data += [ map(float, x.split()) ]
na = np.asfortranarray(data, 'f8').T
import inspect
cf = inspect.currentframe()
base = os.path.basename(base)
cf.f_back.f_globals[base] = _marray('double', na.shape[::-1], na)
# _ompc_base
# def save(*X):
# import inspect
# f = inspect.currentframe()
# d = {}
# for var in args:
# d[var] = f.f_back.f_globals[var]
# _savemat(fname, d)
@_ompc_base
def
and not request.POST.get("reset"):
form = PublicationForm(request.POST)
if form.is_valid():
events = []
# start by notifying the RFC Editor
import ietf.sync.rfceditor
response, error = ietf.sync.rfceditor.post_approved_draft(settings.RFC_EDITOR_SYNC_NOTIFICATION_URL, doc.name)
if error:
return render(request, 'doc/draft/rfceditor_post_approved_draft_failed.html',
dict(name=doc.name,
response=response,
error=error))
m.subject = form.cleaned_data["subject"]
m.body = form.cleaned_data["body"]
m.save()
if doc.group.acronym != "none":
m.related_groups.set([doc.group])
m.related_docs.set([doc])
send_mail_message(request, m)
# IANA copy
(m.to, m.cc) = gather_address_lists('pubreq_rfced_iana',doc=doc).as_strings()
send_mail_message(request, m, extra=extra_automation_headers(doc))
e = DocEvent(doc=doc, type="requested_publication", rev=doc.rev, by=request.user.person)
e.desc = "Sent request for publication to the RFC Editor"
e.save()
events.append(e)
# change state
prev_state = doc.get_state(next_state.type_id)
if next_state != prev_state:
doc.set_state(next_state)
e = add_state_change_event(doc, request.user.person, prev_state, next_state)
if e:
events.append(e)
doc.save_with_history(events)
return redirect('ietf.doc.views_doc.document_main', name=doc.name)
else:
if doc.intended_std_level_id in ("std", "ds", "ps", "bcp"):
action = "Protocol Action"
else:
action = "Document Action"
from ietf.doc.templatetags.mail_filters import std_level_prompt
subject = "%s: '%s' to %s (%s-%s.txt)" % (action, doc.title, std_level_prompt(doc), doc.name, doc.rev)
body = generate_publication_request(request, doc)
form = PublicationForm(initial=dict(subject=subject,
body=body))
return render(request, 'doc/draft/request_publication.html',
dict(form=form,
doc=doc,
message=m,
next_state=next_state,
consensus_filled_in=(
True if (doc.stream_id and doc.stream_id=='ietf')
else (consensus_event != None and consensus_event.consensus != None)),
),
)
class AdoptDraftForm(forms.Form):
group = forms.ModelChoiceField(queryset=Group.objects.filter(type__features__acts_like_wg=True, state="active").order_by("-type", "acronym"), required=True, empty_label=None)
newstate = forms.ModelChoiceField(queryset=State.objects.filter(type__in=['draft-stream-ietf','draft-stream-irtf'], used=True).exclude(slug__in=settings.GROUP_STATES_WITH_EXTRA_PROCESSING), required=True, label="State")
comment = forms.CharField(widget=forms.Textarea, required=False, label="Comment", help_text="Optional comment explaining the reasons for the adoption.", strip=False)
weeks = forms.IntegerField(required=False, label="Expected weeks in adoption state")
def __init__(self, *args, **kwargs):
user = kwargs.pop("user")
rg_features = GroupFeatures.objects.get(type_id='rg')
wg_features = GroupFeatures.objects.get(type_id='wg')
super(AdoptDraftForm, self).__init__(*args, **kwargs)
state_types = set()
if has_role(user, "Secretariat"):
state_types.update(['draft-stream-ietf','draft-stream-irtf'])
else:
if (has_role(user, "IRTF Chair")
or Group.objects.filter(type="rg",
state="active",
role__person__user=user,
role__name__in=rg_features.docman_roles).exists()):
state_types.add('draft-stream-irtf')
if Group.objects.filter( type="wg",
state="active",
role__person__user=user,
role__name__in=wg_features.docman_roles).exists():
state_types.add('draft-stream-ietf')
state_choices = State.objects.filter(type__in=state_types, used=True).exclude(slug__in=settings.GROUP_STATES_WITH_EXTRA_PROCESSING)
if not has_role(user, "Secretariat"):
if has_role(user, "IRTF Chair"):
group_queryset = self.fields["group"].queryset.filter(Q(role__person__user=user, role__name__in=rg_features.docman_roles)|Q(type="rg", state="active")).distinct()
else:
group_queryset = self.fields["group"].queryset.filter(role__person__user=user, role__name__in=wg_features.docman_roles).distinct()
self.fields["group"].queryset = group_queryset
self.fields['group'].choices = [(g.pk, '%s - %s' % (g.acronym, g.name)) for g in self.fields["group"].queryset]
self.fields['newstate'].choices = [('','-- Pick a state --')]
self.fields['newstate'].choices.extend([(x.pk,x.name + " (IETF)") for x in state_choices if x.type_id == 'draft-stream-ietf'])
self.fields['newstate'].choices.extend([(x.pk,x.name + " (IRTF)") for x in state_choices if x.type_id == 'draft-stream-irtf'])
def clean_newstate(self):
group = self.cleaned_data['group']
newstate = self.cleaned_data['newstate']
if (newstate.type_id == 'draft-stream-ietf') and (group.type_id == 'rg'):
raise forms.ValidationError('Cannot assign IETF WG state to IRTF group')
elif (newstate.type_id == 'draft-stream-irtf') and (group.type_id == 'wg'):
raise forms.ValidationError('Cannot assign IRTF RG state to IETF group')
else:
return newstate
@login_required
def adopt_draft(request, name):
doc = get_object_or_404(Document, type="draft", name=name)
if not can_adopt_draft(request.user, doc):
permission_denied(request, "You don't have permission to access this page.")
if request.method == 'POST':
form = AdoptDraftForm(request.POST, user=request.user)
if form.is_valid():
# adopt
by = request.user.person
events = []
group = form.cleaned_data["group"]
if group.type.slug == "rg":
new_stream = StreamName.objects.get(slug="irtf")
else:
new_stream = StreamName.objects.get(slug="ietf")
new_state = form.cleaned_data["newstate"]
# stream
if doc.stream != new_stream:
e = DocEvent(type="changed_stream", doc=doc, rev=doc.rev, by=by)
e.desc = "Changed stream to <b>%s</b>" % new_stream.name
if doc.stream:
e.desc += " from %s" % doc.stream.name
e.save()
events.append(e)
old_stream = doc.stream
doc.stream = new_stream
if old_stream != None:
email_stream_changed(request, doc, old_stream, new_stream)
# group
if group != doc.group:
e = DocEvent(type="changed_group", doc=doc, rev=doc.rev, by=by)
e.desc = "Changed group to <b>%s (%s)</b>" % (group.name, group.acronym.upper())
if doc.group.type_id != "individ":
e.desc += " from %s (%s)" % (doc.group.name, doc.group.acronym.upper())
e.save()
events.append(e)
doc.group = group
new_notify = get_initial_notify(doc,extra=doc.notify)
events.append(make_notify_changed_event(request, doc, by, new_notify))
doc.notify = new_notify
comment = form.cleaned_data["comment"].strip()
# state
prev_state = doc.get_state("draft-stream-%s" % doc.stream_id)
if new_state != prev_state:
doc.set_state(new_state)
e = add_state_change_event(doc, by, prev_state, new_state)
events.append(e)
due_date = None
if form.cleaned_data["weeks"] != None:
due_date = datetime.date.today() + datetime.timedelta(weeks=form.cleaned_data["weeks"])
update_reminder(doc, "stream-s", e, due_date)
email_adopted(request, doc, prev_state, new_state, by, comment)
# comment
if comment:
e = DocEvent(type="added_comment", doc=doc, rev=doc.rev, by=by)
e.desc = comment
e.save()
events.append(e)
doc.save_with_history(events)
return HttpResponseRedirect(doc.get_absolute_url())
else:
form = AdoptDraftForm(user=request.user)
return render(request, 'doc/draft/adopt_draft.html',
{'doc': doc,
'form': form,
})
class ReleaseDraftForm(forms.Form):
comment = forms.CharField(widget=forms.Textarea, required=False, label="Comment", help_text="Optional comment explaining the reasons for releasing the document." )
@login_required
def release_draft(request, name):
doc = get_object_or_404(Document, type="draft", name=name)
if doc.get_state_slug('draft-iesg') != 'idexists':
raise Http404
if not can_unadopt_draft(request.user, doc):
permission_denied(request, "You don't have permission to access this page.")
if request.method == 'POST':
form = ReleaseDraftForm(request.POST)
if form.is_valid():
comment = form.cleaned_data["comment"]
by = request.user.person
events = []
if doc.stream.slug == 'ise' or doc.group.type_id != 'individ':
existing_tags = list(doc.tags.all())
if existing_tags:
doc.tags.clear()
e = DocEvent(type="changed_document", doc=doc, rev=doc.rev, by=by)
l = []
l.append("Tag%s %s cleared." % (pluralize(existing_tags), ", ".join(t.name for t in existing_tags)))
e.desc = " ".join(l)
e.save()
events.append(e)
email_stream_tags_changed(request, doc, set(), existing_tags, by, comment)
prev_state = doc.get_state("draft-stream-%s" % doc.stream_id)
if prev_state:
doc.unset_state("draft-stream-%s" % doc.stream_id)
e = StateDocEvent(doc=doc, rev=doc.rev, by=by)
e.type = "changed_state"
e.state_type = (prev_state).type
e.state = None
e.desc = "State changed to <b>None</b> from %s" % prev_state.name
e.save()
events.append(e)
email_state_changed(request,doc,e.desc)
if doc.stream.slug != 'ise':
old_group = doc.group
doc.group = Group.objects.get(acronym='none')
e = DocEvent(type="changed_document", doc=doc, rev=doc.rev, by=by)
e.desc = "Document removed from group %s." % old_group.acronym.upper()
if doc.stream:
e = DocEvent(type="changed_stream", doc=doc, rev=doc.rev, by=by)
e.desc = "Changed stream to <b>None</b> from %s" % doc.stream.name
e.save()
events.append(e)
old_stream = doc.stream
doc.stream = None
email_stream_changed(request, doc, old_stream, None)
if comment:
e = DocEvent(type="added_comment", doc=doc, rev=doc.rev, by=by)
e.desc = comment
e.save()
events.append(e)
doc.save_with_history(events)
return HttpResponseRedirect(doc.get_absolute_url())
else:
form = ReleaseDraftForm()
return render(request, 'doc/draft/release_draft.html', {'doc':doc, 'form':form })
class ChangeStreamStateForm(forms.Form):
new_state = forms.ModelChoiceField(queryset=State.objects.filter(used=True), label='State' )
weeks = forms.IntegerField(label='Expected weeks in state',required=False)
comment = forms.CharField(widget=forms.Textarea, required=False, help_text="Optional comment for the document history.", strip=False)
tags = forms.ModelMultipleChoiceField(queryset=DocTagName.objects.filter(used=True), widget=forms.CheckboxSelectMultiple, required=False)
def __init__(self, *args, **kwargs):
doc = kwargs.pop("doc")
state_type = kwargs.pop("state_type")
self.can_set_sub_pub = kwargs.pop("can_set_sub_pub")
self.stream = kwargs.pop("stream")
super(ChangeStreamStateForm, self).__init__(*args, **kwargs)
f = self.fields["new_state"]
f.queryset = f.queryset.filter(type=state_type)
if doc.group:
unused_states = doc.group.unused_states.values_list("pk", flat=True)
f.queryset = f.queryset.exclude(pk__in=unused_states)
f.label = state_type.label
if self.stream.slug == 'ietf':
if self.can_set_sub_pub:
f.help_text = "Only select 'Submitted to IESG for Publication' to correct errors. Use the document's main page to request publication."
else:
f.help_text = "You may not set the 'Submitted to IESG for Publication' using this form - Use the document's main page to request publication."
f = self.fields['tags']
f.queryset = f.queryset.filter(slug__in=get_tags_for_stream_id(doc.stream_id))
if doc.group:
unused_tags = doc.group.unused_tags.values_list("pk", flat=True)
f.queryset = f.queryset.exclude(pk__in=unused_tags)
def clean_new_state(self):
new_state = self.cleaned_data.get('new_state')
if new_state.slug=='sub-pub' and not self.can_set_sub_pub:
raise forms.ValidationError('You may not set the %s state using this form. Use the "Submit to IESG for publication" button on the document\'s main page instead. If that button does not appear, the document may already have IESG state. Ask your Area Director or the Secretariat for help.'%new_state.name)
return new_state
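# next_states_for_stream_state() below resolves the allowed next states for a
# stream state machine: group-specific GroupStateTransitions take precedence over
# the current state's default next_states; with no current state it falls back to
# the first used state of the requested type, and anything listed in the group's
# unused_states is filtered out.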
def next_states_for_stream_state(doc, state_type, current_state):
# find next states
next_states = []
if current_state:
next_states = current_state.next_states.all()
if doc.stream_id == "ietf" and doc.group:
transitions = doc.group.groupstatetransitions_set.filter(state=current_state)
if transitions:
next_states = transitions[0].next_states.all()
else:
# return the initial state
states = State.objects.filter(used=True, type=state_type).order_by('order')
if states:
next_states = states[:1]
if doc.group:
unused_states = doc.group.unused_states.values_list("pk", flat=True)
next_states = [n for n in next_states if n.pk not in unused_states]
return next_states
@login_required
def change_stream_state(request, name, state_type):
doc = get_object_or_404(Document, type="draft", name=name)
if not doc.stream:
raise Http404
state_type = get_object_or_404(StateType, slug=state_type)
if not is_authorized_in_doc_stream(request.user, doc):
permission_denied(request, "You don't have permission to access this page.")
prev_state = doc.get_state(state_type.slug)
next_states = next_states_for_stream_state(doc, state_type, prev_state)
can_set_sub_pub = has_role(request.user,('Secretariat','Area Director')) or (prev_state and prev_state.slug=='sub-pub')
if request.method == 'POST':
form = ChangeStreamStateForm(request.POST, doc=doc, state_type=state_type,can_set_sub_pub=can_set_sub_pub,stream=doc.stream)
if form.is_valid():
by = request.user.person
events = []
comment = form.cleaned_data["comment"].strip()
# state
new_state = form.cleaned_data["new_state"]
if new_state != prev_state:
doc.set_state(new_state)
e = add_state_change_event(doc, by, prev_state, new_state)
events.append(e)
due_date = None
if form.cleaned_data["weeks"] != None:
due_date = datetime.date.today() + datetime.timedelta(weeks=form.cleaned_data["weeks"])
update_reminder(doc, "stream-s", e, due_date)
email_stream_state_changed(request, doc, prev_state, new_state, by, comment)
# tags
existing_tags = set(doc.tags.all())
new_tags = set(form.cleaned_data["tags"])
if existing_tags != new_tags:
doc.tags.clear()
doc.tags.set(new_tags)
e = DocEvent(type="changed_document", doc=doc, rev=doc.rev, by=by)
added_tags = new_tags - existing_tags
removed_tags = existing_tags - new_tags
l = []
if added_tags:
l.append("Tag%s %s set." % (pluralize(added_tags), ", ".join(t.name for t in added_tags)))
if removed_tags:
l.append("Tag%s %s cleared." % (pluralize(removed_tags), ", ".join(t.name for t in removed_tags)))
e.desc = " ".join(l)
e.save()
events.append(e)
email_stream_tags_changed(request, doc, added_tags, removed_tags, by, comment)
# comment
if comment:
e = DocEvent(type="added_comment", doc=doc, rev=doc.rev, by=by)
e.desc = comment
e.save()
events.append(e)
if events:
doc.save_with_history(events)
return HttpResponseRedirect(doc.get_absolute_url())
else:
form.add_error(None, "No change in state or tags found, and no comment provided -- nothing to do.")
else:
form = ChangeStreamStateForm(doc=doc, state_type=state_type, can_set_sub_pub=can_set_sub_pub, stream=doc.stream)
hands = ['''Game Hand #712008727 - Tournament #24095328 - Holdem(No Limit) - Level 9 (2500.00/5000.00)- 2021/02/20 18:46:52 UTC
Table '3' 9-max Seat #2 is the button
Seat 1: MateusVR (146796.00)
Seat 2: PotNoodle99912 (155072.00)
Seat 3: Frank126 (97418.00)
Seat 4: TiltedMILF (53800.00)
Seat 5: What_Truth (43825.00)
Seat 6: VUSEDSV (54225.00)
Seat 7: coog (59284.00)
Seat 8: dartdog (91180.00)
Seat 9: JokesyWales (48400.00)
MateusVR posts ante 500.00
PotNoodle99912 posts ante 500.00
Frank126 posts ante 500.00
TiltedMILF posts ante 500.00
What_Truth posts ante 500.00
VUSEDSV posts ante 500.00
coog posts ante 500.00
dartdog posts ante 500.00
JokesyWales posts ante 500.00
*** HOLE CARDS ***
Main pot 4500.00
Frank126 posts the small blind 2500.00
TiltedMILF posts the big blind 5000.00
Dealt to PotNoodle99912 [Qc 6s]
What_Truth raises 43325.00 to 43325.00 and is all-in
VUSEDSV folds
coog calls 43325.00
dartdog folds
JokesyWales folds
MateusVR folds
PotNoodle99912 folds
Frank126 folds
TiltedMILF folds
*** FLOP *** [2h 8h 9c]
Main pot 98650.00
*** TURN *** [2h 8h 9c] [5c]
Main pot 98650.00
*** RIVER *** [2h 8h 9c 5c] [8s]
Main pot 98650.00
*** SHOW DOWN ***
Main pot 98650.00
What_Truth shows [Ad Tc] (a pair of Eights [8s 8h Ad Tc 9c])
coog shows [Td Th] (two pair, Tens and Eights [Th Td 8s 8h 9c])
coog collected 98650.00 from main pot
*** SUMMARY ***
Total pot 98650.00
Board [2h 8h 9c 5c 8s]
Seat 1: MateusVR folded on the Pre-Flop and did not bet
Seat 2: PotNoodle99912 (button) folded on the Pre-Flop
Seat 3: Frank126 (small blind) folded on the Pre-Flop
Seat 4: TiltedMILF (big blind) folded on the Pre-Flop
Seat 5: What_Truth showed [Ad Tc] and lost with a pair of Eights [8s 8h Ad Tc 9c]
Seat 6: VUSEDSV folded on the Pre-Flop and did not bet
Seat 7: coog showed [Td Th] and won 98650.00 with two pair, Tens and Eights [Th Td 8s 8h 9c]
Seat 8: dartdog folded on the Pre-Flop and did not bet
Seat 9: JokesyWales folded on the Pre-Flop and did not bet''',
'''Game Hand #712009450 - Tournament #24095328 - Holdem(No Limit) - Level 9 (2500.00/5000.00)- 2021/02/20 18:47:34 UTC
Table '3' 9-max Seat #3 is the button
Seat 1: MateusVR (146296.00)
Seat 2: PotNoodle99912 (154572.00)
Seat 3: Frank126 (94418.00)
Seat 4: TiltedMILF (48300.00)
Seat 6: VUSEDSV (53725.00)
Seat 7: coog (114109.00)
Seat 8: dartdog (90680.00)
Seat 9: JokesyWales (47900.00)
MateusVR posts ante 500.00
PotNoodle99912 posts ante 500.00
Frank126 posts ante 500.00
TiltedMILF posts ante 500.00
VUSEDSV posts ante 500.00
coog posts ante 500.00
dartdog posts ante 500.00
JokesyWales posts ante 500.00
*** HOLE CARDS ***
Main pot 4000.00
TiltedMILF posts the small blind 2500.00
VUSEDSV posts the big blind 5000.00
Dealt to PotNoodle99912 [Ad Ac]
coog folds
dartdog folds
JokesyWales folds
MateusVR raises 10000.00 to 10000.00
PotNoodle99912 raises 25750.00 to 25750.00
Frank126 folds
TiltedMILF folds
VUSEDSV raises 48225.00 to 53225.00 and is all-in
MateusVR folds
PotNoodle99912 calls 27475.00
*** FLOP *** [4h 9s Kh]
Main pot 122950.00
*** TURN *** [4h 9s Kh] [Ts]
Main pot 122950.00
*** RIVER *** [4h 9s Kh Ts] [Ks]
Main pot 122950.00
*** SHOW DOWN ***
Main pot 122950.00
PotNoodle99912 shows [Ad Ac] (two pair, Aces and Kings [Ad Ac Ks Kh Ts])
VUSEDSV shows [Qd Qs] (two pair, Kings and Queens [Ks Kh Qs Qd Ts])
PotNoodle99912 collected 122950.00 from main pot
*** SUMMARY ***
Total pot 122950.00
Board [4h 9s Kh Ts Ks]
Seat 1: MateusVR folded on the Pre-Flop and did not bet
Seat 2: PotNoodle99912 showed [Ad Ac] and won 122950.00 with two pair, Aces and Kings [Ad Ac Ks Kh Ts]
Seat 3: Frank126 (button) folded on the Pre-Flop
Seat 4: TiltedMILF (small blind) folded on the Pre-Flop
Seat 6: VUSEDSV (big blind) showed [Qd Qs] and lost with two pair, Kings and Queens [Ks Kh Qs Qd Ts]
Seat 7: coog folded on the Pre-Flop and did not bet
Seat 8: dartdog folded on the Pre-Flop and did not bet
Seat 9: JokesyWales folded on the Pre-Flop and did not bet''',
'''Game Hand #712010302 - Tournament #24095328 - Holdem(No Limit) - Level 9 (2500.00/5000.00)- 2021/02/20 18:48:26 UTC
Table '3' 9-max Seat #4 is the button
Seat 1: MateusVR (135796.00)
Seat 2: PotNoodle99912 (223797.00)
Seat 3: Frank126 (93918.00)
Seat 4: TiltedMILF (45300.00)
Seat 7: coog (113609.00)
Seat 8: dartdog (90180.00)
Seat 9: JokesyWales (47400.00)
MateusVR posts ante 500.00
PotNoodle99912 posts ante 500.00
Frank126 posts ante 500.00
TiltedMILF posts ante 500.00
coog posts ante 500.00
dartdog posts ante 500.00
JokesyWales posts ante 500.00
*** HOLE CARDS ***
Main pot 3500.00
coog posts the big blind 5000.00
Dealt to PotNoodle99912 [9d 8c]
dartdog folds
JokesyWales raises 46900.00 to 46900.00 and is all-in
MateusVR folds
PotNoodle99912 folds
Frank126 folds
TiltedMILF folds
coog calls 41900.00
*** FLOP *** [6d Qd Ah]
Main pot 97300.00
*** TURN *** [6d Qd Ah] [6c]
Main pot 97300.00
*** RIVER *** [6d Qd Ah 6c] [5h]
Main pot 97300.00
*** SHOW DOWN ***
Main pot 97300.00
coog shows [Kh Ac] (two pair, Aces and Sixs [Ah Ac 6d 6c Kh])
JokesyWales shows [5s As] (two pair, Aces and Sixs [As Ah 6d 6c Qd])
coog collected 97300.00 from main pot
*** SUMMARY ***
Total pot 97300.00
Board [6d Qd Ah 6c 5h]
Seat 1: MateusVR folded on the Pre-Flop and did not bet
Seat 2: PotNoodle99912 folded on the Pre-Flop and did not bet
Seat 3: Frank126 folded on the Pre-Flop and did not bet
Seat 4: TiltedMILF (button) folded on the Pre-Flop
Seat 7: coog (big blind) showed [Kh Ac] and won 97300.00 with two pair, Aces and Sixs [Ah Ac 6d 6c Kh] with the kicker King
Seat 8: dartdog folded on the Pre-Flop and did not bet
Seat 9: JokesyWales showed [5s As] and lost with two pair, Aces and Sixs [As Ah 6d 6c Qd]''',
'''Game Hand #712012036 - Tournament #24095328 - Holdem(No Limit) - Level 10 (3000.00/6000.00)- 2021/02/20 18:50:12 UTC
Table '3' 9-max Seat #1 is the button
Seat 1: MateusVR (125696.00)
Seat 2: PotNoodle99912 (238797.00)
Seat 3: Frank126 (91818.00)
Seat 4: TiltedMILF (43200.00)
Seat 7: coog (159409.00)
Seat 8: dartdog (91080.00)
MateusVR posts ante 600.00
PotNoodle99912 posts ante 600.00
Frank126 posts ante 600.00
TiltedMILF posts ante 600.00
coog posts ante 600.00
dartdog posts ante 600.00
*** HOLE CARDS ***
Main pot 3600.00
PotNoodle99912 posts the small blind 3000.00
Frank126 posts the big blind 6000.00
Dealt to PotNoodle99912 [Kh 8d]
TiltedMILF folds
coog folds
dartdog folds
MateusVR folds
PotNoodle99912 raises 9000.00 to 12000.00
Frank126 raises 12000.00 to 18000.00
PotNoodle99912 calls 6000.00
*** FLOP *** [4h Kd 4s]
Main pot 39600.00
PotNoodle99912 checks
Frank126 bets 19800.00
PotNoodle99912 raises 99000.00 to 99000.00
Frank126 calls 53418.00 and is all-in
*** TURN *** [4h Kd 4s] [9c]
Main pot 186036.00
*** RIVER *** [4h Kd 4s 9c] [Ah]
Main pot 186036.00
*** SHOW DOWN ***
Main pot 186036.00
Uncalled bet (25782.00) returned to PotNoodle99912
PotNoodle99912 shows [Kh 8d] (two pair, Kings and Fours [Kh Kd 4s 4h Ah])
Frank126 shows [Jh Js] (two pair, Jacks and Fours [Js Jh 4s 4h Ah])
PotNoodle99912 collected 186036.00 from main pot
*** SUMMARY ***
Total pot 186036.00
Board [4h Kd 4s 9c Ah]
Seat 1: MateusVR (button) folded on the Pre-Flop
Seat 2: PotNoodle99912 (small blind) showed [Kh 8d] and won 186036.00 with two pair, Kings and Fours [Kh Kd 4s 4h Ah]
Seat 3: Frank126 (big blind) showed [Jh Js] and lost with two pair, Jacks and Fours [Js Jh 4s 4h Ah]
Seat 4: TiltedMILF folded on the Pre-Flop and did not bet
Seat 7: coog folded on the Pre-Flop and did not bet
Seat 8: dartdog folded on the Pre-Flop and did not bet''',
'''Game Hand #712019578 - Tournament #24095328 - Holdem(No Limit) - Level 11 (3500.00/7000.00)- 2021/02/20 19:00:24 UTC
Table '3' 9-max Seat #2 is the button
Seat 1: MateusVR (111796.00)
Seat 2: PotNoodle99912 (376715.00)
Seat 4: TiltedMILF (49800.00)
Seat 7: coog (152009.00)
Seat 8: dartdog (59680.00)
MateusVR posts ante 700.00
PotNoodle99912 posts ante 700.00
TiltedMILF posts ante 700.00
coog posts ante 700.00
dartdog posts ante 700.00
*** HOLE CARDS ***
Main pot 3500.00
TiltedMILF posts the small blind 3500.00
coog posts the big blind 7000.00
Dealt to PotNoodle99912 [Qs Ad]
dartdog folds
MateusVR raises 14000.00 to 14000.00
PotNoodle99912 calls 14000.00
TiltedMILF folds
coog folds
*** FLOP *** [7d Qc Ah]
Main pot 42000.00
MateusVR bets 18900.00
PotNoodle99912 raises 78750.00 to 78750.00
MateusVR raises 78196.00 to 97096.00 and is all-in
PotNoodle99912 calls 18346.00
*** TURN *** [7d Qc Ah] [4h]
Main pot 236192.00
*** RIVER *** [7d Qc Ah 4h] [6h]
Main pot 236192.00
*** SHOW DOWN ***
Main pot 236192.00
MateusVR shows [Kd Ac] (a pair of Aces [Ah Ac Kd Qc 7d])
PotNoodle99912 shows [Qs Ad] (two pair, Aces and Queens [Ah Ad Qs Qc 7d])
PotNoodle99912 collected 236192.00 from main pot
*** SUMMARY ***
Total pot 236192.00
Board [7d Qc Ah 4h 6h]
Seat 1: MateusVR showed [Kd Ac] and lost with a pair of Aces [Ah Ac Kd Qc 7d]
Seat 2: PotNoodle99912 (button) showed [Qs Ad] and won 236192.00 with two pair, Aces and Queens [Ah Ad Qs Qc 7d]
Seat 4: TiltedMILF (small blind) folded on the Pre-Flop
Seat 7: coog (big blind) folded on the Pre-Flop
Seat 8: dartdog folded on the Pre-Flop and did not bet''']
routes
''',
'maximum_routes',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('warning-threshold', ATTRIBUTE, 'int' , None, None,
[('1', '200000')], [],
''' Set threshold to print warning
''',
'warning_threshold',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'routes',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.Vrfs.Vrf.Ipv4.Maximum' : {
'meta_info' : _MetaInfoClass('Pim.Vrfs.Vrf.Ipv4.Maximum',
False,
[
_MetaInfoClassMember('bsr-candidate-rp-cache', REFERENCE_CLASS, 'BsrCandidateRpCache' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Maximum.BsrCandidateRpCache',
[], [],
''' Override default maximum and threshold for BSR
C-RP cache setting
''',
'bsr_candidate_rp_cache',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('bsr-global-candidate-rp-cache', REFERENCE_CLASS, 'BsrGlobalCandidateRpCache' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Maximum.BsrGlobalCandidateRpCache',
[], [],
''' Override default global maximum and threshold
for C-RP set in BSR
''',
'bsr_global_candidate_rp_cache',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('bsr-global-group-mappings', REFERENCE_CLASS, 'BsrGlobalGroupMappings' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Maximum.BsrGlobalGroupMappings',
[], [],
''' Override default global maximum and threshold
for PIM group mapping ranges from BSR
''',
'bsr_global_group_mappings',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('bsr-group-mappings', REFERENCE_CLASS, 'BsrGroupMappings' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Maximum.BsrGroupMappings',
[], [],
''' Override default maximum and threshold for
number of group mappings from BSR
''',
'bsr_group_mappings',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('gloabal-high-priority-packet-queue', ATTRIBUTE, 'int' , None, None,
[('0', '2147483648')], [],
''' Maximum packet queue size in bytes
''',
'gloabal_high_priority_packet_queue',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('gloabal-low-priority-packet-queue', ATTRIBUTE, 'int' , None, None,
[('0', '2147483648')], [],
''' Maximum packet queue size in bytes
''',
'gloabal_low_priority_packet_queue',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('global-group-mappings-auto-rp', REFERENCE_CLASS, 'GlobalGroupMappingsAutoRp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Maximum.GlobalGroupMappingsAutoRp',
[], [],
''' Maximum for number of group mappings from
autorp mapping agent
''',
'global_group_mappings_auto_rp',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('global-register-states', REFERENCE_CLASS, 'GlobalRegisterStates' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Maximum.GlobalRegisterStates',
[], [],
''' Override default maximum for number of
sparse-mode source registers
''',
'global_register_states',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('global-route-interfaces', REFERENCE_CLASS, 'GlobalRouteInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Maximum.GlobalRouteInterfaces',
[], [],
''' Override default maximum for number of
route-interfaces
''',
'global_route_interfaces',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('global-routes', REFERENCE_CLASS, 'GlobalRoutes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Maximum.GlobalRoutes',
[], [],
''' Override default maximum for number of routes
''',
'global_routes',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('group-mappings-auto-rp', REFERENCE_CLASS, 'GroupMappingsAutoRp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Maximum.GroupMappingsAutoRp',
[], [],
''' Override default maximum for number of group
mappings from autorp mapping agent
''',
'group_mappings_auto_rp',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('register-states', REFERENCE_CLASS, 'RegisterStates' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Maximum.RegisterStates',
[], [],
''' Override default maximum for number of
sparse-mode source registers
''',
'register_states',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('route-interfaces', REFERENCE_CLASS, 'RouteInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Maximum.RouteInterfaces',
[], [],
''' Override default maximum for number of
route-interfaces
''',
'route_interfaces',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('routes', REFERENCE_CLASS, 'Routes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Maximum.Routes',
[], [],
''' Override default maximum for number of routes
''',
'routes',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'maximum',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.Vrfs.Vrf.Ipv4.Ssm' : {
'meta_info' : _MetaInfoClass('Pim.Vrfs.Vrf.Ipv4.Ssm',
False,
[
_MetaInfoClassMember('disable', ATTRIBUTE, 'bool' , None, None,
[], [],
''' TRUE if SSM is disabled on this router
''',
'disable',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('range', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access list of groups enabled with SSM
''',
'range',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'ssm',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.Vrfs.Vrf.Ipv4.BidirRpAddresses.BidirRpAddress' : {
'meta_info' : _MetaInfoClass('Pim.Vrfs.Vrf.Ipv4.BidirRpAddresses.BidirRpAddress',
False,
[
_MetaInfoClassMember('rp-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' RP address of Rendezvous Point
''',
'rp_address',
'Cisco-IOS-XR-ipv4-pim-cfg', True, [
_MetaInfoClassMember('rp-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' RP address of Rendezvous Point
''',
'rp_address',
'Cisco-IOS-XR-ipv4-pim-cfg', True),
_MetaInfoClassMember('rp-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' RP address of Rendezvous Point
''',
'rp_address',
'Cisco-IOS-XR-ipv4-pim-cfg', True),
]),
_MetaInfoClassMember('access-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access list of groups that should map to a
given RP
''',
'access_list_name',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('auto-rp-override', ATTRIBUTE, 'bool' , None, None,
[], [],
''' TRUE Indicates if static RP config overrides
AutoRP and BSR
''',
'auto_rp_override',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'bidir-rp-address',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.Vrfs.Vrf.Ipv4.BidirRpAddresses' : {
'meta_info' : _MetaInfoClass('Pim.Vrfs.Vrf.Ipv4.BidirRpAddresses',
False,
[
_MetaInfoClassMember('bidir-rp-address', REFERENCE_LIST, 'BidirRpAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.BidirRpAddresses.BidirRpAddress',
[], [],
''' Address of the Rendezvous Point
''',
'bidir_rp_address',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'bidir-rp-addresses',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.Vrfs.Vrf.Ipv4.Bsr.CandidateRps.CandidateRp.Sm' : {
'meta_info' : _MetaInfoClass('Pim.Vrfs.Vrf.Ipv4.Bsr.CandidateRps.CandidateRp.Sm',
False,
[
_MetaInfoClassMember('access-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access-list specifying the group range for
the Candidate-RP
''',
'access_list_name',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('interval', ATTRIBUTE, 'int' , None, None,
[('30', '600')], [],
''' Advertisement interval
''',
'interval',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('priority', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Priority of the CRP
''',
'priority',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('protocol-mode', ATTRIBUTE, 'int' , None, None,
[('0', '1')], [],
''' CRP Mode
''',
'protocol_mode',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'sm',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.Vrfs.Vrf.Ipv4.Bsr.CandidateRps.CandidateRp.Bidir' : {
'meta_info' : _MetaInfoClass('Pim.Vrfs.Vrf.Ipv4.Bsr.CandidateRps.CandidateRp.Bidir',
False,
[
_MetaInfoClassMember('access-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access-list specifying the group range for
the Candidate-RP
''',
'access_list_name',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('interval', ATTRIBUTE, 'int' , None, None,
[('30', '600')], [],
''' Advertisement interval
''',
'interval',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('priority', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Priority of the CRP
''',
'priority',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('protocol-mode', ATTRIBUTE, 'int' , None, None,
[('0', '1')], [],
''' CRP Mode
''',
'protocol_mode',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'bidir',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.Vrfs.Vrf.Ipv4.Bsr.CandidateRps.CandidateRp' : {
'meta_info' : _MetaInfoClass('Pim.Vrfs.Vrf.Ipv4.Bsr.CandidateRps.CandidateRp',
False,
[
_MetaInfoClassMember('address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Address of Candidate-RP
''',
'address',
'Cisco-IOS-XR-ipv4-pim-cfg', True, [
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Address of Candidate-RP
''',
'address',
'Cisco-IOS-XR-ipv4-pim-cfg', True),
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Address of Candidate-RP
''',
'address',
'Cisco-IOS-XR-ipv4-pim-cfg', True),
]),
_MetaInfoClassMember('bidir', REFERENCE_CLASS, 'Bidir' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Bsr.CandidateRps.CandidateRp.Bidir',
[], [],
''' Parameters of PIM Bidir BSR Candidate-RP
''',
'bidir',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('sm', REFERENCE_CLASS, 'Sm' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Bsr.CandidateRps.CandidateRp.Sm',
[], [],
''' Parameters of PIM SM BSR Candidate-RP
''',
'sm',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'candidate-rp',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.Vrfs.Vrf.Ipv4.Bsr.CandidateRps' : {
'meta_info' : _MetaInfoClass('Pim.Vrfs.Vrf.Ipv4.Bsr.CandidateRps',
False,
[
_MetaInfoClassMember('candidate-rp', REFERENCE_LIST, 'CandidateRp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Bsr.CandidateRps.CandidateRp',
[], [],
''' BSR Candidate RP Configuration
''',
'candidate_rp',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'candidate-rps',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.Vrfs.Vrf.Ipv4.Bsr.CandidateBsr' : {
'meta_info' : _MetaInfoClass('Pim.Vrfs.Vrf.Ipv4.Bsr.CandidateBsr',
False,
[
_MetaInfoClassMember('address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' BSR Address configured
''',
'address',
'Cisco-IOS-XR-ipv4-pim-cfg', False, [
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' BSR Address configured
''',
'address',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' BSR Address configured
''',
'address',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
]),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Hash Mask Length for this candidate BSR
''',
'prefix_length',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('priority', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Priority of the Candidate BSR
''',
'priority',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'candidate-bsr',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.Vrfs.Vrf.Ipv4.Bsr' : {
'meta_info' : _MetaInfoClass('Pim.Vrfs.Vrf.Ipv4.Bsr',
False,
[
_MetaInfoClassMember('candidate-bsr', REFERENCE_CLASS, 'CandidateBsr' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Bsr.CandidateBsr',
[], [],
''' PIM Candidate BSR configuration
''',
'candidate_bsr',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('candidate-rps', REFERENCE_CLASS, 'CandidateRps' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.Vrfs.Vrf.Ipv4.Bsr.CandidateRps',
[], [],
''' PIM RP configuration
''',
'candidate_rps',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'bsr',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.Vrfs.Vrf.Ipv4.Interfaces.Interface.RedirectBundle' : {
'meta_info' : _MetaInfoClass('Pim.Vrfs.Vrf.Ipv4.Interfaces.Interface.RedirectBundle',
False,
[
_MetaInfoClassMember('bundle-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Bundle name
''',
'bundle_name',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('interface-bandwidth', ATTRIBUTE, 'int' , None, None,
[('0', '100000000')], [],
''' Interface bandwidth in Kbps
''',
'interface_bandwidth',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('threshold-bandwidth', ATTRIBUTE, 'int' , None, None,
[('0', '100000000')], [],
''' Threshold bandwidth in Kbps
''',
'threshold_bandwidth',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'redirect-bundle',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.Vrfs.Vrf.Ipv4.Interfaces.Interface.Bfd' : {
'meta_info' : _MetaInfoClass('Pim.Vrfs.Vrf.Ipv4.Interfaces.Interface.Bfd',
False,
[
_MetaInfoClassMember('detection-multiplier', ATTRIBUTE, 'int' , None, None,
[('2', '50')], [],
''' Detection multiplier for BFD sessions created
by PIM
''',
'detection_multiplier',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'bool' , None, None,
[], [],
''' TRUE to enable BFD. FALSE to disable and to
prevent inheritance from a parent
''',
'enable',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('interval', ATTRIBUTE, 'int' , None, None,
[('3', '30000')], [],
''' Hello interval for BFD sessions created by
PIM
''',
'interval',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
| |
None:
DOM.suspend()
else:
logger.error("%s does not exist." % self.vm_name)
self._disconnect()
def machine_resume(self):
"""
Resume a paused machine
"""
logger.debug("Resuming machine %s..."%self.vm_name)
DOM = self._connect()
if DOM is not None:
DOM.resume()
else:
logger.error("%s does not exist." % self.vm_name)
self._disconnect()
"""
Snapshot Functions
"""
def machine_snapshot(self):
"""
Takes a snapshot of the VM and freezes it.
"""
logger.debug("Taking snapshot of %s..." % self.vm_name)
if self.MACHINE_TYPE == G.MACHINE_TYPES.XEN:
logger.warning("Xen doesn't have snapshot capabilities!")
return False
# First let's see if our snapshot already exist?
DOM = self._connect()
if DOM is not None:
if self.MACHINE_TYPE == G.MACHINE_TYPES.KVM:
try:
DOM.snapshotLookupByName(self.SNAPSHOT_NAME, 0)
logger.warning("Snapshot already exist for %s" % self.vm_name)
except:
DOM.snapshotCreateXML(self.SNAPSHOT_XML, 0)
else:
logger.error("%s does not exist." % self.vm_name)
self._disconnect()
def machine_snapshot_restore(self):
"""
Restore a previously snapshotted version of the machine
"""
logger.debug("Restoring snapshot of %s..." % self.vm_name)
if self.MACHINE_TYPE == G.MACHINE_TYPES.XEN:
logger.warning("Xen doesn't have snapshot capabilities! (Using reset/restore)")
self.machine_reset()
self.machine_restore()
return False
# Try to detach our USB key
self.detach_usb()
# Does our snapshot exist?
DOM = self._connect()
rtn = False
if DOM is not None:
if self.MACHINE_TYPE == G.MACHINE_TYPES.KVM:
try:
snap = DOM.snapshotLookupByName(self.SNAPSHOT_NAME, 0)
DOM.revertToSnapshot(snap, libvirt.VIR_DOMAIN_SNAPSHOT_REVERT_PAUSED)
rtn = True
except:
logger.error("Tried to revert %s to snapshot when none exist!" % self.vm_name)
else:
logger.error("%s does not exist." % self.vm_name)
self._disconnect()
return rtn
def machine_save(self, filename):
"""
Suspends machine and saves state to a file.
"""
logger.debug("Saving state of %s..." % self.vm_name)
if self.MACHINE_TYPE == G.MACHINE_TYPES.XEN:
# In Xen the machine must be "running" to save.
self.machine_resume()
DOM = self._connect()
if DOM is not None:
DOM.save(filename)
else:
logger.error("%s does not exist." % self.vm_name)
self._disconnect()
def machine_restore(self, filename, paused=False):
"""
restore a machine from our snapshotted state and start it
"""
logger.debug("Restoring %s..." % self.vm_name)
self.MACHINE_STATE = G.MACHINE_STATES['RESTORING']
self._connect()
if self.MACHINE_TYPE == G.MACHINE_TYPES.KVM:
if paused:
self._libvirt_conn.restoreFlags(filename, None, libvirt.VIR_DOMAIN_SAVE_PAUSED)
else:
self._libvirt_conn.restore(filename)
else:
if paused:
logger.error("Unable to restore Xen paused without command line hack...")
else:
self._libvirt_conn.restore(filename)
self._disconnect()
"""
Miscellaneous functions
"""
def screenshot(self, filename):
"""
Screenshot the display of the machine and save it to a file.
@param filename: Filename to save screenshot data to.
"""
rtn = filename+".ppm"
def saver(stream, data, file_):
return file_.write(data)
DOM = self._connect()
if DOM is not None:
try:
# Create a new virtual stream
stream = self._libvirt_conn.newStream(0)
# Take our screenshot
DOM.screenshot(stream, 0, 0)
# save to file
f = open(rtn, 'w+')
stream.recvAll(saver, f)
f.close()
# Finish and return
stream.finish()
except:
rtn = False
else:
logger.error("%s does not exist." % self.vm_name)
self._disconnect()
return rtn
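# screenshot() returns the path of the written .ppm file on success, or False if
# opening the stream or taking the screenshot failed.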
"""
VM Only Functions
"""
def memory_get_size(self):
"""
Return the memory size in bytes of this virtual machine
@return: Memory size in bytes
"""
rtn = None
DOM = self._connect()
if DOM is None:
logger.error("VM %s was not found." % self.vm_name)
rtn = None
else:
xml_str = DOM.XMLDesc(0)
logger.debug("Getting memory size from %s"%xml_str)
import xml.dom.minidom as xml
dom = xml.parseString(xml_str)
mem = dom.getElementsByTagName("currentMemory")
for m in mem:
unit = m.getAttribute("unit")
if unit == "KiB":
rtn = int(m.childNodes[0].data)*1024
else:
logger.error("Found a unit that we don't support when calculating memory size! (%s)"%unit)
self._disconnect()
return rtn
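# Note: memory_get_size() only understands the "KiB" unit that libvirt emits by
# default in <currentMemory>; any other unit is logged as an error and the method
# returns None.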
def disk_get_filename(self):
"""
Retrieve the filename of the disk used by this SUT.
@return: Filename of backing disk.
"""
filename = None
DOM = self._connect()
if DOM is None:
logger.error("VM %s was not found." % self.vm_name)
else:
xml_str = DOM.XMLDesc(0)
logger.debug("Getting disk from %s"%xml_str)
import xml.dom.minidom as xml
dom = xml.parseString(xml_str)
disks = dom.getElementsByTagName("disk")
for d in disks:
device = d.getAttribute("device")
if device == "disk":
source = d.getElementsByTagName("source")
fname = source[0].getAttribute("file")
filename = fname
self._disconnect()
return filename
"""
Network Functions
"""
def network_get_interface(self):
"""
Dump the xml configuration and get the network interface assigned
to this virtual machine.
@return: Interface name or None
"""
iface_dev = None
DOM = self._connect()
if DOM is None:
logger.error("VM %s was not found." % self.vm_name)
else:
try:
xml_str = DOM.XMLDesc(0)
import xml.dom.minidom as xml
dom = xml.parseString(xml_str)
iface = dom.getElementsByTagName("interface")
target = iface[0].getElementsByTagName("target")
iface_dev = target[0].getAttribute("dev")
except:
logger.error("Could not find network interface for %s"%self.vm_name)
self._disconnect()
return iface_dev
def has_lophi_snapshot(self):
"""
Check to see if a LO-PHI snapshot exists, if so, we consider
this machine ready to go.
"""
rtn = False
DOM = self._connect()
if DOM is not None:
try:
DOM.snapshotLookupByName(self.SNAPSHOT_NAME, 0)
rtn = True
except:
pass
else:
pass
self._disconnect()
return rtn
"""
Experimental Functions
"""
# # TODO: This is too much for this class to be doing!
# def run_code(self, input_directory, ftp_info):
# """
# Will download the entire directory to the SUT and run lophi.bat
#
# @param input_directory: Directory with executable code and lophi.bat
#
# """
# import time
# import lophi.actuation.scripts as S
#
# if not os.path.exists(input_directory):
# print "ERROR: input directory (%s) does not exist." % input_directory
# return
#
# print "* lophi.bat %s" % self.FTP_DIR
#
# if G.VERBOSE:
# print "* Loading %s on %s..." % (input_directory, self.FTP_PATH)
#
# print "Getting script"
#
# # print self.config
#
# if self.config.volatility_profile is None:
# print "ERROR: No profile provided for this machine. Cannot execute code."
# return
#
# # Get our script to execute
# SCRIPT = S.get_execute_script(self.config.volatility_profile,
# ftp_info,
# self.FTP_DIR)
#
# # Clear our ftp directory
# if os.path.exists(self.FTP_PATH):
# shutil.rmtree(self.FTP_PATH)
#
# # Copy new contents
# shutil.copytree(input_directory, self.FTP_PATH)
#
# # Start emulating
# DOM = self._connect()
# # Get terminal
# if G.VERBOSE:
# print "* Opening terminal"
#
# if SCRIPT["HOTKEY"]:
# DOM.sendKey(libvirt.VIR_KEYCODE_SET_LINUX, 0, SCRIPT["HOTKEY"], len(SCRIPT["HOTKEY"]), 0)
#
# time.sleep(1)
# # Open Run Dialog
# for command in SCRIPT["COMMANDS"]:
# if G.VERBOSE:
# print "* Typing: %s" % command
#
# for c in command:
# DOM.sendKey(libvirt.VIR_KEYCODE_SET_LINUX, 0, [c], 1, 0)
# time.sleep(.1)
#
# time.sleep(1)
#
# self._disconnect()
#
# def attach_usb(self, input_directory):
# """
# This will create a USB device with the contents from
# 'input_directory' and attach it to the machine
#
# # set size of disk
# dd if=/dev/zero of=usb_${sizeMB}.img bs=512 count=$size
# # equivalent to: qemu-img create -f raw harddisk.img 100M
# sudo parted usb_${sizeMB}.img mktable msdos
# # create partition table
# sudo parted usb_${sizeMB}.img "mkpart p fat32 1 -0"
# # make primary partition, type fat32 from 1 to end
# sudo parted usb_${sizeMB}.img mkfs y 1 fat32
# # list partition table (in bytes)
# offset=$(parted harddisk.img unit b print | tail -2 | head -1 | cut -f 1 --delimit="B" | cut -c 9-)
#
# sudo mount -a -o loop,offset=$offset usb_${sizeMB}.img usb0/
# """
#
#
#
# min_size = 2 ** 20 * 100 # 100MB
# input_size_b = min_size # max(min_size, G.get_directory_size(input_directory))
# input_size_mb = input_size_b / (2 ** 20)
#
# # Keep some extra space
# # input_size_mb = input_size_mb * (2)
#
# print "* Creating a %dMB USB Key..." % input_size_mb
#
# input_size_mb = (input_size_mb * 1024 * 1024) / 512
# # Create img
#
# call(["dd",
# "if=/dev/zero",
# "of=%s" % self.USB_IMG,
# "bs=512",
# "count=%d" % input_size_mb])
#
# print "* Formating image (msdos)..."
# # Format (-s for script)
# call(["parted", "-s", self.USB_IMG, "mktable msdos"])
#
# print "* Formating image (fat32)..."
# call(["parted", "-s", self.USB_IMG, "mkpart p fat32 1 -0"])
#
# print "* Formating image (fat32)..."
# call(["parted", self.USB_IMG, "mkfs y 1 fat32"])
#
# # Get our offset. (Should be a nicer way...)
# import subprocess
# # offset = subprocess.check_output("parted harddisk.img unit b print | tail -2 | head -1 | cut -f 1 --delimit=\"B\" | cut -c 9-")
# ## TODO: Make dynamic
# offset = 1048576
#
# print "* Mounting image..."
# # Mount image
# call(["mount",
# "-a", # Mount all
# "-oloop,offset=%d" % offset, # Loopback and size
# self.USB_IMG,
# self.USB_PATH
# ])
#
# # Copy contents
# print "* Copying contents to USB..."
#
# G.copy_tree(input_directory, self.USB_PATH)
# # call(["cp", "-r", os.path.join(input_directory, "/*"), self.USB_PATH])
#
# # Unmount image
# call(["umount", self.USB_PATH])
#
# print "* Attaching device..."
# # Finally, attach to the VM
# DOM = self._connect()
#
# usb_xml = self.USB_XML.replace(REPLACE_STRINGS.usb_img, self.USB_IMG)
# DOM.attachDevice(usb_xml)
#
#
import os
import logging
import tensorflow as tf
import random
from model_factory import get_emb_vec
logger = logging.getLogger(__name__)
global tf_image, tf_label, status
import numpy as np
class Preprocess:
def __init__(self, directory_path, filetype, tfrecord_image, tfrecord_label):
"""
Build lists of image file paths and integer labels from a directory whose
sub-folders each contain the images of one class.
:param directory_path: a directory that contains sub-folders of images
:param filetype: type of the input files (stored for downstream use)
:param tfrecord_image: TFRecord feature key holding the encoded image
:param tfrecord_label: TFRecord feature key holding the integer label
"""
global tf_image, tf_label
tf_image = tfrecord_image
tf_label = tfrecord_label
logger.debug('Initializing Preprocess')
self.directory_path = directory_path
self.filetype = filetype
self.classes = self.__get_classes()
self.tfrecord_image = tfrecord_image
self.tfrecord_label = tfrecord_label
self.files, self.labels, self.label_dict, self.min_images, self.filetype, self.tfrecord_image, self.tfrecord_label = self.__get_lists()
def __check_min_image(self, prev, new):
logger.debug('Counting the number of images')
if prev is None:
return new
else:
return prev + new
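# Despite its name, __check_min_image() accumulates a running sum, so
# self.min_images ends up holding the total number of images across all classes.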
def __get_classes(self):
classes = os.listdir(self.directory_path)
return len(classes)
def __get_lists(self):
logging.debug('Getting initial list of images and labels')
files = []
labels = []
label_dict = dict()
label_number = 0
min_images = None
filetype = self.filetype
tfrecord_image = self.tfrecord_image
tfrecord_label = self.tfrecord_label
classes = os.listdir(self.directory_path)
for x in classes:
class_files = os.listdir(os.path.join(self.directory_path, x))
class_files = [os.path.join(self.directory_path, x, j) for j in class_files]
class_labels = [label_number for _ in range(len(class_files))]
min_images = self.__check_min_image(min_images, len(class_labels))
label_dict[x] = label_number
label_number += 1
files.extend(class_files)
labels.extend(class_labels)
labels = tf.dtypes.cast(labels, tf.uint8)
return files, labels, label_dict, min_images, filetype, tfrecord_image, tfrecord_label
def update_status(stat):
global status
status = stat
return stat
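# `status` is a module-level flag read by format_example()/format_example_tf():
# call update_status(True) before building the training pipeline so the random
# augmentations run, and update_status(False) for validation/test data.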
# processing images
def format_example(image_name=None, img_size=256):
"""
Apply any image preprocessing here
:param image_name: the specific filename of the image
:param img_size: size that images should be reshaped to
:return: image
"""
global status
train = status
image = tf.io.read_file(image_name)
image = tf.io.decode_jpeg(image, channels=3)
image = tf.cast(image, tf.float32)#/255
image = (image/127.5) - 1
#image = tf.image.per_image_standardization(image)
image = tf.image.resize(image, (img_size, img_size))
if train is True:
image = tf.image.random_flip_left_right(image)
#image = tf.image.random_brightness(image, 0.4)
#image = tf.image.random_contrast(image, lower=0.0, upper=0.1)
image = tf.image.random_flip_up_down(image)
#image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_hue(image, 0.08)
image = tf.image.random_saturation(image, 0.6, 1.6)
image = tf.image.random_brightness(image, 0.05)
image = tf.image.random_contrast(image, 0.7, 1.3)
#image = tf.image.rot90(image, tf.random_uniform(shape=[], minval=0, maxval=4, dtype=tf.int32))
image = tf.reshape(image, (img_size, img_size, 3))
return image
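# Rough usage sketch (illustrative directory and batch size, not from the original
# project) showing how Preprocess and format_example() plug into a tf.data pipeline:
#
#   prep = Preprocess('/data/train', 'images', 'image/encoded', 'image/label')
#   update_status(True)  # enable training-time augmentation
#   ds = (tf.data.Dataset.from_tensor_slices((prep.files, prep.labels))
#         .map(lambda f, l: (format_example(f), l))
#         .batch(32))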
# extracting images and labels from tfrecords
def format_example_tf(tfrecord_proto, img_size=256):
# Parse the input tf.Example proto using the dictionary above.
# Create a dictionary describing the features.
global tf_image, tf_label, status
train = status
image_feature_description = {
tf_image: tf.io.FixedLenFeature((), tf.string, ""),
tf_label: tf.io.FixedLenFeature((), tf.int64, -1),
}
parsed_image_dataset = tf.io.parse_single_example(tfrecord_proto, image_feature_description)
image = parsed_image_dataset[tf_image]
label = parsed_image_dataset[tf_label]
label = tf.dtypes.cast(label, tf.uint8)
image = tf.io.decode_png(image, channels=3)
image = tf.cast(image, tf.float32)
image = tf.image.per_image_standardization(image)
image = tf.image.resize(image, (img_size, img_size))
if train is True:
image = tf.image.random_flip_left_right(image)
image = tf.image.random_brightness(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.0, upper=0.1)
image = tf.image.random_flip_up_down(image)
image = tf.image.random_hue(image, max_delta=0.2)
#image = tf.reshape(image, (img_size, img_size, 3))
return image, label
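# The TFRecord variant is meant to be mapped over raw records, e.g. (hypothetical
# file name):
#
#   ds = tf.data.TFRecordDataset('train.tfrecord').map(format_example_tf).batch(32)
#
# The feature keys come from the tf_image/tf_label globals set in Preprocess.__init__().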
# Create pairs for one shot learning
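# Despite the "triplets" name, create_triplets_oneshot() emits pairs: for every class
# it repeatedly picks an anchor plus either a same-class image (label [0, 1]) or an
# image from a randomly chosen other class (label [1, 0]), iterating up to
# max(images per class) * number of classes times so classes stay balanced.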
def create_triplets_oneshot(t_image_ds):
list_images = []
list_labels = []
# adding images and labels to the list
for image, label in t_image_ds:
list_images.append(image)
list_labels.append(label.numpy())
# unique labels
unique_labels = list(set(list_labels))
unique_labels_num = []
unique_labels_index = []
# creating array of indexes per label and number of images per label
for label in unique_labels:
inx = [x for x in range(0, len(list_labels)) if list_labels[x] == label]
unique_labels_num.append(len(inx))
unique_labels_index.append(inx)
# max number of images per label category
max_unique_labels_num = max(unique_labels_num)*len(unique_labels)
# randomly selecting images for a,p & n class
list_img_index = []
list_img_label = []
# iterating through all classes
for i in range(0, len(unique_labels)):
# iterate up to the largest per-class image count so that classes with more
# images are not over-represented
for j in range(0, max_unique_labels_num):
tmp_a_idx = i
tmp_p_idx = tmp_a_idx
tmp_unique_labels = list(range(0, len(unique_labels)))
tmp_unique_labels.remove(tmp_p_idx)
# selecting other category for 'n' class
tmp_n_idx = random.choices(tmp_unique_labels, k=1)[0]
# randomly select an image index for each of the 'a', 'p' and 'n' classes
tmp_a_idx_img = random.choices(unique_labels_index[tmp_a_idx], k=1)[0]
tmp_p_idx_img = random.choices(unique_labels_index[tmp_p_idx], k=1)[0]
tmp_n_idx_img = random.choices(unique_labels_index[tmp_n_idx], k=1)[0]
# extracting actual images with selected indexes for each class 'a','p' & 'n'
#list_img_index.append((list_images[tmp_a_idx_img], list_images[tmp_p_idx_img], list_images[tmp_n_idx_img]))
#list_img_label.append([tmp_a_idx, tmp_p_idx, tmp_n_idx])
list_img_index.append((list_images[tmp_a_idx_img], list_images[tmp_p_idx_img], [0,1]))
list_img_label.append([tmp_a_idx, tmp_p_idx])
list_img_index.append((list_images[tmp_a_idx_img], list_images[tmp_n_idx_img], [1,0]))
list_img_label.append([tmp_a_idx, tmp_n_idx])
return list_img_index, max_unique_labels_num, list_img_label
def create_triplets_oneshot_img_v(t_image_ds, t_label_ds):
"""
Args:
t_image_ds: image list
t_label_ds: image class
Returns:
list_img_index: a list of (anchor image, paired image, one-hot same/different label) tuples
max_unique_labels_num: the largest number of images in any single class (used to balance how many pairs each class contributes)
list_img_label: the class indices of the two images in each generated pair (e.g. [0, 0] for a same-class pair)
"""
list_images = []
list_labels = []
# adding images and labels to the list
for image in t_image_ds:
list_images.append(image)
for label in t_label_ds:
list_labels.append(label.numpy())
# unique labels
unique_labels = list(set(list_labels))
unique_labels_num = []
unique_labels_index = []
# creating array of indexes per label and number of images per label
for label in unique_labels:
inx = [x for x in range(0, len(list_labels)) if list_labels[x] == label]
unique_labels_num.append(len(inx))
unique_labels_index.append(inx)
# max number of images per label category
max_unique_labels_num = max(unique_labels_num)
# randomly selecting images for a,p & n class
list_img_index = []
list_img_label = []
# iterating through all classes
for i in range(0, len(unique_labels)):
# iterate up to the largest per-class image count so that classes with more
# images are not over-represented
for j in range(0, max_unique_labels_num):
tmp_a_idx = i
tmp_p_idx = tmp_a_idx
tmp_unique_labels = list(range(0, len(unique_labels)))
tmp_unique_labels.remove(tmp_p_idx)
# selecting other category for 'n' class
tmp_n_idx = random.choices(tmp_unique_labels, k=1)[0]
# randomly select an image index for each of the 'a', 'p' and 'n' classes
tmp_a_idx_img = random.choices(unique_labels_index[tmp_a_idx], k=1)[0]
tmp_p_idx_img = random.choices(unique_labels_index[tmp_p_idx], k=1)[0]
tmp_n_idx_img = random.choices(unique_labels_index[tmp_n_idx], k=1)[0]
list_img_index.append((list_images[tmp_a_idx_img], list_images[tmp_p_idx_img], [0,1]))
list_img_label.append([tmp_a_idx, tmp_p_idx])
list_img_index.append((list_images[tmp_a_idx_img], list_images[tmp_n_idx_img], [1,0]))
list_img_label.append([tmp_a_idx, tmp_n_idx])
return list_img_index, max_unique_labels_num, list_img_label
def create_triplets_oneshot_img(t_image_ds, t_label_ds):
"""
Args:
t_image_ds: image list
t_label_ds: image class
Returns:
list_img_index: a list of (anchor image, paired image, one-hot same/different label) tuples
max_unique_labels_num: the largest number of images in any single class (used to balance how many pairs each class contributes)
list_img_label: the class indices of the two images in each generated pair (e.g. [0, 0] for a same-class pair)
"""
#get model
patch_size = 224
img_size = 224
IMG_SHAPE = (patch_size, patch_size, 3)
IMG_SHAPE_NEW = (1,patch_size, patch_size, 3)
conv_model= get_emb_vec(IMG_SHAPE)
#model = conv_model.build_model()
list_images = []
list_labels = []
# adding images and labels to the list
for image in t_image_ds:
list_images.append(image)
for label in t_label_ds:
list_labels.append(label.numpy())
# unique labels
unique_labels = list(set(list_labels))
unique_labels_num = []
unique_labels_index = []
# creating array of indexes per label and number of images per label
for label in unique_labels:
inx = [x for x in range(0, len(list_labels)) if list_labels[x] == label]
unique_labels_num.append(len(inx))
unique_labels_index.append(inx)
# max number of images per label category
max_unique_labels_num = max(unique_labels_num)
batch_loss_dist = []
# iterating through all classes
for i in range(0, len(unique_labels)):
# iterate up to the largest per-class image count so that classes with more
# images are not over-represented
for j in range(0, int(max_unique_labels_num/10)):
#print(i,j,max_unique_labels_num)
tmp_a_idx = i
tmp_p_idx = tmp_a_idx
tmp_unique_labels = list(range(0, len(unique_labels)))
tmp_unique_labels.remove(tmp_p_idx)
# selecting other category for 'n' class
tmp_n_idx = random.choices(tmp_unique_labels, k=1)[0]
# randomly select an image index for each of the 'a', 'p' and 'n' classes
tmp_a_idx_img = random.choices(unique_labels_index[tmp_a_idx], k=1)[0]
tmp_p_idx_img = random.choices(unique_labels_index[tmp_p_idx], k=1)[0]
tmp_n_idx_img = random.choices(unique_labels_index[tmp_n_idx], k=1)[0]
# extracting actual images with selected indexes for each class 'a','p' & 'n'
A = conv_model.predict(tf.reshape(tf.image.resize(list_images[tmp_a_idx_img], (img_size, img_size)), IMG_SHAPE_NEW))
P = conv_model.predict(tf.reshape(tf.image.resize(list_images[tmp_p_idx_img], (img_size, img_size)), IMG_SHAPE_NEW))
N = conv_model.predict(tf.reshape(tf.image.resize(list_images[tmp_n_idx_img], (img_size, img_size)), IMG_SHAPE_NEW))
studybatchloss = np.sum(np.square(A - P), axis=1) - np.sum(np.square(A - N), axis=1)
batch_loss_dist.append(studybatchloss[0])
batch_loss_dist_25=np.percentile(batch_loss_dist, 20)
batch_loss_dist_75 = np.percentile(batch_loss_dist, 85)
x=[i for i in batch_loss_dist if i < batch_loss_dist_25]
y = [i for i in batch_loss_dist if i > batch_loss_dist_75]
#print(len(batch_loss_dist),len(x),len(y))
print(batch_loss_dist_25,batch_loss_dist_75)
#exit(0)
#*len(unique_labels)
# randomly selecting images for a,p & n class
list_img_index = []
list_img_label = []
num_limit = int(max_unique_labels_num/3)
#batch_loss_dist = []
# iterating through all classes
for i in range(0, len(unique_labels)):
# iterate up to the largest per-class image count so that classes with more
# images are not over-represented
num1 = 0
num2 = 0
num3 = 0
for j in range(0, max_unique_labels_num*10):
tmp_a_idx = i
tmp_p_idx = tmp_a_idx
import base64, discord, hashlib, math, os, requests, time, traceback, youtube_dl
from discord.utils import get
from utils.datautils import config, data, default, discard, save_data, set_client
from utils.discordbot import BotClient, emoji_shorthand, send
from utils.errors import BotError
from utils.logging import log
client = None
downloader = youtube_dl.YoutubeDL({
"format": "bestaudio/best"
})
def gfn(vid):
return "music/" + hashlib.md5(bytes(vid, "utf-8")).hexdigest() + ".webm"
def cutmax(string, length, term = "..."):
if len(string) > length:
return string[:length - len(term)] + term
return string
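# e.g. cutmax("example string", 10) -> "example..."; strings at or under the limit
# are returned unchanged.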
class BotlaneClient(BotClient):
def __init__(self):
BotClient.__init__(self)
self.name = "botlane"
client = BotlaneClient()
histories = {}
queues = {}
nowplaying = {}
stopskip = {}
playtime = {}
timetrack = {}
def current(gid, key, default = "<unknown>"):
return nowplaying.get(gid, {}).get(key, default)
def gettime(voice, gid):
if voice.is_paused():
return playtime[gid]
elif voice.is_playing():
return playtime[gid] + time.time() - timetrack[gid]
else:
return 0
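# gettime() reports how many seconds of the current track have played: while paused
# it returns the accumulated playtime, while playing it adds the wall-clock time
# since the last resume (timetrack), and otherwise returns 0.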
def ftime(seconds):
seconds = int(seconds)
if seconds < 60:
return f"0:{str(seconds).zfill(2)}"
elif seconds < 3600:
return f"{seconds // 60}:{str(seconds % 60).zfill(2)}"
else:
return f"{seconds // 3600}:{str(seconds % 3600 // 60).zfill(2)}:{str(seconds % 60).zfill(2)}"
@client.command("Testing Commands", ["test"], "test", "Test the BotLane bot")
async def command_test(command, message):
await send(message, "Test success!", reaction = "check")
@client.command("Voice Commands", ["join"], "join", "join your voice channel")
async def command_join(command, message):
if message.author.voice:
channel = message.author.voice.channel
voice = get(client.voice_clients, guild = message.guild)
if voice and voice.is_connected():
await voice.move_to(channel)
else:
voice = await channel.connect()
await send(message, f"Connected to {channel.name}!", reaction = "check")
else:
await send(message, "You must be in a voice channel to use the `join` command!", reaction = "x")
@client.command("Voice Commands", ["leave"], "leave", "leave the current voice channel (purges the queue and history)")
async def command_leave(command, message):
voice = get(client.voice_clients, guild = message.guild)
if voice and voice.is_connected():
await voice.disconnect()
discard(queues, message.guild.id)
discard(histories, message.guild.id)
discard(stopskip, message.guild.id)
discard(nowplaying, message.guild.id)
await send(message, "Disconnected!", reaction = "check")
else:
await send(message, "I am not connected to a voice channel!", reaction = "x")
async def get_voice(message):
voice = get(client.voice_clients, guild = message.guild)
if message.author.id in config["global-arguments"]["sudo"] or message.author.voice:
if voice and voice.is_connected():
if message.author.id in config["global-arguments"]["sudo"] or voice.channel == message.author.voice.channel:
return voice
else:
raise BotError("You must be in the same channel as me to use this command!")
else:
return await message.author.voice.channel.connect()
else:
raise BotError("You must be connected to a voice channel to use this command!")
@client.command("Voice Commands", ["playshuffle", ".+"], "play <url>", "same as `play` but inserts songs shuffled (if called on a playlist; otherwise, it is the exact same as `play`)")
@client.command("Voice Commands", ["play", ".+"], "play <url>", "play audio from YouTube (appends a song to the queue if playing, and places at the front if not)")
async def command_play(command, message):
voice = await get_voice(message)
try:
info = downloader.extract_info(command[1], download = False)
queue = await default(message.guild.id, [], queues)
if voice.is_playing():
queue.append((command[1], info, message.author))
embed = discord.Embed(
title = "Added to Queue!",
description = f"[**{info['title']}**]({command[1]})",
color = 0x3333AA
).add_field(
name = "Channel",
value = f"[{info['uploader']}]({info['uploader_url']})"
).add_field(
name = "Song Length",
value = ftime(info["duration"])
)
if info["thumbnails"]:
embed.set_thumbnail(url = info["thumbnails"][-1]["url"])
await send(message, embed = embed, reaction = "check")
else:
queue.insert(0, (command[1], info, message.author))
await playaudio(message.channel)
await message.add_reaction(emoji_shorthand["check"])
except:
print(traceback.format_exc())
await send(message, f"Invalid URL: `{command[1]}`!", reaction = "x")
@client.command("Voice Commands", ["stop"], "stop", "stops the player (places the current song into history but retains the remaining queue)")
async def command_stop(command, message):
voice = await get_voice(message)
if voice.is_playing() or voice.is_paused():
queue = await default(message.guild.id, [], queues)
if len(queue) > 0:
(await default(message.guild.id, [], histories)).append(queue.pop(0))
stopskip[message.guild.id] = stopskip.get(message.guild.id, 0) + 1
voice.stop()
if not await default(message.guild.id, [], queues):
await send(message, "Nothing is playing!", reaction = "x")
if message.guild.id in nowplaying:
del nowplaying[message.guild.id]
await send(message, "Stopped the player!", reaction = "check")
@client.command("Voice Commands", ["pause"], "pause", "pauses the player")
async def command_pause(command, message):
voice = await get_voice(message)
if voice.is_paused():
await send(message, "Already paused!", reaction = "x")
elif not voice.is_playing():
await send(message, "Nothing is playing!", reaction = "x")
else:
voice.pause()
playtime[message.guild.id] += time.time() - timetrack[message.guild.id]
await send(message, "Paused!", reaction = "check")
@client.command("Voice Commands", ["play"], "play", "alias for `resume` (if not called with a URL)")
@client.command("Voice Commands", ["resume"], "resume", "resume the player if it is paused; start playing from the queue if it is stopped")
async def command_resume(command, message):
voice = await get_voice(message)
if voice.is_paused():
voice.resume()
timetrack[message.guild.id] = time.time()
await send(message, "Resumed!", reaction = "check")
elif voice.is_playing():
await send(message, "Player is not currently paused!", reaction = "x")
elif await default(message.guild.id, [], queues):
await playaudio(message.channel)
else:
await send(message, "Nothing is playing and the queue is empty!", reaction = "x")
@client.command("Voice Commands", ["backtrack", "?"], "backtrack [amount = 1]", "backtrack to a song / songs in the history and start playing the new song (least recent song first)")
@client.command("Voice Commands", ["skip", "?"], "skip [amount = 1]", "skip a song / songs and start playing the next song in the queue if present")
async def command_skip(command, message):
try:
amt = int(command[1]) if len(command) > 1 else 1
except:
print(traceback.format_exc())
await send(message, "Not an integer!", reaction = "x")
amt = None
if amt is not None:
if amt <= 0:
await send(message, "Must be a positive integer!", reaction = "x")
else:
voice = await get_voice(message)
gid = message.guild.id
queue = await default(gid, [], queues)
history = await default(gid, [], histories)
if command[0] == "skip":
history.extend(queue[:amt])
queue[:amt] = []
await send(message, f"Skipped `{current(gid, 'title')}`!" if amt == 1 else f"Skipped {amt} songs!", reaction = "check")
else:
queues[gid] = history[-amt:] + queue
history[-amt:] = []
await send(message, f"Backtracked {amt} song{'' if amt == 1 else 's'}!", reaction = "check")
if voice.is_playing() or voice.is_paused():
stopskip[message.guild.id] = stopskip.get(message.guild.id, 0) + 1
voice.stop()
await playaudio(message.channel)
@client.command("Voice Commands", ["replay"], "replay", "start playing the current song from the beginning")
async def command_replay(command, message):
voice = await get_voice(message)
if voice.is_playing() or voice.is_paused():
stopskip[message.guild.id] = stopskip.get(message.guild.id, 0) + 1
voice.stop()
await send(message, f"Restarting `{current(message.guild.id, 'title')}`!", reaction = "check")
await playaudio(message.channel)
else:
await send(message, "Nothing is playing!", "x")
@client.command("Voice Commands", ["np"], "np", "alias for `nowplaying`")
@client.command("Voice Commands", ["nowplaying"], "nowplaying", "display the current song")
async def command_nowplaying(command, message):
if not queues.get(message.guild.id):
await send(message, "Nothing is playing in this server!", reaction = "x")
else:
voice = await get_voice(message)
url, info, user = queues[message.guild.id][0]
ct = gettime(voice, message.guild.id)
prog = int(29 * ct / info["duration"])
bar = "▬" * prog + ("⏸️" if voice.is_paused() else "🔘") + "▬" * (29 - prog)
await send(message, embed = discord.Embed(
title = f"Now Playing in {message.guild.name}",
description = f"""[{cutmax(info['title'], 85)}]({url})
by [{cutmax(info['uploader'], 85)}]({info['uploader_url']})
`{bar}`
{ftime(gettime(voice, message.guild.id))} / {ftime(info['duration'])}
Requested by: {user.mention}""",
color = 0x3333AA
), reaction = "check")
@client.command("Voice Commands", ["history", "?"], "history [page = 1]", "display the song history")
@client.command("Voice Commands", ["queue", "?"], "queue [page = 1]", "display the song queue")
async def command_queue(command, message):
itemmap = histories if command[0] == "history" else queues
try:
page = int(command[1]) if len(command) > 1 else 1
except:
await send(message, "Page must be an integer!", reaction = "x")
page = None
if page is not None:
if page <= 0:
await send(message, "Page must be a positive integer!", reaction = "x")
else:
page -= 1
mv = 0 if command[0] == "history" else 1
if len(await default(message.guild.id, [], itemmap)) < mv:
await send(message, "Nothing is in the history (has finished playing) in this server! (this message should actually never occur)" if command[0] == "history" else "Nothing is playing in this server!", reaction = "x")
else:
voice = await get_voice(message)
items = itemmap[message.guild.id]
url, info, user = queues[message.guild.id][0]
msg = f"""__Now Playing__
[{cutmax(info['title'], 85)}]({url})
by [{cutmax(info['uploader'], 85)}]({info['uploader_url']})
`{ftime(gettime(voice, message.guild.id))} / {ftime(info['duration'])}` Requested by: {user.mention}
__Song {'History (most recent song first)' if command[0] == 'history' else 'Queue'}__
"""
if len(items) > mv:
maxpages = int(math.ceil((len(items) - mv) / 10))
if page >= maxpages:
msg += f"There are only {maxpages} pages of songs in the {command[0]}!" if maxpages == 1 else f"There is only 1 page of songs in the {command[0]}!"
else:
for pos, (url, info, user) in enumerate(items[::-1 if command[0] == "history" else 1][page * 10 + mv:][:10]):
msg += f"{page * 10 + pos + 1}. [{cutmax(info['title'], 85)}]({url}) | by [{cutmax(info['uploader'], 85)}]({info['uploader_url']})\n`{ftime(info['duration'])}` Requested by: {user.mention}\n\n"
msg += f"Page {page + 1}/{maxpages}"
else:
msg += "No songs have finished playing yet!" if command[0] == "history" else "No more songs are queued!"
await send(message, embed = discord.Embed(title = f"{'History' if command[0] == 'history' else 'Queue'} for {message.guild.name}", description = msg, color = 0x3333AA), reaction = "check")
(IterableObject(b"abc"), [97, 98, 99]), # The blueprint only sees 'IterableObject', not 'bytes', when checking the input data type. However, it's OK that the blueprint accepts it, as it would be unnecessarily complicated to program a check for such very unlikely inputs.
(IterableObject(bytearray(b"abc")), [97, 98, 99]), # The blueprint only sees 'IterableObject', not 'bytearray', when checking the input data type. However, it's OK that the blueprint accepts it, as it would be unnecessarily complicated to program a check for such very unlikely inputs.
(ExceptionRaisingIterableObject(raise_=False), [-123]),
([], []),
(tuple(), []),
(set(), []),
(dict(), InputDataTypeInBlocklistExc),
("", InputDataTypeInBlocklistExc),
(b"", InputDataTypeInBlocklistExc),
(("abc" for _ in range(0)), []),
(("abc" for _ in range(1)), InputDataNotConvertibleExc),
((theoretical_testutils.EmptyObject() for _ in range(0)), []),
((theoretical_testutils.EmptyObject() for _ in range(1)), InputDataTypeNotInAllowlistExc),
(map(lambda x: str(x) + "t", (1, 2, 3)), InputDataNotConvertibleExc),
(map(lambda _: theoretical_testutils.EmptyObject(), (1, 2, 3)), InputDataTypeNotInAllowlistExc),
([789, float("inf"), True], InputDataNotConvertibleExc),
([789, float("-inf"), True], InputDataNotConvertibleExc),
([789, float("nan"), True], InputDataNotConvertibleExc),
([789, "", True], InputDataNotConvertibleExc),
((789, "", True), InputDataNotConvertibleExc),
({789, "", True}, InputDataNotConvertibleExc),
({789: "hello", "": "hello", True: theoretical_testutils.EmptyObject()}, InputDataTypeInBlocklistExc),
([789, ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1"), True], InputDataTypeNotInAllowlistExc),
([789, theoretical_testutils.EmptyObject(), True], InputDataTypeNotInAllowlistExc),
([ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1")], InputDataTypeNotInAllowlistExc),
([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc),
("123a456", InputDataTypeInBlocklistExc),
("-123", InputDataTypeInBlocklistExc),
("123_000", InputDataTypeInBlocklistExc),
("hello", InputDataTypeInBlocklistExc),
(None, InputDataNotConvertibleExc),
(False, InputDataNotConvertibleExc),
(True, InputDataNotConvertibleExc),
(-123, InputDataNotConvertibleExc),
(0, InputDataNotConvertibleExc),
(123, InputDataNotConvertibleExc),
(-123.5, InputDataNotConvertibleExc),
(-0.0, InputDataNotConvertibleExc),
(0.0, InputDataNotConvertibleExc),
(123.5, InputDataNotConvertibleExc),
(float("inf"), InputDataNotConvertibleExc),
(float("nan"), InputDataNotConvertibleExc),
(int, InputDataNotConvertibleExc),
(theoretical_testutils.EmptyObject, InputDataNotConvertibleExc),
(datetime.datetime.now(), InputDataNotConvertibleExc),
(datetime.datetime.now().date(), InputDataNotConvertibleExc),
(datetime.datetime.now().time(), InputDataNotConvertibleExc),
(ipaddress.ip_address("127.0.0.1"), InputDataNotConvertibleExc),
(ipaddress.ip_address("::1"), InputDataNotConvertibleExc),
(ipaddress.ip_network("127.0.0.0/30"), InputDataTypeNotInAllowlistExc), # ipaddress.ip_network() can be converted to list of IP addresses, but they cannot be converted to int due to the IntegerBlueprint being in rational mode!
(ipaddress.ip_network("2001:db8::/126"), InputDataTypeNotInAllowlistExc), # ipaddress.ip_network() can be converted to list of IP addresses, but they cannot be converted to int due to the IntegerBlueprint being in rational mode!
(urllib.parse.urlparse("https://www.google.cz/test?abc=def"), InputDataNotConvertibleExc),
(uuid.UUID('{12345678-1234-5678-1234-567812345678}'), InputDataNotConvertibleExc),
(theoretical_testutils.EmptyObject(), InputDataNotConvertibleExc),
(IterableObject([1, "", 3]), InputDataNotConvertibleExc),
(IterableObject([1, "hello", 3]), InputDataNotConvertibleExc),
(IterableObject([1, theoretical_testutils.EmptyObject, 2]), InputDataTypeNotInAllowlistExc),
(IterableObject([1, theoretical_testutils.EmptyObject(), 2]), InputDataTypeNotInAllowlistExc),
(ExceptionRaisingIterableObject(raise_=True), InputDataNotConvertibleExc),
)),
(ListBlueprint(item_blueprint=IntegerBlueprint(), parsing_mode=ParsingMode.MODE_STRICT), (
([789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False], [789, -123, 2, 4, 456, -888222, 1, 0]),
((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False), [789, -123, 2, 4, 456, -888222, 1, 0]),
({789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False}, ignore_order_of_output_list([789, -123, 2, 4, 456, -888222, 1, 0])),
(frozenset((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False)), ignore_order_of_output_list([789, -123, 2, 4, 456, -888222, 1, 0])),
(
{789: theoretical_testutils.EmptyObject(), -123: "hello", 2.5: "hello", 4.775: "hello", "456": "hello", "\r\n-888_222 \t": "hello", True: "hello", False: "hello"},
InputDataTypeNotInAllowlistExc
),
([2.001, 2.499, 2.5, 2.501, 2.999, 0.0, -0.0], [2, 2, 2, 2, 2, 0, 0]),
("1234567890", InputDataTypeNotInAllowlistExc),
(b"\x00\x00\x00\x00", InputDataTypeNotInAllowlistExc),
(b"abcdef", InputDataTypeNotInAllowlistExc),
(bytearray(b"abcdef"), InputDataTypeNotInAllowlistExc),
(range(5, 15), InputDataTypeNotInAllowlistExc),
(sorted((100, 5, 849, 2, -456, 999)), [-456, 2, 5, 100, 849, 999]), # sorted() returns a list object no matter what its input iterable was!
(sorted("18754522"), [1, 2, 2, 4, 5, 5, 7, 8]), # sorted() returns a list object no matter what its input iterable was!
(sorted(b"cabfdeee"), [97, 98, 99, 100, 101, 101, 101, 102]), # sorted() returns a list object no matter what its input iterable was!
(sorted(bytearray(b"cabfdeee")), [97, 98, 99, 100, 101, 101, 101, 102]), # sorted() returns a list object no matter what its input iterable was!
((i * i for i in range(10)), InputDataTypeNotInAllowlistExc),
(map(lambda x: x + "000", ("1", "2", "3")), InputDataTypeNotInAllowlistExc),
(map(lambda x: x ** 2, range(5)), InputDataTypeNotInAllowlistExc),
(filter(lambda x: len(x) > 1, ("1", "123", "", "t", "789456", "\r\n9\t")), InputDataTypeNotInAllowlistExc),
(IterableObject([]), InputDataTypeNotInAllowlistExc),
(IterableObject(["-555", 2.999, True, "\v+123_000\f", 999]), InputDataTypeNotInAllowlistExc),
(IterableObject({"-789": "HelloWorld!", False: theoretical_testutils.EmptyObject(), 5.5: "xyz"}), InputDataTypeNotInAllowlistExc),
(IterableObject(range(1, 10, 2)), InputDataTypeNotInAllowlistExc),
(IterableObject("886644"), InputDataTypeNotInAllowlistExc),
(IterableObject(b"abc"), InputDataTypeNotInAllowlistExc),
(IterableObject(bytearray(b"abc")), InputDataTypeNotInAllowlistExc),
(ExceptionRaisingIterableObject(raise_=False), InputDataTypeNotInAllowlistExc),
([], []),
(tuple(), []),
(set(), []),
(dict(), InputDataTypeNotInAllowlistExc),
("", InputDataTypeNotInAllowlistExc),
(b"", InputDataTypeNotInAllowlistExc),
(("abc" for _ in range(0)), InputDataTypeNotInAllowlistExc),
(("abc" for _ in range(1)), InputDataTypeNotInAllowlistExc),
((theoretical_testutils.EmptyObject() for _ in range(0)), InputDataTypeNotInAllowlistExc),
((theoretical_testutils.EmptyObject() for _ in range(1)), InputDataTypeNotInAllowlistExc),
(map(lambda x: str(x) + "t", (1, 2, 3)), InputDataTypeNotInAllowlistExc),
(map(lambda _: theoretical_testutils.EmptyObject(), (1, 2, 3)), InputDataTypeNotInAllowlistExc),
([789, float("inf"), True], InputDataNotConvertibleExc),
([789, float("-inf"), True], InputDataNotConvertibleExc),
([789, float("nan"), True], InputDataNotConvertibleExc),
([789, "", True], InputDataNotConvertibleExc),
((789, "", True), InputDataNotConvertibleExc),
({789, "", True}, InputDataNotConvertibleExc),
({789: "hello", "": "hello", True: theoretical_testutils.EmptyObject()}, InputDataTypeNotInAllowlistExc),
([789, ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1"), True], InputDataTypeNotInAllowlistExc),
([789, theoretical_testutils.EmptyObject(), True], InputDataTypeNotInAllowlistExc),
([ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1")], InputDataTypeNotInAllowlistExc),
([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc),
("123a456", InputDataTypeNotInAllowlistExc),
("-123", InputDataTypeNotInAllowlistExc),
("123_000", InputDataTypeNotInAllowlistExc),
("hello", InputDataTypeNotInAllowlistExc),
(None, InputDataTypeNotInAllowlistExc),
(False, InputDataTypeNotInAllowlistExc),
(True, InputDataTypeNotInAllowlistExc),
(-123, InputDataTypeNotInAllowlistExc),
(0, InputDataTypeNotInAllowlistExc),
(123, InputDataTypeNotInAllowlistExc),
(-123.5, InputDataTypeNotInAllowlistExc),
(-0.0, InputDataTypeNotInAllowlistExc),
(0.0, InputDataTypeNotInAllowlistExc),
(123.5, InputDataTypeNotInAllowlistExc),
(float("inf"), InputDataTypeNotInAllowlistExc),
(float("nan"), InputDataTypeNotInAllowlistExc),
(int, InputDataTypeNotInAllowlistExc),
(theoretical_testutils.EmptyObject, InputDataTypeNotInAllowlistExc),
(datetime.datetime.now(), InputDataTypeNotInAllowlistExc),
(datetime.datetime.now().date(), InputDataTypeNotInAllowlistExc),
(datetime.datetime.now().time(), InputDataTypeNotInAllowlistExc),
(ipaddress.ip_address("127.0.0.1"), InputDataTypeNotInAllowlistExc),
(ipaddress.ip_address("::1"), InputDataTypeNotInAllowlistExc),
(ipaddress.ip_network("127.0.0.0/30"), InputDataTypeNotInAllowlistExc),
(ipaddress.ip_network("2001:db8::/126"), InputDataTypeNotInAllowlistExc),
(urllib.parse.urlparse("https://www.google.cz/test?abc=def"), InputDataNotConvertibleExc), # ParseResult is a subclass of tuple!!!
(uuid.UUID('{12345678-1234-5678-1234-567812345678}'), InputDataTypeNotInAllowlistExc),
(theoretical_testutils.EmptyObject(), InputDataTypeNotInAllowlistExc),
(IterableObject([1, "", 3]), InputDataTypeNotInAllowlistExc),
(IterableObject([1, "hello", 3]), InputDataTypeNotInAllowlistExc),
(IterableObject([1, theoretical_testutils.EmptyObject, 2]), InputDataTypeNotInAllowlistExc),
(IterableObject([1, theoretical_testutils.EmptyObject(), 2]), InputDataTypeNotInAllowlistExc),
(ExceptionRaisingIterableObject(raise_=True), InputDataTypeNotInAllowlistExc),
)),
(ListBlueprint(item_blueprint=StringBlueprint(), parsing_mode=ParsingMode.MODE_LOOSE), (
([789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False], ["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"]),
((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False), ["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"]),
({789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False}, ignore_order_of_output_list(["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"])),
(frozenset((789, -123, 2.5, 4.775, "456", "\r\n-888_222 \t", True, False)), ignore_order_of_output_list(["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"])),
(
{789: theoretical_testutils.EmptyObject(), -123: "hello", 2.5: "hello", 4.775: "hello", "456": "hello", "\r\n-888_222 \t": "hello", True: "hello", False: "hello"},
ignore_order_of_output_list(["789", "-123", "2.5", "4.775", "456", "\r\n-888_222 \t", "True", "False"])
),
([2.001, 2.499, 2.5, 2.501, 2.999, 0.0, -0.0], ["2.001", "2.499", "2.5", "2.501", "2.999", "0.0", "-0.0"]),
("1234567890", ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0"]),
(b"\x00\x00\x00\x00", ["0", "0", "0", "0"]),
(b"abcdef", ["97", "98", "99", "100", "101", "102"]), # list(bytes) returns a list of integers (ASCII values)!
(bytearray(b"abcdef"), ["97", "98", "99", "100", "101", "102"]), # list(bytes) returns a list of integers (ASCII values)!
(range(5, 15), ["5", "6", "7", "8", "9", "10", "11", "12", "13", "14"]),
(sorted((100, 5, 849, 2, -456, 999)), ["-456", "2", "5", "100", "849", "999"]),
(sorted("18754522"), ["1", "2", "2", "4", "5", "5", "7", "8"]),
(sorted(b"cabfdeee"), ["97", "98", "99", "100", "101", "101", "101", "102"]),
(sorted(bytearray(b"cabfdeee")), ["97", "98", "99", "100", "101", "101", "101", "102"]),
((i * i for i in range(10)), ["0", "1", "4", "9", "16", "25", "36", "49", "64", "81"]),
(map(lambda x: x + "000", ("1", "2", "3")), ["1000", "2000", "3000"]),
(map(lambda x: x ** 2, range(5)), ["0", "1", "4", "9", "16"]),
(filter(lambda x: len(x) > 1, ("1", "123", "", "t", "789456", "\r\n9\t")), ["123", "789456", "\r\n9\t"]),
(IterableObject([]), []),
(IterableObject(["-555", 2.999, True, "\v+123_000\f", 999]), ["-555", "2.999", "True", "\v+123_000\f", "999"]),
(IterableObject({"-789": "HelloWorld!", False: theoretical_testutils.EmptyObject(), 5.5: "xyz"}), ignore_order_of_output_list(["-789", "False", "5.5"])),
(IterableObject(range(1, 10, 2)), ["1", "3", "5", "7", "9"]),
(IterableObject("886644"), ["8", "8", "6", "6", "4", "4"]),
(IterableObject(b"abc"), ["97", "98", "99"]),
(IterableObject(bytearray(b"abc")), ["97", "98", "99"]),
(ExceptionRaisingIterableObject(raise_=False), ["-123"]),
([], []),
(tuple(), []),
(set(), []),
(dict(), []),
("", []),
(b"", []),
(("abc" for _ in range(0)), []),
(("abc" for _ in range(1)), ["abc"]),
((theoretical_testutils.EmptyObject() for _ in range(0)), []),
((theoretical_testutils.EmptyObject() for _ in range(1)), InputDataTypeNotInAllowlistExc),
(map(lambda x: str(x) + "t", (1, 2, 3)), ["1t", "2t", "3t"]),
(map(lambda _: theoretical_testutils.EmptyObject(), (1, 2, 3)), InputDataTypeNotInAllowlistExc),
([789, float("inf"), True], ["789", "inf", "True"]),
([789, float("-inf"), True], ["789", "-inf", "True"]),
([789, float("nan"), True], ["789", "nan", "True"]),
([789, "", True], ["789", "", "True"]),
((789, "", True), ["789", "", "True"]),
({789, "", True}, ignore_order_of_output_list(["789", "", "True"])),
([789, "Hello World!", True], ["789", "Hello World!", "True"]),
((789, "Hello World!", True), ["789", "Hello World!", "True"]),
({789, "Hello World!", True}, ignore_order_of_output_list(["789", "Hello World!", "True"])),
({789: "hello", "": "hello", True: theoretical_testutils.EmptyObject()}, ignore_order_of_output_list(["789", "", "True"])),
([789, ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1"), True], ["789", "127.0.0.1", "::1", "True"]),
([789, theoretical_testutils.EmptyObject(), True], InputDataTypeNotInAllowlistExc),
([ipaddress.ip_address("127.0.0.1"), ipaddress.ip_address("::1")], ["127.0.0.1", "::1"]),
([theoretical_testutils.EmptyObject()], InputDataTypeNotInAllowlistExc),
("123a456", ["1", "2", "3", "a", "4", "5", "6"]),
("-123", ["-", "1", "2", "3"]),
("123_000", ["1", "2", "3", "_", "0", "0", "0"]),
("hello", ["h", "e", "l", "l", "o"]),
(None, InputDataNotConvertibleExc),
(False, InputDataNotConvertibleExc),
(True, InputDataNotConvertibleExc),
(-123, InputDataNotConvertibleExc),
(0, InputDataNotConvertibleExc),
(123, InputDataNotConvertibleExc),
(-123.5, InputDataNotConvertibleExc),
(-0.0, InputDataNotConvertibleExc),
(0.0, InputDataNotConvertibleExc),
(123.5, InputDataNotConvertibleExc),
(float("inf"), InputDataNotConvertibleExc),
(float("nan"), InputDataNotConvertibleExc),
(int, InputDataNotConvertibleExc),
(theoretical_testutils.EmptyObject, InputDataNotConvertibleExc),
(datetime.datetime.now(), InputDataNotConvertibleExc),
(datetime.datetime.now().date(), InputDataNotConvertibleExc),
(datetime.datetime.now().time(), InputDataNotConvertibleExc),
# cybrnode/pyzkaccess
from datetime import datetime
from enum import Enum
from unittest.mock import Mock, call
import pytest
from pyzkaccess.device import ZK400
from pyzkaccess.enum import SensorType, VerifyMode
from pyzkaccess.param import (
DaylightSavingMomentMode1,
DaylightSavingMomentMode2,
DeviceParameters,
DoorParameters
)
# (property, parameter_name, property_type, correct_sdk_values, wrong_sdk_values, correct_prop_values, wrong_prop_values)
daylight_saving_mode2_properties = (
(
'month', ('WeekOfMonth1', 'WeekOfMonth6'), int, ['1', '12'], ['0', '-1', '13'],
[1, 12], [0, -1, 13]
),
(
'week_of_month', ('WeekOfMonth2', 'WeekOfMonth7'), int, ['1', '6'], ['0', '-1', '7'],
[1, 6], [0, -1, 7]
),
(
'day_of_week', ('WeekOfMonth3', 'WeekOfMonth8'), int, ['1', '7'], ['0', '-1', '8'],
[1, 7], [0, -1, 8]
),
(
'hour', ('WeekOfMonth4', 'WeekOfMonth9'), int, ['0', '23'], ['-1', '24'],
[0, 23], [-1, 24]
),
(
'minute', ('WeekOfMonth5', 'WeekOfMonth10'), int, ['0', '59'], ['-1', '60'],
[0, 59], [-1, 60]
),
)
# (property, parameter_name, property_type, correct_sdk_values, wrong_sdk_values, correct_prop_values, wrong_prop_values)
device_params_read_only = (
('serial_number', '~SerialNumber', str, ['asdf', ''], [], ['asdf', ''], []),
('lock_count', 'LockCount', int, ['2'], ['asdf', ''], [2], ['asdf', '']),
('reader_count', 'ReaderCount', int, ['2'], ['asdf', ''], [2], ['asdf', '']),
('aux_in_count', 'AuxInCount', int, ['2'], ['asdf', ''], [2], ['asdf', '']),
('aux_out_count', 'AuxOutCount', int, ['2'], ['asdf', ''], [2], ['asdf', '']),
# reboot
(
'fingerprint_version', '~ZKFPVersion', int, ['9', '10'], ['8', '11', 'asdf', ''],
[9, 10], [8, 11, 'asdf', '']
),
)
# (property, parameter_name, property_type, correct_sdk_values, wrong_sdk_values, correct_prop_values, wrong_prop_values)
device_params_read_write = (
('communication_password', '<PASSWORD>', str, ['asdf', ''], [], ['asdf', ''], []),
(
'ip_address', 'IPAddress', str, ['192.168.1.201'], ['', 'asdf', '1.2.3'],
['192.168.1.201'], ['', 'asdf', '1.2.3']
),
(
'netmask', 'NetMask', str, ['192.168.1.255'], ['', 'asdf', '1.2.3'],
['192.168.1.201'], ['', 'asdf', '1.2.3']
),
(
'gateway_ip_address', 'GATEIPAddress', str, ['192.168.1.201'], ['', 'asdf', '1.2.3'],
['192.168.1.201'], ['', 'asdf', '1.2.3']
),
('rs232_baud_rate', 'RS232BaudRate', int, ['32165'], ['asdf', ''], [32165], ['asdf', '']),
(
'watchdog_enabled', 'WatchDog', bool, ['1', '0'], ['asdf', ''],
[True, False], ['asdf', '', 1]
),
(
'door4_to_door2', 'Door4ToDoor2', bool, ['1', '0'], ['asdf', ''],
[True, False], ['asdf', '', 1]
),
(
'backup_hour', 'BackupTime', int, ['2'], ['asdf', '', '-1', '0', '25'],
[2], ['asdf', '', -1, 0, 25]
),
('reader_direction', 'InBIOTowWay', str, ['1', '0'], [], ['1', '0'], []),
(
'display_daylight_saving', '~DSTF', bool, ['1', '0'], ['asdf', ''],
[True, False], ['asdf', '', 1]
),
(
'enable_daylight_saving', 'DaylightSavingTimeOn', bool, ['1', '0'], ['asdf', ''],
[True, False], ['asdf', '', 1]
),
('daylight_saving_mode', 'DLSTMode', int, ['1', '0'], ['asdf', ''], [0, 1], ['asdf', '']),
(
'anti_passback_rule', 'AntiPassback', int, ['16', '0', '128'], ['asdf', ''],
[16, 0, 128], ['asdf', '']
),
(
'interlock', 'InterLock', int, ['0', '5'], ['asdf', '', '-1', '241'],
[0, 5], ['asdf', '', -1, 241]
),
# datetime
# spring_daylight_time_mode1
# fall_daylight_time_mode1
# spring_daylight_time_mode2
# fall_daylight_time_mode2
)
# (property, parameter_name, property_type, correct_sdk_values, wrong_sdk_values, correct_prop_values, wrong_prop_values)
door_params_read_write = (
('duress_password', '<PASSWORD>', str, ['', '1234'], ['asdf'], ['', '1234'], ['asdf', 1234]),
(
'emergency_password', '<PASSWORD>perPassWord', str, ['', '1234'], ['asdf'],
['', '1234'], ['asdf', 1234]
),
(
'lock_on_close', 'CloseAndLock', bool, ['1', '0'], ['asdf', ''],
[True, False], ['asdf', '', 1]
),
(
'sensor_type', 'SensorType', SensorType, ['0', '2'], ['asdf', '', '-1', '3'],
[SensorType.normal_closed], ['asdf', '', -1, 3, 2]
),
(
'lock_driver_time', 'Drivertime', int, ['0', '255'], ['asdf', '', '-1', '256'],
[0, 255], ['asdf', '', -1, 256]
),
(
'magnet_alarm_duration', 'Detectortime', int, ['0', '255'], ['asdf', '', '-1', '256'],
[0, 255], ['asdf', '', -1, 256]
),
(
'verify_mode', 'VerifyType', VerifyMode,
['0', '1', '3', '4', '6', '10', '11', '200'], ['asdf', '', '-1', '2', '100', '201'],
[VerifyMode.card_and_finger], ['asdf', '', -1, 2, 100, 201]
),
(
'multi_card_open', 'MultiCardOpenDoor', bool, ['1', '0'], ['asdf', ''],
[True, False], ['asdf', '']
),
(
'first_card_open', 'FirstCardOpenDoor', bool, ['1', '0'], ['asdf', ''],
[True, False], ['asdf', '']
),
('active_time_tz', 'ValidTZ', int, ['0', '128'], ['asdf', ''], [0, 128], ['asdf', '']),
('open_time_tz', 'KeepOpenTimeZone', int, ['0', '128'], ['asdf', ''], [0, 128], ['asdf', '']),
('punch_interval', 'Intertime', int, ['0', '128'], ['asdf', ''], [0, 128], ['asdf', '']),
(
'cancel_open_day', 'CancelKeepOpenDay', int, ['0', '128'], ['asdf', ''],
[0, 128], ['asdf', '']
)
)
def prop_read_test_combinations(test_cases, correct):
for prop, param, prop_type, ok_sdks, bad_sdks, _, _ in test_cases:
test_vals = ok_sdks if correct else bad_sdks
for test_val in test_vals:
expect_value = None
if correct:
expect_value = test_val
if issubclass(prop_type, (int, Enum, bool)):
expect_value = int(test_val)
expect_value = prop_type(expect_value)
yield prop, param, prop_type, test_val, expect_value
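# Hedged illustration (not part of the test suite): for a row such as
# ('lock_count', 'LockCount', int, ['2'], ['asdf', ''], [2], ['asdf', '']),
# prop_read_test_combinations(..., correct=True) yields
#   ('lock_count', 'LockCount', int, '2', 2)
# i.e. the raw SDK string plus the value it is expected to parse into, while
# correct=False yields one combination per wrong SDK value with expect_value=None.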
def prop_write_test_combinations(test_cases, correct):
for prop, param, prop_type, _, _, ok_props, bad_props in test_cases:
test_vals = ok_props if correct else bad_props
for test_val in test_vals:
expect_value = None
if correct:
expect_value = test_val
if isinstance(expect_value, bool):
expect_value = int(expect_value)
elif isinstance(expect_value, Enum):
expect_value = expect_value.value
expect_value = str(expect_value)
yield prop, param, prop_type, test_val, expect_value
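# Hedged illustration (not part of the test suite): for the row
# ('watchdog_enabled', 'WatchDog', bool, ['1', '0'], ['asdf', ''], [True, False], ['asdf', '', 1]),
# prop_write_test_combinations(..., correct=True) yields
#   ('watchdog_enabled', 'WatchDog', bool, True, '1') and
#   ('watchdog_enabled', 'WatchDog', bool, False, '0')
# i.e. the python-side value plus the string the SDK is expected to receive.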
class TestDaylightSavingMomentMode1:
def test_init__should_initialize_attributes(self):
obj = DaylightSavingMomentMode1(2, 4, 15, 35)
assert obj.month == 2
assert obj.day == 4
assert obj.hour == 15
assert obj.minute == 35
@pytest.mark.parametrize('init_kwargs', (
{'month': 13, 'day': 4, 'hour': 15, 'minute': 35},
{'month': 0, 'day': 4, 'hour': 15, 'minute': 35},
{'month': 2, 'day': 32, 'hour': 15, 'minute': 35},
{'month': 2, 'day': 0, 'hour': 15, 'minute': 35},
{'month': 2, 'day': 4, 'hour': -1, 'minute': 35},
{'month': 2, 'day': 4, 'hour': 24, 'minute': 35},
{'month': 2, 'day': 4, 'hour': 15, 'minute': -1},
{'month': 2, 'day': 4, 'hour': 15, 'minute': 60},
))
def test_init__if_parameters_out_of_range__should_raise_error(self, init_kwargs):
with pytest.raises(ValueError):
_ = DaylightSavingMomentMode1(**init_kwargs)
def test_str__should_return_string_representation(self):
obj = DaylightSavingMomentMode1(2, 4, 15, 35)
assert str(obj) == '2-4-15-35'
def test_repr__should_return_name_of_class(self):
obj = DaylightSavingMomentMode1(2, 4, 15, 35)
assert repr(obj).startswith('DaylightSavingMomentMode1(')
class TestDaylightSavingMomentMode2:
def test_init__should_initialize_attributes(self):
sdk = Mock()
obj = DaylightSavingMomentMode2(sdk, True, 4096)
assert obj._sdk is sdk
assert obj.is_daylight is True
assert obj.buffer_size == 4096
@pytest.mark.parametrize(
'prop,param,prop_type,sdk_value,prop_value',
prop_read_test_combinations(daylight_saving_mode2_properties, correct=True)
)
@pytest.mark.parametrize('is_daylight,param_idx', ((True, 0), (False, 1)))
def test_read_readwrite_property__should_return_value_of_correct_type(
self, prop, param, prop_type, sdk_value, prop_value, is_daylight, param_idx
):
sdk = Mock()
param = param[param_idx]
sdk.get_device_param.return_value = {param: sdk_value}
obj = DaylightSavingMomentMode2(sdk, is_daylight, 4096)
res = getattr(obj, prop)
sdk.get_device_param.assert_called_once_with(parameters=(param, ), buffer_size=4096)
assert type(res) == prop_type
assert res == prop_value
@pytest.mark.parametrize(
'prop,param,prop_type,sdk_value,prop_value',
prop_read_test_combinations(daylight_saving_mode2_properties, correct=False)
)
@pytest.mark.parametrize('is_daylight,param_idx', ((True, 0), (False, 1)))
def test_read_readwrite_property__if_wrong_value_returned__should_raise_error(
self, prop, param, prop_type, sdk_value, prop_value, is_daylight, param_idx
):
sdk = Mock()
param = param[param_idx]
sdk.get_device_param.return_value = {param: sdk_value}
obj = DaylightSavingMomentMode2(sdk, is_daylight, 4096)
with pytest.raises((TypeError, ValueError)):
_ = getattr(obj, prop)
@pytest.mark.parametrize(
'prop,param,prop_type,prop_value,sdk_value',
prop_write_test_combinations(daylight_saving_mode2_properties, correct=True)
)
@pytest.mark.parametrize('is_daylight,param_idx', ((True, 0), (False, 1)))
def test_write_readwrite_property__should_set_value_on_a_device(
self, prop, param, prop_type, sdk_value, prop_value, is_daylight, param_idx
):
sdk = Mock()
param = param[param_idx]
sdk.set_device_param.return_value = None
obj = DaylightSavingMomentMode2(sdk, is_daylight, 4096)
setattr(obj, prop, prop_value)
sdk.set_device_param.assert_called_once_with(parameters={param: sdk_value})
@pytest.mark.parametrize(
'prop,param,prop_type,prop_value,sdk_value',
prop_write_test_combinations(daylight_saving_mode2_properties, correct=False)
)
@pytest.mark.parametrize('is_daylight,param_idx', ((True, 0), (False, 1)))
def test_write_readwrite_property__if_wrong_value_passed__should_raise_error(
self, prop, param, prop_type, sdk_value, prop_value, is_daylight, param_idx
):
sdk = Mock()
sdk.set_device_param.return_value = None
obj = DaylightSavingMomentMode2(sdk, is_daylight, 4096)
with pytest.raises(ValueError):
setattr(obj, prop, prop_value)
def test_str__should_return_name_of_class(self):
def se(parameters, buffer_size):
return {parameters[0]: '1'}
sdk = Mock()
sdk.get_device_param.side_effect = se
obj = DaylightSavingMomentMode2(sdk, False, 4096)
assert str(obj).startswith('DaylightSavingMomentMode2(')
def test_repr__should_return_name_of_class(self):
def se(parameters, buffer_size):
return {parameters[0]: '1'}
sdk = Mock()
sdk.get_device_param.side_effect = se
obj = DaylightSavingMomentMode2(sdk, False, 4096)
assert repr(obj).startswith('DaylightSavingMomentMode2(')
class TestDeviceParameters:
def test_init__should_fill_attributes(self):
sdk = Mock()
obj = DeviceParameters(sdk, ZK400)
assert obj._sdk is sdk
assert obj.device_model == ZK400
@pytest.mark.parametrize(
'prop,param,prop_type,sdk_value,prop_value',
prop_read_test_combinations(device_params_read_only, correct=True)
)
def test_read_readonly_property__should_return_value_of_correct_type(
self, prop, param, prop_type, sdk_value, prop_value
):
sdk = Mock()
sdk.get_device_param.return_value = {param: sdk_value}
obj = DeviceParameters(sdk, ZK400)
res = getattr(obj, prop)
sdk.get_device_param.assert_called_once_with(parameters=(param, ), buffer_size=4096)
assert isinstance(res, prop_type)
assert res == prop_value
@pytest.mark.parametrize(
'prop,param,prop_type,sdk_value,prop_value',
prop_read_test_combinations(device_params_read_only, correct=False)
)
def test_read_readonly_property__if_wrong_value_returned__should_raise_error(
self, prop, param, prop_type, sdk_value, prop_value
):
sdk = Mock()
sdk.get_device_param.return_value = {param: sdk_value}
obj = DeviceParameters(sdk, ZK400)
with pytest.raises((TypeError, ValueError)):
_ = getattr(obj, prop)
@pytest.mark.parametrize(
'prop,param,prop_type,prop_value,sdk_value',
prop_write_test_combinations(device_params_read_only, correct=True)
)
def test_write_readonly_property__for_read_only_properties__should_raise_error(
self, prop, param, prop_type, sdk_value, prop_value
):
sdk = Mock()
obj = DeviceParameters(sdk, ZK400)
with pytest.raises(AttributeError):
setattr(obj, prop, prop_value)
@pytest.mark.parametrize(
'prop,param,prop_type,sdk_value,prop_value',
prop_read_test_combinations(device_params_read_write, correct=True)
)
def test_read_readwrite_property__should_return_value_of_correct_type(
self, prop, param, prop_type, sdk_value, prop_value
):
sdk = Mock()
sdk.get_device_param.return_value = {param: sdk_value}
obj = DeviceParameters(sdk, ZK400)
res = getattr(obj, prop)
sdk.get_device_param.assert_called_once_with(parameters=(param, ), buffer_size=4096)
assert isinstance(res, prop_type)
assert res == prop_value
@pytest.mark.parametrize(
'prop,param,prop_type,sdk_value,prop_value',
prop_read_test_combinations(device_params_read_write, correct=False)
)
def test_read_readwrite_property__if_wrong_value_returned__should_raise_error(
self, prop, param, prop_type, sdk_value, prop_value
):
sdk = Mock()
sdk.get_device_param.return_value = {param: sdk_value}
obj = DeviceParameters(sdk, ZK400)
with pytest.raises((TypeError, ValueError)):
_ = getattr(obj, prop)
@pytest.mark.parametrize(
'prop,param,prop_type,prop_value,sdk_value',
prop_write_test_combinations(device_params_read_write, correct=True)
)
def test_write_readwrite_property__should_set_value_on_a_device(
self, prop, param, prop_type, sdk_value, prop_value
):
sdk = Mock()
sdk.set_device_param.return_value = None
obj = DeviceParameters(sdk, ZK400)
setattr(obj, prop, prop_value)
sdk.set_device_param.assert_called_once_with(parameters={param: sdk_value})
@pytest.mark.parametrize(
'prop,param,prop_type,prop_value,sdk_value',
prop_write_test_combinations(device_params_read_write, correct=False)
)
def test_write_readwrite_property__if_wrong_value_passed__should_raise_error(
self, prop, param, prop_type, sdk_value, prop_value
):
sdk = Mock()
sdk.set_device_param.return_value = None
obj = DeviceParameters(sdk, ZK400)
with pytest.raises((ValueError, TypeError)):
setattr(obj, prop, prop_value)
foo_clip_output2 = "fooclip_output2"
arcpy.MultipartToSinglepart_management(foo_clip_output, foo_clip_output2)
# Select by location - intersect of centerline and inundated_area_clip
subreach_x = (r"ScriptOutputs/subreach_{}".format(puppy))
foo_subreach = arcpy.SelectLayerByLocation_management(foo_clip_output2,'INTERSECT',selected_centerline, "", 'NEW_SELECTION')
arcpy.CopyFeatures_management(foo_subreach, subreach_x)
# Create 2 border polygons, one along each bank
# (NOTE: using polygons instead of lines helps avoid potential errors caused by messy pixelated
# geometry caused by using Raster To Feature to create river area feature.)
# (ALSO NOTE: The outside edge of these buffers lines up with the river's edge)
# Clip river_area_lines to subreach_x and assign a "BankID" number to each
foo_clip_lines1 = "foocliplines1"
fooclip = arcpy.Clip_analysis(river_area_lines, subreach_x, foo_clip_lines1)
#Convert multipart to single part...
foo_clip_lines = "foo_clip_lines"
arcpy.MultipartToSinglepart_management(foo_clip_lines1, foo_clip_lines)
# Buffer the output with small (2m) buffers on the inside of the river, and rounded ends (for overlap)
foo_output = "foobuffer"
foobuffer = arcpy.Buffer_analysis(foo_clip_lines, foo_output, "2 meters", "RIGHT", "ROUND")
# Dissolve intersecting buffer outputs
foo_dissolve1 = "foo_dissolve1"
arcpy.Dissolve_management(foo_output, foo_dissolve1, "", "", 'SINGLE_PART')
# Confirm dissolve output is single part...
foo_dissolve = "foo_dissolve"
arcpy.MultipartToSinglepart_management(foo_dissolve1, foo_dissolve)
# IF only one polygon is created (as is the case for river spurs), use the
# Feature To Polygon tool to split it into 2 with the centerline
cursor = arcpy.da.SearchCursor(foo_dissolve,'Shape_Length')
count = 0
YSHAPED = "FALSE"
y_count = 0
for row in cursor:
count = count + 1
print("count of bank polygons = {}".format(count))
if count == 1:
foo_banks = "foo_banks"
arcpy.FeatureToPolygon_management([foo_dissolve, selected_centerline], foo_banks)
YSHAPED = "FALSE"
#Sometimes there can be a rare condition where the above operation splits the polygon into
#three parts instead of two (this only happens at the edges of the study area, because of
#the study area boundaries not being at a right angle to the river.)
#If this happens, just delete the smallest scrap of bank - it wouldn't be relevant anyway
# because its buffer would be outside the study area.
foo_banks_length = int(arcpy.GetCount_management(foo_banks).getOutput(0))
print("after splitting polygons, there are now {} bank polygons".format(foo_banks_length))
if foo_banks_length > 2:
foo_minimum = -99
YSHAPED = "TRUE" #This redirects the script to remove the third piece (below).
elif count == 2:
foo_banks = "foo_banks"
arcpy.CopyFeatures_management(foo_dissolve, foo_banks)
YSHAPED = "FALSE"
print("count of bank polygons is still 2.")
else: # If this stream segment is at a forked/y-shaped section of the river, part of one of the banks
# of the neighboring segment can sometimes be selected - but, this polygon is typically smaller
# than the others.
YSHAPED = "TRUE"
y_count = y_count + 1
foo_minimum = -99
if YSHAPED == "TRUE":
print("Correcting for too many banks...")
foo_length = count
while foo_length > 2:
with arcpy.da.SearchCursor(foo_dissolve, 'Shape_Length') as cursor:
foo_minimum = min(cursor)
print(foo_minimum[0], foo_length)
foo = ('"Shape_Length" > {}'.format(foo_minimum[0]))
temp_select = arcpy.SelectLayerByAttribute_management(foo_dissolve, 'NEW_SELECTION', foo)
foo_dissolve_tmp = "foo_dissolve_tmp"
arcpy.CopyFeatures_management(temp_select, foo_dissolve_tmp)
arcpy.CopyFeatures_management(foo_dissolve_tmp, foo_dissolve)
foo_length = int(arcpy.GetCount_management(foo_dissolve).getOutput(0))
print("New number of bank polygons = {}".format(foo_length))
foo_banks = "foo_banks"
arcpy.CopyFeatures_management(foo_dissolve, foo_banks)
# Assign a unique ID ("BankID") to each bank
arcpy.AddField_management(foo_banks, "BankID", "LONG")
expression = "!OBJECTID!"
arcpy.CalculateField_management(foo_banks, "BankID", expression, "PYTHON3")
# Determine the angle from the centerline to each border polygon
# Convert centerline features to points (otherwise the Near tool breaks on river spurs)
foo_points_output = "foo_points"
foo_points = arcpy.FeatureToPoint_management(selected_centerline, foo_points_output, "INSIDE")
# Iteratively use Near to determine angle to bank from point
cursor = arcpy.da.SearchCursor(foo_banks, "BankID")
bank_angles = [-99,-99,-99,-99] #BankID, angle, BankID, angle_2
foo2 = 0
foo3 = 1
for row in cursor:
foo = ('"BankID" = {}'.format(row[0]))
temp_select = arcpy.SelectLayerByAttribute_management(foo_banks, 'NEW_SELECTION', foo)
bank_angles[foo2] = row[0]
foo2 = foo2 + 2
#Run Near tool
foo_near = arcpy.Near_analysis(foo_points, temp_select, '100 meters', "", "ANGLE")
cursor2 = arcpy.da.SearchCursor(foo_near, 'NEAR_ANGLE')
for row2 in cursor2:
bank_angles[foo3] = row2[0]
foo3 = foo3 + 2
print("Bank ID 1, Angle 1, Bank ID 2, Angle 2...")
print(bank_angles)
# Determine the dominant aspect within subreach_x
foo_aspect = ZonalStatistics(subreach_x, "OBJECTID", shade_aspect, "MAJORITY")
foo_aspect.save("foo_aspect")
foo_aspect = "foo_aspect"
foo_aspect_value = "foo_aspect_value"
ExtractValuesToPoints(foo_points, foo_aspect, foo_aspect_value)
cursor = arcpy.da.SearchCursor(foo_aspect_value,'RASTERVALU')
aspect_value = -99
for row in cursor:
aspect_value = row[0]
print("overall aspect = {}".format(aspect_value))
# If the (aspect - 180) is within 90 degrees of the border polygon, use that one
# (For example, if the majority aspect is South, use the border polygon to the north).
# Normalize negative Near angles into the 0-360 range:
#   if angle 1 is negative: new angle 1 = 360 + angle 1 (equivalently, 360 - abs(angle 1))
#   else: new angle 1 = original angle 1
if bank_angles[1] <0:
new_angle_1 = bank_angles[1] + 360
else: new_angle_1 = bank_angles[1]
print("new_angle_1 = {}".format(new_angle_1))
if bank_angles[3] <0:
new_angle_2 = bank_angles[3] + 360
else: new_angle_2 = bank_angles[3]
print("new_angle_2 = {}".format(new_angle_2))
#Split the angles into left and right from the perspective of the stream
# Set compass needle to mean direction of stream center (line_azimuth), ex: 319
# the compass offset is the difference between the line azimuth and true north (360 degrees)
compass_offset = 360 - line_azimuth
print("compass_offset = {}".format(compass_offset))
corr_angle_1 = new_angle_1 + compass_offset
if corr_angle_1 >360: corr_angle_1 = corr_angle_1 - 360
side1 = "placeholder"
if corr_angle_1 >= 180: side1 = "LEFT"
else: side1 = "RIGHT"
corr_angle_2 = new_angle_2 + compass_offset
if corr_angle_2 >360: corr_angle_2 = corr_angle_2 - 360
side2 = "placeholder"
if corr_angle_2 >=180: side2 = "LEFT"
else: side2 = "RIGHT"
print("angle 1 corrected to {} degrees if line azimuth is set to 0 degrees.".format(corr_angle_1))
print("bank 1 is on the {} of the stream.".format(side1))
print("angle 2 corrected to {} degrees if line azimuth is set to 0 degrees.".format(corr_angle_2))
print("bank 2 is on the {} of the stream.".format(side2))
# Determine whether the aspect is facing left or right
# (Determine where the shade is coming from)
# (If the aspect is facing N, then the shade is coming from the South)
shade_source = aspect_value - 180
print("original shade source = {}".format(shade_source))
corr_shade_source = shade_source + compass_offset
if corr_shade_source >360: corr_shade_source = corr_shade_source - 360
print("corrected shade source = {} degrees if line azimuth is set to 0 degrees.".format(corr_shade_source))
# If the shade_source is on the left side of compass needle, then "LEFT". Else: "RIGHT"
side_shade = "placeholder"
if corr_shade_source >= 180: side_shade = "LEFT"
else: side_shade = "RIGHT"
print("the shade is coming from the {} bank.".format(side_shade))
selected_bank = -99
if side1 == side_shade: selected_bank = bank_angles[0]
else: selected_bank = bank_angles[2]
print("The bank with BankID {} selected for buffer".format(selected_bank))
# Buffer the selected bank, output = riparian_x
foo = ('"BankID" = {}'.format(selected_bank))
temp_select = arcpy.SelectLayerByAttribute_management(foo_banks, 'NEW_SELECTION', foo)
# But I want a line instead of a polygon, because that gives me better options in the buffer tool. So...
temp_line = "temp_line"
foo_temp_line = arcpy.SelectLayerByLocation_management(foo_clip_lines,'WITHIN A DISTANCE', temp_select, "0.5 meters",'NEW_SELECTION')
arcpy.CopyFeatures_management(foo_temp_line, temp_line)
foo_buffer_2 = "foo_buffer_2"
foobuffer2 = arcpy.Buffer_analysis(temp_line, foo_buffer_2, "30 meters", "LEFT", "FLAT")
#(30-meters distance was chosen to be consistent with the scale of the LAI calculations)
# This is roughly 2x the width of the riparian core zone given in WAC 222-30-021)
# Sometimes, for oddly shaped stream segments that are relatively short, the above buffer tool fails
# because the buffer width is wider than the length of the segment. When that happens, foo_buffer_2
# has a null geometry.
# If foo_buffer_2 has a null geometry, no further steps can be performed and I assign a -99 "error" value
# to subreach_x
#Is foo_buffer_2 null?
error_check_length = int(arcpy.GetCount_management(foo_buffer_2).getOutput(0))
if error_check_length == 0:
# assign error value
arcpy.AddField_management(subreach_x, "RASTERVALU", "DOUBLE")
arcpy.CalculateField_management(subreach_x, "RASTERVALU", -99, "PYTHON3")
print("Failed to process subreach {}; assigned -99 error value".format(puppy))
else:
# Clean up the buffer analysis area; Erase the RiverArea from riparian_x
# (This is necessary even if using a one-sided buffer because of odd shapes. It is necessary to
# confirm that water area is eliminated from consideration).
foo_erase = "foo_erase"
arcpy.Erase_analysis(foo_buffer_2, inundated_area, foo_erase)
LAI_analysis_area = "LAI_analysis_area"
arcpy.Dissolve_management(foo_erase, LAI_analysis_area, "", "", 'SINGLE_PART')
LAI_analysis_centroid_foo = "LAI_analysis_centroid_foo"
arcpy.FeatureToPoint_management(LAI_analysis_area, LAI_analysis_centroid_foo, "INSIDE")
# Use Zonal Statistics to determine the mean LAI modifier from the riparian analysis area
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: <NAME>
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
import datetime
import os
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.files import File
from django.core.files.uploadedfile import UploadedFile
from django.forms.utils import from_current_timezone
from django.urls import reverse
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from django_scopes.forms import SafeModelMultipleChoiceField
from ...base.forms import I18nModelForm, SecretKeySettingsField
# Import for backwards compatibility with old import paths
from ...base.forms.widgets import ( # noqa
DatePickerWidget, SplitDateTimePickerWidget, TimePickerWidget,
)
class TolerantFormsetModelForm(I18nModelForm):
"""
This is equivalent to a normal I18nModelForm, but works around a problem that
arises when the form is used inside a FormSet with can_order=True and django-formset-js
enabled. In this configuration, even empty "extra" forms might have an ORDER value
sent and Django marks the form as empty and raises validation errors because the other
fields have not been filled.
"""
def has_changed(self) -> bool:
"""
Returns True if data differs from initial. Contrary to the default
implementation, the ORDER field is being ignored.
"""
for name, field in self.fields.items():
if name == 'ORDER' or name == 'id':
continue
prefixed_name = self.add_prefix(name)
data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
if not field.show_hidden_initial:
initial_value = self.initial.get(name, field.initial)
if callable(initial_value):
initial_value = initial_value()
else:
initial_prefixed_name = self.add_initial_prefix(name)
hidden_widget = field.hidden_widget()
try:
initial_value = field.to_python(hidden_widget.value_from_datadict(
self.data, self.files, initial_prefixed_name))
except forms.ValidationError:
# Always assume data has changed if validation fails.
self._changed_data.append(name)
continue
# We're using a private API of Django here. This is not nice, but no problem as it seems
# like this will become a public API in future Django.
if field._has_changed(initial_value, data_value):
return True
return False
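# Hedged usage sketch (the model and factory call below are illustrative, not from
# this module): TolerantFormsetModelForm is meant to be passed as the `form` of a
# model formset that uses can_order=True, e.g.
#   ItemFormSet = modelformset_factory(Item, form=TolerantFormsetModelForm,
#                                      can_order=True, extra=2)
# An untouched "extra" form that only received an ORDER value still counts as
# unchanged here, so the formset does not raise spurious "required" errors for it.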
def selector(values, prop):
# Given an iterable of PropertyValue objects, this will return a
# list of their primary keys, ordered by the primary keys of the
# properties they belong to, EXCEPT the value for the property `prop`.
# We'll see later why we need this.
return [
v.id for v in sorted(values, key=lambda v: v.prop.id)
if v.prop.id != prop.id
]
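# Illustrative sketch (hypothetical values, not from pretix): given three values
# v1(id=10, prop.id=1), v2(id=20, prop.id=2), v3(id=30, prop.id=3) and prop with
# id=2, selector([v1, v2, v3], prop) returns [10, 30] -- every value's primary
# key, ordered by its property's primary key, with the value of `prop` left out.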
class ClearableBasenameFileInput(forms.ClearableFileInput):
template_name = 'pretixbase/forms/widgets/thumbnailed_file_input.html'
class FakeFile(File):
def __init__(self, file):
self.file = file
@property
def name(self):
if hasattr(self.file, 'display_name'):
return self.file.display_name
return self.file.name
@property
def is_img(self):
return any(self.file.name.lower().endswith(e) for e in ('.jpg', '.jpeg', '.png', '.gif'))
def __str__(self):
if hasattr(self.file, 'display_name'):
return self.file.display_name
return os.path.basename(self.file.name).split('.', 1)[-1]
@property
def url(self):
return self.file.url
def get_context(self, name, value, attrs):
ctx = super().get_context(name, value, attrs)
ctx['widget']['value'] = self.FakeFile(value)
ctx['widget']['cachedfile'] = None
return ctx
class CachedFileInput(forms.ClearableFileInput):
template_name = 'pretixbase/forms/widgets/thumbnailed_file_input.html'
class FakeFile(File):
def __init__(self, file):
self.file = file
@property
def name(self):
return self.file.filename
@property
def is_img(self):
return False # thumbnailing doesn't work since the file isn't available publicly
def __str__(self):
return self.file.filename
@property
def url(self):
return reverse('cachedfile.download', kwargs={'id': self.file.id})
def value_from_datadict(self, data, files, name):
from ...base.models import CachedFile
v = super().value_from_datadict(data, files, name)
if v is None and data.get(name + '-cachedfile'): # An explicit "[x] clear" would be False, not None
return CachedFile.objects.filter(id=data[name + '-cachedfile']).first()
return v
def get_context(self, name, value, attrs):
from ...base.models import CachedFile
if isinstance(value, CachedFile):
value = self.FakeFile(value)
ctx = super().get_context(name, value, attrs)
ctx['widget']['value'] = value
ctx['widget']['cachedfile'] = value.file if isinstance(value, self.FakeFile) else None
ctx['widget']['hidden_name'] = name + '-cachedfile'
return ctx
class SizeValidationMixin:
def __init__(self, *args, **kwargs):
self.max_size = kwargs.pop("max_size", None)
super().__init__(*args, **kwargs)
@staticmethod
def _sizeof_fmt(num, suffix='B'):
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def clean(self, *args, **kwargs):
data = super().clean(*args, **kwargs)
if isinstance(data, UploadedFile) and self.max_size and data.size > self.max_size:
raise forms.ValidationError(_("Please do not upload files larger than {size}!").format(
size=SizeValidationMixin._sizeof_fmt(self.max_size)
))
return data
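# Comment-only illustration of the size check above (added for clarity):
#   SizeValidationMixin._sizeof_fmt(10 * 1024 * 1024) == "10.0MB"
#   SizeValidationMixin._sizeof_fmt(1536)             == "1.5KB"
# so a field built with max_size=10 * 1024 * 1024 rejects larger uploads with
# "Please do not upload files larger than 10.0MB!".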
class ExtValidationMixin:
def __init__(self, *args, **kwargs):
ext_whitelist = kwargs.pop("ext_whitelist")
self.ext_whitelist = [i.lower() for i in ext_whitelist]
super().__init__(*args, **kwargs)
def clean(self, *args, **kwargs):
data = super().clean(*args, **kwargs)
if isinstance(data, File):
filename = data.name
ext = os.path.splitext(filename)[1]
ext = ext.lower()
if ext not in self.ext_whitelist:
raise forms.ValidationError(_("Filetype not allowed!"))
return data
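# Comment-only note (added for clarity): the whitelist entries are compared against
# os.path.splitext()'s result, so they must include the leading dot and be lowercase,
# e.g. ext_whitelist=[".png", ".jpg"] accepts "photo.PNG" but rejects "doc.pdf".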
class SizeFileField(SizeValidationMixin, forms.FileField):
pass
class ExtFileField(ExtValidationMixin, SizeFileField):
widget = ClearableBasenameFileInput
class CachedFileField(ExtFileField):
widget = CachedFileInput
def to_python(self, data):
from ...base.models import CachedFile
if isinstance(data, CachedFile):
return data
return super().to_python(data)
def bound_data(self, data, initial):
from ...base.models import CachedFile
if isinstance(data, File):
if hasattr(data, '_uploaded_to'):
return data._uploaded_to
cf = CachedFile.objects.create(
expires=now() + datetime.timedelta(days=1),
date=now(),
web_download=True,
filename=data.name,
type=data.content_type,
)
cf.file.save(data.name, data.file)
cf.save()
data._uploaded_to = cf
return cf
return super().bound_data(data, initial)
def clean(self, *args, **kwargs):
from ...base.models import CachedFile
data = super().clean(*args, **kwargs)
if isinstance(data, File):
if hasattr(data, '_uploaded_to'):
return data._uploaded_to
cf = CachedFile.objects.create(
expires=now() + datetime.timedelta(days=1),
web_download=True,
date=now(),
filename=data.name,
type=data.content_type,
)
cf.file.save(data.name, data.file)
cf.save()
data._uploaded_to = cf
return cf
return data
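# Hedged usage sketch (the form class below is illustrative, not from pretix):
#   class UploadForm(forms.Form):
#       attachment = CachedFileField(ext_whitelist=[".pdf"], max_size=1024 * 1024)
# On a valid submission, clean() wraps the upload in a CachedFile that expires after
# one day; if the same request is re-validated, the file object's _uploaded_to marker
# is reused instead of storing a second copy.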
class SlugWidget(forms.TextInput):
template_name = 'pretixcontrol/slug_widget.html'
prefix = ''
def get_context(self, name, value, attrs):
ctx = super().get_context(name, value, attrs)
ctx['pre'] = self.prefix
return ctx
class MultipleLanguagesWidget(forms.CheckboxSelectMultiple):
option_template_name = 'pretixcontrol/multi_languages_widget.html'
def sort(self):
self.choices = sorted(self.choices, key=lambda l: (
(
0 if l[0] in settings.LANGUAGES_OFFICIAL
else (
1 if l[0] not in settings.LANGUAGES_INCUBATING
else 2
)
), str(l[1])
))
def options(self, name, value, attrs=None):
self.sort()
return super().options(name, value, attrs)
def optgroups(self, name, value, attrs=None):
self.sort()
return super().optgroups(name, value, attrs)
def create_option(self, name, value, label, selected, index, subindex=None, attrs=None):
opt = super().create_option(name, value, label, selected, index, subindex, attrs)
opt['official'] = value in settings.LANGUAGES_OFFICIAL
opt['incubating'] = value in settings.LANGUAGES_INCUBATING
return opt
class SingleLanguageWidget(forms.Select):
def modify(self):
if hasattr(self, '_modified'):
return self.choices
self.choices = sorted(self.choices, key=lambda l: (
(
0 if l[0] in settings.LANGUAGES_OFFICIAL
else (
1 if l[0] not in settings.LANGUAGES_INCUBATING
else 2
)
), str(l[1])
))
new_choices = []
for k, v in self.choices:
new_choices.append((
k,
v if k in settings.LANGUAGES_OFFICIAL
else (
'{} (unofficial translation)'.format(v) if k not in settings.LANGUAGES_INCUBATING
else '{} (translation in progress)'.format(v)
)
))
self._modified = True
self.choices = new_choices
def options(self, name, value, attrs=None):
self.modify()
return super().options(name, value, attrs)
def optgroups(self, name, value, attrs=None):
self.modify()
return super().optgroups(name, value, attrs)
class SplitDateTimeField(forms.SplitDateTimeField):
def compress(self, data_list):
# Differs from the default implementation: If only a time is given and no date, we consider the field empty
if data_list:
if data_list[0] in self.empty_values:
return None
if data_list[1] in self.empty_values:
raise ValidationError(self.error_messages['invalid_date'], code='invalid_date')
result = datetime.datetime.combine(*data_list)
return from_current_timezone(result)
return None
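# Added clarification (not part of the original code): after the subfields have
# cleaned their input, data_list is [date, time], so the override above behaves as:
#   - date empty             -> returns None (field treated as empty, even if a time was given)
#   - date given, time empty -> raises the 'invalid_date' ValidationError
#   - both given             -> datetime.datetime.combine(date, time), localized via from_current_timezone()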
class FontSelect(forms.RadioSelect):
option_template_name = 'pretixcontrol/font_option.html'
class SMTPSettingsMixin(forms.Form):
smtp_use_custom = forms.BooleanField(
label=_("Use custom SMTP server"),
help_text=_("All mail related to your event will be sent over the smtp server specified | |
ELSE 0 END AS Unset_No_Basics
FROM (
SELECT participant_id,
hpo_id,
sign_up_time,
consent_for_study_enrollment_time,
enrollment_status_member_time,
enrollment_status_core_stored_sample_time,
participant_origin,
MAX(WhatRaceEthnicity_Hispanic) AS WhatRaceEthnicity_Hispanic,
MAX(WhatRaceEthnicity_Black) AS WhatRaceEthnicity_Black,
MAX(WhatRaceEthnicity_White) AS WhatRaceEthnicity_White,
MAX(WhatRaceEthnicity_AIAN) AS WhatRaceEthnicity_AIAN,
MAX(UNSET) AS UNSET,
MAX(WhatRaceEthnicity_RaceEthnicityNoneOfThese) AS WhatRaceEthnicity_RaceEthnicityNoneOfThese,
MAX(WhatRaceEthnicity_Asian) AS WhatRaceEthnicity_Asian,
MAX(PMI_PreferNotToAnswer) AS PMI_PreferNotToAnswer,
MAX(WhatRaceEthnicity_MENA) AS WhatRaceEthnicity_MENA,
MAX(PMI_Skip) AS PMI_Skip,
MAX(WhatRaceEthnicity_NHPI) AS WhatRaceEthnicity_NHPI,
MAX(questionnaire_on_the_basics) AS HasBasicsSurvey,
COUNT(*) as Number_of_Answer
FROM (
SELECT ps.participant_id,
ps.hpo_id,
ps.sign_up_time,
consent_for_study_enrollment_time,
ps.enrollment_status_member_time,
ps.enrollment_status_core_stored_sample_time,
ps.participant_origin,
ps.questionnaire_on_the_basics,
CASE WHEN q.code_id = {WhatRaceEthnicity_Hispanic} THEN 1 ELSE 0 END AS WhatRaceEthnicity_Hispanic,
CASE WHEN q.code_id = {WhatRaceEthnicity_Black} THEN 1 ELSE 0 END AS WhatRaceEthnicity_Black,
CASE WHEN q.code_id = {WhatRaceEthnicity_White} THEN 1 ELSE 0 END AS WhatRaceEthnicity_White,
CASE WHEN q.code_id = {WhatRaceEthnicity_AIAN} THEN 1 ELSE 0 END AS WhatRaceEthnicity_AIAN,
CASE WHEN q.code_id IS NULL THEN 1 ELSE 0 END AS UNSET,
CASE WHEN q.code_id = {WhatRaceEthnicity_RaceEthnicityNoneOfThese} THEN 1 ELSE 0 END AS WhatRaceEthnicity_RaceEthnicityNoneOfThese,
CASE WHEN q.code_id = {WhatRaceEthnicity_Asian} THEN 1 ELSE 0 END AS WhatRaceEthnicity_Asian,
CASE WHEN q.code_id = {PMI_PreferNotToAnswer} THEN 1 ELSE 0 END AS PMI_PreferNotToAnswer,
CASE WHEN q.code_id = {WhatRaceEthnicity_MENA} THEN 1 ELSE 0 END AS WhatRaceEthnicity_MENA,
CASE WHEN q.code_id = {PMI_Skip} THEN 1 ELSE 0 END AS PMI_Skip,
CASE WHEN q.code_id = {WhatRaceEthnicity_NHPI} THEN 1 ELSE 0 END AS WhatRaceEthnicity_NHPI
FROM {temp_table_name} ps
LEFT JOIN participant_race_answers q ON ps.participant_id = q.participant_id
) x
GROUP BY participant_id, hpo_id, sign_up_time, consent_for_study_enrollment_time, enrollment_status_member_time, enrollment_status_core_stored_sample_time, participant_origin
) p,
calendar, metrics_tmp_participant_origin po
WHERE calendar.day >= :start_date
AND calendar.day <= :end_date
AND calendar.day >= Date(p.sign_up_time)
AND p.participant_origin = po.participant_origin
) y
GROUP BY day, hpo_id, registered, participant, consented, core, participant_origin
;
""".format(cache_type=self.cache_type,
temp_table_name=temp_table_name,
Race_WhatRaceEthnicity=race_code_dict['Race_WhatRaceEthnicity'],
WhatRaceEthnicity_Hispanic=race_code_dict['WhatRaceEthnicity_Hispanic'],
WhatRaceEthnicity_Black=race_code_dict['WhatRaceEthnicity_Black'],
WhatRaceEthnicity_White=race_code_dict['WhatRaceEthnicity_White'],
WhatRaceEthnicity_AIAN=race_code_dict['WhatRaceEthnicity_AIAN'],
WhatRaceEthnicity_RaceEthnicityNoneOfThese=
race_code_dict['WhatRaceEthnicity_RaceEthnicityNoneOfThese'],
WhatRaceEthnicity_Asian=race_code_dict['WhatRaceEthnicity_Asian'],
PMI_PreferNotToAnswer=race_code_dict['PMI_PreferNotToAnswer'],
WhatRaceEthnicity_MENA=race_code_dict['WhatRaceEthnicity_MENA'],
PMI_Skip=race_code_dict['PMI_Skip'],
WhatRaceEthnicity_NHPI=race_code_dict['WhatRaceEthnicity_NHPI'])
return [sql]
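# Added note (not part of the original DAO): the statement above is returned as a
# one-element list and still contains named bind parameters (:start_date, :end_date),
# so a caller is expected to bind them at execution time, roughly:
#
#     for stmt in dao.get_metrics_cache_sql(...):          # hypothetical call
#         session.execute(stmt, {'start_date': start, 'end_date': end})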
class MetricsRegionCacheDao(BaseDao):
def __init__(self, cache_type=MetricsCacheType.METRICS_V2_API, version=None):
super(MetricsRegionCacheDao, self).__init__(MetricsRegionCache)
self.version = version
self.table_name = MetricsRegionCache.__tablename__
try:
self.cache_type = MetricsCacheType(str(cache_type))
except TypeError:
raise TypeError("Invalid metrics cache type")
def get_serving_version_with_session(self, session):
status_dao = MetricsCacheJobStatusDao()
record = status_dao.get_last_complete_data_inserted_time(self.table_name)
if record is not None:
return record
else:
return (session.query(MetricsRegionCache)
.order_by(MetricsRegionCache.dateInserted.desc())
.first())
def get_active_buckets(self, cutoff, stratification, hpo_ids=None, enrollment_statuses=None,
participant_origins=None):
with self.session() as session:
last_inserted_record = self.get_serving_version_with_session(session)
if last_inserted_record is None:
return None
last_inserted_date = last_inserted_record.dateInserted
if self.cache_type == MetricsCacheType.PUBLIC_METRICS_EXPORT_API \
and stratification not in [Stratifications.FULL_AWARDEE, Stratifications.GEO_AWARDEE]:
query = session.query(MetricsRegionCache.date, MetricsRegionCache.stateName,
func.sum(MetricsRegionCache.stateCount).label('total'))
query = query.filter(MetricsRegionCache.dateInserted == last_inserted_date)
query = query.filter(MetricsRegionCache.date == cutoff)
if stratification in [Stratifications.FULL_STATE, Stratifications.FULL_CENSUS,
Stratifications.FULL_AWARDEE]:
query = query.filter(MetricsRegionCache.enrollmentStatus == 'core')
if hpo_ids:
query = query.filter(MetricsRegionCache.hpoId.in_(hpo_ids))
if enrollment_statuses:
status_filter_list = []
for status in enrollment_statuses:
if status == str(EnrollmentStatus.INTERESTED):
status_filter_list.append('registered')
status_filter_list.append('participant')
elif status == str(EnrollmentStatus.MEMBER):
status_filter_list.append('consented')
elif status == str(EnrollmentStatus.FULL_PARTICIPANT):
status_filter_list.append('core')
query = query.filter(MetricsRegionCache.enrollmentStatus.in_(status_filter_list))
return query.group_by(MetricsRegionCache.date, MetricsRegionCache.stateName).all()
else:
if self.version == MetricsAPIVersion.V2:
query = session.query(MetricsRegionCache.date, MetricsRegionCache.hpoName,
MetricsRegionCache.stateName,
func.sum(MetricsRegionCache.stateCount).label('total'))
query = query.filter(MetricsRegionCache.dateInserted == last_inserted_date)
query = query.filter(MetricsRegionCache.date == cutoff)
if stratification in [Stratifications.FULL_STATE, Stratifications.FULL_CENSUS,
Stratifications.FULL_AWARDEE]:
query = query.filter(MetricsRegionCache.enrollmentStatus == 'core')
if hpo_ids:
query = query.filter(MetricsRegionCache.hpoId.in_(hpo_ids))
if participant_origins:
query = query.filter(MetricsRegionCache.participantOrigin.in_(participant_origins))
if enrollment_statuses:
status_filter_list = []
for status in enrollment_statuses:
if status == str(EnrollmentStatusV2.REGISTERED):
status_filter_list.append('registered')
elif status == str(EnrollmentStatusV2.PARTICIPANT):
status_filter_list.append('participant')
elif status == str(EnrollmentStatusV2.FULLY_CONSENTED):
status_filter_list.append('consented')
elif status == str(EnrollmentStatusV2.CORE_PARTICIPANT):
status_filter_list.append('core')
query = query.filter(MetricsRegionCache.enrollmentStatus.in_(status_filter_list))
return query.group_by(MetricsRegionCache.date, MetricsRegionCache.hpoName,
MetricsRegionCache.stateName).all()
else:
query = session.query(MetricsRegionCache.date, MetricsRegionCache.hpoName,
MetricsRegionCache.stateName,
func.sum(MetricsRegionCache.stateCount).label('total'))
query = query.filter(MetricsRegionCache.dateInserted == last_inserted_date)
query = query.filter(MetricsRegionCache.date == cutoff)
if stratification in [Stratifications.FULL_STATE, Stratifications.FULL_CENSUS,
Stratifications.FULL_AWARDEE]:
query = query.filter(MetricsRegionCache.enrollmentStatus == 'core')
if hpo_ids:
query = query.filter(MetricsRegionCache.hpoId.in_(hpo_ids))
if enrollment_statuses:
status_filter_list = []
for status in enrollment_statuses:
if status == str(EnrollmentStatus.INTERESTED):
status_filter_list.append('registered')
status_filter_list.append('participant')
elif status == str(EnrollmentStatus.MEMBER):
status_filter_list.append('consented')
elif status == str(EnrollmentStatus.FULL_PARTICIPANT):
status_filter_list.append('core')
query = query.filter(MetricsRegionCache.enrollmentStatus.in_(status_filter_list))
return query.group_by(MetricsRegionCache.date, MetricsRegionCache.hpoName,
MetricsRegionCache.stateName).all()
def get_latest_version_from_cache(self, cutoff, stratification, hpo_ids=None,
enrollment_statuses=None, participant_origins=None):
stratification = Stratifications(str(stratification))
operation_funcs = {
Stratifications.FULL_STATE: self.to_state_client_json,
Stratifications.FULL_CENSUS: self.to_census_client_json,
Stratifications.FULL_AWARDEE: self.to_awardee_client_json,
Stratifications.GEO_STATE: self.to_state_client_json,
Stratifications.GEO_CENSUS: self.to_census_client_json,
Stratifications.GEO_AWARDEE: self.to_awardee_client_json
}
buckets = self.get_active_buckets(cutoff, stratification, hpo_ids, enrollment_statuses, participant_origins)
if buckets is None:
return []
return operation_funcs[stratification](buckets)
def delete_old_records(self, n_days_ago=7):
with self.session() as session:
last_inserted_record = self.get_serving_version_with_session(session)
if last_inserted_record is not None:
last_date_inserted = last_inserted_record.dateInserted
seven_days_ago = last_date_inserted - datetime.timedelta(days=n_days_ago)
delete_sql = """
delete from metrics_region_cache where date_inserted < :seven_days_ago
"""
params = {'seven_days_ago': seven_days_ago}
session.execute(delete_sql, params)
def remove_prefix(self, text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
def to_state_client_json(self, result_set):
client_json = []
if self.cache_type == MetricsCacheType.PUBLIC_METRICS_EXPORT_API:
for record in result_set:
state_name = self.remove_prefix(record.stateName, 'PIIState_')
if state_name not in census_regions:
continue
is_exist = False
for item in client_json:
if item['date'] == record.date.isoformat():
item['metrics'][state_name] = int(record.total)
is_exist = True
break
if not is_exist:
metrics = {stateName: 0 for stateName in census_regions.keys()}
new_item = {
'date': record.date.isoformat(),
'metrics': metrics
}
new_item['metrics'][state_name] = int(record.total)
client_json.append(new_item)
else:
for record in result_set:
state_name = self.remove_prefix(record.stateName, 'PIIState_')
if state_name not in census_regions:
continue
is_exist = False
for item in client_json:
if item['date'] == record.date.isoformat() and item['hpo'] == record.hpoName:
item['metrics'][state_name] = int(record.total)
is_exist = True
break
if not is_exist:
metrics = {stateName: 0 for stateName in census_regions.keys()}
new_item = {
'date': record.date.isoformat(),
'hpo': record.hpoName,
'metrics': metrics
}
new_item['metrics'][state_name] = int(record.total)
client_json.append(new_item)
return client_json
def to_census_client_json(self, result_set):
client_json = []
if self.cache_type == MetricsCacheType.PUBLIC_METRICS_EXPORT_API:
for record in result_set:
state_name = self.remove_prefix(record.stateName, 'PIIState_')
if state_name in census_regions:
census_name = census_regions[state_name]
else:
continue
is_exist = False
for item in client_json:
if item['date'] == record.date.isoformat():
item['metrics'][census_name] += int(record.total)
is_exist = True
break
if not is_exist:
new_item = {
'date': record.date.isoformat(),
'metrics': {
'NORTHEAST': 0,
'MIDWEST': 0,
'SOUTH': 0,
'WEST': 0
}
}
new_item['metrics'][census_name] = int(record.total)
client_json.append(new_item)
else:
for record in result_set:
state_name = self.remove_prefix(record.stateName, 'PIIState_')
if state_name in census_regions:
census_name = census_regions[state_name]
else:
continue
is_exist = False
for item in client_json:
if item['date'] == record.date.isoformat() and item['hpo'] == record.hpoName:
item['metrics'][census_name] += int(record.total)
is_exist = True
break
if not is_exist:
new_item = {
'date': record.date.isoformat(),
'hpo': record.hpoName,
'metrics': {
'NORTHEAST': 0,
'MIDWEST': 0,
'SOUTH': 0,
'WEST': 0
}
}
new_item['metrics'][census_name] = int(record.total)
client_json.append(new_item)
return client_json
def to_awardee_client_json(self, result_set):
client_json = []
for record in result_set:
is_exist = False
for item in client_json:
if item['date'] == record.date.isoformat() and item['hpo'] == record.hpoName:
item['count'] += int(record.total)
is_exist = True
break
if not is_exist:
new_item = {
'date': record.date.isoformat(),
'hpo': record.hpoName,
'count': int(record.total)
}
client_json.append(new_item)
return client_json
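# Added summary (illustrative; not part of the original code) of the shapes produced
# by the three converters above, using made-up dates, HPO names and counts:
#   to_state_client_json   -> [{'date': '2020-01-01', 'hpo': 'HPO_A', 'metrics': {'CA': 12, ...}}]
#                             ('hpo' is omitted for the PUBLIC_METRICS_EXPORT_API cache type)
#   to_census_client_json  -> [{'date': '2020-01-01', 'hpo': 'HPO_A',
#                               'metrics': {'NORTHEAST': 0, 'MIDWEST': 3, 'SOUTH': 1, 'WEST': 8}}]
#   to_awardee_client_json -> [{'date': '2020-01-01', 'hpo': 'HPO_A', 'count': 24}]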
def get_metrics_cache_sql(self, hpo_id):
temp_table_name = TEMP_TABLE_PREFIX + str(hpo_id)
sql = """
INSERT INTO metrics_region_cache
SELECT
:date_inserted AS date_inserted,
'core' as enrollment_status,
:hpo_id AS hpo_id,
(SELECT name FROM hpo WHERE hpo_id=:hpo_id) AS hpo_name,
c.day,
IFNULL(ps.value,'UNSET') AS state_name,
count(ps.participant_id) AS state_count,
ps.participant_origin
FROM
(
SELECT participant_id, participant_origin, hpo_id, value, enrollment_status_core_stored_sample_time
FROM {temp_table_name}, code WHERE state_id=code_id
) ps,
metrics_tmp_participant_origin po,
calendar c
WHERE ps.participant_origin = po.participant_origin
AND ps.enrollment_status_core_stored_sample_time IS NOT NULL
AND DATE(ps.enrollment_status_core_stored_sample_time) <= c.day
AND c.day BETWEEN :start_date AND :end_date
GROUP BY c.day, ps.hpo_id, ps.value, ps.participant_origin
UNION ALL
SELECT
:date_inserted AS date_inserted,
'registered' as enrollment_status,
:hpo_id AS hpo_id,
(SELECT name FROM hpo WHERE hpo_id=:hpo_id) AS hpo_name,
c.day,
IFNULL(ps.value,'UNSET') AS state_name,
count(ps.participant_id) AS state_count,
ps.participant_origin
FROM
(
SELECT participant_id, participant_origin, hpo_id, value, sign_up_time, consent_for_study_enrollment_time
FROM {temp_table_name}, code WHERE state_id=code_id
) ps,
metrics_tmp_participant_origin po,
calendar c
WHERE ps.participant_origin = po.participant_origin
AND ps.sign_up_time IS NOT NULL
AND DATE(ps.sign_up_time) <= c.day
AND (ps.consent_for_study_enrollment_time IS NULL OR DATE(ps.consent_for_study_enrollment_time)>c.day)
AND c.day BETWEEN :start_date AND :end_date
GROUP BY c.day, ps.hpo_id, ps.value, ps.participant_origin
UNION ALL
SELECT
:date_inserted AS date_inserted,
'participant' as enrollment_status,
:hpo_id AS hpo_id,
(SELECT name FROM hpo WHERE hpo_id=:hpo_id) AS hpo_name,
c.day,
IFNULL(ps.value,'UNSET') AS state_name,
count(ps.participant_id) AS state_count,
ps.participant_origin
FROM
(
SELECT participant_id, participant_origin, hpo_id, value, consent_for_study_enrollment_time, enrollment_status_member_time
FROM {temp_table_name}, code WHERE state_id=code_id
) ps,
metrics_tmp_participant_origin po,
calendar c
WHERE ps.participant_origin = po.participant_origin
AND ps.consent_for_study_enrollment_time IS NOT NULL
AND DATE(ps.consent_for_study_enrollment_time) <= c.day
AND (ps.enrollment_status_member_time is null or DATE(ps.enrollment_status_member_time)>c.day)
AND c.day BETWEEN :start_date AND :end_date
GROUP BY c.day, ps.hpo_id, ps.value, ps.participant_origin
UNION ALL
SELECT
:date_inserted AS date_inserted,
'consented' as enrollment_status,
:hpo_id AS hpo_id,
(SELECT name FROM hpo WHERE hpo_id=:hpo_id) AS hpo_name,
c.day,
IFNULL(ps.value,'UNSET') AS state_name,
count(ps.participant_id) AS state_count,
ps.participant_origin
FROM
(
SELECT participant_id, participant_origin, hpo_id, value, enrollment_status_member_time, enrollment_status_core_stored_sample_time
FROM {temp_table_name}, code WHERE state_id=code_id
) ps,
metrics_tmp_participant_origin po,
calendar c
WHERE ps.participant_origin = po.participant_origin
AND ps.enrollment_status_member_time IS NOT NULL
AND DATE(ps.enrollment_status_member_time) <= c.day
AND (ps.enrollment_status_core_stored_sample_time is null or DATE(ps.enrollment_status_core_stored_sample_time)>c.day)
AND c.day BETWEEN :start_date AND :end_date
GROUP BY c.day, ps.hpo_id, ps.value, ps.participant_origin
;
""".format(temp_table_name=temp_table_name)
| |
"""Define tests for v3 System objects."""
# pylint: disable=protected-access,too-many-arguments,unused-argument
from datetime import datetime, timedelta
import logging
import aiohttp
import pytest
import pytz
from simplipy import API
from simplipy.errors import (
EndpointUnavailableError,
InvalidCredentialsError,
PinError,
RequestError,
SimplipyError,
)
from simplipy.system import SystemStates
from simplipy.system.v3 import Volume
from tests.common import (
TEST_AUTHORIZATION_CODE,
TEST_CODE_VERIFIER,
TEST_SUBSCRIPTION_ID,
TEST_SYSTEM_ID,
TEST_SYSTEM_SERIAL_NO,
TEST_USER_ID,
)
@pytest.mark.asyncio
async def test_as_dict(aresponses, v3_server):
"""Test dumping the system as a dict."""
async with aiohttp.ClientSession() as session:
simplisafe = await API.async_from_auth(
TEST_AUTHORIZATION_CODE, TEST_CODE_VERIFIER, session=session
)
systems = await simplisafe.async_get_systems()
system = systems[TEST_SYSTEM_ID]
assert system.as_dict() == {
"address": "1234 Main Street",
"alarm_going_off": False,
"connection_type": "wifi",
"notifications": [
{
"notification_id": "xxxxxxxxxxxxxxxxxxxxxxxx",
"text": "Power Outage - Backup battery in use.",
"category": "error",
"code": "2000",
"timestamp": 1581823228,
"link": "http://link.to.info",
"link_label": "More Info",
}
],
"serial": "1234ABCD",
"state": 10,
"system_id": 12345,
"temperature": 67,
"version": 3,
"sensors": [
{
"name": "Fire Door",
"serial": "825",
"type": 5,
"error": False,
"low_battery": False,
"offline": False,
"settings": {
"instantTrigger": False,
"away2": 1,
"away": 1,
"home2": 1,
"home": 1,
"off": 0,
},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Front Door",
"serial": "14",
"type": 5,
"error": False,
"low_battery": False,
"offline": False,
"settings": {
"instantTrigger": False,
"away2": 1,
"away": 1,
"home2": 1,
"home": 1,
"off": 0,
},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Patio Door",
"serial": "185",
"type": 5,
"error": False,
"low_battery": False,
"offline": False,
"settings": {
"instantTrigger": True,
"away2": 1,
"away": 1,
"home2": 1,
"home": 1,
"off": 0,
},
"trigger_instantly": True,
"triggered": False,
},
{
"name": "Basement",
"serial": "236",
"type": 13,
"error": False,
"low_battery": False,
"offline": False,
"settings": {
"alarmVolume": 3,
"doorChime": 0,
"exitBeeps": 0,
"entryBeeps": 2,
},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Front Door",
"serial": "789",
"type": 3,
"error": False,
"low_battery": False,
"offline": False,
"settings": {"alarm": 1},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Master BR",
"serial": "822",
"type": 3,
"error": False,
"low_battery": False,
"offline": False,
"settings": {"alarm": 1},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Kitchen",
"serial": "972",
"type": 1,
"error": False,
"low_battery": False,
"offline": False,
"settings": {"lowPowerMode": False, "alarm": 1},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Upstairs",
"serial": "93",
"type": 8,
"error": False,
"low_battery": False,
"offline": False,
"settings": {},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Downstairs",
"serial": "650",
"type": 8,
"error": False,
"low_battery": False,
"offline": False,
"settings": {},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "<NAME>",
"serial": "491",
"type": 6,
"error": False,
"low_battery": False,
"offline": False,
"settings": {
"instantTrigger": False,
"away2": 1,
"away": 1,
"home2": 1,
"home": 1,
"off": 0,
},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "<NAME>",
"serial": "280",
"type": 6,
"error": False,
"low_battery": False,
"offline": False,
"settings": {
"instantTrigger": False,
"away2": 1,
"away": 1,
"home2": 1,
"home": 1,
"off": 0,
},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Basement S",
"serial": "430",
"type": 6,
"error": False,
"low_battery": False,
"offline": False,
"settings": {
"instantTrigger": False,
"away2": 1,
"away": 1,
"home2": 1,
"home": 1,
"off": 0,
},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Laundry",
"serial": "129",
"type": 9,
"error": False,
"low_battery": False,
"offline": False,
"settings": {"alarm": 1},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Basement",
"serial": "975",
"type": 9,
"error": False,
"low_battery": False,
"offline": False,
"settings": {"alarm": 1},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Fridge",
"serial": "382",
"type": 9,
"error": False,
"low_battery": False,
"offline": False,
"settings": {"alarm": 1},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Basement",
"serial": "320",
"type": 10,
"error": False,
"low_battery": False,
"offline": False,
"settings": {"highTemp": 95, "lowTemp": 41, "alarm": 1},
"trigger_instantly": False,
"triggered": False,
"temperature": 67,
},
{
"name": "Upstairs",
"serial": "785",
"type": 4,
"error": False,
"low_battery": False,
"offline": False,
"settings": {
"instantTrigger": False,
"away2": 1,
"away": 1,
"home2": 0,
"home": 0,
"off": 0,
},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Downstairs",
"serial": "934",
"type": 4,
"error": False,
"low_battery": False,
"offline": False,
"settings": {
"instantTrigger": False,
"away2": 1,
"away": 1,
"home2": 0,
"home": 0,
"off": 0,
},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Landing",
"serial": "634",
"type": 6,
"error": False,
"low_battery": False,
"offline": False,
"settings": {
"instantTrigger": False,
"away2": 1,
"away": 1,
"home2": 1,
"home": 1,
"off": 0,
},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Living Room",
"serial": "801",
"type": 6,
"error": False,
"low_battery": False,
"offline": False,
"settings": {
"instantTrigger": False,
"away2": 1,
"away": 1,
"home2": 1,
"home": 1,
"off": 0,
},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Eating Area",
"serial": "946",
"type": 6,
"error": False,
"low_battery": False,
"offline": False,
"settings": {
"instantTrigger": False,
"away2": 1,
"away": 1,
"home2": 1,
"home": 1,
"off": 0,
},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Front Door",
"serial": "987a",
"type": 253,
"error": False,
"low_battery": False,
"offline": False,
"settings": {},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Front Door",
"serial": "654a",
"type": 253,
"error": False,
"low_battery": False,
"offline": False,
"settings": {},
"trigger_instantly": False,
"triggered": False,
},
{
"name": "Front Door",
"serial": "321a",
"type": 253,
"error": False,
"low_battery": False,
"offline": False,
"settings": {},
"trigger_instantly": False,
"triggered": False,
},
],
"alarm_duration": 240,
"alarm_volume": 3,
"battery_backup_power_level": 5293,
"cameras": [
{
"camera_settings": {
"cameraName": "Camera",
"pictureQuality": "720p",
"nightVision": "auto",
"statusLight": "off",
"micSensitivity": 100,
"micEnable": True,
"speakerVolume": 75,
"motionSensitivity": 0,
"shutterHome": "closedAlarmOnly",
"shutterAway": "open",
"shutterOff": "closedAlarmOnly",
"wifiSsid": "",
"canStream": False,
"canRecord": False,
"pirEnable": True,
"vaEnable": True,
"notificationsEnable": False,
"enableDoorbellNotification": True,
"doorbellChimeVolume": "off",
"privacyEnable": False,
"hdr": False,
"vaZoningEnable": False,
"vaZoningRows": 0,
"vaZoningCols": 0,
"vaZoningMask": [],
"maxDigitalZoom": 10,
"supportedResolutions": ["480p", "720p"],
"admin": {
"IRLED": 0,
"pirSens": 0,
"statusLEDState": 1,
"lux": "lowLux",
"motionDetectionEnabled": False,
"motionThresholdZero": 0,
"motionThresholdOne": 10000,
"levelChangeDelayZero": 30,
"levelChangeDelayOne": 10,
"audioDetectionEnabled": False,
"audioChannelNum": 2,
"audioSampleRate": 16000,
"audioChunkBytes": 2048,
"audioSampleFormat": 3,
"audioSensitivity": 50,
"audioThreshold": 50,
"audioDirection": 0,
"bitRate": 284,
"longPress": 2000,
"kframe": 1,
"gopLength": 40,
"idr": 1,
"fps": 20,
"firmwareVersion": "2.6.1.107",
"netConfigVersion": "",
"camAgentVersion": "",
"lastLogin": 1600639997,
"lastLogout": 1600639944,
"pirSampleRateMs": 800,
"pirHysteresisHigh": 2,
"pirHysteresisLow": 10,
"pirFilterCoefficient": 1,
"logEnabled": True,
"logLevel": 3,
"logQDepth": 20,
"firmwareGroup": "public",
"irOpenThreshold": 445,
"irCloseThreshold": 840,
"irOpenDelay": 3,
"irCloseDelay": 3,
"irThreshold1x": 388,
"irThreshold2x": 335,
"irThreshold3x": 260,
"rssi": [[1600935204, -43]],
"battery": [],
"dbm": 0,
"vmUse": 161592,
"resSet": 10540,
"uptime": 810043.74,
"wifiDisconnects": 1,
"wifiDriverReloads": 1,
"statsPeriod": 3600000,
"sarlaccDebugLogTypes": 0,
"odProcessingFps": 8,
"odObjectMinWidthPercent": 6,
"odObjectMinHeightPercent": 24,
"odEnableObjectDetection": True,
"odClassificationMask": 2,
"odClassificationConfidenceThreshold": 0.95,
"odEnableOverlay": False,
"odAnalyticsLib": 2,
"odSensitivity": 85,
"odEventObjectMask": 2,
"odLuxThreshold": 445,
"odLuxHysteresisHigh": 4,
"odLuxHysteresisLow": 4,
"odLuxSamplingFrequency": 30,
"odFGExtractorMode": 2,
"odVideoScaleFactor": 1,
"odSceneType": 1,
"odCameraView": 3,
"odCameraFOV": 2,
"odBackgroundLearnStationary": True,
"odBackgroundLearnStationarySpeed": 15,
"odClassifierQualityProfile": 1,
"odEnableVideoAnalyticsWhileStreaming": False,
"wlanMac": "XX:XX:XX:XX:XX:XX",
"region": "us-east-1",
"enableWifiAnalyticsLib": False,
"ivLicense": "",
},
"pirLevel": "medium",
"odLevel": "medium",
},
"camera_type": 0,
"name": "Camera",
"serial": "1234567890",
"shutter_open_when_away": True,
"shutter_open_when_home": False,
"shutter_open_when_off": False,
"status": "online",
"subscription_enabled": True,
},
{
"camera_settings": {
"cameraName": "Doorbell",
"pictureQuality": "720p",
"nightVision": "auto",
"statusLight": "off",
"micSensitivity": 100,
"micEnable": True,
"speakerVolume": 75,
"motionSensitivity": 0,
"shutterHome": "closedAlarmOnly",
"shutterAway": "open",
"shutterOff": "closedAlarmOnly",
"wifiSsid": "",
"canStream": False,
"canRecord": False,
"pirEnable": True,
"vaEnable": True,
"notificationsEnable": False,
"enableDoorbellNotification": True,
"doorbellChimeVolume": "off",
"privacyEnable": False,
"hdr": False,
"vaZoningEnable": False,
"vaZoningRows": 0,
"vaZoningCols": 0,
"vaZoningMask": [],
"maxDigitalZoom": 10,
"supportedResolutions": ["480p", "720p"],
"admin": {
"IRLED": 0,
"pirSens": 0,
"statusLEDState": 1,
"lux": "lowLux",
"motionDetectionEnabled": False,
"motionThresholdZero": 0,
"motionThresholdOne": 10000,
"levelChangeDelayZero": 30,
"levelChangeDelayOne": 10,
"audioDetectionEnabled": False,
"audioChannelNum": 2,
"audioSampleRate": 16000,
"audioChunkBytes": 2048,
"audioSampleFormat": 3,
"audioSensitivity": 50,
"audioThreshold": 50,
"audioDirection": 0,
"bitRate": 284,
"longPress": 2000,
"kframe": 1,
"gopLength": 40,
"idr": 1,
"fps": 20,
"firmwareVersion": "2.6.1.107",
"netConfigVersion": "",
"camAgentVersion": "",
"lastLogin": 1600639997,
"lastLogout": 1600639944,
"pirSampleRateMs": 800,
"pirHysteresisHigh": 2,
"pirHysteresisLow": 10,
"pirFilterCoefficient": 1,
"logEnabled": True,
"logLevel": 3,
"logQDepth": 20,
"firmwareGroup": "public",
"irOpenThreshold": 445,
"irCloseThreshold": 840,
"irOpenDelay": 3,
"irCloseDelay": 3,
"irThreshold1x": 388,
"irThreshold2x": 335,
"irThreshold3x": 260,
"rssi": [[1600935204, -43]],
"battery": [],
"dbm": 0,
"vmUse": 161592,
"resSet": 10540,
"uptime": 810043.74,
"wifiDisconnects": 1,
"wifiDriverReloads": 1,
"statsPeriod": 3600000,
"sarlaccDebugLogTypes": 0,
"odProcessingFps": 8,
"odObjectMinWidthPercent": 6,
"odObjectMinHeightPercent": 24,
"odEnableObjectDetection": True,
"odClassificationMask": 2,
"odClassificationConfidenceThreshold": 0.95,
"odEnableOverlay": False,
"odAnalyticsLib": 2,
"odSensitivity": 85,
"odEventObjectMask": 2,
"odLuxThreshold": 445,
"odLuxHysteresisHigh": 4,
"odLuxHysteresisLow": 4,
"odLuxSamplingFrequency": 30,
"odFGExtractorMode": 2,
"odVideoScaleFactor": 1,
"odSceneType": 1,
"odCameraView": 3,
"odCameraFOV": 2,
"odBackgroundLearnStationary": True,
"odBackgroundLearnStationarySpeed": 15,
"odClassifierQualityProfile": 1,
"odEnableVideoAnalyticsWhileStreaming": False,
"wlanMac": "XX:XX:XX:XX:XX:XX",
"region": "us-east-1",
"enableWifiAnalyticsLib": False,
"ivLicense": "",
},
"pirLevel": "medium",
"odLevel": "medium",
},
"camera_type": 1,
"name": "Doorbell",
"serial": "1234567892",
"shutter_open_when_away": True,
"shutter_open_when_home": False,
"shutter_open_when_off": False,
"status": "online",
"subscription_enabled": True,
},
{
"camera_settings": {
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 <NAME> <<EMAIL>>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
This module contains various general utility functions.
"""
from __future__ import with_statement
import logging
import warnings
logger = logging.getLogger(__name__)
try:
from html.entities import name2codepoint as n2cp
except ImportError:
from htmlentitydefs import name2codepoint as n2cp
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
import re
import unicodedata
import os
import random
import itertools
import tempfile
from functools import wraps # for `synchronous` function lock
import multiprocessing
import shutil
import sys
from contextlib import contextmanager
import subprocess
import numpy as np
import numbers
import scipy.sparse
if sys.version_info[0] >= 3:
unicode = str
from six import iterkeys, iteritems, u, string_types, unichr
from six.moves import xrange
try:
from smart_open import smart_open
except ImportError:
logger.info("smart_open library not found; falling back to local-filesystem-only")
def make_closing(base, **attrs):
"""
Add support for `with Base(attrs) as fout:` to the base class if it's missing.
The base class' `close()` method will be called on context exit, to always close the file properly.
This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6), which otherwise
raise "AttributeError: GzipFile instance has no attribute '__exit__'".
"""
if not hasattr(base, '__enter__'):
attrs['__enter__'] = lambda self: self
if not hasattr(base, '__exit__'):
attrs['__exit__'] = lambda self, type, value, traceback: self.close()
return type('Closing' + base.__name__, (base, object), attrs)
def smart_open(fname, mode='rb'):
_, ext = os.path.splitext(fname)
if ext == '.bz2':
from bz2 import BZ2File
return make_closing(BZ2File)(fname, mode)
if ext == '.gz':
from gzip import GzipFile
return make_closing(GzipFile)(fname, mode)
return open(fname, mode)
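# Added usage note (not part of the original gensim module): whether the smart_open
# library or the local fallback above is in use, smart_open() transparently handles
# plain, .gz and .bz2 files. The path below is hypothetical.
#
#     with smart_open('/tmp/corpus.txt.gz', 'rb') as fin:
#         first_line = fin.readline()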
PAT_ALPHABETIC = re.compile(r'(((?![\d])\w)+)', re.UNICODE)
RE_HTML_ENTITY = re.compile(r'&(#?)([xX]?)(\w{1,8});', re.UNICODE)
def get_random_state(seed):
"""
Turn seed into a np.random.RandomState instance.
Method originally from maciejkula/glove-python, and written by @joshloyal.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a np.random.RandomState instance' % seed)
def synchronous(tlockname):
"""
A decorator to place an instance-based lock around a method.
Adapted from http://code.activestate.com/recipes/577105-synchronization-decorator-for-class-methods/
"""
def _synched(func):
@wraps(func)
def _synchronizer(self, *args, **kwargs):
tlock = getattr(self, tlockname)
logger.debug("acquiring lock %r for %s" % (tlockname, func.__name__))
with tlock: # use lock as a context manager to perform safe acquire/release pairs
logger.debug("acquired lock %r for %s" % (tlockname, func.__name__))
result = func(self, *args, **kwargs)
logger.debug("releasing lock %r for %s" % (tlockname, func.__name__))
return result
return _synchronizer
return _synched
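# Illustrative usage sketch (added; not part of the original gensim module). The
# decorator looks up the lock attribute named by its argument on the instance and
# wraps the method call in `with lock:`. _SynchronizedCounter is a hypothetical class.
import threading  # used only by the example below


class _SynchronizedCounter(object):
    def __init__(self):
        self.tlock = threading.Lock()
        self.calls = 0

    @synchronous('tlock')
    def bump(self):
        self.calls += 1
        return self.calls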
class NoCM(object):
def acquire(self):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
nocm = NoCM()
@contextmanager
def file_or_filename(input):
"""
Return a file-like object ready to be read from the beginning. `input` is either
a filename (gz/bz2 also supported) or a file-like object supporting seek.
"""
if isinstance(input, string_types):
# input was a filename: open as file
yield smart_open(input)
else:
# input already a file-like object; just reset to the beginning
input.seek(0)
yield input
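# Added usage note (not part of the original module): callers can pass either a path
# (opened via smart_open, so .gz/.bz2 work too) or an already-open file-like object,
# which is simply rewound. The values below are hypothetical.
#
#     with file_or_filename('/tmp/corpus.txt.bz2') as fin:
#         data = fin.read()
#
#     buf = io.BytesIO(b'in-memory data')    # assumes `import io`
#     with file_or_filename(buf) as fin:
#         data = fin.read()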
def deaccent(text):
"""
Remove accentuation from the given string. Input text is either a unicode string or utf8 encoded bytestring.
Return input string with accents removed, as unicode.
>>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
u'Sef chomutovskych komunistu dostal postou bily prasek'
"""
if not isinstance(text, unicode):
# assume utf8 for byte strings, use default (strict) error handling
text = text.decode('utf8')
norm = unicodedata.normalize("NFD", text)
result = u('').join(ch for ch in norm if unicodedata.category(ch) != 'Mn')
return unicodedata.normalize("NFC", result)
def copytree_hardlink(source, dest):
"""
Recursively copy a directory a la shutil.copytree, but hardlink files
instead of copying them. Available on UNIX systems only.
"""
copy2 = shutil.copy2
try:
shutil.copy2 = os.link
shutil.copytree(source, dest)
finally:
shutil.copy2 = copy2
def tokenize(text, lowercase=False, deacc=False, encoding='utf8', errors="strict", to_lower=False,
lower=False):
"""
Iteratively yield tokens as unicode strings, removing accent marks
and optionally lowercasing the unicode string by assigning True
to one of the parameters, lowercase, to_lower, or lower.
Input text may be either unicode or utf8-encoded byte string.
The tokens on output are maximal contiguous sequences of alphabetic
characters (no digits!).
>>> list(tokenize('Nic nemůže letět rychlostí vyšší, než 300 tisíc kilometrů za sekundu!', deacc = True))
[u'Nic', u'nemuze', u'letet', u'rychlosti', u'vyssi', u'nez', u'tisic', u'kilometru', u'za', u'sekundu']
"""
lowercase = lowercase or to_lower or lower
text = to_unicode(text, encoding, errors=errors)
if lowercase:
text = text.lower()
if deacc:
text = deaccent(text)
return simple_tokenize(text)
def simple_tokenize(text):
for match in PAT_ALPHABETIC.finditer(text):
yield match.group()
def simple_preprocess(doc, deacc=False, min_len=2, max_len=15):
"""
Convert a document into a list of tokens.
This lowercases, tokenizes and optionally de-accents the text. The output is a
list of final tokens (unicode strings) that won't be processed any further.
"""
tokens = [
token for token in tokenize(doc, lower=True, deacc=deacc, errors='ignore')
if min_len <= len(token) <= max_len and not token.startswith('_')
]
return tokens
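# Added example (illustrative, not original): with the defaults, simple_preprocess
# lowercases, tokenizes and drops tokens shorter than 2 or longer than 15 characters:
#
#     simple_preprocess('A quick brown Fox!')  ->  ['quick', 'brown', 'fox']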
def any2utf8(text, errors='strict', encoding='utf8'):
"""Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8."""
if isinstance(text, unicode):
return text.encode('utf8')
# do bytestring -> unicode -> utf8 full circle, to ensure valid utf8
return unicode(text, encoding, errors=errors).encode('utf8')
to_utf8 = any2utf8
def any2unicode(text, encoding='utf8', errors='strict'):
"""Convert a string (bytestring in `encoding` or unicode), to unicode."""
if isinstance(text, unicode):
return text
return unicode(text, encoding, errors=errors)
to_unicode = any2unicode
def call_on_class_only(*args, **kwargs):
"""Raise exception when load methods are called on instance"""
raise AttributeError('This method should be called on a class object.')
class SaveLoad(object):
"""
Objects which inherit from this class have save/load functions, which un/pickle
them to disk.
This uses pickle for de/serializing, so objects must not contain
unpicklable attributes, such as lambda functions etc.
"""
@classmethod
def load(cls, fname, mmap=None):
"""
Load a previously saved object from file (also see `save`).
If the object was saved with large arrays stored separately, you can load
these arrays via mmap (shared memory) using `mmap='r'`. Default: don't use
mmap, load large arrays as normal objects.
If the file being loaded is compressed (either '.gz' or '.bz2'), then
`mmap=None` must be set. Load will raise an `IOError` if this condition
is encountered.
"""
logger.info("loading %s object from %s" % (cls.__name__, fname))
compress, subname = SaveLoad._adapt_by_suffix(fname)
obj = unpickle(fname)
obj._load_specials(fname, mmap, compress, subname)
logger.info("loaded %s", fname)
return obj
def _load_specials(self, fname, mmap, compress, subname):
"""
Loads any attributes that were stored specially, and gives the same
opportunity to recursively included SaveLoad instances.
"""
mmap_error = lambda x, y: IOError(
'Cannot mmap compressed object %s in file %s. ' % (x, y) +
'Use `load(fname, mmap=None)` or uncompress files manually.')
for attrib in getattr(self, '__recursive_saveloads', []):
cfname = '.'.join((fname, attrib))
logger.info("loading %s recursively from %s.* with mmap=%s" % (
attrib, cfname, mmap))
getattr(self, attrib)._load_specials(cfname, mmap, compress, subname)
for attrib in getattr(self, '__numpys', []):
logger.info("loading %s from %s with mmap=%s" % (
attrib, subname(fname, attrib), mmap))
if compress:
if mmap:
raise mmap_error(attrib, subname(fname, attrib))
val = np.load(subname(fname, attrib))['val']
else:
val = np.load(subname(fname, attrib), mmap_mode=mmap)
setattr(self, attrib, val)
for attrib in getattr(self, '__scipys', []):
logger.info("loading %s from %s with mmap=%s" % (
attrib, subname(fname, attrib), mmap))
sparse = unpickle(subname(fname, attrib))
if compress:
if mmap:
raise mmap_error(attrib, subname(fname, attrib))
with np.load(subname(fname, attrib, 'sparse')) as f:
sparse.data = f['data']
sparse.indptr = f['indptr']
sparse.indices = f['indices']
else:
sparse.data = np.load(subname(fname, attrib, 'data'), mmap_mode=mmap)
sparse.indptr = np.load(subname(fname, attrib, 'indptr'), mmap_mode=mmap)
sparse.indices = np.load(subname(fname, attrib, 'indices'), mmap_mode=mmap)
setattr(self, attrib, sparse)
for attrib in getattr(self, '__ignoreds', []):
logger.info("setting ignored attribute %s to None" % (attrib))
setattr(self, attrib, None)
@staticmethod
def _adapt_by_suffix(fname):
"""Give appropriate compress setting and filename formula"""
if fname.endswith('.gz') or fname.endswith('.bz2'):
compress = True
subname = lambda *args: '.'.join(list(args) + ['npz'])
else:
compress = False
subname = lambda *args: '.'.join(list(args) + ['npy'])
return (compress, subname)
def _smart_save(self, fname, separately=None, sep_limit=10 * 1024**2,
ignore=frozenset(), pickle_protocol=2):
"""
Save the object to file (also see `load`).
If `separately` is None, automatically detect large
numpy/scipy.sparse arrays in the object being stored, and store
them into separate files. This avoids pickle memory errors and
allows mmap'ing large arrays back on load efficiently.
You can also set `separately` manually, in which case it must be
a list of attribute names to be stored in separate files. The
automatic check is not performed in this case.
`ignore` is a set of attribute names to *not* serialize (file
handles, caches etc). On subsequent load() these attributes will
be set to None.
| |
# coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class SharedBinary(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'altered': 'bool',
'cve_count': 'int',
'deps': 'list[str]',
'function_layer': 'str',
'md5': 'str',
'missing_pkg': 'bool',
'name': 'str',
'path': 'str',
'pkg_root_dir': 'str',
'services': 'list[str]',
'version': 'str'
}
attribute_map = {
'altered': 'altered',
'cve_count': 'cveCount',
'deps': 'deps',
'function_layer': 'functionLayer',
'md5': 'md5',
'missing_pkg': 'missingPkg',
'name': 'name',
'path': 'path',
'pkg_root_dir': 'pkgRootDir',
'services': 'services',
'version': 'version'
}
def __init__(self, altered=None, cve_count=None, deps=None, function_layer=None, md5=None, missing_pkg=None, name=None, path=None, pkg_root_dir=None, services=None, version=None, local_vars_configuration=None): # noqa: E501
"""SharedBinary - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._altered = None
self._cve_count = None
self._deps = None
self._function_layer = None
self._md5 = None
self._missing_pkg = None
self._name = None
self._path = None
self._pkg_root_dir = None
self._services = None
self._version = None
self.discriminator = None
if altered is not None:
self.altered = altered
if cve_count is not None:
self.cve_count = cve_count
if deps is not None:
self.deps = deps
if function_layer is not None:
self.function_layer = function_layer
if md5 is not None:
self.md5 = md5
if missing_pkg is not None:
self.missing_pkg = missing_pkg
if name is not None:
self.name = name
if path is not None:
self.path = path
if pkg_root_dir is not None:
self.pkg_root_dir = pkg_root_dir
if services is not None:
self.services = services
if version is not None:
self.version = version
@property
def altered(self):
"""Gets the altered of this SharedBinary. # noqa: E501
Indicates if the binary was installed from a package manager and modified/replaced (true) or not (false). # noqa: E501
:return: The altered of this SharedBinary. # noqa: E501
:rtype: bool
"""
return self._altered
@altered.setter
def altered(self, altered):
"""Sets the altered of this SharedBinary.
Indicates if the binary was installed from a package manager and modified/replaced (true) or not (false). # noqa: E501
:param altered: The altered of this SharedBinary. # noqa: E501
:type altered: bool
"""
self._altered = altered
@property
def cve_count(self):
"""Gets the cve_count of this SharedBinary. # noqa: E501
Total number of CVEs for this specific binary. # noqa: E501
:return: The cve_count of this SharedBinary. # noqa: E501
:rtype: int
"""
return self._cve_count
@cve_count.setter
def cve_count(self, cve_count):
"""Sets the cve_count of this SharedBinary.
Total number of CVEs for this specific binary. # noqa: E501
:param cve_count: The cve_count of this SharedBinary. # noqa: E501
:type cve_count: int
"""
self._cve_count = cve_count
@property
def deps(self):
"""Gets the deps of this SharedBinary. # noqa: E501
Third-party package files which are used by the binary. # noqa: E501
:return: The deps of this SharedBinary. # noqa: E501
:rtype: list[str]
"""
return self._deps
@deps.setter
def deps(self, deps):
"""Sets the deps of this SharedBinary.
Third-party package files which are used by the binary. # noqa: E501
:param deps: The deps of this SharedBinary. # noqa: E501
:type deps: list[str]
"""
self._deps = deps
@property
def function_layer(self):
"""Gets the function_layer of this SharedBinary. # noqa: E501
ID of the serverless layer in which the package was discovered. # noqa: E501
:return: The function_layer of this SharedBinary. # noqa: E501
:rtype: str
"""
return self._function_layer
@function_layer.setter
def function_layer(self, function_layer):
"""Sets the function_layer of this SharedBinary.
ID of the serverless layer in which the package was discovered. # noqa: E501
:param function_layer: The function_layer of this SharedBinary. # noqa: E501
:type function_layer: str
"""
self._function_layer = function_layer
@property
def md5(self):
"""Gets the md5 of this SharedBinary. # noqa: E501
MD5 hash of the binary. # noqa: E501
:return: The md5 of this SharedBinary. # noqa: E501
:rtype: str
"""
return self._md5
@md5.setter
def md5(self, md5):
"""Sets the md5 of this SharedBinary.
MD5 hash of the binary. # noqa: E501
:param md5: The md5 of this SharedBinary. # noqa: E501
:type md5: str
"""
self._md5 = md5
@property
def missing_pkg(self):
"""Gets the missing_pkg of this SharedBinary. # noqa: E501
Indicates if this binary is not related to any package (true) or not (false). # noqa: E501
:return: The missing_pkg of this SharedBinary. # noqa: E501
:rtype: bool
"""
return self._missing_pkg
@missing_pkg.setter
def missing_pkg(self, missing_pkg):
"""Sets the missing_pkg of this SharedBinary.
Indicates if this binary is not related to any package (true) or not (false). # noqa: E501
:param missing_pkg: The missing_pkg of this SharedBinary. # noqa: E501
:type missing_pkg: bool
"""
self._missing_pkg = missing_pkg
@property
def name(self):
"""Gets the name of this SharedBinary. # noqa: E501
Name of the binary. # noqa: E501
:return: The name of this SharedBinary. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this SharedBinary.
Name of the binary. # noqa: E501
:param name: The name of this SharedBinary. # noqa: E501
:type name: str
"""
self._name = name
@property
def path(self):
"""Gets the path of this SharedBinary. # noqa: E501
Relative path of the binary inside the container. # noqa: E501
:return: The path of this SharedBinary. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this SharedBinary.
Relative path of the binary inside the container. # noqa: E501
:param path: The path of this SharedBinary. # noqa: E501
:type path: str
"""
self._path = path
@property
def pkg_root_dir(self):
"""Gets the pkg_root_dir of this SharedBinary. # noqa: E501
Path for searching packages used by the binary. # noqa: E501
:return: The pkg_root_dir of this SharedBinary. # noqa: E501
:rtype: str
"""
return self._pkg_root_dir
@pkg_root_dir.setter
def pkg_root_dir(self, pkg_root_dir):
"""Sets the pkg_root_dir of this SharedBinary.
Path for searching packages used by the binary. # noqa: E501
:param pkg_root_dir: The pkg_root_dir of this SharedBinary. # noqa: E501
:type pkg_root_dir: str
"""
self._pkg_root_dir = pkg_root_dir
@property
def services(self):
"""Gets the services of this SharedBinary. # noqa: E501
Name of services which use the binary. # noqa: E501
:return: The services of this SharedBinary. # noqa: E501
:rtype: list[str]
"""
return self._services
@services.setter
def services(self, services):
"""Sets the services of this SharedBinary.
Name of services which use the binary. # noqa: E501
:param services: The services of this SharedBinary. # noqa: E501
:type services: list[str]
"""
self._services = services
@property
def version(self):
"""Gets the version of this SharedBinary. # noqa: E501
Version of the binary. # noqa: E501
:return: The version of this SharedBinary. # noqa: E501
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this SharedBinary.
Version of the binary. # noqa: E501
:param version: The version of this SharedBinary. # noqa: E501
:type version: str
"""
self._version = version
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SharedBinary):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SharedBinary):
    return True

return self.to_dict() != other.to_dict()
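# Added usage sketch (not generated code): constructing the model and serializing it
# back to the API's camelCase keys via to_dict(serialize=True); the values are made up,
# and unset attributes simply come back as None.
#
#     binary = SharedBinary(name='openssl', path='/usr/bin/openssl', cve_count=3)
#     binary.to_dict(serialize=True)
#     # -> {'name': 'openssl', 'path': '/usr/bin/openssl', 'cveCount': 3, 'altered': None, ...}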
import pytz
from datetime import datetime
import pendulum
from ..conftest import assert_datetime
def test_equal_to_true():
d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d2 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d3 = datetime(2000, 1, 1, 1, 2, 3, tzinfo=pendulum.UTC)
assert d2 == d1
assert d3 == d1
def test_equal_to_false():
d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d2 = pendulum.datetime(2000, 1, 2, 1, 2, 3)
d3 = datetime(2000, 1, 2, 1, 2, 3, tzinfo=pendulum.UTC)
assert d2 != d1
assert d3 != d1
def test_equal_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d2 = pendulum.datetime(2000, 1, 1, 9, 0, 0, tz='America/Vancouver')
d3 = datetime(2000, 1, 1, 12, 0, 0,
tzinfo=pendulum.timezone('America/Toronto'))
assert d2 == d1
assert d3 == d1
def test_equal_with_timezone_false():
d1 = pendulum.datetime(2000, 1, 1, tz='America/Toronto')
d2 = pendulum.datetime(2000, 1, 1, tz='America/Vancouver')
d3 = datetime(2000, 1, 1, tzinfo=pendulum.timezone('America/Toronto'))
assert d2 != d1
assert d3 == d1
def test_not_equal_to_true():
d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d2 = pendulum.datetime(2000, 1, 2, 1, 2, 3)
d3 = datetime(2000, 1, 2, 1, 2, 3, tzinfo=pendulum.UTC)
assert d2 != d1
assert d3 != d1
def test_not_equal_to_false():
d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d2 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d3 = datetime(2000, 1, 1, 1, 2, 3, tzinfo=pendulum.UTC)
assert d2 == d1
assert d3 == d1
def test_not_equal_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, tz='America/Toronto')
d2 = pendulum.datetime(2000, 1, 1, tz='America/Vancouver')
d3 = datetime(2000, 1, 1, tzinfo=pendulum.timezone('America/Toronto'))
assert d2 != d1
assert d3 == d1
def test_not_equal_to_none():
d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
assert d1 != None
def test_greater_than_true():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(1999, 12, 31)
d3 = datetime(1999, 12, 31, tzinfo=pendulum.UTC)
assert d1 > d2
assert d1 > d3
def test_greater_than_false():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 2)
d3 = datetime(2000, 1, 2, tzinfo=pendulum.UTC)
assert not d1 > d2
assert not d1 > d3
def test_greater_than_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d2 = pendulum.datetime(2000, 1, 1, 8, 59, 59, tz='America/Vancouver')
d3 = pytz.timezone('America/Vancouver').localize(datetime(2000, 1, 1, 8, 59, 59))
assert d1 > d2
assert d1 > d3
def test_greater_than_with_timezone_false():
d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d2 = pendulum.datetime(2000, 1, 1, 9, 0, 1, tz='America/Vancouver')
d3 = pytz.timezone('America/Vancouver').localize(datetime(2000, 1, 1, 9, 0, 1))
assert not d1 > d2
assert not d1 > d3
def test_greater_than_or_equal_true():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(1999, 12, 31)
d3 = datetime(1999, 12, 31, tzinfo=pendulum.UTC)
assert d1 >= d2
assert d1 >= d3
def test_greater_than_or_equal_true_equal():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 1)
d3 = datetime(2000, 1, 1, tzinfo=pendulum.UTC)
assert d1 >= d2
assert d1 >= d3
def test_greater_than_or_equal_false():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 2)
d3 = datetime(2000, 1, 2, tzinfo=pendulum.UTC)
assert not d1 >= d2
assert not d1 >= d3
def test_greater_than_or_equal_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d2 = pendulum.datetime(2000, 1, 1, 8, 59, 59, tz='America/Vancouver')
d3 = pytz.timezone('America/Vancouver').localize(datetime(2000, 1, 1, 8, 59, 59))
assert d1 >= d2
assert d1 >= d3
def test_greater_than_or_equal_with_timezone_false():
d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d2 = pendulum.datetime(2000, 1, 1, 9, 0, 1, tz='America/Vancouver')
d3 = pytz.timezone('America/Vancouver').localize(datetime(2000, 1, 1, 9, 0, 1))
assert not d1 >= d2
assert not d1 >= d3
def test_less_than_true():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 2)
d3 = datetime(2000, 1, 2, tzinfo=pendulum.UTC)
assert d1 < d2
assert d1 < d3
def test_less_than_false():
d1 = pendulum.datetime(2000, 1, 2)
d2 = pendulum.datetime(2000, 1, 1)
d3 = datetime(2000, 1, 1, tzinfo=pendulum.UTC)
assert not d1 < d2
assert not d1 < d3
def test_less_than_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, 8, 59, 59, tz='America/Vancouver')
d2 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d3 = pytz.timezone('America/Toronto').localize(datetime(2000, 1, 1, 12, 0, 0))
assert d1 < d2
assert d1 < d3
def test_less_than_with_timezone_false():
d1 = pendulum.datetime(2000, 1, 1, 9, 0, 1, tz='America/Vancouver')
d2 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d3 = pytz.timezone('America/Toronto').localize(datetime(2000, 1, 1, 12, 0, 0))
assert not d1 < d2
assert not d1 < d3
def test_less_than_or_equal_true():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 2)
d3 = datetime(2000, 1, 2, tzinfo=pendulum.UTC)
assert d1 <= d2
assert d1 <= d3
def test_less_than_or_equal_true_equal():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 1)
d3 = datetime(2000, 1, 1, tzinfo=pendulum.UTC)
assert d1 <= d2
assert d1 <= d3
def test_less_than_or_equal_false():
d1 = pendulum.datetime(2000, 1, 2)
d2 = pendulum.datetime(2000, 1, 1)
d3 = datetime(2000, 1, 1, tzinfo=pendulum.UTC)
assert not d1 <= d2
assert not d1 <= d3
def test_less_than_or_equal_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, 8, 59, 59, tz='America/Vancouver')
d2 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d3 = pytz.timezone('America/Toronto').localize(datetime(2000, 1, 1, 12, 0, 0))
assert d1 <= d2
assert d1 <= d3
def test_less_than_or_equal_with_timezone_false():
d1 = pendulum.datetime(2000, 1, 1, 9, 0, 1, tz='America/Vancouver')
d2 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d3 = pytz.timezone('America/Toronto').localize(datetime(2000, 1, 1, 12, 0, 0))
assert not d1 <= d2
assert not d1 <= d3
def test_is_birthday():
with pendulum.test(pendulum.now()):
d = pendulum.now()
a_birthday = d.subtract(years=1)
assert a_birthday.is_birthday()
not_a_birthday = d.subtract(days=1)
assert not not_a_birthday.is_birthday()
also_not_a_birthday = d.add(days=2)
assert not also_not_a_birthday.is_birthday()
d1 = pendulum.datetime(1987, 4, 23)
d2 = pendulum.datetime(2014, 9, 26)
d3 = pendulum.datetime(2014, 4, 23)
assert not d2.is_birthday(d1)
assert d3.is_birthday(d1)
def test_closest():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = pendulum.datetime(2015, 5, 28, 11, 0, 0)
dt2 = pendulum.datetime(2015, 5, 28, 14, 0, 0)
closest = instance.closest(dt1, dt2)
assert closest == dt1
closest = instance.closest(dt2, dt1)
assert closest == dt1
dts = [pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(
hours=x) for x in range(4)]
closest = instance.closest(*dts)
assert closest == dts[0]
closest = instance.closest(*(dts[::-1]))
assert closest == dts[0]
def test_closest_with_datetime():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = datetime(2015, 5, 28, 11, 0, 0)
dt2 = datetime(2015, 5, 28, 14, 0, 0)
closest = instance.closest(dt1, dt2)
assert_datetime(closest, 2015, 5, 28, 11, 0, 0)
dts = [pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(
hours=x) for x in range(4)]
closest = instance.closest(dt1, dt2, *dts)
assert_datetime(closest, 2015, 5, 28, 11, 0, 0)
def test_closest_with_equals():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt2 = pendulum.datetime(2015, 5, 28, 14, 0, 0)
closest = instance.closest(dt1, dt2)
assert closest == dt1
def test_farthest():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = pendulum.datetime(2015, 5, 28, 11, 0, 0)
dt2 = pendulum.datetime(2015, 5, 28, 14, 0, 0)
farthest = instance.farthest(dt1, dt2)
assert farthest == dt2
farthest = instance.farthest(dt2, dt1)
assert farthest == dt2
dts = [pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(
hours=x) for x in range(4)]
farthest = instance.farthest(*dts)
assert farthest == dts[-1]
farthest = instance.farthest(*(dts[::-1]))
assert farthest == dts[-1]
f = pendulum.datetime(2010, 1, 1, 0, 0, 0)
assert f == instance.farthest(f, *(dts))
def test_farthest_with_datetime():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = datetime(2015, 5, 28, 11, 0, 0, tzinfo= pendulum.UTC)
dt2 = datetime(2015, 5, 28, 14, 0, 0, tzinfo= pendulum.UTC)
farthest = instance.farthest(dt1, dt2)
assert_datetime(farthest, 2015, 5, 28, 14, 0, 0)
dts = [pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(
hours=x) for x in range(4)]
farthest = instance.farthest(dt1, dt2, *dts)
assert_datetime(farthest, 2015, 5, 28, 19, 0, 0)
def test_farthest_with_equals():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt2 = pendulum.datetime(2015, 5, 28, 14, 0, 0)
farthest = instance.farthest(dt1, dt2)
assert farthest == dt2
dts = [pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(hours=x) for x in range(4)]
farthest = instance.farthest(dt1, dt2, *dts)
assert farthest == dts[-1]
def test_is_same_day():
dt1 = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt2 = pendulum.datetime(2015, 5, 29, 12, 0, 0)
dt3 = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt4 = datetime(2015, 5, 28, 12, 0, 0, tzinfo=pendulum.UTC)
dt5 = datetime(2015, 5, 29, 12, 0, 0, tzinfo=pendulum.UTC)
assert not dt1.is_same_day(dt2)
assert dt1.is_same_day(dt3)
assert dt1.is_same_day(dt4)
assert not dt1.is_same_day(dt5)
def test_comparison_to_unsupported():
dt1 = pendulum.now()
assert dt1 | |
1,
"cluster": "test",
"hostname": hostname,
"tf_version": "0001",
"event_type": "DEVICE_SNAPSHOT",
"device_infos": [],
"data": data_1
}
data_2 = {
"gcertstatus": "LOAS2",
"krbstatus": "KRB2"
}
host_event_2 = {
"time": 2,
"cluster": "test",
"hostname": hostname,
"tf_version": "0001",
"event_type": "DEVICE_SNAPSHOT",
"device_infos": [],
"data": data_2
}
event_1 = host_event.HostEvent(**host_event_1)
device_manager._UpdateHostWithDeviceSnapshotEvent(event_1)
ndb_host = device_manager.GetHost(hostname)
self.assertEqual(data_1, ndb_host.extra_info)
event_2 = host_event.HostEvent(**host_event_2)
device_manager._UpdateHostWithDeviceSnapshotEvent(event_2)
ndb_host = device_manager.GetHost(hostname)
self.assertEqual(data_2, ndb_host.extra_info)
def testUpdateHostWithDeviceSnapshotEvent_oldStateGone(self):
# Test update host with RUNNING if the old state is GONE.
hostname = "test-1.mtv.corp.example.com"
host = datastore_entities.HostInfo(id=hostname)
host.hostname = hostname
host.physical_cluster = "test"
host.timestamp = datetime.datetime.utcfromtimestamp(1)
host.host_state = api_messages.HostState.GONE
host.put()
host_event_1 = {
"time": 2,
"cluster": "test",
"event_type": "NOT_HOST_STATE_CHANGED",
"hostname": hostname,
"state": "RUNNING",
}
event_1 = host_event.HostEvent(**host_event_1)
device_manager._UpdateHostWithDeviceSnapshotEvent(event_1)
ndb_host = device_manager.GetHost(hostname)
self.assertEqual(api_messages.HostState.RUNNING, ndb_host.host_state)
host_history_list = device_manager.GetHostStateHistory(hostname)
self.assertEqual(host_history_list[0].state, api_messages.HostState.RUNNING)
def testUpdateHostWithDeviceSnapshotEvent_newTestHarnessInstance(self):
"""Test update host with new test harness instance host event."""
hostname = "host1.mtv.corp.example.com"
host = datastore_test_util.CreateHost(
cluster="test",
lab_name="alab",
hostname=hostname,
timestamp=datetime.datetime.utcfromtimestamp(1),
host_state=api_messages.HostState.KILLING,
extra_info={
"test_harness_start_time_ms": "1400000000000"
})
event = host_event.HostEvent(**self.HOST_EVENT_WITH_TEST_HARNESS_START_TIME)
device_manager._UpdateHostWithDeviceSnapshotEvent(event)
host = device_manager.GetHost(hostname)
# The new instance should override the old KILLING state.
self.assertEqual(api_messages.HostState.RUNNING, host.host_state)
host_history_list = device_manager.GetHostStateHistory(hostname)
self.assertEqual(host_history_list[0].state, api_messages.HostState.RUNNING)
def _GetHostHistories(self, hostname):
return (datastore_entities.HostInfoHistory
.query(ancestor=ndb.Key(datastore_entities.HostInfo, hostname))
.order(-datastore_entities.HostInfoHistory.timestamp)
.fetch())
def testUpdateHostWithHostChangedEvent_newState(self):
# Test update host with a new state
hostname = "test-1.mtv.corp.example.com"
host_event_1 = {
"time": 1,
"cluster": "test",
"event_type": "HOST_STATE_CHANGED",
"hostname": hostname,
"state": "RUNNING",
}
host_event_2 = {
"time": 2,
"cluster": "test",
"event_type": "HOST_STATE_CHANGED",
"hostname": hostname,
"state": "RUNNING",
}
host_event_3 = {
"time": 3,
"cluster": "test",
"event_type": "HOST_STATE_CHANGED",
"hostname": hostname,
"state": "QUITTING",
}
event_1 = host_event.HostEvent(**host_event_1)
device_manager._UpdateHostWithHostChangedEvent(event_1)
ndb_host = device_manager.GetHost(hostname)
self.assertEqual(api_messages.HostState.RUNNING, ndb_host.host_state)
event_2 = host_event.HostEvent(**host_event_2)
device_manager._UpdateHostWithHostChangedEvent(event_2)
ndb_host = device_manager.GetHost(hostname)
self.assertEqual(api_messages.HostState.RUNNING, ndb_host.host_state)
event_3 = host_event.HostEvent(**host_event_3)
device_manager._UpdateHostWithHostChangedEvent(event_3)
ndb_host = device_manager.GetHost(hostname)
self.assertEqual(api_messages.HostState.QUITTING, ndb_host.host_state)
host_state_histories = device_manager.GetHostStateHistory(hostname)
self.assertEqual(2, len(host_state_histories))
self.assertEqual(hostname, host_state_histories[0].hostname)
self.assertEqual(
api_messages.HostState.QUITTING, host_state_histories[0].state)
self.assertEqual(event_3.timestamp, host_state_histories[0].timestamp)
self.assertEqual(
api_messages.HostState.RUNNING, host_state_histories[1].state)
self.assertEqual(event_1.timestamp, host_state_histories[1].timestamp)
host_histories = self._GetHostHistories(hostname)
self.assertEqual(hostname, host_histories[0].hostname)
self.assertEqual(
api_messages.HostState.QUITTING, host_histories[0].host_state)
self.assertEqual(event_3.timestamp, host_histories[0].timestamp)
self.assertEqual(
api_messages.HostState.RUNNING, host_histories[1].host_state)
self.assertEqual(event_1.timestamp, host_histories[1].timestamp)
def testIsNewTestHarnessInstance(self):
hostname = self.HOST_EVENT_WITH_TEST_HARNESS_START_TIME["hostname"]
host = datastore_test_util.CreateHost(
"free", hostname, host_state=api_messages.HostState.RUNNING,
extra_info={
"test_harness_start_time_ms": "1430000000000"
})
event = host_event.HostEvent(**self.HOST_EVENT_WITH_TEST_HARNESS_START_TIME)
self.assertTrue(device_manager._IsNewTestHarnessInstance(host, event))
def testIsNewTestHarnessInstance_oldInstanceEvent(self):
hostname = self.HOST_EVENT_WITH_TEST_HARNESS_START_TIME["hostname"]
host = datastore_test_util.CreateHost(
"free", hostname, host_state=api_messages.HostState.RUNNING,
extra_info={
"test_harness_start_time_ms": "1440000000000"
})
event = host_event.HostEvent(**self.HOST_EVENT_WITH_TEST_HARNESS_START_TIME)
self.assertFalse(device_manager._IsNewTestHarnessInstance(host, event))
def testIsNewTestHarnessInstance_hostIsGONE(self):
"""Test _IsNewTestHarnessInstance, host is GONE."""
hostname = self.HOST_EVENT_WITH_TEST_HARNESS_START_TIME["hostname"]
host = datastore_test_util.CreateHost(
"free", hostname, host_state=api_messages.HostState.GONE)
event = host_event.HostEvent(**self.HOST_EVENT_WITH_TEST_HARNESS_START_TIME)
self.assertTrue(device_manager._IsNewTestHarnessInstance(host, event))
def testIsNewTestHarnessInstance_hostNoTestHarnessStartTime(self):
"""Test _IsNewTestHarnessInstance, host has no start time but event has."""
hostname = self.HOST_EVENT_WITH_TEST_HARNESS_START_TIME["hostname"]
host = datastore_test_util.CreateHost(
"free", hostname, host_state=api_messages.HostState.RUNNING,
extra_info={})
event = host_event.HostEvent(**self.HOST_EVENT_WITH_TEST_HARNESS_START_TIME)
self.assertTrue(device_manager._IsNewTestHarnessInstance(host, event))
def testIsNewTestHarnessInstance_eventNoTestHarnessStartTime(self):
"""Test _IsNewTestHarnessInstance, event has no start time."""
event_dict = copy.deepcopy(self.HOST_EVENT_WITH_TEST_HARNESS_START_TIME)
event_dict["data"] = {}
hostname = event_dict.get("hostname")
host = datastore_test_util.CreateHost(
"free", hostname, host_state=api_messages.HostState.RUNNING,
extra_info={})
event = host_event.HostEvent(**event_dict)
self.assertFalse(device_manager._IsNewTestHarnessInstance(host, event))
def _BuildDeviceStateHistory(self, timestamp, serial, state):
"""Helper to build and persist device state history records."""
device_snapshot = datastore_entities.DeviceStateHistory(
timestamp=timestamp,
device_serial=serial,
state=state)
device_snapshot.put()
def testCountDeviceForHost(self):
datastore_test_util.CreateHost("free", "host1")
datastore_test_util.CreateDevice(
"free", "host1", "s1",
run_target="run_target1")
datastore_test_util.CreateDevice(
"free", "host1", "s2",
run_target="run_target1")
datastore_test_util.CreateDevice(
"free", "host1", "s3",
run_target="run_target2",
state=common.DeviceState.ALLOCATED)
datastore_test_util.CreateDevice(
"free", "host1", "s4",
run_target="run_target2",
state=common.DeviceState.GONE)
datastore_test_util.CreateDevice(
"free", "host1", "s5",
run_target="run_target1",
state=common.DeviceState.GONE, hidden=True)
device_manager._CountDeviceForHost("host1")
host = device_manager.GetHost("host1")
self.assertEqual(4, host.total_devices)
self.assertEqual(2, host.available_devices)
self.assertEqual(1, host.allocated_devices)
self.assertEqual(1, host.offline_devices)
self.assertEqual(2, len(host.device_count_summaries))
for device_count_summary in host.device_count_summaries:
if device_count_summary.run_target == "run_target1":
self.assertEqual(2, device_count_summary.total)
self.assertEqual(2, device_count_summary.available)
self.assertEqual(0, device_count_summary.allocated)
self.assertEqual(0, device_count_summary.offline)
elif device_count_summary.run_target == "run_target2":
self.assertEqual(2, device_count_summary.total)
self.assertEqual(0, device_count_summary.available)
self.assertEqual(1, device_count_summary.allocated)
self.assertEqual(1, device_count_summary.offline)
else:
self.assertFalse(True)
def testCountDeviceForHost_hostWithoutDevice(self):
host = datastore_test_util.CreateHost(
"free", "host2",
device_count_summaries=[
datastore_test_util.CreateDeviceCountSummary(
run_target="run_target1",
offline=1,
available=5,
allocated=4)])
device_manager._CountDeviceForHost("host2")
ndb.get_context().clear_cache()
host = device_manager.GetHost("host2")
self.assertEqual(0, len(host.device_count_summaries))
self.assertEqual(0, host.total_devices)
self.assertEqual(0, host.available_devices)
self.assertEqual(0, host.allocated_devices)
self.assertEqual(0, host.offline_devices)
def testCountDeviceForHost_mhHost(self):
datastore_test_util.CreateHost(
"free", "mh_host", test_harness="MOBILEHARNESS")
datastore_test_util.CreateDevice(
"free", "mh_host", "s1",
run_target="run_target1",
state=common.DeviceState.IDLE)
datastore_test_util.CreateDevice(
"free", "mh_host", "s2",
run_target="run_target1",
state=common.DeviceState.IDLE)
datastore_test_util.CreateDevice(
"free", "mh_host", "s3",
run_target="run_target2",
state=common.DeviceState.BUSY)
datastore_test_util.CreateDevice(
"free", "mh_host", "s4",
run_target="run_target2",
state=common.DeviceState.OFFLINE)
datastore_test_util.CreateDevice(
"free", "mh_host", "s5",
run_target="run_target1",
state=common.DeviceState.GONE, hidden=True)
device_manager._CountDeviceForHost("mh_host")
host = device_manager.GetHost("mh_host")
self.assertEqual(4, host.total_devices)
self.assertEqual(2, host.available_devices)
self.assertEqual(1, host.allocated_devices)
self.assertEqual(1, host.offline_devices)
self.assertEqual(2, len(host.device_count_summaries))
for device_count_summary in host.device_count_summaries:
if device_count_summary.run_target == "run_target1":
self.assertEqual(2, device_count_summary.total)
self.assertEqual(2, device_count_summary.available)
self.assertEqual(0, device_count_summary.allocated)
self.assertEqual(0, device_count_summary.offline)
elif device_count_summary.run_target == "run_target2":
self.assertEqual(2, device_count_summary.total)
self.assertEqual(0, device_count_summary.available)
self.assertEqual(1, device_count_summary.allocated)
self.assertEqual(1, device_count_summary.offline)
else:
self.assertFalse(True)
def _AssertHostSyncTask(self, hostname):
tasks = self.mock_task_scheduler.GetTasks()
self.assertEqual(1, len(tasks))
host_sync = datastore_entities.HostSync.get_by_id(hostname)
self.assertEqual(host_sync.taskname, tasks[0].name)
expected_payload = {
"hostname": hostname,
"host_sync_id": host_sync.host_sync_id,
}
payload = json.loads(tasks[0].payload)
self.assertEqual(expected_payload, payload)
return host_sync.host_sync_id
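# The tests below cover the host-sync bookkeeping: StartHostSync is expected to
# enqueue exactly one task whose payload carries the hostname and a host_sync_id,
# and StopHostSync / a stale timestamp decide whether that entry may be replaced
# or removed. (This summarizes the assertions that follow, not the
# device_manager API contract.)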
def testStartHostSync(self):
device_manager.StartHostSync("host1")
self._AssertHostSyncTask("host1")
def testStartHostSync_alreadyExist(self):
device_manager.StartHostSync("host1")
self._AssertHostSyncTask("host1")
self.assertIsNone(device_manager.StartHostSync("host1"))
def testStartHostSync_differentHostSyncId(self):
device_manager.StartHostSync("host1")
self._AssertHostSyncTask("host1")
self.assertIsNone(
device_manager.StartHostSync("host1", "another_sync_id"))
@mock.patch.object(common, "Now")
def testStartHostSync_staleTask(self, mock_now):
now = datetime.datetime(2019, 11, 14, 10, 10)
before = now - datetime.timedelta(minutes=40)
mock_now.return_value = before
old_sync_id = device_manager.StartHostSync("host1")
self._AssertHostSyncTask("host1")
mock_now.return_value = now
new_sync_id = device_manager.StartHostSync("host1", "another_sync_id")
self.assertIsNotNone(new_sync_id)
self.assertNotEqual(old_sync_id, new_sync_id)
tasks = self.mock_task_scheduler.GetTasks()
# There will be 2 tasks: one for the stale sync and one for the new sync.
self.assertEqual(2, len(tasks))
def testStartHostSync_sameHostSyncId(self):
host_sync_id = device_manager.StartHostSync("host1")
self._AssertHostSyncTask("host1")
new_host_sync_id = device_manager.StartHostSync("host1", host_sync_id)
self.assertIsNotNone(new_host_sync_id)
self.assertNotEqual(host_sync_id, new_host_sync_id)
def testStopHostSync(self):
host_sync_id = device_manager.StartHostSync("host1")
self._AssertHostSyncTask("host1")
device_manager.StopHostSync("host1", host_sync_id)
self.assertIsNone(datastore_entities.HostSync.get_by_id("host1"))
@mock.patch.object(common, "Now")
def testStopHostSync_staleTask(self, mock_now):
now = datetime.datetime(2019, 11, 14, 10, 10)
before = now - datetime.timedelta(minutes=40)
mock_now.return_value = before
device_manager.StartHostSync("host1")
self._AssertHostSyncTask("host1")
mock_now.return_value = now
device_manager.StopHostSync("host1", "another_sync_id")
self.assertIsNone(datastore_entities.HostSync.get_by_id("host1"))
def testStopHostSync_differentTaskname(self):
device_manager.StartHostSync("host1")
self._AssertHostSyncTask("host1")
device_manager.StopHostSync("host1", "another_sync_id")
self.assertIsNotNone(datastore_entities.HostSync.get_by_id("host1"))
@mock.patch.object(common, "Now")
def testUpdateGoneHost(self, mock_now):
now = datetime.datetime(2019, 11, 14, 10, 10)
mock_now.return_value = now
host = datastore_test_util.CreateHost("free", "host1")
d1 = datastore_test_util.CreateDevice(
"free", "host1", "s1", run_target="r1")
d2 = datastore_test_util.CreateDevice(
"free", "host1", "s2", run_target="r1")
device_manager._CountDeviceForHost("host1")
device_manager.UpdateGoneHost("host1")
ndb.get_context().clear_cache()
host = host.key.get()
self.assertEqual(api_messages.HostState.GONE, host.host_state)
d1 = d1.key.get()
self.assertEqual(common.DeviceState.GONE, d1.state)
d2 = d2.key.get()
self.assertEqual(common.DeviceState.GONE, d2.state)
host_histories = device_manager.GetHostStateHistory("host1")
self.assertEqual(1, len(host_histories))
self.assertEqual(now, host_histories[0].timestamp)
self.assertEqual(api_messages.HostState.GONE, host_histories[0].state)
device_histories = device_manager.GetDeviceStateHistory("host1", "s1")
self.assertEqual(1, len(device_histories))
self.assertEqual(now, device_histories[0].timestamp)
self.assertEqual(common.DeviceState.GONE, device_histories[0].state)
device_histories = device_manager.GetDeviceStateHistory("host1", "s2")
self.assertEqual(1, len(device_histories))
self.assertEqual(now, device_histories[0].timestamp)
self.assertEqual(1, len(host.device_count_summaries))
self.assertEqual(2, host.device_count_summaries[0].total)
self.assertEqual("r1", host.device_count_summaries[0].run_target)
self.assertEqual(2, host.device_count_summaries[0].offline)
@mock.patch.object(common, "Now")
def testUpdateGoneHost_alreadyGone(self, mock_now):
now = datetime.datetime(2019, 11, 14, 10, 10)
mock_now.return_value = now
host = datastore_test_util.CreateHost(
"free", "host1", host_state=api_messages.HostState.GONE)
device_manager.UpdateGoneHost("host1")
ndb.get_context().clear_cache()
host = host.key.get()
self.assertEqual(api_messages.HostState.GONE, host.host_state)
host_histories = device_manager.GetHostStateHistory("host1")
self.assertEqual(0, len(host_histories))
@mock.patch.object(common, "Now")
def testHideHost(self, mock_now):
now = datetime.datetime(2019, 11, 14, 10, 10)
mock_now.return_value = now
host = datastore_test_util.CreateHost("free", "host1")
d1 = datastore_test_util.CreateDevice("free", "host1", "s1")
d2 = datastore_test_util.CreateDevice("free", "host1", "s2")
device_manager.HideHost("host1")
ndb.get_context().clear_cache()
host = host.key.get()
self.assertTrue(host.hidden)
self.assertEqual(now, host.timestamp)
d1 = d1.key.get()
self.assertTrue(d1.hidden)
self.assertEqual(now, d1.timestamp)
d2 = d2.key.get()
self.assertTrue(d2.hidden)
self.assertEqual(now, d2.timestamp)
@mock.patch.object(common, "Now")
def testHideHost_alreadyHidden(self, mock_now):
now = datetime.datetime(2019, 11, 14, 10, 10)
before = now - datetime.timedelta(hours=10)
mock_now.return_value = now
host = datastore_test_util.CreateHost(
"free", "host1", hidden=True, timestamp=before)
device_manager.HideHost("host1")
ndb.get_context().clear_cache()
host = host.key.get()
self.assertEqual(before, host.timestamp)
@mock.patch.object(common, "Now")
def testRestoreHost(self, mock_now):
now = datetime.datetime(2019, 11, 14, 10, 10)
mock_now.return_value = now
host = datastore_test_util.CreateHost("free", "host1", hidden=True)
device_manager.RestoreHost("host1")
ndb.get_context().clear_cache()
host = host.key.get()
self.assertEqual(now, host.timestamp)
self.assertFalse(host.hidden)
@mock.patch.object(common, "Now")
def testHideDevice(self, mock_now):
now = datetime.datetime(2019, 11, 14, 10, 10)
mock_now.return_value = now
host = datastore_test_util.CreateHost("free", "host1")
datastore_test_util.CreateDevice("free", "host1", "s1")
d2 = datastore_test_util.CreateDevice("free", "host1", "s2")
device_manager._CountDeviceForHost("host1")
host = host.key.get()
self.assertEqual(1, len(host.device_count_summaries))
self.assertEqual(2, host.device_count_summaries[0].total)
device_manager.HideDevice("s2", "host1")
ndb.get_context().clear_cache()
d2 = d2.key.get()
self.assertEqual(now, d2.timestamp)
self.assertTrue(d2.hidden)
host = host.key.get()
self.assertEqual(1, len(host.device_count_summaries))
self.assertEqual(1, host.device_count_summaries[0].total)
@mock.patch.object(common, "Now")
def testRestoreDevice(self, mock_now):
now = datetime.datetime(2019, 11, 14, 10, 10)
mock_now.return_value = now
host = datastore_test_util.CreateHost("free", "host1")
datastore_test_util.CreateDevice("free", "host1", "s1")
d2 = datastore_test_util.CreateDevice("free", "host1", "s2", hidden=True)
device_manager._CountDeviceForHost("host1")
host = host.key.get()
self.assertEqual(1, len(host.device_count_summaries))
self.assertEqual(1, host.device_count_summaries[0].total)
device_manager.RestoreDevice("s2", "host1")
ndb.get_context().clear_cache()
d2 = d2.key.get()
self.assertEqual(now, d2.timestamp)
self.assertFalse(d2.hidden)
host = host.key.get()
self.assertEqual(1, len(host.device_count_summaries))
self.assertEqual(2, host.device_count_summaries[0].total)
def testAssignHosts(self):
host1 = datastore_test_util.CreateHost("free", "host1")
host2 = datastore_test_util.CreateHost("free", "host2")
device_manager.AssignHosts(["host1", "host2"], "assignee")
ndb.get_context().clear_cache()
host1 = host1.key.get()
self.assertEqual("assignee", host1.assignee)
host2 = host2.key.get()
self.assertEqual("assignee", host2.assignee)
def testAssignHosts_invalidHost(self):
host1 = datastore_test_util.CreateHost("free", "host1")
host2 = datastore_test_util.CreateHost("free", "host2")
device_manager.AssignHosts(
["host1", "host2", "invalid_host"], "assignee")
ndb.get_context().clear_cache()
host1 = host1.key.get()
self.assertEqual("assignee", host1.assignee)
host2 = host2.key.get()
self.assertEqual("assignee", host2.assignee)
def testAssignHosts_unassign(self):
host1 = datastore_test_util.CreateHost("free", "host1")
host2 = datastore_test_util.CreateHost("free", "host2")
device_manager.AssignHosts(["host1", "host2"], "assignee")
ndb.get_context().clear_cache()
host1 = host1.key.get()
self.assertEqual("assignee", host1.assignee)
host2 = host2.key.get()
self.assertEqual("assignee", host2.assignee)
device_manager.AssignHosts(["host1", "host2"], None)
ndb.get_context().clear_cache()
host1 = host1.key.get()
self.assertIsNone(host1.assignee)
self.assertIsNotNone(host1.last_recovery_time)
host2 = host2.key.get()
self.assertIsNone(host2.assignee)
self.assertIsNotNone(host2.last_recovery_time)
def testGetDevicesOnHost(self):
datastore_test_util.CreateHost("free", "host1")
datastore_test_util.CreateDevice("free", "host1", "s1")
datastore_test_util.CreateDevice("free", "host1", "s2")
datastore_test_util.CreateDevice("free", "host1", "s3", hidden=True)
devices = device_manager.GetDevicesOnHost("host1")
self.assertEqual(2, len(devices))
def testUpdateHostState(self):
"""Test UpdateHostState will update state and create state history."""
hostname = "test-1.mtv.corp.example.com"
timestamp1 = datetime.datetime.utcfromtimestamp(1)
timestamp2 = datetime.datetime.utcfromtimestamp(2)
host = datastore_test_util.CreateHost(
"test", hostname,
host_state=api_messages.HostState.GONE,
timestamp=timestamp1)
state_history, history = device_manager._UpdateHostState(
host, api_messages.HostState.RUNNING,
timestamp=timestamp2)
self.assertIsNotNone(state_history)
self.assertEqual(api_messages.HostState.RUNNING, host.host_state)
self.assertEqual(timestamp2, host.timestamp)
self.assertEqual(hostname, state_history.hostname)
self.assertEqual(api_messages.HostState.RUNNING, state_history.state)
self.assertEqual(api_messages.HostState.RUNNING, history.host_state)
self.assertEqual(host.extra_info, history.extra_info)
self.assertEqual(timestamp2, state_history.timestamp)
def testUpdateHostState_sameState(self):
"""Test UpdateHostState will ignore same state."""
hostname = "test-1.mtv.corp.example.com"
timestamp1 = datetime.datetime.utcfromtimestamp(1)
timestamp2 = datetime.datetime.utcfromtimestamp(2)
import os
import sys
import random
import torch
import torch.nn as nn
import numpy as np
from itranslit.config import LANGDATA
from itranslit.utils import download_model_and_get_path
class Encoder(nn.Module):
'''
Simple RNN based encoder network
'''
def __init__(self, input_dim, embed_dim, hidden_dim ,
rnn_type = 'gru', layers = 1,
bidirectional =False,
dropout = 0, device = "cpu"):
super(Encoder, self).__init__()
self.input_dim = input_dim #src_vocab_sz
self.enc_embed_dim = embed_dim
self.enc_hidden_dim = hidden_dim
self.enc_rnn_type = rnn_type
self.enc_layers = layers
self.enc_directions = 2 if bidirectional else 1
self.device = device
self.embedding = nn.Embedding(self.input_dim, self.enc_embed_dim)
if self.enc_rnn_type == "gru":
self.enc_rnn = nn.GRU(input_size= self.enc_embed_dim,
hidden_size= self.enc_hidden_dim,
num_layers= self.enc_layers,
bidirectional= bidirectional)
elif self.enc_rnn_type == "lstm":
self.enc_rnn = nn.LSTM(input_size= self.enc_embed_dim,
hidden_size= self.enc_hidden_dim,
num_layers= self.enc_layers,
bidirectional= bidirectional)
else:
raise Exception("unknown RNN type mentioned")
def forward(self, x, x_sz, hidden = None):
'''
x_sz: (batch_size, 1) - Unpadded sequence lengths used for pack_pad
Return:
output: (batch_size, max_length, hidden_dim)
hidden: (n_layer*num_directions, batch_size, hidden_dim) | if LSTM tuple -(h_n, c_n)
'''
batch_sz = x.shape[0]
# x: batch_size, max_length, enc_embed_dim
x = self.embedding(x)
## pack the padded data
# x: max_length, batch_size, enc_embed_dim -> for pack_pad
x = x.permute(1,0,2)
x = nn.utils.rnn.pack_padded_sequence(x, x_sz, enforce_sorted=False) # pack, skipping pad positions
# output: packed_size, batch_size, enc_embed_dim --> hidden from all timesteps
# hidden: n_layer*num_directions, batch_size, hidden_dim | if LSTM (h_n, c_n)
output, hidden = self.enc_rnn(x)
## pad the sequence to the max length in the batch
# output: max_length, batch_size, enc_emb_dim*directions)
output, _ = nn.utils.rnn.pad_packed_sequence(output)
# output: batch_size, max_length, hidden_dim
output = output.permute(1,0,2)
return output, hidden
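# --- Illustrative sketch of how the Encoder above is used. The vocabulary size,
# embedding/hidden dims and sequence lengths are made-up values, not taken from
# the original configuration. ---
def _demo_encoder_shapes():
    enc = Encoder(input_dim=60, embed_dim=32, hidden_dim=64,
                  rnn_type='gru', layers=1, bidirectional=True)
    x = torch.randint(0, 60, (4, 10))       # (batch_size, max_length) of token ids
    x_sz = torch.tensor([10, 8, 7, 5])      # unpadded lengths, one per sequence
    output, hidden = enc(x, x_sz)
    # output: (4, 10, 64 * 2) since the encoder is bidirectional
    # hidden: (1 * 2, 4, 64)
    return output.shape, hidden.shape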
class Decoder(nn.Module):
'''
Used as decoder stage
'''
def __init__(self, output_dim, embed_dim, hidden_dim,
rnn_type = 'gru', layers = 1,
use_attention = True,
enc_outstate_dim = None, # enc_directions * enc_hidden_dim
dropout = 0, device = "cpu"):
super(Decoder, self).__init__()
self.output_dim = output_dim #tgt_vocab_sz
self.dec_hidden_dim = hidden_dim
self.dec_embed_dim = embed_dim
self.dec_rnn_type = rnn_type
self.dec_layers = layers
self.use_attention = use_attention
self.device = device
if self.use_attention:
self.enc_outstate_dim = enc_outstate_dim if enc_outstate_dim else hidden_dim
else:
self.enc_outstate_dim = 0
self.embedding = nn.Embedding(self.output_dim, self.dec_embed_dim)
if self.dec_rnn_type == 'gru':
self.dec_rnn = nn.GRU(input_size= self.dec_embed_dim + self.enc_outstate_dim, # to concat attention_output
hidden_size= self.dec_hidden_dim, # previous Hidden
num_layers= self.dec_layers,
batch_first = True )
elif self.dec_rnn_type == "lstm":
self.dec_rnn = nn.LSTM(input_size= self.dec_embed_dim + self.enc_outstate_dim, # to concat attention_output
hidden_size= self.dec_hidden_dim, # previous Hidden
num_layers= self.dec_layers,
batch_first = True )
else:
raise Exception("unknown RNN type mentioned")
self.fc = nn.Sequential(
nn.Linear(self.dec_hidden_dim, self.dec_embed_dim), nn.LeakyReLU(),
# nn.Linear(self.dec_embed_dim, self.dec_embed_dim), nn.LeakyReLU(), # removing to reduce size
nn.Linear(self.dec_embed_dim, self.output_dim),
)
##----- Attention ----------
if self.use_attention:
self.W1 = nn.Linear( self.enc_outstate_dim, self.dec_hidden_dim)
self.W2 = nn.Linear( self.dec_hidden_dim, self.dec_hidden_dim)
self.V = nn.Linear( self.dec_hidden_dim, 1)
def attention(self, x, hidden, enc_output):
'''
x: (batch_size, 1, dec_embed_dim) -> after Embedding
enc_output: batch_size, max_length, enc_hidden_dim *num_directions
hidden: n_layers, batch_size, hidden_size | if LSTM (h_n, c_n)
'''
## perform addition to calculate the score
# hidden_with_time_axis: batch_size, 1, hidden_dim
## hidden_with_time_axis = hidden.permute(1, 0, 2) ## replaced with below 2lines
hidden_with_time_axis = torch.sum(hidden, axis=0)
hidden_with_time_axis = hidden_with_time_axis.unsqueeze(1)
# score: batch_size, max_length, hidden_dim
score = torch.tanh(self.W1(enc_output) + self.W2(hidden_with_time_axis))
# attention_weights: batch_size, max_length, 1
# we get 1 at the last axis because we are applying score to self.V
attention_weights = torch.softmax(self.V(score), dim=1)
# context_vector shape after sum == (batch_size, hidden_dim)
context_vector = attention_weights * enc_output
context_vector = torch.sum(context_vector, dim=1)
# context_vector: batch_size, 1, hidden_dim
context_vector = context_vector.unsqueeze(1)
# attend_out (batch_size, 1, dec_embed_dim + hidden_size)
attend_out = torch.cat((context_vector, x), -1)
return attend_out, attention_weights
def forward(self, x, hidden, enc_output):
'''
x: (batch_size, 1)
enc_output: batch_size, max_length, dec_embed_dim
hidden: n_layer, batch_size, hidden_size | lstm: (h_n, c_n)
'''
if (hidden is None) and (self.use_attention is False):
raise Exception( "No use of a decoder with No attention and No Hidden")
batch_sz = x.shape[0]
if hidden is None:
# hidden: n_layers, batch_size, hidden_dim
hid_for_att = torch.zeros((self.dec_layers, batch_sz,
self.dec_hidden_dim )).to(self.device)
elif self.dec_rnn_type == 'lstm':
hid_for_att = hidden[0] # h_n
else:
hid_for_att = hidden
# x (batch_size, 1, dec_embed_dim) -> after embedding
x = self.embedding(x)
if self.use_attention:
# x (batch_size, 1, dec_embed_dim + hidden_size) -> after attention
# aw: (batch_size, max_length, 1)
x, aw = self.attention( x, hid_for_att, enc_output)
else:
x, aw = x, 0
# passing the concatenated vector to the GRU
# output: (batch_size, 1, hidden_size)
# hidden: n_layers, batch_size, hidden_size | if LSTM (h_n, c_n)
output, hidden = self.dec_rnn(x, hidden) if hidden is not None else self.dec_rnn(x)
# output :shp: (batch_size * 1, hidden_size)
output = output.view(-1, output.size(2))
# output :shp: (batch_size * 1, output_dim)
output = self.fc(output)
return output, hidden, aw
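# --- Illustrative sketch of a single decoding step with the attention Decoder
# above. All dimensions are made-up; when attention is used, enc_outstate_dim
# must equal enc_directions * enc_hidden_dim of the paired Encoder. ---
def _demo_decoder_step():
    enc = Encoder(input_dim=60, embed_dim=32, hidden_dim=64, bidirectional=True)
    dec = Decoder(output_dim=70, embed_dim=32, hidden_dim=64,
                  use_attention=True, enc_outstate_dim=2 * 64)
    src = torch.randint(0, 60, (4, 10))
    src_sz = torch.tensor([10, 9, 8, 7])
    enc_output, _ = enc(src, src_sz)                   # (4, 10, 128)
    dec_input = torch.zeros(4, 1, dtype=torch.long)    # e.g. start-token ids
    out, hidden, attn = dec(dec_input, None, enc_output)
    # out: (4, 70) scores over the target vocab for this step
    # hidden: (1, 4, 64); attn: (4, 10, 1) attention weights over source positions
    return out.shape, attn.shape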
class Seq2Seq(nn.Module):
'''
Used to construct seq2seq architecture with encoder decoder objects
'''
def __init__(self, encoder, decoder, pass_enc2dec_hid=False, dropout = 0, device = "cpu"):
super(Seq2Seq, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.device = device
self.pass_enc2dec_hid = pass_enc2dec_hid
if self.pass_enc2dec_hid:
assert decoder.dec_hidden_dim == encoder.enc_hidden_dim, "Hidden Dimension of encoder and decoder must be same, or unset `pass_enc2dec_hid`"
if decoder.use_attention:
assert decoder.enc_outstate_dim == encoder.enc_directions*encoder.enc_hidden_dim,"Set `enc_out_dim` correctly in decoder"
assert self.pass_enc2dec_hid or decoder.use_attention, "No use of a decoder with No attention and No Hidden from Encoder"
def forward(self, src, tgt, src_sz, teacher_forcing_ratio = 0):
'''
src: (batch_size, sequence_len.padded)
tgt: (batch_size, sequence_len.padded)
src_sz: [batch_size, 1] - Unpadded sequence lengths
'''
batch_size = tgt.shape[0]
# enc_output: (batch_size, padded_seq_length, enc_hidden_dim*num_direction)
# enc_hidden: (enc_layers*num_direction, batch_size, hidden_dim)
enc_output, enc_hidden = self.encoder(src, src_sz)
if self.pass_enc2dec_hid:
# dec_hidden: dec_layers, batch_size , dec_hidden_dim
dec_hidden = enc_hidden
else:
# dec_hidden -> Will be initialized to zeros internally
dec_hidden = None
# pred_vecs: (batch_size, output_dim, sequence_sz) -> shape required for CELoss
pred_vecs = torch.zeros(batch_size, self.decoder.output_dim, tgt.size(1)).to(self.device)
# dec_input: (batch_size, 1)
dec_input = tgt[:,0].unsqueeze(1) # initialize to start token
pred_vecs[:,1,0] = 1 # mark the start token (index 1) at t=0 for all batches
for t in range(1, tgt.size(1)):
# dec_hidden: dec_layers, batch_size , dec_hidden_dim
# dec_output: batch_size, output_dim
# dec_input: (batch_size, 1)
dec_output, dec_hidden, _ = self.decoder( dec_input,
dec_hidden,
enc_output, )
pred_vecs[:,:,t] = dec_output
# prediction: batch_size
prediction = torch.argmax(dec_output, dim=1)
# Teacher Forcing
if random.random() < teacher_forcing_ratio:
dec_input = tgt[:, t].unsqueeze(1)
else:
dec_input = prediction.unsqueeze(1)
return pred_vecs #(batch_size, output_dim, sequence_sz)
def inference(self, src, max_tgt_sz=50, debug = 0):
'''
Single input only; no batched inferencing.
src: (sequence_len)
debug: if True will return attention weights also
'''
batch_size = 1
start_tok = src[0]
end_tok = src[-1]
src_sz = torch.tensor([len(src)])
src_ = src.unsqueeze(0)
# enc_output: (batch_size, padded_seq_length, enc_hidden_dim*num_direction)
# enc_hidden: (enc_layers*num_direction, batch_size, hidden_dim)
enc_output, enc_hidden = self.encoder(src_, src_sz)
if self.pass_enc2dec_hid:
# dec_hidden: dec_layers, batch_size , dec_hidden_dim
dec_hidden = enc_hidden
else:
# dec_hidden -> Will be initialized to zeros internally
dec_hidden = None
# pred_arr: (sequence_sz, 1) -> shape required for CELoss
pred_arr = torch.zeros(max_tgt_sz, 1).to(self.device)
if debug: attend_weight_arr = torch.zeros(max_tgt_sz, len(src)).to(self.device)
# dec_input: (batch_size, 1)
dec_input = start_tok.view(1,1) # initialize to start token
pred_arr[0] = start_tok.view(1,1) # initialize to start token
for t in range(max_tgt_sz):
# dec_hidden: dec_layers, batch_size , dec_hidden_dim
# dec_output: batch_size, output_dim
# dec_input: (batch_size, 1)
dec_output, dec_hidden, aw = self.decoder( dec_input,
dec_hidden,
enc_output, )
# prediction :shp: (1,1)
prediction = torch.argmax(dec_output, dim=1)
dec_input = prediction.unsqueeze(1)
pred_arr[t] = prediction
if debug: attend_weight_arr[t] = aw.squeeze(-1)
if torch.eq(prediction, end_tok):
break
if debug: return pred_arr.squeeze(), attend_weight_arr
# pred_arr :shp: (sequence_len)
return pred_arr.squeeze().to(dtype=torch.long)
def active_beam_inference(self, src, beam_width=3, max_tgt_sz=50):
''' Active beam Search based decoding
src: (sequence_len)
'''
def _avg_score(p_tup):
''' Used for Sorting
TODO: Dividing by length of sequence power alpha as hyperparam
'''
return p_tup[0]
batch_size = 1
start_tok = src[0]
end_tok = src[-1]
src_sz = torch.tensor([len(src)])
src_ = src.unsqueeze(0)
# enc_output: (batch_size, padded_seq_length, enc_hidden_dim*num_direction)
# enc_hidden: (enc_layers*num_direction, batch_size, hidden_dim)
enc_output, enc_hidden = self.encoder(src_, src_sz)
if self.pass_enc2dec_hid:
# dec_hidden: dec_layers, batch_size , dec_hidden_dim
init_dec_hidden = enc_hidden
else:
# dec_hidden -> Will be initialized to zeros internally
init_dec_hidden = None
# top_pred[][0] = Σ-log_softmax
# top_pred[][1] = sequence torch.tensor shape: (1)
# top_pred[][2] = dec_hidden
top_pred_list = [ (0, start_tok.unsqueeze(0) , init_dec_hidden) ]
for t in range(max_tgt_sz):
cur_pred_list = []
for p_tup in top_pred_list:
if p_tup[1][-1] == end_tok:
cur_pred_list.append(p_tup)
continue
# dec_hidden: dec_layers, 1, hidden_dim
# dec_output: 1, output_dim
dec_output, dec_hidden, _ = self.decoder( x = p_tup[1][-1].view(1,1), #dec_input: (1,1)
hidden = p_tup[2], enc_output = enc_output)
])
self._pad0048 = v_bytes(size=4)
class _unnamed_29211(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Head = v_uint64()
class ALPC_COMPLETION_LIST_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.u1 = _unnamed_27537()
class WHEA_ERROR_RECORD_SECTION_DESCRIPTOR_FLAGS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Primary = v_uint32()
class TP_CALLBACK_ENVIRON_V3(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Version = v_uint32()
self._pad0008 = v_bytes(size=4)
self.Pool = v_ptr64()
self.CleanupGroup = v_ptr64()
self.CleanupGroupCancelCallback = v_ptr64()
self.RaceDll = v_ptr64()
self.ActivationContext = v_ptr64()
self.FinalizationCallback = v_ptr64()
self.u = _unnamed_18815()
self.CallbackPriority = v_uint32()
self.Size = v_uint32()
self._pad0048 = v_bytes(size=4)
class _unnamed_24051(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.DataInfoOffset = v_uint16()
class MEMORY_ALLOCATION_DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ListEntry = LIST_ENTRY()
self.MemoryType = v_uint32()
self._pad0018 = v_bytes(size=4)
self.BasePage = v_uint64()
self.PageCount = v_uint64()
class MMPTE_TRANSITION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Valid = v_uint64()
class WHEA_ERROR_PACKET_FLAGS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.PreviousError = v_uint32()
class ARM_DBGKD_CONTROL_SET(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Continue = v_uint32()
self.CurrentSymbolStart = v_uint32()
self.CurrentSymbolEnd = v_uint32()
class ALPC_PROCESS_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Lock = EX_PUSH_LOCK()
self.ViewListHead = LIST_ENTRY()
self.PagedPoolQuotaCache = v_uint64()
class DIAGNOSTIC_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CallerType = v_uint32()
self._pad0008 = v_bytes(size=4)
self.Process = v_ptr64()
self.ServiceTag = v_uint32()
self._pad0018 = v_bytes(size=4)
self.ReasonSize = v_uint64()
class OBJECT_HANDLE_INFORMATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.HandleAttributes = v_uint32()
self.GrantedAccess = v_uint32()
class KSPIN_LOCK_QUEUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr64()
self.Lock = v_ptr64()
class HEAP_LOCK(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Lock = _unnamed_23833()
class XSTATE_CONFIGURATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.EnabledFeatures = v_uint64()
self.Size = v_uint32()
self.OptimizedSave = v_uint32()
self.Features = vstruct.VArray([ XSTATE_FEATURE() for i in xrange(64) ])
class PS_CLIENT_SECURITY_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ImpersonationData = v_uint64()
class RTL_AVL_TABLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BalancedRoot = RTL_BALANCED_LINKS()
self.OrderedPointer = v_ptr64()
self.WhichOrderedElement = v_uint32()
self.NumberGenericTableElements = v_uint32()
self.DepthOfTree = v_uint32()
self._pad0038 = v_bytes(size=4)
self.RestartKey = v_ptr64()
self.DeleteCount = v_uint32()
self._pad0048 = v_bytes(size=4)
self.CompareRoutine = v_ptr64()
self.AllocateRoutine = v_ptr64()
self.FreeRoutine = v_ptr64()
self.TableContext = v_ptr64()
class _unnamed_26621(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Generic = _unnamed_27286()
self._pad0010 = v_bytes(size=4)
class _unnamed_27306(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Start = v_uint32()
self.Length = v_uint32()
self.Reserved = v_uint32()
class PNP_ASSIGN_RESOURCES_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IncludeFailedDevices = v_uint32()
self.DeviceCount = v_uint32()
self.DeviceList = vstruct.VArray([ v_ptr64() for i in xrange(1) ])
class _unnamed_27302(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Channel = v_uint32()
self.Port = v_uint32()
self.Reserved1 = v_uint32()
class MAPPED_FILE_SEGMENT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ControlArea = v_ptr64()
self.TotalNumberOfPtes = v_uint32()
self.SegmentFlags = SEGMENT_FLAGS()
self.NumberOfCommittedPages = v_uint64()
self.SizeOfSegment = v_uint64()
self.ExtendInfo = v_ptr64()
self.SegmentLock = EX_PUSH_LOCK()
class _unnamed_26712(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length64 = v_uint32()
self.Alignment64 = v_uint32()
self.MinimumAddress = LARGE_INTEGER()
self.MaximumAddress = LARGE_INTEGER()
class DBGKD_GET_INTERNAL_BREAKPOINT64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakpointAddress = v_uint64()
self.Flags = v_uint32()
self.Calls = v_uint32()
self.MaxCallsPerPeriod = v_uint32()
self.MinInstructions = v_uint32()
self.MaxInstructions = v_uint32()
self.TotalInstructions = v_uint32()
class OWNER_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OwnerThread = v_uint64()
self.IoPriorityBoosted = v_uint32()
self._pad0010 = v_bytes(size=4)
class ETW_BUFFER_HANDLE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TraceBuffer = v_ptr64()
self.BufferFastRef = v_ptr64()
class DEVOBJ_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self._pad0008 = v_bytes(size=4)
self.DeviceObject = v_ptr64()
self.PowerFlags = v_uint32()
self._pad0018 = v_bytes(size=4)
self.Dope = v_ptr64()
self.ExtensionFlags = v_uint32()
self._pad0028 = v_bytes(size=4)
self.DeviceNode = v_ptr64()
self.AttachedTo = v_ptr64()
self.StartIoCount = v_uint32()
self.StartIoKey = v_uint32()
self.StartIoFlags = v_uint32()
self._pad0048 = v_bytes(size=4)
self.Vpb = v_ptr64()
self.DependentList = LIST_ENTRY()
self.ProviderList = LIST_ENTRY()
class HEAP_LOCAL_SEGMENT_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
class ARBITER_ALLOCATION_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Start = v_uint64()
self.End = v_uint64()
self.CurrentMinimum = v_uint64()
self.CurrentMaximum = v_uint64()
self.Entry = v_ptr64()
self.CurrentAlternative = v_ptr64()
self.AlternativeCount = v_uint32()
self._pad0038 = v_bytes(size=4)
self.Alternatives = v_ptr64()
self.Flags = v_uint16()
self.RangeAttributes = v_uint8()
self.RangeAvailableAttributes = v_uint8()
self._pad0048 = v_bytes(size=4)
self.WorkSpace = v_uint64()
class BLOB_TYPE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ResourceId = v_uint32()
self.PoolTag = v_uint32()
self.Flags = v_uint32()
self.CreatedObjects = v_uint32()
self.DeletedObjects = v_uint32()
self._pad0018 = v_bytes(size=4)
self.DeleteProcedure = v_ptr64()
self.DestroyProcedure = v_ptr64()
self.UsualSize = v_uint64()
self.LookasideIndex = v_uint32()
self._pad0038 = v_bytes(size=4)
class DBGKD_SET_INTERNAL_BREAKPOINT64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.BreakpointAddress = v_uint64()
self.Flags = v_uint32()
self._pad0010 = v_bytes(size=4)
class OPEN_PACKET(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Type = v_uint16()
self.Size = v_uint16()
self._pad0008 = v_bytes(size=4)
self.FileObject = v_ptr64()
self.FinalStatus = v_uint32()
self._pad0018 = v_bytes(size=4)
self.Information = v_uint64()
self.ParseCheck = v_uint32()
self._pad0028 = v_bytes(size=4)
self.RelatedFileObject = v_ptr64()
self.OriginalAttributes = v_ptr64()
self.AllocationSize = LARGE_INTEGER()
self.CreateOptions = v_uint32()
self.FileAttributes = v_uint16()
self.ShareAccess = v_uint16()
self.EaBuffer = v_ptr64()
self.EaLength = v_uint32()
self.Options = v_uint32()
self.Disposition = v_uint32()
self._pad0060 = v_bytes(size=4)
self.BasicInformation = v_ptr64()
self.NetworkInformation = v_ptr64()
self.CreateFileType = v_uint32()
self._pad0078 = v_bytes(size=4)
self.MailslotOrPipeParameters = v_ptr64()
self.Override = v_uint8()
self.QueryOnly = v_uint8()
self.DeleteOnly = v_uint8()
self.FullAttributes = v_uint8()
self._pad0088 = v_bytes(size=4)
self.LocalFileObject = v_ptr64()
self.InternalFlags = v_uint32()
self._pad0098 = v_bytes(size=4)
self.DriverCreateContext = IO_DRIVER_CREATE_CONTEXT()
class HANDLE_TABLE_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Object = v_ptr64()
self.GrantedAccess = v_uint32()
self._pad0010 = v_bytes(size=4)
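# --- Illustrative sketch: the helper names below are assumptions about the
# vivisect vstruct API, not guarantees. A vstruct instance can typically be
# sized with len() and populated from raw bytes with vsParse(); the bytes here
# are dummies. ---
def _demo_parse_handle_table_entry():
    hte = HANDLE_TABLE_ENTRY()
    raw = b"\x41" * len(hte)   # len() should report 16: 8-byte pointer + uint32 + 4-byte pad
    hte.vsParse(raw)
    return hte.Object, hte.GrantedAccess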
class HEAP_COUNTERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TotalMemoryReserved = v_uint64()
self.TotalMemoryCommitted = v_uint64()
self.TotalMemoryLargeUCR = v_uint64()
self.TotalSizeInVirtualBlocks = v_uint64()
self.TotalSegments = v_uint32()
self.TotalUCRs = v_uint32()
self.CommittOps = v_uint32()
self.DeCommitOps = v_uint32()
self.LockAcquires = v_uint32()
self.LockCollisions = v_uint32()
self.CommitRate = v_uint32()
self.DecommittRate = v_uint32()
self.CommitFailures = v_uint32()
self.InBlockCommitFailures = v_uint32()
self.CompactHeapCalls = v_uint32()
self.CompactedUCRs = v_uint32()
self.AllocAndFreeOps = v_uint32()
self.InBlockDeccommits = v_uint32()
self.InBlockDeccomitSize = v_uint64()
self.HighWatermarkSize = v_uint64()
self.LastPolledSize = v_uint64()
class WHEA_MEMORY_ERROR_SECTION_VALIDBITS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ErrorStatus = v_uint64()
class BLOB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ResourceList = LIST_ENTRY()
self.u1 = _unnamed_24097()
self.ResourceId = v_uint8()
self.CachedReferences = v_uint16()
self.ReferenceCount = v_uint32()
self.Lock = EX_PUSH_LOCK()
class WORK_QUEUE_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.WorkQueueLinks = LIST_ENTRY()
self.Parameters = _unnamed_23696()
self.Function = v_uint8()
self._pad0020 = v_bytes(size=7)
class PI_BUS_EXTENSION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Flags = v_uint32()
self.NumberCSNs = v_uint8()
self._pad0008 = v_bytes(size=3)
self.ReadDataPort = v_ptr64()
self.DataPortMapped = v_uint8()
self._pad0018 = v_bytes(size=7)
self.AddressPort = v_ptr64()
self.AddrPortMapped = v_uint8()
self._pad0028 = v_bytes(size=7)
self.CommandPort = v_ptr64()
self.CmdPortMapped = v_uint8()
self._pad0034 = v_bytes(size=3)
self.NextSlotNumber = v_uint32()
self.DeviceList = SINGLE_LIST_ENTRY()
self.CardList = SINGLE_LIST_ENTRY()
self.PhysicalBusDevice = v_ptr64()
self.FunctionalBusDevice = v_ptr64()
self.AttachedDevice = v_ptr64()
self.BusNumber = v_uint32()
self.SystemPowerState = v_uint32()
self.DevicePowerState = v_uint32()
self._pad0070 = v_bytes(size=4)
class MAILSLOT_CREATE_PARAMETERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MailslotQuota = v_uint32()
self.MaximumMessageSize = v_uint32()
self.ReadTimeout = LARGE_INTEGER()
self.TimeoutSpecified = v_uint8()
self._pad0018 = v_bytes(size=7)
class FS_FILTER_CALLBACK_DATA(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SizeOfFsFilterCallbackData = v_uint32()
self.Operation = v_uint8()
self.Reserved = v_uint8()
self._pad0008 = v_bytes(size=2)
self.DeviceObject = v_ptr64()
self.FileObject = v_ptr64()
self.Parameters = FS_FILTER_PARAMETERS()
class REQUEST_MAILBOX(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr64()
self.RequestSummary = v_uint64()
self.RequestPacket = KREQUEST_PACKET()
self._pad0040 = v_bytes(size=16)
class PPM_IDLE_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.DomainMembers = KAFFINITY_EX()
self.IdleCheck = v_ptr64()
self.IdleHandler = v_ptr64()
self.HvConfig = v_uint64()
self.Context = v_ptr64()
self.Latency = v_uint32()
self.Power = v_uint32()
self.TimeCheck = v_uint32()
self.StateFlags = v_uint32()
self.PromotePercent = v_uint8()
self.DemotePercent = v_uint8()
self.PromotePercentBase = v_uint8()
self.DemotePercentBase = v_uint8()
self.StateType = v_uint8()
self._pad0060 = v_bytes(size=3)
class XSTATE_CONTEXT(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Mask = v_uint64()
self.Length = v_uint32()
self.Reserved1 = v_uint32()
self.Area = v_ptr64()
self.Buffer = v_ptr64()
class ACCESS_STATE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OperationID = LUID()
self.SecurityEvaluated = v_uint8()
self.GenerateAudit = v_uint8()
self.GenerateOnClose = v_uint8()
self.PrivilegesAllocated = v_uint8()
self.Flags = v_uint32()
self.RemainingDesiredAccess = v_uint32()
self.PreviouslyGrantedAccess = v_uint32()
self.OriginalDesiredAccess = v_uint32()
self._pad0020 = v_bytes(size=4)
self.SubjectSecurityContext = SECURITY_SUBJECT_CONTEXT()
self.SecurityDescriptor = v_ptr64()
self.AuxData = v_ptr64()
self.Privileges = _unnamed_20937()
self.AuditPrivileges = v_uint8()
self._pad0080 = v_bytes(size=3)
self.ObjectName = UNICODE_STRING()
self.ObjectTypeName = UNICODE_STRING()
class DBGKD_SWITCH_PARTITION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Partition = v_uint32()
class TP_CALLBACK_INSTANCE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
class AMD64_DBGKD_CONTROL_SET(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.TraceFlag = v_uint32()
self.Dr7 = v_uint64()
self.CurrentSymbolStart = v_uint64()
self.CurrentSymbolEnd = v_uint64()
class PROC_IDLE_ACCOUNTING(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.StateCount = v_uint32()
self.TotalTransitions = v_uint32()
self.ResetCount = v_uint32()
self._pad0010 = v_bytes(size=4)
self.StartTime = v_uint64()
self.BucketLimits = vstruct.VArray([ v_uint64() for i in xrange(16) ])
self.State = vstruct.VArray([ PROC_IDLE_STATE_ACCOUNTING() for i in xrange(1) ])
class _unnamed_19929(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Depth = v_uint64()
self.HeaderType = v_uint64()
class GDI_TEB_BATCH(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Offset = v_uint32()
self._pad0008 = v_bytes(size=4)
self.HDC = v_uint64()
self.Buffer = vstruct.VArray([ v_uint32() for i in xrange(310) ])
class DBGKD_SET_SPECIAL_CALL32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SpecialCall = v_uint32()
class SYSTEM_POWER_LEVEL(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Enable = v_uint8()
self.Spare = vstruct.VArray([ v_uint8() for i in xrange(3) ])
self.BatteryLevel = v_uint32()
self.PowerPolicy = POWER_ACTION_POLICY()
self.MinSystemState = v_uint32()
class DBGKD_SET_SPECIAL_CALL64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SpecialCall = v_uint64()
class DBGKM_EXCEPTION32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExceptionRecord = EXCEPTION_RECORD32()
self.FirstChance = v_uint32()
class PAGEFAULT_HISTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
class _unnamed_27001(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.AsUCHAR = v_uint8()
class ECP_LIST(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.Flags = v_uint32()
self.EcpList = LIST_ENTRY()
class _unnamed_21379(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.IdType = v_uint32()
class PEB32(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InheritedAddressSpace = v_uint8()
self.ReadImageFileExecOptions = v_uint8()
self.BeingDebugged = v_uint8()
self.BitField = v_uint8()
self.Mutant = v_uint32()
self.ImageBaseAddress = v_uint32()
self.Ldr = v_uint32()
self.ProcessParameters = v_uint32()
self.SubSystemData = v_uint32()
self.ProcessHeap = v_uint32()
self.FastPebLock = v_uint32()
self.AtlThunkSListPtr = v_uint32()
self.IFEOKey = v_uint32()
self.CrossProcessFlags = v_uint32()
self.KernelCallbackTable = v_uint32()
self.SystemReserved = vstruct.VArray([ v_uint32() for i in xrange(1) ])
self.AtlThunkSListPtr32 = v_uint32()
import pytest
from django.db import models
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.test import TestCase
from rest_framework import generics, renderers, serializers, status
from rest_framework.response import Response
from rest_framework.test import APIRequestFactory
from tests.models import (
BasicModel, ForeignKeySource, ForeignKeyTarget, RESTFrameworkModel,
UUIDForeignKeyTarget
)
factory = APIRequestFactory()
# Models
class SlugBasedModel(RESTFrameworkModel):
text = models.CharField(max_length=100)
slug = models.SlugField(max_length=32)
# Model for regression test for #285
class Comment(RESTFrameworkModel):
email = models.EmailField()
content = models.CharField(max_length=200)
created = models.DateTimeField(auto_now_add=True)
# Serializers
class BasicSerializer(serializers.ModelSerializer):
class Meta:
model = BasicModel
fields = '__all__'
class ForeignKeySerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeySource
fields = '__all__'
class SlugSerializer(serializers.ModelSerializer):
slug = serializers.ReadOnlyField()
class Meta:
model = SlugBasedModel
fields = ('text', 'slug')
# Views
class RootView(generics.ListCreateAPIView):
queryset = BasicModel.objects.all()
serializer_class = BasicSerializer
class InstanceView(generics.RetrieveUpdateDestroyAPIView):
queryset = BasicModel.objects.exclude(text='filtered out')
serializer_class = BasicSerializer
class FKInstanceView(generics.RetrieveUpdateDestroyAPIView):
queryset = ForeignKeySource.objects.all()
serializer_class = ForeignKeySerializer
class SlugBasedInstanceView(InstanceView):
"""
A view for a model with a slug field, using the slug for lookups.
"""
queryset = SlugBasedModel.objects.all()
serializer_class = SlugSerializer
lookup_field = 'slug'
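# Illustrative sketch only: how the views above might be wired into a URLconf.
# The paths are assumptions, not taken from the test suite.
def _example_urlpatterns():
    from django.urls import path
    return [
        path('basic/', RootView.as_view()),
        path('basic/<int:pk>/', InstanceView.as_view()),
        path('slug/<slug:slug>/', SlugBasedInstanceView.as_view()),
    ]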
# Tests
class TestRootView(TestCase):
def setUp(self):
"""
Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
self.view = RootView.as_view()
def test_get_root_view(self):
"""
GET requests to ListCreateAPIView should return list of objects.
"""
request = factory.get('/')
with self.assertNumQueries(1):
response = self.view(request).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == self.data
def test_head_root_view(self):
"""
HEAD requests to ListCreateAPIView should return 200.
"""
request = factory.head('/')
with self.assertNumQueries(1):
response = self.view(request).render()
assert response.status_code == status.HTTP_200_OK
def test_post_root_view(self):
"""
POST requests to ListCreateAPIView should create a new object.
"""
data = {'text': 'foobar'}
request = factory.post('/', data, format='json')
with self.assertNumQueries(1):
response = self.view(request).render()
assert response.status_code == status.HTTP_201_CREATED
assert response.data == {'id': 4, 'text': 'foobar'}
created = self.objects.get(id=4)
assert created.text == 'foobar'
def test_put_root_view(self):
"""
PUT requests to ListCreateAPIView should not be allowed
"""
data = {'text': 'foobar'}
request = factory.put('/', data, format='json')
with self.assertNumQueries(0):
response = self.view(request).render()
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
assert response.data == {"detail": 'Method "PUT" not allowed.'}
def test_delete_root_view(self):
"""
DELETE requests to ListCreateAPIView should not be allowed
"""
request = factory.delete('/')
with self.assertNumQueries(0):
response = self.view(request).render()
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
assert response.data == {"detail": 'Method "DELETE" not allowed.'}
def test_post_cannot_set_id(self):
"""
POST requests to create a new object should not be able to set the id.
"""
data = {'id': 999, 'text': 'foobar'}
request = factory.post('/', data, format='json')
with self.assertNumQueries(1):
response = self.view(request).render()
assert response.status_code == status.HTTP_201_CREATED
assert response.data == {'id': 4, 'text': 'foobar'}
created = self.objects.get(id=4)
assert created.text == 'foobar'
def test_post_error_root_view(self):
"""
POST requests to ListCreateAPIView in HTML should include a form error.
"""
data = {'text': 'foobar' * 100}
request = factory.post('/', data, HTTP_ACCEPT='text/html')
response = self.view(request).render()
expected_error = '<span class="help-block">Ensure this field has no more than 100 characters.</span>'
assert expected_error in response.rendered_content.decode()
EXPECTED_QUERIES_FOR_PUT = 2
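# The PUT/PATCH tests below expect two queries: one SELECT to fetch the existing
# instance and one UPDATE to save it (an observation about these tests, not a
# documented guarantee of the framework).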
class TestInstanceView(TestCase):
def setUp(self):
"""
Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz', 'filtered out']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects.exclude(text='filtered out')
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
self.view = InstanceView.as_view()
self.slug_based_view = SlugBasedInstanceView.as_view()
def test_get_instance_view(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should return a single object.
"""
request = factory.get('/1')
with self.assertNumQueries(1):
response = self.view(request, pk=1).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == self.data[0]
def test_post_instance_view(self):
"""
POST requests to RetrieveUpdateDestroyAPIView should not be allowed
"""
data = {'text': 'foobar'}
request = factory.post('/', data, format='json')
with self.assertNumQueries(0):
response = self.view(request).render()
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
assert response.data == {"detail": 'Method "POST" not allowed.'}
def test_put_instance_view(self):
"""
PUT requests to RetrieveUpdateDestroyAPIView should update an object.
"""
data = {'text': 'foobar'}
request = factory.put('/1', data, format='json')
with self.assertNumQueries(EXPECTED_QUERIES_FOR_PUT):
response = self.view(request, pk='1').render()
assert response.status_code == status.HTTP_200_OK
assert dict(response.data) == {'id': 1, 'text': 'foobar'}
updated = self.objects.get(id=1)
assert updated.text == 'foobar'
def test_patch_instance_view(self):
"""
PATCH requests to RetrieveUpdateDestroyAPIView should update an object.
"""
data = {'text': 'foobar'}
request = factory.patch('/1', data, format='json')
with self.assertNumQueries(EXPECTED_QUERIES_FOR_PUT):
response = self.view(request, pk=1).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == {'id': 1, 'text': 'foobar'}
updated = self.objects.get(id=1)
assert updated.text == 'foobar'
def test_delete_instance_view(self):
"""
DELETE requests to RetrieveUpdateDestroyAPIView should delete an object.
"""
request = factory.delete('/1')
with self.assertNumQueries(2):
response = self.view(request, pk=1).render()
assert response.status_code == status.HTTP_204_NO_CONTENT
assert response.content == b''
ids = [obj.id for obj in self.objects.all()]
assert ids == [2, 3]
def test_get_instance_view_incorrect_arg(self):
"""
GET requests with an incorrect pk type should raise 404, not 500.
Regression test for #890.
"""
request = factory.get('/a')
with self.assertNumQueries(0):
response = self.view(request, pk='a').render()
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_put_cannot_set_id(self):
"""
PUT requests should not be able to change the id of an existing object.
"""
data = {'id': 999, 'text': 'foobar'}
request = factory.put('/1', data, format='json')
with self.assertNumQueries(EXPECTED_QUERIES_FOR_PUT):
response = self.view(request, pk=1).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == {'id': 1, 'text': 'foobar'}
updated = self.objects.get(id=1)
assert updated.text == 'foobar'
def test_put_to_deleted_instance(self):
"""
PUT requests to RetrieveUpdateDestroyAPIView should return 404 if
an object does not currently exist.
"""
self.objects.get(id=1).delete()
data = {'text': 'foobar'}
request = factory.put('/1', data, format='json')
with self.assertNumQueries(1):
response = self.view(request, pk=1).render()
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_put_to_filtered_out_instance(self):
"""
PUT requests to the URL of an instance that is filtered out should not be
able to create new objects.
"""
data = {'text': 'foo'}
filtered_out_pk = BasicModel.objects.filter(text='filtered out')[0].pk
request = factory.put('/{}'.format(filtered_out_pk), data, format='json')
response = self.view(request, pk=filtered_out_pk).render()
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_patch_cannot_create_an_object(self):
"""
PATCH requests should not be able to create objects.
"""
data = {'text': 'foobar'}
request = factory.patch('/999', data, format='json')
with self.assertNumQueries(1):
response = self.view(request, pk=999).render()
assert response.status_code == status.HTTP_404_NOT_FOUND
assert not self.objects.filter(id=999).exists()
def test_put_error_instance_view(self):
"""
Incorrect PUT requests in HTML should include a form error.
"""
data = {'text': 'foobar' * 100}
request = factory.put('/', data, HTTP_ACCEPT='text/html')
response = self.view(request, pk=1).render()
expected_error = '<span class="help-block">Ensure this field has no more than 100 characters.</span>'
assert expected_error in response.rendered_content.decode()
class TestFKInstanceView(TestCase):
def setUp(self):
"""
Create 3 ForeignKeySource instances, each with its own ForeignKeyTarget.
"""
items = ['foo', 'bar', 'baz']
for item in items:
t = ForeignKeyTarget(name=item)
t.save()
ForeignKeySource(name='source_' + item, target=t).save()
self.objects = ForeignKeySource.objects
self.data = [
{'id': obj.id, 'name': obj.name}
for obj in self.objects.all()
]
self.view = FKInstanceView.as_view()
class TestOverriddenGetObject(TestCase):
"""
Test cases for a RetrieveUpdateDestroyAPIView that does NOT use the
queryset/model mechanism but instead overrides get_object()
"""
def setUp(self):
"""
Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'id': obj.id, 'text': obj.text}
for obj in self.objects.all()
]
class OverriddenGetObjectView(generics.RetrieveUpdateDestroyAPIView):
"""
Example detail view for override of get_object().
"""
serializer_class = BasicSerializer
def get_object(self):
pk = int(self.kwargs['pk'])
return get_object_or_404(BasicModel.objects.all(), id=pk)
self.view = OverriddenGetObjectView.as_view()
def test_overridden_get_object_view(self):
"""
GET requests to RetrieveUpdateDestroyAPIView should return a single object.
"""
request = factory.get('/1')
with self.assertNumQueries(1):
response = self.view(request, pk=1).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == self.data[0]
# Regression test for #285
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
exclude = ('created',)
class CommentView(generics.ListCreateAPIView):
serializer_class = CommentSerializer
model = Comment
class TestCreateModelWithAutoNowAddField(TestCase):
def setUp(self):
self.objects = Comment.objects
self.view = CommentView.as_view()
def test_create_model_with_auto_now_add_field(self):
"""
Regression test for #285
https://github.com/encode/django-rest-framework/issues/285
"""
data = {'email': 'foobar@example.com', 'content': 'foobar'}
request = factory.post('/', data, format='json')
response = self.view(request).render()
assert response.status_code == status.HTTP_201_CREATED
created = self.objects.get(id=1)
assert created.content == 'foobar'
# Test for particularly ugly regression with m2m in browsable API
class ClassB(models.Model):
name = models.CharField(max_length=255)
class ClassA(models.Model):
name = models.CharField(max_length=255)
children = models.ManyToManyField(ClassB, blank=True)  # null has no effect on ManyToManyField, so it is omitted
class ClassASerializer(serializers.ModelSerializer):
children = serializers.PrimaryKeyRelatedField(
many=True, queryset=ClassB.objects.all()
)
class Meta:
model = ClassA
fields = '__all__'
class ExampleView(generics.ListCreateAPIView):
serializer_class = ClassASerializer
queryset = ClassA.objects.all()
class TestM2MBrowsableAPI(TestCase):
def test_m2m_in_browsable_api(self):
"""
Test for particularly ugly regression with m2m in browsable API
"""
request = factory.get('/', HTTP_ACCEPT='text/html')
view = ExampleView.as_view()
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
class InclusiveFilterBackend:
def filter_queryset(self, request, queryset, view):
return queryset.filter(text='foo')
class ExclusiveFilterBackend:
def filter_queryset(self, request, queryset, view):
return queryset.filter(text='other')
class TwoFieldModel(models.Model):
field_a = models.CharField(max_length=100)
field_b = models.CharField(max_length=100)
class DynamicSerializerView(generics.ListCreateAPIView):
queryset = TwoFieldModel.objects.all()
renderer_classes = (renderers.BrowsableAPIRenderer, renderers.JSONRenderer)
def get_serializer_class(self):
if self.request.method == 'POST':
class DynamicSerializer(serializers.ModelSerializer):
class Meta:
model = TwoFieldModel
fields = ('field_b',)
else:
class DynamicSerializer(serializers.ModelSerializer):
class Meta:
model = TwoFieldModel
fields = '__all__'
return DynamicSerializer
class TestFilterBackendAppliedToViews(TestCase):
def setUp(self):
"""
a contribution, assessment, review is in a certain stage
in the workflow, its record may be closed to others than the owner, and
after finalization, some fields may be open to authenticated users or
the public.
This method determines whether the record is readable by the current user.
If the record is not part of the workflow, `None` is returned, and
the normal permission rules apply.
!!! note
It also depends on the current user.
Power users will not be prevented from reading records because of
workflow conditions.
Here are the rules:
#### Assessment, Criteria Entry
Not submitted and not in revision
: authors and editors only
Submitted, review not yet complete, or negative outcome
: authors, editors, reviewers, national coordinator only
Review with positive outcome
: public
In revision, or review with a negative outcome
: authors, editors, reviewers, national coordinator only
#### Review, Review Entry
Review has no decision and there is no final decision
: authors, editors, the other reviewer
Review in question has a decision, but still no final positive decision
: authors/editors, other reviewer, authors/editors of the assessment,
national coordinator
There is a positive final decision
: public
!!! caution "The influence of selection is nihil"
Whether a contribution is selected or not has no influence on the
readability of the assessment and review.
!!! caution "The influence on the contribution records is nihil"
Whether a contribution is readable does not depend on the
workflow, only on the normal rules.
Parameters
----------
recordObj: object
The record in question (from which the table and the kind
may be inferred). It should be the record that contains this
WorkflowItem object as its `wfitem` attribute.
field: string, optional `None`
If None, we check for the readability of the record as a whole.
Otherwise, we check for the readability of this field in the record.
Returns
-------
boolean | `None`
"""
isSuperuser = self.isSuperuser
if isSuperuser:
return None
table = recordObj.table
if table not in SENSITIVE_TABLES:
return None
kind = recordObj.kind
perm = recordObj.perm
uid = self.uid
(stage,) = self.info(table, N.stage, kind=kind)
if table in {N.assessment, N.criteriaEntry}:
(r2Stage,) = self.info(N.review, N.stage, kind=N.final)
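# Summary of the conditional below (comment added for readability):
#   final review stage == reviewAccept                   -> readable by everyone
#   stage in {submitted, incompleteRevised,
#             completeRevised, submittedRevised}          -> readable iff perm[isOur]
#   any other stage (not submitted / not being revised)   -> readable iff perm[isEdit]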
return (
True
if r2Stage == N.reviewAccept
else perm[N.isOur]
if stage
in {
N.submitted,
N.incompleteRevised,
N.completeRevised,
N.submittedRevised,
}
else perm[N.isEdit]
)
if table in {N.review, N.reviewEntry}:
(creators,) = self.info(N.assessment, N.creators)
(r2Stage,) = self.info(N.review, N.stage, kind=N.final)
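# Summary of the conditional below (comment added for readability):
#   final review stage == reviewAccept                    -> readable by everyone
#   this review has an advice/decision, or the final
#   review ended in revise/reject                          -> assessment creators or perm[isOur]
#   otherwise                                              -> perm[isReviewer] or perm[isEdit]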
result = (
True
if r2Stage == N.reviewAccept
else uid in creators or perm[N.isOur]
if stage
in {
N.reviewAdviseRevise,
N.reviewAdviseAccept,
N.reviewAdviseReject,
N.reviewRevise,
N.reviewReject,
}
or r2Stage in {N.reviewRevise, N.reviewReject}
else perm[N.isReviewer] or perm[N.isEdit]
)
return result
return None
def checkFixed(self, recordObj, field=None):
"""Whether a record or field is fixed because of workflow.
When a contribution, assessment, review is in a certain stage
in the workflow, its record or some fields in its record may be
fixed, either temporarily or permanently.
This method checks whether a record or field is currently fixed,
i.e. whether editing is currently blocked.
!!! note
It might also depend on the current user.
!!! caution
Here is a case where the sysadmin and the root are less powerful
than the office users: only the office users can assign reviewers,
i.e. only they can update `reviewerE` and `reviewerF` in assessment records.
Parameters
----------
recordObj: object
The record in question (from which the table and the kind
may be inferred). It should be the record that contains this
WorkflowItem object as its `wfitem` attribute.
field: string, optional `None`
If None, we check for the fixity of the record as a whole.
Otherwise, we check for the fixity of this field in the record.
Returns
-------
boolean
"""
auth = self.auth
table = recordObj.table
kind = recordObj.kind
(frozen, done, locked) = self.info(table, N.frozen, N.done, N.locked, kind=kind)
if field is None:
return frozen or done or locked
if frozen or done:
return True
if not locked:
return False
isOffice = auth.officeuser()
if isOffice and table == N.assessment:
return field not in {N.reviewerE, N.reviewerF}
return True
def permission(self, task, kind=None):
"""Checks whether a workflow task is permitted.
Note that the tasks are listed per kind of record they apply to:
contrib, assessment, review.
They are typically triggered by big workflow buttons on the interface.
When the request to execute such a task reaches the server, it will
check whether the current user is allowed to execute this task
on the records in question.
!!! hint
See above for explanation of the properties of the tasks.
!!! note
If you try to run a task on a kind of record that it is not
designed for, it will be detected and no permission will be given.
!!! note
Some tasks are designed to set a field to a value.
If that field already has that value, the task will not be permitted.
This already rules out a lot of things and relieves the burden of
prohibiting nonsensical tasks.
It may be that the task is only permitted for some limited time from now on.
Then a timedelta object with the amount of time left is returned.
More precisely, the workflow configuration table (yaml/workflow.yaml)
may specify a set of delays for a set of user roles.
* `all` specifies the default for users
whose role has not got a corresponding delay
* `coord` is national coordinator of the relevant country
* `office` is any office user
* `super` is any super user, i.e. `system` or `root`
The value specified for each of these roles is either an integer
(the number of hours of the delay), or `false` (no delay), or `true`
(infinite delay).
Parameters
----------
task: string
The name of the task to check. The kind of record it acts on
(contrib, assessment, or review) is taken from the task definition
itself, so it does not have to be passed in separately.
kind: string {`expert`, `final`}, optional `None`
Only if we want review attributes
Returns
-------
boolean | timedelta | string
"""
db = self.db
auth = self.auth
uid = self.uid
if task not in TASKS:
return False
taskInfo = TASKS[task]
table = G(taskInfo, N.table)
if uid is None or table not in USER_TABLES:
return False
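# Added note: the field a task manipulates depends on the kind of record:
#   contrib -> selected, assessment -> submitted, review -> decision.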
taskField = (
N.selected
if table == N.contrib
else N.submitted
if table == N.assessment
else N.decision
if table == N.review
else None
)
myKind = self.myKind
(
locked,
done,
frozen,
mayAdd,
stage,
stageDate,
creators,
countryId,
taskValue,
) = self.info(
table,
N.locked,
N.done,
N.frozen,
N.mayAdd,
N.stage,
N.stageDate,
N.creators,
N.country,
taskField,
kind=kind,
)
operator = G(taskInfo, N.operator)
value = G(taskInfo, N.value)
if operator == N.set:
if taskField == N.decision:
value = G(db.decisionInv, value)
(contribId,) = self.info(N.contrib, N._id)
isOwn = creators and uid in creators
isCoord = countryId and auth.coordinator(countryId=countryId)
isSuper = auth.superuser()
isOffice = auth.officeuser()
isSysadmin = auth.sysadmin()
decisionDelay = G(taskInfo, N.delay, False)
if decisionDelay:
if type(decisionDelay) is int:
decisionDelay = timedelta(hours=decisionDelay)
elif type(decisionDelay) is dict:
defaultDecisionDelay = G(decisionDelay, N.all, False)
decisionDelay = (
G(decisionDelay, N.coord, defaultDecisionDelay)
if isCoord
else G(decisionDelay, N.sysadmin, defaultDecisionDelay)
if isSysadmin
else G(decisionDelay, N.office, defaultDecisionDelay)
if isOffice
else defaultDecisionDelay
)
if type(decisionDelay) is int:
decisionDelay = timedelta(hours=decisionDelay)
elif type(decisionDelay) is not bool:
decisionDelay = False
justNow = now()
remaining = False
if decisionDelay and stageDate:
if type(decisionDelay) is bool:
remaining = True
else:
remaining = stageDate + decisionDelay - justNow
if remaining <= timedelta(hours=0):
remaining = False
forbidden = frozen or done
if forbidden:
if (
task == N.unselectContrib
and table == N.contrib
):
if remaining is True:
return "as intervention"
if remaining:
return remaining
if not remaining:
return False
if table == N.contrib:
if not isOwn and not isCoord and not isSuper:
return False
if task == N.startAssessment:
return not forbidden and isOwn and mayAdd
if value == taskValue:
return False
if not isCoord:
return False
answer = not frozen or remaining
if task == N.selectContrib:
return stage !=
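# --- Editor's illustrative sketch (not part of the original module) ---
# The docstring of `permission` above describes delay specifications in
# yaml/workflow.yaml: per role key (all, coord, office, sysadmin) either an
# integer number of hours, `false` (no delay) or `true` (unlimited delay).
# The standalone sketch below mirrors that resolution logic with hypothetical
# names; the real code works on the `decisionDelay` value inside `permission`.
from datetime import datetime, timedelta


def resolve_delay(spec, is_coord=False, is_sysadmin=False, is_office=False):
    """Turn a delay spec into False (no delay), True (unlimited) or a timedelta."""
    if isinstance(spec, dict):
        default = spec.get("all", False)
        if is_coord:
            spec = spec.get("coord", default)
        elif is_sysadmin:
            spec = spec.get("sysadmin", default)
        elif is_office:
            spec = spec.get("office", default)
        else:
            spec = default
    if isinstance(spec, bool):  # check bool before int: True/False are ints too
        return spec
    if isinstance(spec, int):
        return timedelta(hours=spec)
    return False


def remaining_time(spec, stage_date, **roles):
    """Time left to run the task: a timedelta, True (no limit) or False (closed)."""
    delay = resolve_delay(spec, **roles)
    if isinstance(delay, bool):
        return delay
    if stage_date is None:
        return False
    left = stage_date + delay - datetime.utcnow()
    return left if left > timedelta(0) else False


# Example: coordinators get 96 hours after the stage date, nobody else may act.
# remaining_time({"all": False, "coord": 96}, some_stage_date, is_coord=True)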
def diff_halfMatchI(longtext, shorttext, i):
"""Does a substring of shorttext exist within longtext such that the
substring is at least half the length of longtext?
Closure, but does not reference any external variables.
Args:
longtext: Longer string.
shorttext: Shorter string.
i: Start index of quarter length substring within longtext.
Returns:
Five element Array, containing the prefix of longtext, the suffix of
longtext, the prefix of shorttext, the suffix of shorttext and the
common middle. Or None if there was no match.
"""
seed = longtext[i:i + len(longtext) // 4]
best_common = ''
j = shorttext.find(seed)
while j != -1:
prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:])
suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j])
if len(best_common) < suffixLength + prefixLength:
best_common = (shorttext[j - suffixLength:j] +
shorttext[j:j + prefixLength])
best_longtext_a = longtext[:i - suffixLength]
best_longtext_b = longtext[i + prefixLength:]
best_shorttext_a = shorttext[:j - suffixLength]
best_shorttext_b = shorttext[j + prefixLength:]
j = shorttext.find(seed, j + 1)
if len(best_common) * 2 >= len(longtext):
return (best_longtext_a, best_longtext_b,
best_shorttext_a, best_shorttext_b, best_common)
else:
return None
# First check if the second quarter is the seed for a half-match.
hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) // 4)
# Check again based on the third quarter.
hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) // 2)
if not hm1 and not hm2:
return None
elif not hm2:
hm = hm1
elif not hm1:
hm = hm2
else:
# Both matched. Select the longest.
if len(hm1[4]) > len(hm2[4]):
hm = hm1
else:
hm = hm2
# A half-match was found, sort out the return data.
if len(text1) > len(text2):
(text1_a, text1_b, text2_a, text2_b, mid_common) = hm
else:
(text2_a, text2_b, text1_a, text1_b, mid_common) = hm
return (text1_a, text1_b, text2_a, text2_b, mid_common)
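# Worked example (added; values mirror the library's own test suite):
#
#     dmp = diff_match_patch()
#     dmp.Diff_Timeout = 1   # the half-match shortcut is only used with a timeout
#     dmp.diff_halfMatch('1234567890', 'a345678z')
#     # -> ('12', '90', 'a', 'z', '345678')
#
# i.e. prefix and suffix of text1, prefix and suffix of text2, and the common
# middle '345678', which is at least half as long as the longer text.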
def diff_cleanupSemantic(self, diffs):
"""Reduce the number of edits by eliminating semantically trivial
equalities.
Args:
diffs: Array of diff tuples.
"""
changes = False
equalities = [] # Stack of indices where equalities are found.
lastequality = None # Always equal to diffs[equalities[-1]][1]
pointer = 0 # Index of current position.
# Number of chars that changed prior to the equality.
length_insertions1, length_deletions1 = 0, 0
# Number of chars that changed after the equality.
length_insertions2, length_deletions2 = 0, 0
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found.
equalities.append(pointer)
length_insertions1, length_insertions2 = length_insertions2, 0
length_deletions1, length_deletions2 = length_deletions2, 0
lastequality = diffs[pointer][1]
else: # An insertion or deletion.
if diffs[pointer][0] == self.DIFF_INSERT:
length_insertions2 += len(diffs[pointer][1])
else:
length_deletions2 += len(diffs[pointer][1])
# Eliminate an equality that is smaller or equal to the edits on both
# sides of it.
if (lastequality and (len(lastequality) <=
max(length_insertions1, length_deletions1)) and
(len(lastequality) <= max(length_insertions2, length_deletions2))):
# Duplicate record.
diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality))
# Change second copy to insert.
diffs[equalities[-1] + 1] = (self.DIFF_INSERT,
diffs[equalities[-1] + 1][1])
# Throw away the equality we just deleted.
equalities.pop()
# Throw away the previous equality (it needs to be reevaluated).
if len(equalities):
equalities.pop()
if len(equalities):
pointer = equalities[-1]
else:
pointer = -1
# Reset the counters.
length_insertions1, length_deletions1 = 0, 0
length_insertions2, length_deletions2 = 0, 0
lastequality = None
changes = True
pointer += 1
# Normalize the diff.
if changes:
self.diff_cleanupMerge(diffs)
self.diff_cleanupSemanticLossless(diffs)
# Find any overlaps between deletions and insertions.
# e.g: <del>abcxxx</del><ins>xxxdef</ins>
# -> <del>abc</del>xxx<ins>def</ins>
# e.g: <del>xxxabc</del><ins>defxxx</ins>
# -> <ins>def</ins>xxx<del>abc</del>
# Only extract an overlap if it is as big as the edit ahead or behind it.
pointer = 1
while pointer < len(diffs):
if (diffs[pointer - 1][0] == self.DIFF_DELETE and
diffs[pointer][0] == self.DIFF_INSERT):
deletion = diffs[pointer - 1][1]
insertion = diffs[pointer][1]
overlap_length1 = self.diff_commonOverlap(deletion, insertion)
overlap_length2 = self.diff_commonOverlap(insertion, deletion)
if overlap_length1 >= overlap_length2:
if (overlap_length1 >= len(deletion) / 2.0 or
overlap_length1 >= len(insertion) / 2.0):
# Overlap found. Insert an equality and trim the surrounding edits.
diffs.insert(pointer, (self.DIFF_EQUAL,
insertion[:overlap_length1]))
diffs[pointer - 1] = (self.DIFF_DELETE,
deletion[:len(deletion) - overlap_length1])
diffs[pointer + 1] = (self.DIFF_INSERT,
insertion[overlap_length1:])
pointer += 1
else:
if (overlap_length2 >= len(deletion) / 2.0 or
overlap_length2 >= len(insertion) / 2.0):
# Reverse overlap found.
# Insert an equality and swap and trim the surrounding edits.
diffs.insert(pointer, (self.DIFF_EQUAL, deletion[:overlap_length2]))
diffs[pointer - 1] = (self.DIFF_INSERT,
insertion[:len(insertion) - overlap_length2])
diffs[pointer + 1] = (self.DIFF_DELETE, deletion[overlap_length2:])
pointer += 1
pointer += 1
pointer += 1
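# Worked example of the overlap elimination above (added; mirrors the
# library's test suite):
#
#     diffs = [(dmp.DIFF_DELETE, 'abcxxx'), (dmp.DIFF_INSERT, 'xxxdef')]
#     dmp.diff_cleanupSemantic(diffs)
#     # diffs is now [(dmp.DIFF_DELETE, 'abc'),
#     #               (dmp.DIFF_EQUAL, 'xxx'),
#     #               (dmp.DIFF_INSERT, 'def')]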
def diff_cleanupSemanticLossless(self, diffs):
"""Look for single edits surrounded on both sides by equalities
which can be shifted sideways to align the edit to a word boundary.
e.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came.
Args:
diffs: Array of diff tuples.
"""
def diff_cleanupSemanticScore(one, two):
"""Given two strings, compute a score representing whether the
internal boundary falls on logical boundaries.
Scores range from 6 (best) to 0 (worst).
Closure, but does not reference any external variables.
Args:
one: First string.
two: Second string.
Returns:
The score.
"""
if not one or not two:
# Edges are the best.
return 6
# Each port of this function behaves slightly differently due to
# subtle differences in each language's definition of things like
# 'whitespace'. Since this function's purpose is largely cosmetic,
# the choice has been made to use each language's native features
# rather than force total conformity.
char1 = one[-1]
char2 = two[0]
nonAlphaNumeric1 = not char1.isalnum()
nonAlphaNumeric2 = not char2.isalnum()
whitespace1 = nonAlphaNumeric1 and char1.isspace()
whitespace2 = nonAlphaNumeric2 and char2.isspace()
lineBreak1 = whitespace1 and (char1 == "\r" or char1 == "\n")
lineBreak2 = whitespace2 and (char2 == "\r" or char2 == "\n")
blankLine1 = lineBreak1 and self.BLANKLINEEND.search(one)
blankLine2 = lineBreak2 and self.BLANKLINESTART.match(two)
if blankLine1 or blankLine2:
# Five points for blank lines.
return 5
elif lineBreak1 or lineBreak2:
# Four points for line breaks.
return 4
elif nonAlphaNumeric1 and not whitespace1 and whitespace2:
# Three points for end of sentences.
return 3
elif whitespace1 or whitespace2:
# Two points for whitespace.
return 2
elif nonAlphaNumeric1 or nonAlphaNumeric2:
# One point for non-alphanumeric.
return 1
return 0
pointer = 1
# Intentionally ignore the first and last element (don't need checking).
while pointer < len(diffs) - 1:
if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
diffs[pointer + 1][0] == self.DIFF_EQUAL):
# This is a single edit surrounded by equalities.
equality1 = diffs[pointer - 1][1]
edit = diffs[pointer][1]
equality2 = diffs[pointer + 1][1]
# First, shift the edit as far left as possible.
commonOffset = self.diff_commonSuffix(equality1, edit)
if commonOffset:
commonString = edit[-commonOffset:]
equality1 = equality1[:-commonOffset]
edit = commonString + edit[:-commonOffset]
equality2 = commonString + equality2
# Second, step character by character right, looking for the best fit.
bestEquality1 = equality1
bestEdit = edit
bestEquality2 = equality2
bestScore = (diff_cleanupSemanticScore(equality1, edit) +
diff_cleanupSemanticScore(edit, equality2))
while edit and equality2 and edit[0] == equality2[0]:
equality1 += edit[0]
edit = edit[1:] + equality2[0]
equality2 = equality2[1:]
score = (diff_cleanupSemanticScore(equality1, edit) +
diff_cleanupSemanticScore(edit, equality2))
# The >= encourages trailing rather than leading whitespace on edits.
if score >= bestScore:
bestScore = score
bestEquality1 = equality1
bestEdit = edit
bestEquality2 = equality2
if diffs[pointer - 1][1] != bestEquality1:
# We have an improvement, save it back to the diff.
if bestEquality1:
diffs[pointer - 1] = (diffs[pointer - 1][0], bestEquality1)
else:
del diffs[pointer - 1]
pointer -= 1
diffs[pointer] = (diffs[pointer][0], bestEdit)
if bestEquality2:
diffs[pointer + 1] = (diffs[pointer + 1][0], bestEquality2)
else:
del diffs[pointer + 1]
pointer -= 1
pointer += 1
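# Worked example of the word-boundary shift above (added; mirrors the
# library's test suite):
#
#     diffs = [(dmp.DIFF_EQUAL, 'The c'), (dmp.DIFF_INSERT, 'at c'),
#              (dmp.DIFF_EQUAL, 'ame.')]
#     dmp.diff_cleanupSemanticLossless(diffs)
#     # diffs is now [(dmp.DIFF_EQUAL, 'The '),
#     #               (dmp.DIFF_INSERT, 'cat '),
#     #               (dmp.DIFF_EQUAL, 'came.')]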
# Define some regex patterns for matching boundaries.
BLANKLINEEND = re.compile(r"\n\r?\n$")
BLANKLINESTART = re.compile(r"^\r?\n\r?\n")
def diff_cleanupEfficiency(self, diffs):
"""Reduce the number of edits by eliminating operationally trivial
equalities.
Args:
diffs: Array of diff tuples.
"""
changes = False
equalities = [] # Stack of indices where equalities are found.
lastequality = None # Always equal to diffs[equalities[-1]][1]
pointer = 0 # Index of current position.
pre_ins = False # Is there an insertion operation before the last equality.
pre_del = False # Is there a deletion operation before the last equality.
post_ins = False # Is there an insertion operation after the last equality.
post_del = False # Is there a deletion operation after the last equality.