{
"source": "Jenso/ProjectY",
"score": 2
}
#### File: pinry/loadimages/views.py
```python
from django.views.generic import View, ListView, CreateView, TemplateView, DetailView
from xml.etree.ElementTree import parse  # stdlib ElementTree parser
import os
import sys

from pinry.pins.models import Pin, Category
from pinry.loadimages.models import FailedSKUs, Feeds

import celery


@celery.task
def loadfeeds():
    for feed in Feeds.objects.all():
        tree = parse("pinry/loadimages/feedfiles/" + feed.filename)
        elem = tree.getroot()
        # should be dynamically set
        feed = Feeds.objects.get(id=1)
        i = 0
        for el in elem:
            try:
                # don't add products which have already been added
                sku = el.find("SKU").text
                pin_object = Pin.objects.filter(sku=sku, store=feed)
                # product already exists
                if pin_object:
                    continue
                # if the product has no category -> mark it as failed
                category = el.find("Category").text
                if not category:
                    fail = FailedSKUs(feed=feed, sku=sku)
                    fail.save()
                    continue
                category_obj, is_new_obj = Category.objects.get_or_create(name=category)
                new_pin = Pin(url=el.find("ImageUrl").text,
                              tracking_url=el.find("TrackingUrl").text,
                              price=el.find("Price").text,
                              product_url=el.find("ProductUrl").text,
                              submitter_id=1,
                              category=category_obj,
                              real_description=el.find("Description").text,
                              name=el.find("Name").text,
                              brand=el.find("Brand").text,
                              sku=sku,
                              store=feed)
                new_pin.save()
            except Exception:
                fail = FailedSKUs(feed=feed, sku=sku)
                fail.save()


class LoadImagesView(TemplateView):
    """
    Renders the load page and triggers the asynchronous feed import via Celery.
    """
    template_name = "load.html"

    def get_context_data(self, **kwargs):
        context = super(LoadImagesView, self).get_context_data(**kwargs)
        loadfeeds.delay()
        return context
```
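`loadfeeds` looks up a fixed set of child tags on each feed element, so the feed files are assumed to follow a flat product schema. A minimal sketch of such an element, with invented tag values and an invented root/element naming; only the tag names read by `loadfeeds()` are taken from the code above:
```python
from xml.etree.ElementTree import fromstring

# Hypothetical feed snippet; values and the <Products>/<Product> wrapper are made up.
SAMPLE_FEED_XML = """
<Products>
  <Product>
    <SKU>ABC-123</SKU>
    <Category>Shoes</Category>
    <ImageUrl>http://example.com/img.jpg</ImageUrl>
    <TrackingUrl>http://example.com/track</TrackingUrl>
    <Price>49.95</Price>
    <ProductUrl>http://example.com/product</ProductUrl>
    <Description>A sample product.</Description>
    <Name>Sample Shoe</Name>
    <Brand>ExampleBrand</Brand>
  </Product>
</Products>
"""

root = fromstring(SAMPLE_FEED_XML)
for el in root:
    print(el.find("SKU").text, el.find("Category").text, el.find("Price").text)
```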
{
"source": "jensparky83/reliability",
"score": 3
}
#### File: reliability/reliability/Other_functions.py
```python
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.lines import Line2D
from mplcursors import cursor
import warnings
from reliability.Distributions import Weibull_Distribution, Normal_Distribution, Lognormal_Distribution, Exponential_Distribution, Gamma_Distribution, Beta_Distribution
from reliability.Fitters import Fit_Everything
class similar_distributions:
'''
similar_distributions
This is a tool to find similar distributions when given an input distribution.
It is useful to see how similar one distribution is to another. For example, you may look at a Weibull distribution and think it looks like a Normal distribution.
Using this tool you can determine the parameters of the Normal distribution that most closely matches your Weibull distribution.
Inputs:
distribution - a distribution object created using the reliability.Distributions module
include_location_shifted - True/False. Default is True. When set to True it will include Weibull_3P, Lognormal_3P, Gamma_3P, Expon_2P
show_plot - True/False. Default is True
print_results - True/False. Default is True
number_of_distributions_to_show - the number of similar distributions to show. Default is 3. If the number specified exceeds the number available (typically 8), then the number specified will automatically be reduced.
Outputs:
results - an array of distribution objects ranked in order of best fit.
most_similar_distribution - a distribution object. This is the first item from results.
Example usage:
from reliability.Distributions import Weibull_Distribution
from reliability.Other_functions import similar_distributions
dist = Weibull_Distribution(alpha=50,beta=3.3)
similar_distributions(distribution=dist)
'''
def __init__(self, distribution, include_location_shifted=True, show_plot=True, print_results=True, number_of_distributions_to_show=3):
# ensure the input is a distribution object
if type(distribution) not in [Weibull_Distribution, Normal_Distribution, Lognormal_Distribution, Exponential_Distribution, Gamma_Distribution, Beta_Distribution]:
raise ValueError('distribution must be a probability distribution object from the reliability.Distributions module. First define the distribution using Reliability.Distributions.___')
# sample the CDF from 0.001 to 0.999. These samples will be used to fit all other distributions.
RVS = distribution.quantile(np.linspace(0.001, 0.999, 698)) # 698 samples is the ideal number for the points to align. Evidenced using plot_points.
# filter out negative values
RVS_filtered = []
negative_values_error = False
for item in RVS:
if item > 0:
RVS_filtered.append(item)
else:
negative_values_error = True
if negative_values_error is True:
print('WARNING: The input distribution has non-negligible area for x<0. Samples from this region have been discarded to enable other distributions to be fitted.')
fitted_results = Fit_Everything(failures=RVS_filtered, print_results=False, show_probability_plot=False, show_histogram_plot=False, show_PP_plot=False) # fit all distributions to the filtered samples
ranked_distributions = list(fitted_results.results.index.values)
ranked_distributions.remove(distribution.name2) # removes the fitted version of the original distribution
ranked_distributions_objects = []
ranked_distributions_labels = []
sigfig = 2
for dist_name in ranked_distributions:
if dist_name == 'Weibull_2P':
ranked_distributions_objects.append(Weibull_Distribution(alpha=fitted_results.Weibull_2P_alpha, beta=fitted_results.Weibull_2P_beta))
ranked_distributions_labels.append(str('Weibull_2P (α=' + str(round(fitted_results.Weibull_2P_alpha, sigfig)) + ',β=' + str(round(fitted_results.Weibull_2P_beta, sigfig)) + ')'))
elif dist_name == 'Gamma_2P':
ranked_distributions_objects.append(Gamma_Distribution(alpha=fitted_results.Gamma_2P_alpha, beta=fitted_results.Gamma_2P_beta))
ranked_distributions_labels.append(str('Gamma_2P (α=' + str(round(fitted_results.Gamma_2P_alpha, sigfig)) + ',β=' + str(round(fitted_results.Gamma_2P_beta, sigfig)) + ')'))
elif dist_name == 'Normal_2P':
ranked_distributions_objects.append(Normal_Distribution(mu=fitted_results.Normal_2P_mu, sigma=fitted_results.Normal_2P_sigma))
ranked_distributions_labels.append(str('Normal_2P (μ=' + str(round(fitted_results.Normal_2P_mu, sigfig)) + ',σ=' + str(round(fitted_results.Normal_2P_sigma, sigfig)) + ')'))
elif dist_name == 'Lognormal_2P':
ranked_distributions_objects.append(Lognormal_Distribution(mu=fitted_results.Lognormal_2P_mu, sigma=fitted_results.Lognormal_2P_sigma))
ranked_distributions_labels.append(str('Lognormal_2P (μ=' + str(round(fitted_results.Lognormal_2P_mu, sigfig)) + ',σ=' + str(round(fitted_results.Lognormal_2P_sigma, sigfig)) + ')'))
elif dist_name == 'Exponential_1P':
ranked_distributions_objects.append(Exponential_Distribution(Lambda=fitted_results.Expon_1P_lambda))
ranked_distributions_labels.append(str('Exponential_1P (lambda=' + str(round(fitted_results.Expon_1P_lambda, sigfig)) + ')'))
elif dist_name == 'Beta_2P':
ranked_distributions_objects.append(Beta_Distribution(alpha=fitted_results.Beta_2P_alpha, beta=fitted_results.Beta_2P_beta))
ranked_distributions_labels.append(str('Beta_2P (α=' + str(round(fitted_results.Beta_2P_alpha, sigfig)) + ',β=' + str(round(fitted_results.Beta_2P_beta, sigfig)) + ')'))
if include_location_shifted is True:
if dist_name == 'Weibull_3P':
ranked_distributions_objects.append(Weibull_Distribution(alpha=fitted_results.Weibull_3P_alpha, beta=fitted_results.Weibull_3P_beta, gamma=fitted_results.Weibull_3P_gamma))
ranked_distributions_labels.append(str('Weibull_3P (α=' + str(round(fitted_results.Weibull_3P_alpha, sigfig)) + ',β=' + str(round(fitted_results.Weibull_3P_beta, sigfig)) + ',γ=' + str(round(fitted_results.Weibull_3P_gamma, sigfig)) + ')'))
elif dist_name == 'Gamma_3P':
ranked_distributions_objects.append(Gamma_Distribution(alpha=fitted_results.Gamma_3P_alpha, beta=fitted_results.Gamma_3P_beta, gamma=fitted_results.Gamma_3P_gamma))
ranked_distributions_labels.append(str('Gamma_3P (α=' + str(round(fitted_results.Gamma_3P_alpha, sigfig)) + ',β=' + str(round(fitted_results.Gamma_3P_beta, sigfig)) + ',γ=' + str(round(fitted_results.Gamma_3P_gamma, sigfig)) + ')'))
elif dist_name == 'Lognormal_3P':
ranked_distributions_objects.append(Lognormal_Distribution(mu=fitted_results.Lognormal_3P_mu, sigma=fitted_results.Lognormal_3P_sigma, gamma=fitted_results.Lognormal_3P_gamma))
ranked_distributions_labels.append(str('Lognormal_3P (μ=' + str(round(fitted_results.Lognormal_3P_mu, sigfig)) + ',σ=' + str(round(fitted_results.Lognormal_3P_sigma, sigfig)) + ',γ=' + str(round(fitted_results.Lognormal_3P_gamma, sigfig)) + ')'))
elif dist_name == 'Exponential_2P':
ranked_distributions_objects.append(Exponential_Distribution(Lambda=fitted_results.Expon_2P_lambda, gamma=fitted_results.Expon_2P_gamma))
ranked_distributions_labels.append(str('Exponential_2P (lambda=' + str(round(fitted_results.Expon_2P_lambda, sigfig)) + ',γ=' + str(round(fitted_results.Expon_2P_gamma, sigfig)) + ')'))
number_of_distributions_fitted = len(ranked_distributions_objects)
self.results = ranked_distributions_objects
self.most_similar_distribution = ranked_distributions_objects[0]
if print_results is True:
print('The input distribution was:')
print(distribution.param_title_long)
if number_of_distributions_fitted < number_of_distributions_to_show:
number_of_distributions_to_show = number_of_distributions_fitted
print('\nThe top', number_of_distributions_to_show, 'most similar distributions are:')
counter = 0
while counter < number_of_distributions_to_show and counter < number_of_distributions_fitted:
dist = ranked_distributions_objects[counter]
print(dist.param_title_long)
counter += 1
if show_plot is True:
plt.figure(figsize=(14, 6))
plt.suptitle(str('Plot of similar distributions to ' + distribution.param_title_long))
counter = 0
xlower = distribution.quantile(0.001)
xupper = distribution.quantile(0.999)
x_delta = xupper - xlower
plt.subplot(121)
distribution.PDF(label=str('Input distribution [' + distribution.name2 + ']'), linestyle='--')
while counter < number_of_distributions_to_show and counter < number_of_distributions_fitted:
ranked_distributions_objects[counter].PDF(label=ranked_distributions_labels[counter])
counter += 1
plt.xlim([xlower - x_delta * 0.1, xupper + x_delta * 0.1])
plt.legend()
plt.title('PDF')
counter = 0
plt.subplot(122)
distribution.CDF(label=str('Input distribution [' + distribution.name2 + ']'), linestyle='--')
while counter < number_of_distributions_to_show and counter < number_of_distributions_fitted:
ranked_distributions_objects[counter].CDF(label=ranked_distributions_labels[counter])
counter += 1
plt.xlim([xlower - x_delta * 0.1, xupper + x_delta * 0.1])
plt.legend()
plt.title('CDF')
plt.subplots_adjust(left=0.08, right=0.95)
plt.show()
def histogram(data, white_above=None, bins=None, density=True, cumulative=False, **kwargs):
'''
histogram
plots a histogram using the data specified
This is similar to plt.hist except that it calculates the optimal number of bins to use based on the Freedman-Diaconis rule ==> https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule
If you would like to specify the number of bins rather than having the optimal number calculated, then the bins argument allows this.
This function also shades the bins white above a specified value (white_above). This is useful for representing complete data as right censored data in a histogram.
Inputs:
data - the data to plot. Array or list.
white_above - bins above this value will be shaded white
bins - the number of bins to use. Must be int. Leave empty to have the optimal number calculated automatically
density - True/False. Default is True. Always use True if plotting with a probability distribution.
cumulative - True/False. Default is False. Use False for PDF and True for CDF.
kwargs - plotting kwargs for the histogram (color, alpha, etc.)
'''
if type(data) not in [np.ndarray, list]:
raise ValueError('data must be an array or list')
if white_above is not None:
if type(white_above) not in [int, float, np.float64]:
raise ValueError('white_above must be int or float')
if white_above < min(data):
raise ValueError('white_above must be greater than min(data)')
if bins is not None:
if type(bins) is not int:
raise ValueError('bins is the number of bins to use. It must be type int. Leave empty to calculate the optimal number')
else:
iqr = np.subtract(*np.percentile(data, [75, 25])) # interquartile range
bin_width = 2 * iqr * len(data) ** -(1 / 3)  # Freedman-Diaconis rule ==> https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule
bins = int(np.ceil((max(data) - min(data)) / bin_width))
if 'color' in kwargs:
color = kwargs.pop('color')
elif 'c' in kwargs:
color = kwargs.pop('c')
else:
color = 'lightgrey'
if 'edgecolor' in kwargs:
edgecolor = kwargs.pop('edgecolor')
else:
edgecolor = 'k'
if 'linewidth' in kwargs:
linewidth = kwargs.pop('linewidth')
elif 'lw' in kwargs:
linewidth = kwargs.pop('lw')
else:
linewidth = 0.5
_, bins_out, patches = plt.hist(data, density=density, cumulative=cumulative, color=color, bins=bins, edgecolor=edgecolor, linewidth=linewidth, **kwargs) # plots the histogram of the data
if white_above is not None:
for i in range(np.argmin(abs(np.array(bins_out) - white_above)), len(patches)): # this is to shade part of the histogram as white
patches[i].set_facecolor('white')
def convert_dataframe_to_grouped_lists(input_dataframe):
'''
Accepts a dataframe containing 2 columns
This function assumes the identifying column is the left column
returns:
lists , names - lists is a list of the grouped lists
- names is the identifying values used to group the lists from the first column
Example usage:
#create sample data
import pandas as pd
data = {'outcome': ['Failed', 'Censored', 'Failed', 'Failed', 'Censored'],
'cycles': [1253,1500,1342,1489,1500]}
df = pd.DataFrame(data, columns = ['outcome', 'cycles'])
#usage of the function
lists,names = convert_dataframe_to_grouped_lists(df)
print(names[1]) >>> Failed
print(lists[1]) >>> [1253, 1342, 1489]
'''
df = input_dataframe
column_names = df.columns.values
if len(column_names) > 2:
raise ValueError('Dataframe contains more than 2 columns. There should only be 2 columns with the first column containing the labels to group by and the second containing the values to be returned in groups.')
grouped_lists = []
group_list_names = []
for key, items in df.groupby(column_names[0]):
values = list(items.iloc[:, 1].values)
grouped_lists.append(values)
group_list_names.append(key)
return grouped_lists, group_list_names
class make_right_censored_data:
'''
make_right_censored_data
Right censors data based on specified threshold
Inputs:
data - list or array of data
threshold - point to right censor (right censoring is done if value is > threshold)
Outputs:
failures - array of failures (data <= threshold)
right_censored - array of right_censored values (data > threshold). These will be set to the value of the threshold.
'''
def __init__(self, data, threshold):
if type(data) is list:
data = np.array(data)
self.failures = data[data <= threshold]
self.right_censored = np.ones_like(data[data > threshold]) * threshold
class crosshairs:
'''
Adds interactive crosshairs to matplotlib plots
Ensure this is used after you plot everything as anything plotted after crosshairs() is called will not be recognised by the snap-to feature.
:param xlabel: the xlabel for annotations. Default is 'x'
:param ylabel: the ylabel for annotations. Default is 'y'
:param decimals: the number of decimals for rounding. Default is 2.
:param kwargs: plotting kwargs to change the style of the crosshairs (eg. color, linestyle, etc.)
'''
def __init__(self, xlabel=None, ylabel=None, decimals=2, **kwargs):
crosshairs.__generate_crosshairs(self, xlabel=xlabel, ylabel=ylabel, decimals=decimals, **kwargs)
def __add_lines_and_text_to_crosshairs(sel, decimals, **kwargs):
# set the default properties of the lines and text if they were not provided as kwargs
if 'c' in kwargs:
color = kwargs.pop('c')
elif 'color' in kwargs:
color = kwargs.pop('color')
else:
color = 'k'
if 'lw' in kwargs:
linewidth = kwargs.pop('lw')
elif 'linewidth' in kwargs:
linewidth = kwargs.pop('linewidth')
else:
linewidth = 0.5
if 'ls' in kwargs:
linestyle = kwargs.pop('ls')
elif 'linestyle' in kwargs:
linestyle = kwargs.pop('linestyle')
else:
linestyle = '--'
if 'size' in kwargs:
fontsize = kwargs.pop('size')
elif 'fontsize' in kwargs:
fontsize = kwargs.pop('fontsize')
else:
fontsize = 10
if 'fontweight' in kwargs:
fontweight = kwargs.pop('fontweight')
elif 'weight' in kwargs:
fontweight = kwargs.pop('weight')
else:
fontweight = 0
if 'fontstyle' in kwargs:
fontstyle = kwargs.pop('fontstyle')
elif 'style' in kwargs:
fontstyle = kwargs.pop('style')
else:
fontstyle = 'normal'
sel.annotation.set(visible=False) # Hide the normal annotation during hover
try:
ax = sel.artist.axes
except:
ax = sel.annotation.axes # this exception occurs for bar charts
x, y = sel.target
lines = [Line2D([x, x], [0, 1], transform=ax.get_xaxis_transform(), c=color, lw=linewidth, ls=linestyle, **kwargs),
Line2D([0, 1], [y, y], transform=ax.get_yaxis_transform(), c=color, lw=linewidth, ls=linestyle, **kwargs)]
texts = [ax.text(s=round(y, decimals), x=0, y=y, transform=ax.get_yaxis_transform(), color=color, fontsize=fontsize, fontweight=fontweight, fontstyle=fontstyle, **kwargs),
ax.text(s=round(x, decimals), x=x, y=0, transform=ax.get_xaxis_transform(), color=color, fontsize=fontsize, fontweight=fontweight, fontstyle=fontstyle, **kwargs)]
for i in [0, 1]:
line = lines[i]
text = texts[i]
ax.add_line(line)
# the lines and text need to be registered with sel so that they are updated during mouse motion events
sel.extras.append(line)
sel.extras.append(text)
def __format_annotation(sel, decimals, label): # this is some simple formatting for the annotations (applied on click)
[x, y] = sel.annotation.xy
text = str(label[0] + ' = ' + str(round(x, decimals)) + '\n' + label[1] + ' = ' + str(round(y, decimals)))
sel.annotation.set_text(text)
sel.annotation.get_bbox_patch().set(fc="white")
def __hide_crosshairs(event):
ax = event.inaxes # this gets the axes where the event occurred.
if len(ax.texts) >= 2: # the lines can't be deleted if they haven't been drawn.
if ax.texts[-1].get_position()[1] == 0 and ax.texts[-2].get_position()[0] == 0: # this identifies the texts (crosshair text coords) based on their combination of unique properties
ax.lines[-1].set_visible(False)
ax.lines[-2].set_visible(False)
ax.texts[-1].set_visible(False)
ax.texts[-2].set_visible(False)
event.canvas.draw()
def __generate_crosshairs(self, xlabel=None, ylabel=None, decimals=2, **kwargs): # this is the main program
warnings.simplefilter('ignore') # required when using fill_between due to warning in mplcursors: "UserWarning: Pick support for PolyCollection is missing."
ch = cursor(hover=True)
add_lines_and_text_with_kwargs = lambda _: crosshairs.__add_lines_and_text_to_crosshairs(_, decimals, **kwargs) # adds the line's kwargs before connecting it to cursor
ch.connect("add", add_lines_and_text_with_kwargs)
plt.gcf().canvas.mpl_connect('axes_leave_event', crosshairs.__hide_crosshairs) # hide the crosshairs and text when the mouse leaves the axes
# does the annotation part
if xlabel is None:
xlabel = 'x'
if ylabel is None:
ylabel = 'y'
warnings.simplefilter('ignore') # required when using fill_between due to warning in mplcursors: "UserWarning: Pick support for PolyCollection is missing."
annot = cursor(multiple=True, bindings={"toggle_visible": "h"})
format_annotation_labeled = lambda _: crosshairs.__format_annotation(_, decimals, [xlabel, ylabel]) # adds the labels to the 'format_annotation' function before connecting it to cursor
annot.connect("add", format_annotation_labeled)
```
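A short, hedged usage sketch for two of the helpers above (`make_right_censored_data` and `histogram`); the sample data, seed, and censoring threshold are arbitrary, and it assumes the `reliability` package is importable:
```python
import numpy as np
import matplotlib.pyplot as plt
from reliability.Other_functions import make_right_censored_data, histogram

# arbitrary failure times, right censored at t=40
np.random.seed(1)
raw = np.random.weibull(a=2, size=500) * 30
censored = make_right_censored_data(data=raw, threshold=40)
print(len(censored.failures), 'failures,', len(censored.right_censored), 'right censored')

# histogram of the complete data, shading the bins beyond the censoring threshold white
histogram(raw, white_above=40)
plt.show()
```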
#### File: reliability/reliability/Reliability_testing.py
```python
import scipy.stats as ss
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def one_sample_proportion(trials=None, successes=None, CI=0.95):
'''
Calculates the upper and lower bounds of reliability for a given number of trials and successes.
inputs:
trials - the number of trials which were conducted
successes - the number of trials which were successful
CI - the desired confidence interval. Defaults to 0.95 for 95% CI.
returns: lower, upper - Confidence interval limits.
will return nan for lower or upper if only a one sided CI is calculated (i.e. when successes=0 or successes=trials).
'''
if trials is None or successes is None:
raise ValueError('You must specify the number of trials and successes.')
if successes > trials:
raise ValueError('successes cannot be greater than trials')
if successes == 0 or successes == trials: # calculate 1 sided CI in these cases
n = 1
else:
n = 2
V1_lower = 2 * successes
V2_lower = 2 * (trials - successes + 1)
alpha_lower = (1 - CI) / n
F_lower = ss.f.ppf(alpha_lower, V1_lower, V2_lower)
LOWER_LIM = (V1_lower * F_lower) / (V2_lower + V1_lower * F_lower)
V1_upper = 2 * (successes + 1)
V2_upper = 2 * (trials - successes)
alpha_upper = 1 - alpha_lower
F_upper = ss.f.ppf(alpha_upper, V1_upper, V2_upper)
UPPER_LIM = (V1_upper * F_upper) / (V2_upper + V1_upper * F_upper)
return LOWER_LIM, UPPER_LIM  # will return nan for lower or upper if only a one sided CI is calculated (i.e. when successes=0 or successes=trials).
def two_proportion_test(sample_1_trials=None, sample_1_successes=None, sample_2_trials=None, sample_2_successes=None, CI=0.95):
'''
Calculates whether the difference in test results between two samples is statistically significant. For example, assume we have
a poll of respondents in which 27/40 people agreed, and another poll in which 42/80 agreed. This test will determine if the difference
is statistically significant for the given sample sizes at the specified confidence level.
inputs:
sample_1_trials - number of trials in the first sample
sample_1_successes - number of successes in the first sample
sample_2_trials - number of trials in the second sample
sample_2_successes - number of successes in the second sample
CI - desired confidence interval. Defaults to 0.95 for 95% CI.
returns:
lower,upper,result - lower and upper are bounds on the difference. If the bounds do not include 0 then it is a statistically significant difference.
'''
if CI < 0.5 or CI >= 1:
raise ValueError('CI must be between 0.5 and 1. Default is 0.95')
if sample_1_trials is None or sample_1_successes is None or sample_2_trials is None or sample_2_successes is None:
raise ValueError('You must specify the number of trials and successes for both samples.')
if sample_1_successes > sample_1_trials or sample_2_successes > sample_2_trials:
raise ValueError('successes cannot be greater than trials')
p1 = sample_1_successes / sample_1_trials
p2 = sample_2_successes / sample_2_trials
diff = p1 - p2
Z = ss.norm.ppf(1 - ((1 - CI) / 2))
k = Z * ((p1 * (1 - p1) / sample_1_trials) + (p2 * (1 - p2) / sample_2_trials)) ** 0.5
lower = diff - k
upper = diff + k
if lower < 0 and upper > 0:
result = 'non-significant'
else:
result = 'significant'
return lower, upper, result
def sample_size_no_failures(reliability, CI=0.95, lifetimes=1, weibull_shape=1):
'''
This is used to determine the sample size required for a test in which no failures are expected, and the desired
outcome is the lower bound on the reliability based on the sample size and desired confidence interval.
inputs:
reliability - lower bound on product reliability (between 0 and 1)
CI - confidence interval of result (between 0.5 and 1). Defaults to 0.95 for 95% CI.
lifetimes - if testing the product for multiple lifetimes then more failures are expected so a smaller sample
size will be required to demonstrate the desired reliability (assuming no failures). Conversely, if testing for
less than one full lifetime then a larger sample size will be required. Default is 1.
weibull_shape - if the weibull shape (beta) of the failure mode is known, specify it here. Otherwise leave the
default of 1 for the exponential distribution.
returns:
number of items required in the test. This will always be an integer (rounded up).
'''
if CI < 0.5 or CI >= 1:
raise ValueError('CI must be between 0.5 and 1')
if reliability <= 0 or reliability >= 1:
raise ValueError('Reliability must be between 0 and 1')
if weibull_shape < 0:
raise ValueError('Weibull shape must be greater than 0. Default (exponential distribution) is 1. If unknown then use 1.')
if lifetimes > 5:
print('Testing for greater than 5 lifetimes is highly unlikely to result in zero failures.')
if lifetimes <= 0:
raise ValueError('lifetimes must be >0. Default is 1. No more than 5 is recommended due to test feasibility.')
n = int(np.ceil((np.log(1 - CI)) / (lifetimes ** weibull_shape * np.log(reliability)))) # rounds up to nearest integer
return n
def sequential_samling_chart(p1, p2, alpha, beta, show_plot=True, print_results=True, test_results=None, max_samples=100):
'''
This function plots the accept/reject boundaries for a given set of quality and risk levels. If supplied, the test results are also
plotted on the chart.
inputs:
p1 - producer_quality. The acceptable failure rate for the producer (typically around 0.01)
p2 - consumer_quality. The acceptable failure rate for the consumer (typically around 0.1)
alpha - producer_risk. Producer's CI = 1-alpha (typically 0.05)
beta - consumer_risk. Consumer's CI = 1-beta (typically 0.1)
test_results - array or list of binary test results. eg. [0,0,0,1] for 3 successes and 1 failure. Default=None
show_plot - True/False. Defaults to True.
print_results - True/False. Defaults to True.
max_samples - the x_lim of the plot. optional input. Default=100.
outputs:
The sequential sampling chart - A plot of sequential sampling chart with decision boundaries. test_results are only plotted on the chart
if provided as an input.
results - a dataframe of tabulated decision results.
'''
if type(test_results) == list:
F = np.array(test_results)
elif type(test_results) == np.ndarray:
F = test_results
elif test_results is None:
F = None
else:
raise ValueError('test_results must be a binary array or list with 1 as failures and 0 as successes. eg. [0 0 0 1] for 3 successes and 1 failure.')
a = 1 - alpha
b = 1 - beta
d = np.log(p2 / p1) + np.log((1 - p1) / (1 - p2))
h1 = np.log((1 - a) / b) / d
h2 = np.log((1 - b) / a) / d
s = np.log((1 - p1) / (1 - p2)) / d
xvals = np.arange(max_samples + 1)
rejection_line = s * xvals - h1
acceptance_line = s * xvals + h2
acceptance_line[acceptance_line < 0] = 0
upper_line = np.ones_like(xvals) * (s * max_samples - h1)
lower_line_range = np.linspace(-h2 / s, max_samples, max_samples + 1)
acceptance_line2 = s * lower_line_range + h2 # this is the visible part of the line that starts beyond x=0
acceptance_array = np.asarray(np.floor(s * xvals + h2), dtype=int)
rejection_array = np.asarray(np.ceil(s * xvals - h1), dtype=int)
for i, x in enumerate(xvals): # this replaces cases where the criteria exceeds the number of samples
if rejection_array[i] > x:
rejection_array[i] = -1
data = {'Samples': xvals, 'Failures to accept': acceptance_array, 'Failures to reject': rejection_array}
df = pd.DataFrame(data, columns=['Samples', 'Failures to accept', 'Failures to reject'])
df.set_index('Samples', inplace=True)
df.loc[df['Failures to accept'] < 0, 'Failures to accept'] = 'x'
df.loc[df['Failures to reject'] < 0, 'Failures to reject'] = 'x'
if print_results is True:
print(df)
if show_plot is True:
# plots the results of tests if they are specified
if type(F) == np.ndarray:
if not np.isin(F, [0, 1]).all():
raise ValueError('test_results must be a binary array or list with 1 as failures and 0 as successes. eg. [0 0 0 1] for 3 successes and 1 failure.')
nx = []
ny = []
failure_count = 0
sample_count = 0
for f in F:
if f == 0:
sample_count += 1
nx.append(sample_count)
ny.append(failure_count)
elif f == 1:
sample_count += 1
nx.append(sample_count)
ny.append(failure_count)
failure_count += 1
nx.append(sample_count)
ny.append(failure_count)
else:
raise ValueError('test_results must be a binary array or list with 1 as failures and 0 as successes. eg. [0 0 0 1] for 3 successes and 1 failure.')
plt.plot(nx, ny, label='test results')
# plots the decision boundaries and shades the areas red and green
plt.plot(lower_line_range, acceptance_line2, linestyle='--', color='green')
plt.plot(xvals, rejection_line, linestyle='--', color='red')
plt.fill_between(xvals, rejection_line, upper_line, color='red', alpha=0.3, label='Reject sample')
plt.fill_between(xvals, acceptance_line, rejection_line, color='gray', alpha=0.1, label='Keep Testing')
plt.fill_between(lower_line_range, 0, acceptance_line2, color='green', alpha=0.3, label='Accept Sample')
plt.ylim([0, max(rejection_line)])
plt.xlim([0, max(xvals)])
plt.xlabel('Number of samples tested')
plt.ylabel('Number of failures from samples tested')
plt.title('Sequential sampling decision boundaries')
plt.legend()
plt.show()
return df
class reliability_test_planner:
'''
reliability_test_planner
Solves for unknown test planner variables, given known variables.
The Chi-squared distribution is used to find the lower confidence bound on MTBF for a given test duration, number of failures, and specified confidence interval.
The equation for time-terminated tests is: MTBF = (2*test_duration)/(chisquared_inverse(CI, 2*number_of_failures+2))
The equation for failure-terminated tests is: MTBF = (2*test_duration)/(chisquared_inverse(CI, 2*number_of_failures))
This equation can be rearranged to solve for any of the 4 variables. For example, you may want to know how many failures you are allowed to have in a given test duration to achieve a particular MTBF.
The user must specify any 3 out of the 4 variables (not including two_sided, print_results, or time_terminated) and the remaining variable will be calculated.
Inputs:
MTBF - mean time between failures. This is the lower confidence bound on the MTBF. Units given in same units as the test_duration.
number_of_failures - the number of failures recorded (or allowed) to achieve the MTBF. Must be an integer.
test_duration - the amount of time on test required (or performed) to achieve the MTBF. May also be distance, rounds fired, cycles, etc. Units given in same units as MTBF.
CI - the confidence interval at which the lower confidence bound on the MTBF is given. Must be between 0.5 and 1. For example, specify 0.95 for 95% confidence interval.
print_results - True/False. Default is True.
two_sided - True/False. Default is True. If set to False, the 1 sided confidence interval will be returned.
time_terminated - True/False. Default is True. If set to False, the formula for the failure-terminated test will be used.
Outputs:
If print_results is True, all the variables will be printed.
An output object is also returned with the same values as the inputs and the remaining value also calculated.
Examples:
reliability_test_planner(test_duration=19520,CI=0.8,number_of_failures=7)
Reliability Test Planner results for time-terminated test
Solving for MTBF
Test duration: 19520
MTBF (lower confidence bound): 1658.3248534993454
Number of failures: 7
Confidence interval (2 sided):0.8
output = reliability_test_planner(number_of_failures=6,test_duration=10000,CI=0.8, print_results=False)
print(output.MTBF)
949.4807763260345
'''
def __init__(self, MTBF=None, number_of_failures=None, CI=None, test_duration=None, two_sided=True, time_terminated=True, print_results=True):
print_CI_warn = False # used later if the CI is calculated
if CI is not None:
if CI < 0.5 or CI >= 1:
raise ValueError('CI must be between 0.5 and 1. For example, specify CI=0.95 for 95% confidence interval')
if two_sided is False:
CI_adj = CI
else:
CI_adj = 1 - ((1 - CI) / 2)
if time_terminated is True:
p = 2
elif time_terminated is False:
p = 0
else:
raise ValueError('time_terminated must be True or False. Default is True for the time terminated test (a test stopped after a set time rather than after a set number of failures).')
if two_sided is False:
sides = 1
elif two_sided is True:
sides = 2
else:
raise ValueError('two_sided must be True or False. Default is True for the two sided confidence interval.')
if number_of_failures is not None:
if number_of_failures % 1 != 0 or number_of_failures < 0:
raise ValueError('number_of_failures must be a positive integer')
if MTBF is None and number_of_failures is not None and CI is not None and test_duration is not None:
soln_type = 'MTBF'
MTBF = (2 * test_duration) / ss.chi2.ppf(CI_adj, 2 * number_of_failures + p)
elif MTBF is not None and number_of_failures is None and CI is not None and test_duration is not None:
soln_type = 'failures'
number_of_failures = 0
while True: # this requires an iterative search. Begins at 0 and increments by 1 until the solution is found
result = (2 * test_duration) / ss.chi2.ppf(CI_adj, 2 * number_of_failures + p) - MTBF
if result < 0: # solution is found when result returns a negative number (indicating too many failures)
break
number_of_failures += 1
MTBF_check = (2 * test_duration) / ss.chi2.ppf(CI_adj, 2 * 0 + p) # checks that the maximum possible MTBF (when there are 0 failures) is within the test_duration
if MTBF_check < MTBF:
raise ValueError('The specified MTBF is not possible given the specified test_duration. You must increase your test_duration or decrease your MTBF.')
elif MTBF is not None and number_of_failures is not None and CI is None and test_duration is not None:
soln_type = 'CI'
CI_calc = ss.chi2.cdf(test_duration / (MTBF * 0.5), 2 * number_of_failures + p)
if two_sided is False:
CI = CI_calc
else:
CI = 1 - (2 * (1 - CI_calc)) # this can give negative numbers, but only when the inputs result in an impossible CI.
if CI < 0.5:
print_CI_warn = True
elif MTBF is not None and number_of_failures is not None and CI is not None and test_duration is None:
soln_type = 'test_duration'
test_duration = ss.chi2.ppf(CI_adj, 2 * number_of_failures + p) * MTBF / 2
elif MTBF is not None and number_of_failures is not None and CI is not None and test_duration is not None:
raise ValueError('All inputs were specified. Nothing to calculate.')
else:
raise ValueError('More than one input was not specified. You must specify any 3 out of the 4 inputs (not including two_sided or print_results) and the remaining input will be calculated.')
self.test_duration = test_duration
self.MTBF = MTBF
self.number_of_failures = number_of_failures
self.CI = CI
if print_results is True:
if time_terminated is True:
print('\nReliability Test Planner results for time-terminated test')
else:
print('\nReliability Test Planner results for failure-terminated test')
if soln_type == 'MTBF':
print('Solving for MTBF')
elif soln_type == 'failures':
print('Solving for number_of_failures')
elif soln_type == 'CI':
print('Solving for CI')
else:
print('Solving for test_duration')
print('Test duration:', self.test_duration)
print('MTBF (lower confidence bound):', self.MTBF)
print('Number of failures:', self.number_of_failures)
print(str('Confidence interval (' + str(sides) + ' sided):' + str(self.CI)))
if print_CI_warn is True:
print('WARNING: The calculated CI is less than 0.5. This indicates that the desired MTBF is unachievable for the specified test_duration and number_of_failures.')
```
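The bound and test functions above are plain functions, so they can be exercised directly; a hedged sketch with arbitrary numbers (the two-proportion figures mirror the docstring's 27/40 vs 42/80 example):
```python
from reliability.Reliability_testing import one_sample_proportion, two_proportion_test, reliability_test_planner

# two-sided 95% bounds on reliability after 29 successes in 30 trials
lower, upper = one_sample_proportion(trials=30, successes=29, CI=0.95)
print(lower, upper)

# is 27/40 significantly different from 42/80 at 95% confidence?
lo, hi, verdict = two_proportion_test(sample_1_trials=40, sample_1_successes=27,
                                      sample_2_trials=80, sample_2_successes=42)
print(verdict)

# how many failures are allowed in a 10000 hour test to still demonstrate MTBF >= 500 at 80% confidence?
plan = reliability_test_planner(MTBF=500, CI=0.8, test_duration=10000, print_results=False)
print(plan.number_of_failures)
```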
{
"source": "JensPars/BA-project",
"score": 4
}
#### File: JensPars/BA-project/get_data.py
```python
import pandas as pd


def get_data(df, drop=[], split=False, dummies=[]):
    '''
    Input:
        pandas.DataFrame
        list of keys to be dropped
        boolean determining whether to split
        the keys of categorical features for one-hot encoding
    Output:
        if split == True
            data cleaned and split into test and train
        if split == False
            the whole dataframe cleaned
    '''
    df = df.drop(drop, axis=1)
    if split == True:
        # get dummies
        if dummies != []:
            df = pd.get_dummies(df, columns=dummies)
        # else remove categorical features
        else:
            df = df.drop(['Continent'], axis=1)
        y = df['CO2 Emissions per Capita (metric tonnes)']
        X = df.drop(['CO2 Emissions per Capita (metric tonnes)'], axis=1)
        split = int(len(df) * 0.75)
        X_train = X[:split]
        y_train = y[:split]
        X_test = X[split:]
        y_test = y[split:]
        return X_train, y_train, X_test, y_test
    else:
        return df
```
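A minimal, hedged usage sketch: the frame below is made up, but it carries the two column names that `get_data` itself references ('Continent' and 'CO2 Emissions per Capita (metric tonnes)'); the other columns and the import path are purely illustrative.
```python
import pandas as pd

from get_data import get_data  # module shown above; adjust the import path to your layout

df = pd.DataFrame({
    "Country": ["A", "B", "C", "D"],                                   # illustrative identifier column
    "Continent": ["Europe", "Asia", "Europe", "Africa"],
    "GDP per Capita": [40000, 12000, 35000, 5000],                     # illustrative numeric feature
    "CO2 Emissions per Capita (metric tonnes)": [6.1, 4.2, 5.8, 0.9],  # the target column get_data() reads
})

# drop the identifier, one-hot encode Continent and split 75/25 into train/test
X_train, y_train, X_test, y_test = get_data(df, drop=["Country"], split=True, dummies=["Continent"])
print(X_train.shape, X_test.shape)
```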
{
"source": "jenspetersen/2017_JMI",
"score": 2
}
#### File: 2017_JMI/util/iters.py
```python
import h5py
import itertools as it
# =============================================================================
# PROGRAM METADATA
# =============================================================================
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = ""
__license__ = ""
__date__ = "Wed Nov 9 17:58:37 2016"
__version__ = "0.1"
# =============================================================================
# METHODS & CLASSES
# =============================================================================
def kwargs(parameters, *args):
    """For a dictionary of parameter lists, yield possible combinations.

    Combinations will be returned as dictionaries. Additional arguments
    will not be split (lists remain lists etc.)."""
    i = 0
    # make everything iterable
    # copy values to not change input object
    keys = sorted(parameters.keys())
    values = []
    for key in keys:
        if hasattr(parameters[key], "__iter__") and key not in args:
            values.append(parameters[key])
        else:
            values.append([parameters[key]])
    for comb in it.product(*values):
        yield i, dict(zip(keys, comb))
        i += 1
def walk_h5(h5object, yield_datasets=True):
    """Similar to os.walk, walk through the tree structure in a HDF5 file.

    Yields (current object, [group names], [dataset names])."""
    if isinstance(h5object, h5py.Dataset):
        if yield_datasets:
            yield h5object, [], []
        else:
            raise TypeError("Datasets are ignored but the object is a dataset.")
    else:
        groups = []
        datasets = []
        for key in h5object.keys():
            if isinstance(h5object[key], h5py.Group):
                groups.append(key)
            if isinstance(h5object[key], h5py.Dataset):
                datasets.append(key)
        yield h5object, groups, datasets
        for g in groups:
            for el in walk_h5(h5object[g], yield_datasets):
                yield el
        if yield_datasets:
            for d in datasets:
                for el in walk_h5(h5object[d], yield_datasets):
                    yield el
# =============================================================================
# MAIN METHOD
# =============================================================================
def main():
    import IPython
    IPython.embed()
# =============================================================================
# RUN
# =============================================================================
if __name__ == "__main__":
    main()
```
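`kwargs` is essentially a small parameter-grid generator. One detail worth showing: strings are iterable, so any parameter that should be passed through whole (such as a string) has to be named in `*args`. A short sketch with made-up parameters; the import path is assumed from the repository layout above:
```python
from util.iters import kwargs  # assumed import path

params = {"lr": [0.1, 0.01], "batch_size": [16, 32], "optimizer": "adam"}

# "optimizer" is listed in *args so the string is not split into characters
for i, combination in kwargs(params, "optimizer"):
    print(i, combination)
# 0 {'batch_size': 16, 'lr': 0.1, 'optimizer': 'adam'}
# 1 {'batch_size': 16, 'lr': 0.01, 'optimizer': 'adam'}
# 2 {'batch_size': 32, 'lr': 0.1, 'optimizer': 'adam'}
# 3 {'batch_size': 32, 'lr': 0.01, 'optimizer': 'adam'}
```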
{
"source": "jenspetersen/probabilistic-unet",
"score": 2
}
#### File: probunet/experiment/probabilistic_unet_segmentation.py
```python
from probunet.experiment.probabilistic_unet_future_segmentation import ProbabilisticUNetFutureSegmentation
import os
import numpy as np
import time
import torch
from torch import nn, optim, distributions
from trixi.util import Config
from batchgenerators.transforms import (
MirrorTransform,
SpatialTransform,
CenterCropTransform,
SegLabelSelectionBinarizeTransform,
Compose
)
from batchgenerators.dataloading import MultiThreadedAugmenter
from probunet.model import ProbabilisticSegmentationNet, InjectionUNet3D, InjectionConvEncoder3D
from probunet.eval import Evaluator, dice
from probunet.util import (
get_default_experiment_parser,
run_experiment,
make_onehot as make_onehot_segmentation,
coordinate_grid_samples
)
from probunet import data
DESCRIPTION = "Segmentation with a Probabilistic U-Net. .test() will give results for upper bound in paper, .test_future() will give results for lower bound."
def make_defaults(patch_size=112,
in_channels=4,
latent_size=3,
labels=[0, 1, 2, 3]):
if hasattr(patch_size, "__iter__"):
if len(patch_size) > 1:
patch_size = tuple(patch_size)
else:
patch_size = patch_size[0]
if not hasattr(patch_size, "__iter__"):
patch_size = tuple([patch_size, ] * 3)
DEFAULTS = Config(
# Base
name=os.path.basename(__file__).split(".")[0],
description=DESCRIPTION,
n_epochs=50000,
batch_size=2,
batch_size_val=1,
patch_size=patch_size,
in_channels=in_channels,
out_channels=len(labels),
latent_size=latent_size,
seed=1,
device="cuda",
# Data
split_val=3,
split_test=4,
data_module=data,
data_dir=None, # we're setting data_module.data_dir if this is given
mmap_mode="r",
npz=False,
debug=0, # 1 selects (10, 5, 5) patients, 2 a single batch
train_on_all=False, # adds val and test to training set
generator_train=data.RandomBatchGenerator,
generator_val=data.LinearBatchGenerator,
transforms_train={
0: {
"type": SpatialTransform,
"kwargs": {
"patch_size": patch_size,
"patch_center_dist_from_border": patch_size[0] // 2,
"do_elastic_deform": False,
"p_el_per_sample": 0.2,
"p_rot_per_sample": 0.3,
"p_scale_per_sample": 0.3
},
"active": True
},
1: {
"type": MirrorTransform,
"kwargs": {"axes": (0, 1, 2)},
"active": True
},
2: {
"type": SegLabelSelectionBinarizeTransform,
"kwargs": {"label": [1, 2, 3]},
"active": False
}
},
transforms_val={
0: {
"type": CenterCropTransform,
"kwargs": {"crop_size": patch_size},
"active": False
},
1: {
"type": SegLabelSelectionBinarizeTransform,
"kwargs": {"label": [1, 2, 3]},
"active": False
},
2: {
"type": SpatialTransform,
"kwargs": {
"patch_size": patch_size,
"patch_center_dist_from_border": patch_size[0] // 2,
"do_elastic_deform": False,
"do_rotation": False,
"do_scale": True,
"p_scale_per_sample": 1,
"scale": (1.25, 1.25)
},
"active": False
}
},
augmenter_train=MultiThreadedAugmenter,
augmenter_train_kwargs={
"num_processes": 11,
"num_cached_per_queue": 6,
"pin_memory": True
},
augmenter_val=MultiThreadedAugmenter,
augmenter_val_kwargs={
"num_processes": 2,
"pin_memory": True
},
# Model
model=ProbabilisticSegmentationNet,
model_kwargs={
"in_channels": in_channels,
"out_channels": len(labels),
"num_feature_maps": 24,
"latent_size": latent_size,
"depth": 5,
"latent_distribution": distributions.Normal,
"task_op": InjectionUNet3D,
"task_kwargs": {
"output_activation_op": nn.LogSoftmax,
"output_activation_kwargs": {"dim": 1},
"activation_kwargs": {"inplace": True}
},
"prior_op": InjectionConvEncoder3D,
"prior_kwargs": {
"in_channels": in_channels,
"out_channels": latent_size * 2,
"depth": 5,
"block_depth": 2,
"num_feature_maps": 24,
"feature_map_multiplier": 2,
"activation_kwargs": {"inplace": True},
"norm_depth": 2,
},
"posterior_op": InjectionConvEncoder3D,
"posterior_kwargs": {
"in_channels": in_channels + len(labels),
"out_channels": latent_size * 2,
"depth": 5,
"block_depth": 2,
"num_feature_maps": 24,
"feature_map_multiplier": 2,
"activation_kwargs": {"inplace": True},
"norm_depth": 2,
},
},
model_init_weights_args=[nn.init.kaiming_uniform_, 0],
model_init_bias_args=[nn.init.constant_, 0],
# Learning
optimizer=optim.Adam,
optimizer_kwargs={"lr": 1e-4},
scheduler=optim.lr_scheduler.StepLR,
scheduler_kwargs={"step_size": 200, "gamma": 0.985},
criterion_segmentation=nn.NLLLoss,
criterion_segmentation_kwargs={"reduction": "sum"},
criterion_latent=distributions.kl_divergence,
criterion_latent_kwargs={},
criterion_latent_init=False,
criterion_segmentation_seg_onehot=False,
criterion_segmentation_weight=1.0,
criterion_latent_weight=1.0,
criterion_segmentation_seg_dtype=torch.long,
# Logging
backup_every=1000,
validate_every=1000,
validate_subset=0.1, # validate only this percentage randomly
show_every=10,
validate_metrics=["Dice"],
labels=labels,
evaluator=Evaluator,
evaluator_kwargs={
"label_values": list(labels) + [tuple(labels[1:])],
"label_names": {
0: "Background",
1: "Edema",
2: "Enhancing",
3: "Necrosis",
tuple(labels[1:]): "Whole Tumor"
},
"nan_for_nonexisting": True
},
val_save_output=False,
val_example_samples=10,
val_save_images=False,
latent_plot_range=[-5, 5],
test_on_val=True,
test_save_output=False,
test_future=True,
test_std_factor=3,
test_std_scale=1.
)
TASKMEAN = Config(
criterion_segmentation_kwargs={"reduction": "elementwise_mean"}
)
ELASTIC = Config(
transforms_train={0: {"kwargs": {"do_elastic_deform": True}}}
)
NONORM = Config(
model_kwargs={
"prior_kwargs": {"norm_depth": 0},
"posterior_kwargs": {"norm_depth": 0}
}
)
FULLNORM = Config(
model_kwargs={
"prior_kwargs": {"norm_depth": "full"},
"posterior_kwargs": {"norm_depth": "full"}
}
)
BATCHNORM = Config(
model_kwargs={
"prior_kwargs": {"norm_op": nn.BatchNorm3d},
"posterior_kwargs": {"norm_op": nn.BatchNorm3d},
"task_kwargs": {"norm_op": nn.BatchNorm3d}
}
)
WHOLETUMOR = Config(
transforms_train={2: {"active": True}},
transforms_val={1: {"active": True}},
out_channels=2,
labels=[0, 1],
model_kwargs={
"out_channels": 2,
"posterior_kwargs": {"in_channels": in_channels + 2}
},
evaluator_kwargs={
"label_values": [0, 1],
"label_names": {
0: "Background",
1: "Whole Tumor"
}
}
)
ENHANCING = Config(
transforms_train={2: {
"kwargs": {"label": 2},
"active": True
}},
transforms_val={1: {
"kwargs": {"label": 2},
"active": True
}},
out_channels=2,
labels=[0, 1],
model_kwargs={
"out_channels": 2,
"posterior_kwargs": {"in_channels": in_channels + 2}
},
evaluator_kwargs={
"label_values": [0, 1],
"label_names": {
0: "Background",
1: "Whole Tumor"
}
}
)
NOAUGMENT = Config(
transforms_train={
0: {
"kwargs": {
"p_el_per_sample": 0,
"p_rot_per_sample": 0,
"p_scale_per_sample": 0
}
},
1: {"active": False}
}
)
LOWAUGMENT = Config(
transforms_train={
0: {
"kwargs": {
"p_el_per_sample": 0.,
"p_rot_per_sample": 0.15,
"p_scale_per_sample": 0.15
}
}
}
)
NOBG = Config(
criterion_segmentation_kwargs={"ignore_index": 0}
)
VALIDATEPATCHED = Config(
transforms_val={2: {"active": True}}
)
MODS = {
"TASKMEAN": TASKMEAN,
"ELASTIC": ELASTIC,
"NONORM": NONORM,
"FULLNORM": FULLNORM,
"BATCHNORM": BATCHNORM,
"WHOLETUMOR": WHOLETUMOR,
"ENHANCING": ENHANCING,
"NOAUGMENT": NOAUGMENT,
"LOWAUGMENT": LOWAUGMENT,
"NOBG": NOBG,
"VALIDATEPATCHED": VALIDATEPATCHED
}
return {"DEFAULTS": DEFAULTS}, MODS
class ProbabilisticUNetSegmentation(ProbabilisticUNetFutureSegmentation):
def setup_data(self):
c = self.config
self.data_train = c.data_module.load(c.mmap_mode, subjects=c.subjects_train, npz=c.npz)
self.data_val = c.data_module.load(c.mmap_mode, subjects=c.subjects_val, npz=c.npz)
self.data_test = c.data_module.load(c.mmap_mode, subjects=c.subjects_test, npz=c.npz)
self.generator_train = c.generator_train(
self.data_train, c.batch_size, 3,
number_of_threads_in_multithreaded=c.augmenter_train_kwargs.num_processes)
self.generator_val = c.generator_val(
self.data_val, c.batch_size_val, 3,
number_of_threads_in_multithreaded=c.augmenter_val_kwargs.num_processes)
self.generator_test = c.generator_val(
self.data_test, c.batch_size_val, 3,
number_of_threads_in_multithreaded=c.augmenter_val_kwargs.num_processes)
def process_data(self, data, epoch):
c = self.config
if c.debug < 2 or epoch == 0:
input_ = torch.from_numpy(data["data"][:, c.in_channels:2*c.in_channels]).to(dtype=torch.float32, device=c.device)
gt_segmentation = make_onehot_segmentation(torch.from_numpy(data["seg"][:, -2:-1]).to(dtype=torch.float32, device=c.device), c.labels)
if c.debug == 2:
self._memo_batch = (input_, gt_segmentation, data)
else:
return self._memo_batch
return input_, gt_segmentation, data
def process_data_future(self, data, epoch):
c = self.config
if c.debug < 2 or epoch == 0:
input_ = torch.from_numpy(data["data"][:, c.in_channels:2*c.in_channels]).to(dtype=torch.float32, device=c.device)
gt_segmentation = make_onehot_segmentation(torch.from_numpy(data["seg"][:, -1:]).to(dtype=torch.float32, device=c.device), c.labels)
if c.debug == 2:
self._memo_batch = (input_, gt_segmentation, data)
else:
return self._memo_batch
return input_, gt_segmentation, data
def test(self):
info = self.validate_make_default_info()
info["coords"]["Metric"] = info["coords"]["Metric"] + ["Reference KL",
"Reference Reconstruction NLL",
"Reference Reconstruction Dice",
"Prior Maximum Dice",
"Prior Best Volume Dice"]
if self.config.test_on_val:
augmenter = self.augmenter_val
else:
augmenter = self.augmenter_test
test_scores, info = self.test_inner(augmenter, [], info)
test_scores = np.array(test_scores)
self.elog.save_numpy_data(test_scores, "test.npy")
self.elog.save_dict(info, "test.json")
if self.config.test_future:
self.test_future()
def test_inner(self, augmenter, scores, info, future=False):
c = self.config
with torch.no_grad():
self.model.eval()
for data in augmenter:
if future:
input_, gt_segmentation, data = self.process_data_future(data, 0)
else:
input_, gt_segmentation, data = self.process_data(data, 0)
prediction = self.model(input_, gt_segmentation, make_onehot=False).cpu()
self.model.encode_posterior(input_, gt_segmentation, make_onehot=False)
reference_reconstruction = self.model.reconstruct(out_device="cpu")
gt_segmentation = torch.argmax(gt_segmentation.cpu(), 1, keepdim=False)
reference_kl = distributions.kl_divergence(self.model.posterior, self.model.prior)
# sample latent space for volumes and dice scores
prior_mean = self.model.prior.loc.cpu().numpy()
prior_mean = prior_mean.reshape(prior_mean.shape[0], -1)
prior_std = self.model.prior.scale.cpu().numpy()
prior_std = prior_std.reshape(prior_std.shape[0], -1)
batch_samples = []
for b in range(prior_mean.shape[0]):
batch_samples.append(coordinate_grid_samples(prior_mean[b], prior_std[b], c.test_std_factor, c.test_std_scale))
if len(batch_samples) > 1:
batch_samples = list(zip(*batch_samples))
batch_samples = list(map(np.stack, batch_samples))
else:
batch_samples = batch_samples[0]
batch_samples = list(map(lambda x: x[np.newaxis, ...], batch_samples))
volumes = np.zeros((prior_mean.shape[0], len(batch_samples)))
dice_scores = np.zeros((prior_mean.shape[0], len(batch_samples)))
for s, sample in enumerate(batch_samples):
sample = torch.tensor(sample)
sample_prediction = self.model.reconstruct(sample=sample, out_device="cpu")
sample_prediction = torch.argmax(sample_prediction, 1, keepdim=False).numpy()
volumes_current = []
dice_scores_current = []
for b in range(sample_prediction.shape[0]):
volumes_current.append(np.sum(sample_prediction[b] != 0))
dice_scores_current.append(dice(sample_prediction[b] != 0, data["seg"][b, -1] != 0))
volumes[:, s] = volumes_current
dice_scores[:, s] = dice_scores_current
max_dice = np.max(dice_scores, 1)
dice_for_best_volume = []
for b in range(volumes.shape[0]):
idx = np.argmin(np.abs(volumes[b] - np.sum(data["seg"][b, -1] != 0)))
dice_for_best_volume.append(dice_scores[b, idx])
for s, subject in enumerate(data["subject"]):
name = "{}_t{}".format(subject, data["timestep"][s])
if name in info["coords"]["Subject and Timestep"]:
continue
else:
info["coords"]["Subject and Timestep"].append(name)
# regular evaluation
summary = data.copy()
summary["data"] = data["data"][s:s+1]
summary["seg"] = data["seg"][s:s+1]
if future:
summary["seg"] = summary["seg"][:, -1:]
else:
summary["seg"] = summary["seg"][:, -2:-1]
summary["prediction"] = prediction[s:s+1]
if c.test_save_output:
self.elog.save_numpy_data(summary["prediction"].numpy(), "test/{}_prediction.npy".format(name))
# regular results like in validation
current_score = self.validate_score(summary, 0)
# surprise / information gain
current_reference_kl = reference_kl[s:s+1].sum().item()
# ability to reconstruct groundtruth
current_reference_nll = nn.NLLLoss(reduction="sum")(reference_reconstruction[s:s+1], gt_segmentation[s:s+1]).item()
current_reference_dice = self.validate_score({"prediction": reference_reconstruction[s:s+1], "seg": summary["seg"]}, 0)
current_reference_dice = current_reference_dice[:, self.evaluator.metrics.index("Dice")]
# create arrays, repeating NLL and KL for each label
current_reference_kl = np.array([current_reference_kl, ] * len(c.evaluator_kwargs.label_values))
current_reference_nll = np.array([current_reference_nll, ] * len(c.evaluator_kwargs.label_values))
current_max_dice = np.array([max_dice[s], ] * len(c.evaluator_kwargs.label_values))
current_dice_best_volume = np.array([dice_for_best_volume[s], ] * len(c.evaluator_kwargs.label_values))
current_score_extended = np.stack([current_reference_kl,
current_reference_nll,
current_reference_dice,
current_max_dice,
current_dice_best_volume], 1)
current_score = np.concatenate([current_score, current_score_extended], 1)
scores.append(current_score)
del input_, gt_segmentation
return scores, info
def test_future(self):
info = self.validate_make_default_info()
info["coords"]["Metric"] = info["coords"]["Metric"] + ["Future KL",
"Future Reconstruction NLL",
"Future Reconstruction Dice",
"Prior Maximum Dice",
"Prior Best Volume Dice"]
# our regular generators only produce 1 timestep, so we create this here manually
if self.config.test_on_val:
test_data = self.data_val
else:
test_data = self.data_test
generator = self.config.generator_val(
test_data, self.config.batch_size_val, 3,
number_of_threads_in_multithreaded=self.config.augmenter_val_kwargs.num_processes)
transforms = []
for t in sorted(self.config.transforms_val.keys()):
if self.config.transforms_val[t]["active"]:
cls = self.config.transforms_val[t]["type"]
kwargs = self.config.transforms_val[t]["kwargs"]
transforms.append(cls(**kwargs))
augmenter = self.config.augmenter_val(generator,
Compose(transforms),
**self.config.augmenter_val_kwargs)
test_scores, info = self.test_inner(augmenter, [], info, future=True)
test_scores = np.array(test_scores)
self.elog.save_numpy_data(test_scores, "test_future.npy")
self.elog.save_dict(info, "test_future.json")
if __name__ == '__main__':
parser = get_default_experiment_parser()
parser.add_argument("-p", "--patch_size", type=int, nargs="+", default=112)
parser.add_argument("-in", "--in_channels", type=int, default=4)
parser.add_argument("-lt", "--latent_size", type=int, default=3)
parser.add_argument("-lb", "--labels", type=int, nargs="+", default=[0, 1, 2, 3])
args, _ = parser.parse_known_args()
DEFAULTS, MODS = make_defaults(patch_size=args.patch_size, in_channels=args.in_channels, latent_size=args.latent_size, labels=args.labels)
run_experiment(ProbabilisticUNetSegmentation,
DEFAULTS,
args,
mods=MODS,
explogger_kwargs=dict(folder_format="{experiment_name}_%Y%m%d-%H%M%S"),
globs=globals(),
resume_save_types=("model", "simple", "th_vars", "results"))
```
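`make_defaults` returns both the default configuration and a dictionary of named `Config` overrides (`MODS`) that `run_experiment` can layer on top of it. A small, hedged sketch of inspecting these; it assumes the probunet package and its dependencies are importable, and attribute access on `Config` follows the usage inside the experiment class:
```python
from probunet.experiment.probabilistic_unet_segmentation import make_defaults

defaults, mods = make_defaults(patch_size=112, in_channels=4, latent_size=3, labels=[0, 1, 2, 3])
base_config = defaults["DEFAULTS"]

print(sorted(mods.keys()))              # the named modifications, e.g. 'WHOLETUMOR', 'ENHANCING', ...
print(base_config.out_channels)         # 4 output channels for the default label set
print(mods["WHOLETUMOR"].out_channels)  # the whole-tumor mod reduces the output to 2 channels
```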
{
"source": "jensqin/samplepack",
"score": 3
}
#### File: samplepack/sample01/sample01.py
```python
from samplepack.settings import prstat
# function definition.
def add_one(n):
    """
    add_one(n)

    The sum of a number and one.

    Parameters
    __________
    n : int
        The input number.

    Returns
    _______
    int
        The value :math:`n + 1`.
    """
    print(prstat)
    return n + 1


if __name__ == "__main__":
    print("hello world 1")
```
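Usage is as small as the docstring suggests; note that `add_one` also prints `prstat` from `samplepack.settings` as a side effect (the import path follows the test module shown next):
```python
from samplepack.sample01 import add_one

result = add_one(41)  # prints prstat, then returns 42
assert result == 42
```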
#### File: samplepack/tests/test_sample.py
```python
import pytest
import json

from samplepack.sample01 import add_one
from samplepack.sample02.sample02 import add_two

with open("tests/data/sample.json") as jsonfile:
    inputs = json.load(jsonfile)


def test_load_data():
    assert 0 == inputs["set"]


def test_add_one():
    res = add_one(0)
    assert res == 1


def test_add_two():
    res = add_two(0)
    assert res == 2
```
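`test_load_data` only asserts on the `"set"` key, so the fixture at `tests/data/sample.json` can be as small as a single entry; the full file is not shown, and the sketch below is a hypothetical minimal version:
```python
import json
import os

# Hypothetical minimal fixture; the real tests/data/sample.json may contain more keys.
os.makedirs("tests/data", exist_ok=True)
with open("tests/data/sample.json", "w") as jsonfile:
    json.dump({"set": 0}, jsonfile)
```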
{
"source": "JensRantil/cligh",
"score": 3
}
#### File: cligh/cligh/repos.py
```python
from cligh.utils import get_working_repo, read_user_input, die  # die is assumed to live in cligh.utils alongside the other helpers
from github import GithubException
def create(client, args):
"""Create a new repository."""
def validate_description(text):
if len(text) == 0:
print 'Description may not be empty. Try again.'
return False
return True
def validate_name(text):
if len(text) == 0:
print 'Name may not be empty. Try again.'
return False
if any(char for char in text if char.isspace()):
print 'Name may not contain spaces. Try again.'
return False
# What other characters don't belong in the name?
return True
def validate_homepage(text):
# This is a lame excuse for validation.
if len(text) == 0:
print 'Home page may not be empty. Try again.'
return False
return True
name = read_user_input('Repository name', validate_name)
homepage = read_user_input('Homepage', validate_homepage)
description = read_user_input('Description', validate_description)
user = client.get_user()
print client.get_user().create_repo(name=name, description=description, homepage=homepage)
def fork(client, args):
"""Fork a repository."""
repo_to_fork = get_working_repo(client, args.repository)
client.get_user().create_fork(repo_to_fork)
print 'Repository forked.'
def do_list(client, args):
"""Command to list the repos for a given user."""
user = client.get_user(args.user)
repos = user.get_repos()
print '%s has the following repositories:' % args.user
print 'Name - Description'
for repo in repos:
print '%s - %s' % (repo.name, repo.description)
def addlabel(client, args):
# xxx Make this configurable by the user. White is a sane
# default, for now.
color = 'ffffff'
repository = get_working_repo(client, args.repository)
try:
repository.create_label(args.label, color)
except GithubException as e:
die('''Unable to create label %s.
The complete error response was:
%s
''' % (args.label, e.data))
print 'Label added.'
def remlabel(client, args):
repository = get_working_repo(client, args.repository)
try:
label = repository.get_label(args.label)
label.delete()
except GithubException as e:
die('''Unable to delete label %s from this repository.
Error message: %s
''' % (args.label, e.data['message']))
print 'Label removed.'
def make_repo_parser(subparsers):
repo = subparsers.add_parser('repo', help='Manage and query repositories.')
subparsers = repo.add_subparsers(title='Repository-related Subcommands')
repo_list = subparsers.add_parser('list', help='List repositories belonging to a given user.')
repo_list.set_defaults(func=do_list)
repo_list.add_argument('user')
repo_create = subparsers.add_parser('create', help='Create a new repository.')
repo_create.set_defaults(func=create)
repo_fork = subparsers.add_parser('fork', help='Fork an existing repository.')
repo_fork.set_defaults(func=fork)
repo_fork.add_argument('repository', help='Name of the repository, in the form USERNAME/REPONAME')
repo_addlabel = subparsers.add_parser('add_label', help='Add a label to a repository.')
repo_addlabel.set_defaults(func=addlabel)
repo_addlabel.add_argument('--repository', help='Name of the repository, in the form USERNAME/REPONAME')
repo_addlabel.add_argument('label', help='Name of the label to add')
repo_remlabel = subparsers.add_parser('remove_label', help='Remove a label from a repository.')
repo_remlabel.set_defaults(func=remlabel)
repo_remlabel.add_argument('--repository', help='Name of the repository, in the form USERNAME/REPONAME')
repo_remlabel.add_argument('label', help='Name of the label to remove')
```
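For context, `make_repo_parser` above only wires subcommands into an argparse parser; the caller is expected to supply a PyGithub client. The snippet below is a minimal, hypothetical wiring sketch (the access token and username are placeholders, and it assumes a Python 2 environment to match the module's print statements):
```python
import argparse
from github import Github  # PyGithub client used throughout cligh
from cligh.repos import make_repo_parser

parser = argparse.ArgumentParser(prog='cligh')
subparsers = parser.add_subparsers(title='Subcommands')
make_repo_parser(subparsers)

# Equivalent of running: cligh repo list octocat
args = parser.parse_args(['repo', 'list', 'octocat'])
client = Github('my-access-token')  # hypothetical personal access token
args.func(client, args)
```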
|
{
"source": "JensRantil/rewind-client",
"score": 2
}
|
#### File: rewind/client/__init__.py
```python
import logging
import zmq
logger = logging.getLogger(__name__)
class QueryException(Exception):
"""Raised when rewind server returns an error.
Usually this exception means you have used a non-existing query key.
"""
pass
def query_events(socket, from_=None, to=None):
"""Yield a queried range of events.
Parameters:
socket -- ZeroMQ socket to use. It must be previously connected to
a Rewind instance and of type REQ.
from_ -- the (optional) event id for the (chronologically) earliest end
of the range. It is exclusive. If not specified, or None, all
events from beginning of time are queried for.
to -- the (optional) event id for the (chronologically) latest end of
the range. It is exclusive. If not specified, or None, all
events up to the latest event seen are queried for.
Raises `QueryException` if a query failed. Usually this is raised because a
given `from_` or `to` does not exist in the event store.
This function is a generator; it yields the matching events as they are received.
"""
assert from_ is None or isinstance(from_, bytes)
assert to is None or isinstance(to, bytes)
first_msg = True
done = False
while not done:
# _real_query(...) are giving us events in small batches
done, events = _real_query(socket, from_, to)
for eventid, eventdata in events:
if first_msg:
assert eventid != from_, "First message ID wrong"
first_msg = False
from_ = eventid
yield (eventid, eventdata)
def _real_query(socket, from_, to):
"""Make the actual query for events.
Since the Rewind streams events in batches, this method might not
receive all requested events.
Returns the tuple `(done, events)` where
* `done` is a boolean whether the limited query result reached the
end, or whether there's more events that need to be collected.
* `events` is a list of `(eventid, eventdata)` event tuples where
* `eventid` is a unique string that signifies the event; and
* `eventdata` is a byte string containing the serialized event.
"""
assert from_ is None or isinstance(from_, bytes), type(from_)
assert to is None or isinstance(to, bytes), type(to)
socket.send(b'QUERY', zmq.SNDMORE)
socket.send(from_ if from_ else b'', zmq.SNDMORE)
socket.send(to if to else b'')
more = True
done = False
events = []
while more:
data = socket.recv()
if data == b"END":
assert not socket.getsockopt(zmq.RCVMORE)
done = True
elif data.startswith(b"ERROR"):
assert not socket.getsockopt(zmq.RCVMORE)
raise QueryException("Could not query: {0}".format(data))
else:
eventid = data
assert isinstance(eventid, bytes), type(eventid)
assert socket.getsockopt(zmq.RCVMORE)
eventdata = socket.recv()
eventtuple = (eventid, eventdata)
events.append(eventtuple)
if not socket.getsockopt(zmq.RCVMORE):
more = False
return done, events
def _get_single_streamed_event(streamsock):
"""Retrieve a streamed event off a socket.
Parameters:
streamsock -- the stream socket to be reading from.
Returns a tuple consisting of:
eventid -- the ID of the streamed event
lasteventid -- the ID of the previous streamed event. Can be empty for
the first event (which pretty much never happens)
eventdata -- the (serialized) data for the event.
"""
eventid = streamsock.recv()
assert streamsock.getsockopt(zmq.RCVMORE)
lasteventid = streamsock.recv()
assert streamsock.getsockopt(zmq.RCVMORE)
eventdata = streamsock.recv()
assert not streamsock.getsockopt(zmq.RCVMORE)
return eventid, lasteventid, eventdata
def yield_events_after(streamsock, reqsock, lasteventid=None):
"""Generator that yields all the missed out events.
Parameters:
lasteventid -- the event id of the last seen event.
TODO: Handle when there is no lasteventid.
"""
assert lasteventid is None or isinstance(lasteventid, bytes)
funclogger = logger.getChild('yield_events_after')
cureventid, preveventid, evdata = _get_single_streamed_event(streamsock)
if preveventid != lasteventid and preveventid != b'':
# Making sure we did not hit the high watermark in between.
msg = ('Seem to have reached the high watermark. Doing a manual query'
' to catch up.')
funclogger.info(msg)
for qeventid, qeventdata in query_events(reqsock, lasteventid,
preveventid):
# Note that this for loop's last event will be preveventid since
# its last element is inclusive.
yield qeventid, qeventdata
yield cureventid, evdata
def publish_event(socket, event):
"""Publish a new event to Rewind.
Parameters:
socket -- a ZeroMQ REQ socket connected to a Rewind instance.
event -- event to be published. Is instance of bytes.
"""
assert isinstance(event, bytes), type(event)
socket.send(b'PUBLISH', zmq.SNDMORE)
socket.send(event)
response = socket.recv()
assert response == b'PUBLISHED'
assert not socket.getsockopt(zmq.RCVMORE)
```
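The client functions above are easiest to read alongside a small usage sketch. The snippet below is illustrative only: the endpoint address is a placeholder and assumes a running Rewind instance exposing its query/publish socket there; it uses only `publish_event` and `query_events` as defined above.
```python
import zmq
import rewind.client as clients

QUERY_ENDPOINT = 'tcp://127.0.0.1:8090'  # hypothetical Rewind query endpoint

context = zmq.Context()
reqsock = context.socket(zmq.REQ)
reqsock.connect(QUERY_ENDPOINT)

# Publish a couple of events (payloads must be bytes).
clients.publish_event(reqsock, b'first event')
clients.publish_event(reqsock, b'second event')

# query_events is a generator of (eventid, eventdata) tuples.
for eventid, eventdata in clients.query_events(reqsock):
    print(eventid, eventdata)

reqsock.close()
context.term()
```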
#### File: client/test/test_client.py
```python
from __future__ import print_function
try:
# Python < 3
import ConfigParser as configparser
except ImportError:
# Python >= 3
import configparser
try:
# Python < 3
import StringIO as io
except ImportError:
# Python >= 3
import io
import threading
import time
import unittest
import uuid
import re
import traceback
import mock
import zmq
import rewind.client as clients
import rewind.server.main as main
class _RewindRunnerThread(threading.Thread):
"""A thread that runs a rewind instance.
While the thread is given command line arguments, Rewind is started as
thread rather than external process. This makes it possible to check code
coverage and track exit codes etc.
"""
_EXIT_CODE = b'EXIT'
def __init__(self, bootparams, exit_addr=None):
"""Constructor.
Parameters:
bootparams -- Can be either a dictionary of configuration options
grouped by section, or a list of command line argument
strings.
exit_addr -- the ZeroMQ address used to send the exit message to.
"""
assert isinstance(bootparams, list) or isinstance(bootparams, dict)
thread = self
if isinstance(bootparams, list):
assert '--exit-codeword' not in bootparams, \
("'--exit-codeword' is added by _RewindRunnerThread."
" Not from elsewhere.")
args = (main.main,
bootparams + ['--exit-codeword',
_RewindRunnerThread._EXIT_CODE.decode()])
else:
assert isinstance(bootparams, dict)
bootparams = dict(bootparams)
if "general" not in bootparams:
bootparams['general'] = {}
EXCODE = _RewindRunnerThread._EXIT_CODE
bootparams['general']['exit-code'] = EXCODE
rows = []
for section, keyvals in bootparams.items():
rows.append("[{0}]".format(section))
for key, val in keyvals.items():
rows.append("{0}={1}".format(key, val))
configfilecontent = "\n".join(rows)
options = configparser.SafeConfigParser()
options.readfp(io.StringIO(configfilecontent))
args = (main.run, options, _RewindRunnerThread._EXIT_CODE.decode())
def exitcode_runner(func, *args, **kwargs):
try:
thread.exit_code = func(*args, **kwargs)
except SystemExit as e:
print("Runner made SystemExit.")
thread.exit_code = e.code
except Exception as e:
print("Exception happened:", e)
traceback.print_exc()
thread.exit_code = None
else:
print("Clean exit of runner.")
super(_RewindRunnerThread, self).__init__(target=exitcode_runner,
name="test-rewind",
args=args)
self._exit_addr = exit_addr
def stop(self, context=None):
"""Send a stop message to the event thread."""
assert self._exit_addr is not None
if context is None:
context = zmq.Context(1)
socket = context.socket(zmq.REQ)
socket.connect(self._exit_addr)
socket.send(_RewindRunnerThread._EXIT_CODE)
time.sleep(0.5) # Acceptable exit time
assert not self.isAlive()
socket.close()
class TestReplication(unittest.TestCase):
"""Test high-level replication behaviour."""
UUID_REGEXP = ("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-"
"[0-9a-f]{12}")
def setUp(self):
"""Starting a Rewind instance to test replication."""
args = {
'general': {
'query-bind-endpoint': 'tcp://127.0.0.1:8090',
'streaming-bind-endpoint': 'tcp://127.0.0.1:8091',
}
}
self.rewind = _RewindRunnerThread(args, 'tcp://127.0.0.1:8090')
self.rewind.start()
self.context = zmq.Context(3)
self.transmitter = self.context.socket(zmq.REQ)
self.transmitter.connect('tcp://127.0.0.1:8090')
# Making sure context.term() does not time out
# Could be removed if this test works as expected
self.transmitter.setsockopt(zmq.LINGER, 1000)
self.receiver = self.context.socket(zmq.SUB)
self.receiver.setsockopt(zmq.SUBSCRIBE, b'')
self.receiver.connect('tcp://127.0.0.1:8091')
# Time it takes to connect. This is particularly important so that the
# receiver does not just receive the tail of the stream.
time.sleep(0.5)
def testBasicEventProxying(self):
"""Asserting a single event is proxied."""
eventstring = b"THIS IS AN EVENT"
clients.publish_event(self.transmitter, eventstring)
received_id = self.receiver.recv().decode()
self.assertTrue(bool(self.receiver.getsockopt(zmq.RCVMORE)))
prev_received_id = self.receiver.recv()
self.assertEquals(prev_received_id, b'')
self.assertTrue(bool(self.receiver.getsockopt(zmq.RCVMORE)))
received_string = self.receiver.recv()
self.assertFalse(bool(self.receiver.getsockopt(zmq.RCVMORE)))
self.assertIsNotNone(re.match(self.UUID_REGEXP, received_id))
self.assertEqual(received_string, eventstring)
def testProxyingABunchOfEvents(self):
"""Testing that a bunch of incoming messages processed correctly.
That is, they are all being proxied and in order.
"""
NMESSAGES = 200
messages = []
for id in range(NMESSAGES):
eventstring = "THIS IS EVENT NUMBER {0}".format(id).encode()
messages.append(eventstring)
# Sending
for msg in messages:
clients.publish_event(self.transmitter, msg)
# Receiving and asserting correct messages
eventids = []
received_messages = []
previd = b''
for msg in messages:
received_id = self.receiver.recv()
self.assertTrue(bool(self.receiver.getsockopt(zmq.RCVMORE)))
received_prev_id = self.receiver.recv()
self.assertEquals(received_prev_id, previd)
previd = received_id
self.assertTrue(bool(self.receiver.getsockopt(zmq.RCVMORE)))
received_string = self.receiver.recv()
received_messages.append(received_string)
self.assertFalse(bool(self.receiver.getsockopt(zmq.RCVMORE)))
self.assertIsNotNone(re.match(self.UUID_REGEXP,
received_id.decode()))
eventids.append(received_id)
self.assertEqual(received_string, msg)
self.assertEqual(len(set(eventids)), len(eventids),
"Found duplicate event id!")
self.assertEqual(messages, received_messages,
"Not all messages received")
def tearDown(self):
"""Shutting down Rewind test instance."""
self.transmitter.close()
self.receiver.close()
self.assertTrue(self.rewind.isAlive(),
"Did rewind crash? Not running.")
self.rewind.stop(self.context)
self.assertFalse(self.rewind.isAlive(),
"Rewind should not have been running. It was.")
self.context.term()
class TestQuerying(unittest.TestCase):
"""Test high-level event querying behaviour."""
def setUp(self):
"""Start and populate a Rewind instance to test querying."""
args = {
'general': {
'query-bind-endpoint': 'tcp://127.0.0.1:8090',
}
}
self.rewind = _RewindRunnerThread(args, 'tcp://127.0.0.1:8090')
self.rewind.start()
self.context = zmq.Context(3)
self.querysock = self.context.socket(zmq.REQ)
self.querysock.connect('tcp://127.0.0.1:8090')
ids = [uuid.uuid1().hex for i in range(200)]
self.assertEqual(len(ids), len(set(ids)), 'There were duplicate IDs.'
' Maybe the UUID1 algorithm is flawed?')
users = [uuid.uuid1().hex for i in range(30)]
self.assertEqual(len(users), len(set(users)),
'There were duplicate users.'
' Maybe the UUID1 algorithm is flawed?')
self.sent = []
for id in ids:
eventstr = "Event with id '{0}'".format(id).encode()
self.querysock.send(b"PUBLISH", zmq.SNDMORE)
self.querysock.send(eventstr)
response = self.querysock.recv()
assert response == b'PUBLISHED'
assert not self.querysock.getsockopt(zmq.RCVMORE)
self.sent.append(eventstr)
def testSyncAllPastEvents(self):
"""Test querying all events."""
time.sleep(0.5) # Max time to persist the messages
allevents = [event[1]
for event in clients.query_events(self.querysock)]
self.assertEqual(allevents, self.sent)
self.assertEqual(allevents, self.sent, "Elements don't match.")
def testSyncEventsSince(self):
"""Test querying events after a certain time."""
time.sleep(0.5) # Max time to persist the messages
allevents = [event for event in clients.query_events(self.querysock)]
from_ = allevents[3][0]
events = [event[1] for event in clients.query_events(self.querysock,
from_=from_)]
self.assertEqual([event[1] for event in allevents[4:]], events)
def testSyncEventsBefore(self):
"""Test querying events before a certain time."""
time.sleep(0.5) # Max time to persist the messages
allevents = [event
for event in clients.query_events(self.querysock)]
to = allevents[-3][0]
events = [event[1]
for event in clients.query_events(self.querysock, to=to)]
self.assertEqual([event[1] for event in allevents[:-2]], events)
def testSyncEventsBetween(self):
"""Test querying events a slice of the events."""
time.sleep(0.5) # Max time to persist the messages
allevents = [event for event in clients.query_events(self.querysock)]
from_ = allevents[3][0]
to = allevents[-3][0]
events = [event[1]
for event in clients.query_events(self.querysock,
from_=from_,
to=to)]
self.assertEqual([event[1] for event in allevents[4:-2]], events)
def testSyncNontExistentEvent(self):
"""Test when querying for non-existent event id."""
result = clients.query_events(self.querysock, from_=b"non-exist")
self.assertRaises(clients.QueryException,
list, result)
def tearDown(self):
"""Close Rewind test instance."""
self.querysock.close()
self.assertTrue(self.rewind.isAlive(),
"Did rewind crash? Not running.")
self.rewind.stop(self.context)
self.assertFalse(self.rewind.isAlive(),
"Rewind should not have been running. It was.")
self.context.term()
class TestEventReception(unittest.TestCase):
"""Test event reception using `yield_events_after`."""
def setUp(self):
"""Set up the each test."""
self.events = [
(b'a', b'', b'event1'),
(b'b', b'a', b'event2'),
(b'c', b'b', b'event3'),
]
def testRecvFirstEvent(self):
"""Test fetching the absolutely first event."""
streamsock = mock.NonCallableMock()
streamsock.recv.side_effect = self.events[0]
streamsock.getsockopt.side_effect = [True, True, False, False]
reqsock = mock.NonCallableMock()
results = []
for result in clients.yield_events_after(streamsock, reqsock):
results.append(result)
self.assertEqual(results, [(self.events[0][0], self.events[0][2])])
assert streamsock.recv.called
assert not reqsock.recv.called
def testRecvNonFloodedNextEvent(self):
"""Test receiving the next event through streaming socket only."""
streamsock = mock.NonCallableMock()
streamsock.recv.side_effect = self.events[2]
streamsock.getsockopt.side_effect = [True, True, False]
reqsock = mock.NonCallableMock()
results = []
for result in clients.yield_events_after(streamsock, reqsock,
self.events[1][0]):
results.append(result)
self.assertEqual(results, [(self.events[2][0], self.events[2][2])])
assert streamsock.recv.called
assert not reqsock.recv.called
def testRecvFloodedSocket(self):
"""Test receiving an event when watermark was passed."""
streamsock = mock.NonCallableMock()
streamsock.recv.side_effect = self.events[2]
streamsock.getsockopt.side_effect = [True, True, False]
reqsock = mock.NonCallableMock()
toreceive = (self.events[1][0], self.events[1][2], b'END')
reqsock.recv.side_effect = toreceive
# Need two 'False' here due to assertion logic in query code
reqsock.getsockopt.side_effect = [True, True, False, False]
results = []
for result in clients.yield_events_after(streamsock, reqsock,
self.events[0][0]):
results.append(result)
# Implementation specific tests that have been used mostly for
# debugging of the code. Can be removed without being too worried.
assert not streamsock.send.called
reqsock.send.assert_has_calls([mock.call(b"QUERY", zmq.SNDMORE),
mock.call(self.events[0][0],
zmq.SNDMORE),
mock.call(self.events[1][0])])
self.assertEqual(streamsock.recv.call_count, 3,
streamsock.recv.call_args_list)
self.assertEqual(reqsock.recv.call_count, 3)
# The actual test that makes sure result is what it's supposed to be.
self.assertEqual(results, [(self.events[1][0], self.events[1][2]),
(self.events[2][0], self.events[2][2])])
```
|
{
"source": "JensRantil/salt",
"score": 2
}
|
#### File: salt/modules/debbuild.py
```python
from __future__ import absolute_import, print_function
import os
import tempfile
import shutil
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module,import-error
# Import salt libs
import salt.utils
from salt.exceptions import SaltInvocationError
# pylint: disable=import-error
__virtualname__ = 'pkgbuild'
def __virtual__():
'''
Confirm this module is on a Debian based system
'''
if __grains__.get('os_family', False) in ('Kali', 'Debian'):
return __virtualname__
return False
def _get_build_env(env):
'''
Get build environment overrides dictionary to use in build process
'''
env_override = ''
if env is None:
return env_override
if not isinstance(env, dict):
raise SaltInvocationError(
'\'env\' must be a Python dictionary'
)
for key, value in env.items():
env_override += '{0}={1}\n'.format(key, value)
env_override += 'export {0}\n'.format(key)
return env_override
def _get_repo_env(env):
'''
Get repo environment overrides dictionary to use in repo process
'''
env_options = ''
if env is None:
return env_options
if not isinstance(env, dict):
raise SaltInvocationError(
'\'env\' must be a Python dictionary'
)
for key, value in env.items():
env_options += '{0}\n'.format(value)
return env_options
def _create_pbuilders(env):
'''
Create the .pbuilder family of files in user's home directory
env
A list or dictionary of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
- env:
- DEB_BUILD_OPTIONS: 'nocheck'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :doc:`here
</topics/troubleshooting/yaml_idiosyncrasies>`.
'''
hook_text = '''#!/bin/sh
set -e
cat > "/etc/apt/preferences" << EOF
Package: python-abalaster
Pin: release a=testing
Pin-Priority: 950
Package: python-sphinx
Pin: release a=experimental
Pin-Priority: 900
Package: sphinx-common
Pin: release a=experimental
Pin-Priority: 900
Package: *
Pin: release a=jessie-backports
Pin-Priority: 750
Package: *
Pin: release a=stable
Pin-Priority: 700
Package: *
Pin: release a=testing
Pin-Priority: 650
Package: *
Pin: release a=unstable
Pin-Priority: 600
Package: *
Pin: release a=experimental
Pin-Priority: 550
EOF
'''
pbldrc_text = '''DIST="jessie"
if [ -n "${DIST}" ]; then
TMPDIR=/tmp
BASETGZ="`dirname $BASETGZ`/$DIST-base.tgz"
DISTRIBUTION=$DIST
APTCACHE="/var/cache/pbuilder/$DIST/aptcache"
fi
HOOKDIR="${HOME}/.pbuilder-hooks"
OTHERMIRROR="deb http://ftp.us.debian.org/debian/ testing main contrib non-free | deb http://ftp.us.debian.org/debian/ experimental main contrib non-free"
'''
home = os.path.expanduser('~')
pbuilder_hooksdir = os.path.join(home, '.pbuilder-hooks')
if not os.path.isdir(pbuilder_hooksdir):
os.makedirs(pbuilder_hooksdir)
d05hook = os.path.join(pbuilder_hooksdir, 'D05apt-preferences')
with salt.utils.fopen(d05hook, 'w') as fow:
fow.write('{0}'.format(hook_text))
pbuilderrc = os.path.join(home, '.pbuilderrc')
with salt.utils.fopen(pbuilderrc, 'w') as fow:
fow.write('{0}'.format(pbldrc_text))
env_overrides = _get_build_env(env)
if env_overrides and not env_overrides.isspace():
with salt.utils.fopen(pbuilderrc, 'a') as fow:
fow.write('{0}'.format(env_overrides))
def _mk_tree():
'''
Create the debian build area
'''
basedir = tempfile.mkdtemp()
return basedir
def _get_spec(tree_base, spec, template, saltenv='base'):
'''
Get the spec file (tarball of the debian sub-dir to use)
and place it in build area
'''
spec_tgt = os.path.basename(spec)
dest = os.path.join(tree_base, spec_tgt)
return __salt__['cp.get_url'](spec, dest, saltenv=saltenv)
def _get_src(tree_base, source, saltenv='base'):
'''
Get the named sources and place them into the tree_base
'''
parsed = _urlparse(source)
sbase = os.path.basename(source)
dest = os.path.join(tree_base, sbase)
if parsed.scheme:
__salt__['cp.get_url'](source, dest, saltenv=saltenv)
else:
shutil.copy(source, dest)
def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base'):
'''
Create a platform specific source package from the given platform spec/control file and sources
CLI Example:
Debian
salt '*' pkgbuild.make_src_pkg /var/www/html/ https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/deb/python-libnacl.control.tar.xz https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
This example command should build the libnacl SOURCE package and place it in
/var/www/html/ on the minion
'''
_create_pbuilders(env)
tree_base = _mk_tree()
ret = []
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
spec_pathfile = _get_spec(tree_base, spec, template, saltenv)
# build salt equivalents from scratch
if isinstance(sources, str):
sources = sources.split(',')
for src in sources:
_get_src(tree_base, src, saltenv)
#.dsc then assumes sources already build
if spec_pathfile.endswith('.dsc'):
for efile in os.listdir(tree_base):
full = os.path.join(tree_base, efile)
trgt = os.path.join(dest_dir, efile)
shutil.copy(full, trgt)
ret.append(trgt)
trgt = os.path.join(dest_dir, os.path.basename(spec_pathfile))
shutil.copy(spec_pathfile, trgt)
ret.append(trgt)
return ret
# obtain name of 'python setup.py sdist' generated tarball, extract the version
# and manipulate the name for debian use (convert '-' to '_' and add '+ds')
salttarball = None
for afile in os.listdir(tree_base):
if afile.startswith('salt-') and afile.endswith('.tar.gz'):
salttarball = afile
break
else:
return ret
frontname = salttarball.split('.tar.gz')
salttar_name = frontname[0]
debname = salttar_name.replace('-', '_')
debname += '+ds'
debname_orig = debname + '.orig.tar.gz'
abspath_debname = os.path.join(tree_base, debname)
cmd = 'tar -xvzf {0}'.format(salttarball)
__salt__['cmd.run'](cmd, cwd=tree_base)
cmd = 'mv {0} {1}'.format(salttar_name, debname)
__salt__['cmd.run'](cmd, cwd=tree_base)
cmd = 'tar -cvzf {0} {1}'.format(os.path.join(tree_base, debname_orig), debname)
__salt__['cmd.run'](cmd, cwd=tree_base)
cmd = 'rm -f {0}'.format(salttarball)
__salt__['cmd.run'](cmd, cwd=tree_base)
cmd = 'cp {0} {1}'.format(spec_pathfile, abspath_debname)
__salt__['cmd.run'](cmd, cwd=abspath_debname)
cmd = 'tar -xvJf {0}'.format(spec_pathfile)
__salt__['cmd.run'](cmd, cwd=abspath_debname)
cmd = 'rm -f {0}'.format(os.path.basename(spec_pathfile))
__salt__['cmd.run'](cmd, cwd=abspath_debname)
cmd = 'debuild -S -uc -us'
__salt__['cmd.run'](cmd, cwd=abspath_debname, python_shell=True)
cmd = 'rm -fR {0}'.format(abspath_debname)
__salt__['cmd.run'](cmd)
for dfile in os.listdir(tree_base):
if dfile.startswith('salt_'):
if not dfile.endswith('.build'):
full = os.path.join(tree_base, dfile)
trgt = os.path.join(dest_dir, dfile)
shutil.copy(full, trgt)
ret.append(trgt)
return ret
def build(runas, tgt, dest_dir, spec, sources, deps, env, template, saltenv='base'):
'''
Given the package destination directory, the tarball containing debian files (e.g. control)
and package sources, use pbuilder to safely build the platform package
CLI Example:
Debian
salt '*' pkgbuild.make_src_pkg deb-8-x86_64 /var/www/html/ https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/deb/python-libnacl.control https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
This example command should build the libnacl package for Debian using pbuilder
and place it in /var/www/html/ on the minion
'''
ret = {}
if not os.path.isdir(dest_dir):
try:
os.makedirs(dest_dir)
except (IOError, OSError):
pass
dsc_dir = tempfile.mkdtemp()
dscs = make_src_pkg(dsc_dir, spec, sources, env, template, saltenv)
# dscs should only contain salt orig and debian tarballs and dsc file
for dsc in dscs:
afile = os.path.basename(dsc)
adist = os.path.join(dest_dir, afile)
shutil.copy(dsc, adist)
if dsc.endswith('.dsc'):
dbase = os.path.dirname(dsc)
cmd = 'chown {0} -R {1}'.format(runas, dbase)
__salt__['cmd.run'](cmd)
results_dir = tempfile.mkdtemp()
cmd = 'chown {0} -R {1}'.format(runas, results_dir)
__salt__['cmd.run'](cmd)
cmd = 'pbuilder create'
__salt__['cmd.run'](cmd, runas=runas, python_shell=True)
cmd = 'pbuilder --build --buildresult {1} {0}'.format(dsc, results_dir)
__salt__['cmd.run'](cmd, runas=runas, python_shell=True)
for bfile in os.listdir(results_dir):
full = os.path.join(results_dir, bfile)
if bfile.endswith('.deb'):
bdist = os.path.join(dest_dir, bfile)
shutil.copy(full, bdist)
else:
with salt.utils.fopen(full, 'r') as fp_:
ret[bfile] = fp_.read()
shutil.rmtree(results_dir)
shutil.rmtree(dsc_dir)
return ret
def make_repo(repodir, keyid=None, env=None):
'''
Given the repodir, create a Debian repository out of the dsc therein
CLI Example::
salt '*' pkgbuild.make_repo /var/www/html/
'''
repocfg_text = '''Origin: SaltStack
Label: salt_debian
Suite: unstable
Codename: jessie
Architectures: i386 amd64 source
Components: contrib
Description: SaltStack debian package repo
Pull: jessie
'''
repoconf = os.path.join(repodir, 'conf')
if not os.path.isdir(repoconf):
os.makedirs(repoconf)
repoconfdist = os.path.join(repoconf, 'distributions')
with salt.utils.fopen(repoconfdist, 'w') as fow:
fow.write('{0}'.format(repocfg_text))
if keyid is not None:
with salt.utils.fopen(repoconfdist, 'a') as fow:
fow.write('Signwith: {0}\n'.format(keyid))
repocfg_opts = _get_repo_env(env)
repoconfopts = os.path.join(repoconf, 'options')
with salt.utils.fopen(repoconfopts, 'w') as fow:
fow.write('{0}'.format(repocfg_opts))
for debfile in os.listdir(repodir):
if debfile.endswith('.changes'):
cmd = 'reprepro -Vb . include jessie {0}'.format(os.path.join(repodir, debfile))
__salt__['cmd.run'](cmd, cwd=repodir)
if debfile.endswith('.deb'):
cmd = 'reprepro -Vb . includedeb jessie {0}'.format(os.path.join(repodir, debfile))
__salt__['cmd.run'](cmd, cwd=repodir)
```
#### File: salt/modules/splay.py
```python
from __future__ import absolute_import
import time
# Import 3rd-party libs
import salt.ext.six as six
# Import Salt Libs
from salt.exceptions import CommandExecutionError
_DEFAULT_SPLAYTIME = 600
_DEFAULT_SIZE = 8192
def _get_hash(hashable, size):
'''
Jenkins One-At-A-Time Hash Function
More Info: http://en.wikipedia.org/wiki/Jenkins_hash_function#one-at-a-time
'''
# Using bitmask to emulate rollover behavior of C unsigned 32 bit int
bitmask = 0xffffffff
h = 0
for i in bytearray(hashable):
h = (h + i) & bitmask
h = (h + (h << 10)) & bitmask
h = (h ^ (h >> 6)) & bitmask
h = (h + (h << 3)) & bitmask
h = (h ^ (h >> 11)) & bitmask
h = (h + (h << 15)) & bitmask
return (h & (size - 1)) & bitmask
def calc_splay(hashable, splaytime=_DEFAULT_SPLAYTIME, size=_DEFAULT_SIZE):
'''
Use directly to get a value from the Jenkins One-At-A-Time Hash Function using an
arbitrary key.
CLI Example:
# Get a value from the hash function between 1 and 60 using key 'foobar'
salt example-host calc_splay 'foobar' 60
'''
hash_val = _get_hash(hashable, size)
return int(splaytime * hash_val / float(size))
def splay(*args, **kwargs):
'''
Splay a salt function call execution time across minions over
a number of seconds (default: 600)
NOTE: You *probably* want to use --async here and look up the job results later.
If you're dead set on getting the output from the CLI command, then make
sure to set the timeout (with the -t flag) to something greater than the
splaytime (max splaytime + time to execute job).
Otherwise, it's very likely that the cli will time out before the job returns.
CLI Example:
# With default splaytime
salt --async '*' splay.splay pkg.install cowsay version=3.03-8.el6
# With specified splaytime (5 minutes) and timeout with 10 second buffer
salt -t 310 '*' splay.splay 300 pkg.version cowsay
'''
# Convert args tuple to a list so we can pop the splaytime and func out
args = list(args)
# If the first argument passed is an integer, set it as the splaytime
try:
splaytime = int(args[0])
args.pop(0)
except ValueError:
splaytime = _DEFAULT_SPLAYTIME
if splaytime <= 0:
raise ValueError('splaytime must be a positive integer')
func = args.pop(0)
# Check if the func is valid before the sleep
if func not in __salt__:
raise CommandExecutionError('Unable to find module function {0}'.format(func))
my_delay = calc_splay(__grains__['id'], splaytime=splaytime)
time.sleep(my_delay)
# Get rid of the hidden kwargs that salt injects
func_kwargs = dict((k, v) for k, v in six.iteritems(kwargs) if not k.startswith('__'))
result = __salt__[func](*args, **func_kwargs)
if not isinstance(result, dict):
result = {'result': result}
result['splaytime'] = str(my_delay)
return result
def show(splaytime=_DEFAULT_SPLAYTIME):
'''
Show calculated splaytime for this minion
Will use default value of 600 (seconds) if splaytime value not provided
CLI Example:
salt example-host splay.show
salt example-host splay.show 60
'''
# Coerce splaytime to int (passed arg from CLI will be a str)
if not isinstance(splaytime, int):
splaytime = int(splaytime)
return str(calc_splay(__grains__['id'], splaytime=splaytime))
```
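The splay logic above is self-contained enough to illustrate outside of Salt. The sketch below re-implements the same per-byte Jenkins one-at-a-time hash and splay formula in plain Python (it deliberately does not import the Salt module, and the minion ids are made up) to show that each id maps to a deterministic delay in `[0, splaytime)`:
```python
def one_at_a_time_hash(hashable, size=8192):
    # Per-byte Jenkins one-at-a-time hash, masked to emulate a C unsigned 32-bit int.
    bitmask = 0xffffffff
    data = hashable.encode('utf-8') if isinstance(hashable, str) else hashable
    h = 0
    for i in bytearray(data):
        h = (h + i) & bitmask
        h = (h + (h << 10)) & bitmask
        h = (h ^ (h >> 6)) & bitmask
        h = (h + (h << 3)) & bitmask
        h = (h ^ (h >> 11)) & bitmask
        h = (h + (h << 15)) & bitmask
    return h & (size - 1)

def calc_splay(hashable, splaytime=600, size=8192):
    # Map the hash onto [0, splaytime) seconds.
    return int(splaytime * one_at_a_time_hash(hashable, size) / float(size))

if __name__ == '__main__':
    for minion_id in ('web01', 'web02', 'db01'):  # made-up minion ids
        print(minion_id, calc_splay(minion_id, splaytime=300))
```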
|
{
"source": "jensreeder/scikit-bio",
"score": 2
}
|
#### File: jensreeder/scikit-bio/setup.py
```python
import os
import platform
import re
import ast
from setuptools import find_packages, setup
from setuptools.extension import Extension
from setuptools.command.build_ext import build_ext as _build_ext
# Bootstrap setup.py with numpy
# Huge thanks to coldfix's solution
# http://stackoverflow.com/a/21621689/579416
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
# version parsing from __init__ pulled from Flask's setup.py
# https://github.com/mitsuhiko/flask/blob/master/setup.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('skbio/__init__.py', 'rb') as f:
hit = _version_re.search(f.read().decode('utf-8')).group(1)
version = str(ast.literal_eval(hit))
classes = """
Development Status :: 1 - Planning
License :: OSI Approved :: BSD License
Topic :: Software Development :: Libraries
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Operating System :: Unix
Operating System :: POSIX
Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
description = ('Data structures, algorithms and educational '
'resources for bioinformatics.')
with open('README.rst') as f:
long_description = f.read()
# Dealing with Cython
USE_CYTHON = os.environ.get('USE_CYTHON', False)
ext = '.pyx' if USE_CYTHON else '.c'
# There's a bug in some versions of Python 3.4 that propagates
# -Werror=declaration-after-statement to extensions, instead of just affecting
# the compilation of the interpreter. See http://bugs.python.org/issue21121 for
# details. This acts as a workaround until the next Python 3 release -- thanks
# <NAME> (wolma) for the workaround!
ssw_extra_compile_args = ['-Wno-error=declaration-after-statement']
# Users with i686 architectures have reported that adding this flag allows
# SSW to be compiled. See https://github.com/biocore/scikit-bio/issues/409 and
# http://stackoverflow.com/q/26211814/3776794 for details.
if platform.machine() == 'i686':
ssw_extra_compile_args.append('-msse2')
extensions = [
Extension("skbio.stats.__subsample",
["skbio/stats/__subsample" + ext]),
Extension("skbio.alignment._ssw_wrapper",
["skbio/alignment/_ssw_wrapper" + ext,
"skbio/alignment/_lib/ssw.c"],
extra_compile_args=ssw_extra_compile_args)
]
if USE_CYTHON:
from Cython.Build import cythonize
extensions = cythonize(extensions)
setup(name='scikit-bio',
version=version,
license='BSD',
description=description,
long_description=long_description,
author="scikit-bio development team",
author_email="<EMAIL>",
maintainer="scikit-bio development team",
maintainer_email="<EMAIL>",
url='http://scikit-bio.org',
test_suite='nose.collector',
packages=find_packages(),
ext_modules=extensions,
cmdclass={'build_ext': build_ext},
setup_requires=['numpy >= 1.7'],
install_requires=['numpy >= 1.7', 'matplotlib >= 1.1.0',
'scipy >= 0.13.0', 'pandas', 'future', 'six',
'natsort >= 4.0.0', 'IPython',
'CacheControl[FileCache]'],
extras_require={'test': ["nose >= 0.10.1", "pep8", "flake8",
"python-dateutil"],
'doc': ["Sphinx == 1.2.2", "sphinx-bootstrap-theme"]},
classifiers=classifiers,
package_data={
'skbio.io.tests': ['data/*'],
'skbio.stats.tests': ['data/*'],
'skbio.stats.distance.tests': ['data/*'],
'skbio.stats.ordination.tests': ['data/*']
}
)
```
|
{
"source": "jensren/image-understanding",
"score": 2
}
|
#### File: a4/Part2/a4p2.py
```python
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
import random
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
M = np.array([[-0.035/0.00002611, 0, 900/2],
[0, -0.035/0.00002611, 600/2],
[0, 0, 1]])
FOCAL_LENGTH = 0.035
def load_matches(file):
""" Load a set of matching points from the matches.txt file and reformat as list of tuples (left point, right point)
matches.txt is space delimited in format x y x' y' """
mat = np.loadtxt(file, dtype=int)
ret_lst = []
for row in range(mat.shape[0]):
ret_lst.append(((mat[row, 0], mat[row, 1]), (mat[row, 2], mat[row, 3])))
return ret_lst
def plot_epilines(img1, img2, matches, epip_tup, fundamental, name, plot_f=False):
""" Plot the epilines for the two images
If plot_f, also plot the fundamental matrix """
# Source of heatmap plotting code for displaying the fundamental matrix:
# https://matplotlib.org/3.1.1/gallery/images_contours_and_fields/image_annotated_heatmap.html
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(50, 15)) if plot_f \
else plt.subplots(nrows=1, ncols=2, figsize=(40, 11))
fig.suptitle("Epilines ({})".format(name))
ax[0].imshow(img1)
ax[0].set_title("Left Image")
ax[1].imshow(img2)
ax[1].set_title("Right Image")
colour_list = ['r', 'g', 'b', 'c', 'm', 'y']
e_l, e_r = epip_tup
for p_l, p_r in matches:
colour = random.randint(0, len(colour_list) - 1)
ax[0].plot((e_l[0], p_l[0]), (e_l[1], p_l[1]), marker='o', ls='-', c=colour_list[colour])
ax[1].plot((e_r[0], p_r[0]), (e_r[1], p_r[1]), marker='o', ls='-', c=colour_list[colour])
if plot_f:
ax[2].imshow(fundamental)
ax[2].set_title("Fundamental Matrix")
for i in range(len(fundamental)):
for j in range(len(fundamental)):
ax[2].text(j, i, round(fundamental[i, j], 5), ha="center", va="center", color="w")
plt.show()
def plot_poly_3d(points_sets, point_matches, name, img1, img2):
""" Takes 3d points and plots them as polygons to show depth
Each item in the points_sets is a set of points that create one polygon """
# source for code used to plot:
# https://stackoverflow.com/questions/4622057/plotting-3d-polygons-in-python-matplotlib
# https://stackoverflow.com/questions/18897786/transparency-for-poly3dcollection-plot-in-matplotlib
colour_list = ['r', 'g', 'b', 'c', 'm', 'y']
# plot of matching points
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(20, 11))
fig.suptitle("{}".format(name))
ax[0].imshow(img1)
ax[0].set_title("Left Image")
ax[1].imshow(img2)
ax[1].set_title("Right Image")
i = 0 # tracks the corresponding point in point_matches
for s in range(len(points_sets)):
for p in range(len(points_sets[s])):
ax[0].scatter(point_matches[i, 0, 0], point_matches[i, 0, 1], c=colour_list[s])
ax[1].scatter(point_matches[i, 1, 0], point_matches[i, 1, 1], c=colour_list[s])
i += 1
plt.show()
# plot of recovered depth
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20, 11))
ax = fig.add_subplot(111, projection='3d')
ax.set_title("Recovered Depth ({})".format(name))
ax.set_xlabel('x axis')
ax.set_ylabel('y axis')
ax.set_zlabel('z axis')
for s in range(len(points_sets)):
pts = points_sets[s]
x, y, z = np.array(pts)[:, 0], np.array(pts)[:, 1], np.array(pts)[:, 2]
# x = [0, 1, 1, 0]
# y = [0, 0, 1, 1]
# z = [1, 1, 1, 1]
ax.scatter(x, y, z, c=colour_list[s])
vertices = [[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]
tupleList = list(zip(x, y, z))
poly3d = [[tupleList[vertices[ix][iy]] for iy in range(len(vertices[0]))] for ix in range(len(vertices))]
collection = Poly3DCollection(poly3d, linewidths=1, alpha=0.2)
collection.set_facecolor(colour_list[s])
collection.set_alpha(0.3)
ax.add_collection3d(collection)
ax.add_collection3d(Line3DCollection(poly3d, colors='k', linewidths=0.2, linestyles=':'))
plt.show()
def get_h(point_lst):
""" Calculate H, as explained in Exercise 7.6 of the book """
homogeneous_pts = np.array([(p[0], p[1], 1) for p in point_lst])
mean = np.mean(homogeneous_pts, axis=0)
dist = np.array([np.linalg.norm(p - mean) for p in homogeneous_pts])
mean_dist = np.mean(dist)
# note: both translation terms carry a minus sign so that the centroid maps to the origin
return np.array([[np.sqrt(2) / mean_dist, 0, -np.sqrt(2) / mean_dist * mean[0]],
[0, np.sqrt(2) / mean_dist, -np.sqrt(2) / mean_dist * mean[1]],
[0, 0, 1]])
def eight_point(points_lst):
""" Eight-point algorithm that returns the estimate of the fundamental matrix
points_lst is a list of tuples (left point, right point) in image coordinates
max_dim is the max value of (height, width) used to scale points to prevent numerical instabilities """
# get H for normalization and produce normalized points
points_lst = np.array(points_lst)
h_l = get_h(points_lst[:, 0])
h_r = get_h(points_lst[:, 1])
p_l_norm = [h_l @ np.array([p[0], p[1], 1]) for p in points_lst[:, 0]]
p_r_norm = [h_r @ np.array([p[0], p[1], 1]) for p in points_lst[:, 1]]
# create A using normalized points
a = []
for p_l, p_r in zip(p_l_norm, p_r_norm):
x_l, y_l = p_l[0], p_l[1]
x_r, y_r = p_r[0], p_r[1]
a.append([x_r * x_l, x_r * y_l, x_r, y_r * x_l, y_r * y_l, y_r, x_l, y_l, 1])
a = np.array(a)
u, s, vh = np.linalg.svd(a)
f_mat = np.reshape(vh[-1, :], (3, 3))
# enforce singularity constraint
u, s, vh = np.linalg.svd(f_mat)
s[-1] = 0
f_unscaled = (u * s) @ vh
# rescale F
return np.linalg.inv(h_r) @ f_unscaled @ np.linalg.inv(h_l)
def epipoles_location(f_mat):
""" Computer the location of the epipoles from the fundamental matrix
Returns (left epipole, right epipole) """
u, s, vh = np.linalg.svd(f_mat)
e_l = vh[-1, :]
e_r = u[:, -1]
# get x, y by dividing by w
e_l = (e_l[0] / e_l[2], e_l[1] / e_l[2])
e_r = (e_r[0] / e_r[2], e_r[1] / e_r[2])
return e_l, e_r
def compute_e(f_mat, m_mat):
""" Compute the essential matrix given F and M, assuming M_r = M_l """
return m_mat.T @ f_mat @ m_mat
def compute_r_t(e_mat):
""" Compute R, t_hat from the essential matrix """
e_hat = e_mat / np.sqrt(np.trace(e_mat.T @ e_mat) / 2)
et_e_hat = e_hat.T @ e_hat
# using 7.26 from the book
t_hat = np.array([np.sqrt(1 - et_e_hat[0, 0]), np.sqrt(1 - et_e_hat[1, 1]), np.sqrt(1 - et_e_hat[2, 2])])
w = np.array([np.cross(e_hat[i, :], t_hat) for i in range(3)]) # [w_i, w_j, w_k]
r = np.array([w[0] + np.cross(w[1], w[2]), w[1] + np.cross(w[2], w[0]), w[2] + np.cross(w[0], w[1])])
return r, t_hat
def add_z(point, focal_len):
""" Given a point (x, y) and the focal length, returns (x, y, f) """
return np.array([point[0], point[1], focal_len])
def triang_point(r_mat, t_vec, focal_len, p_l, p_r):
""" Compute P' given p_l, p_r, the rotation matrix, and the translation vector
p_l, p_r are in the form (x, y) in image coordinates
units are in metres """
# convert p_l, p_r into 3D coordinates (according to the book, z = focal length)
p_l = add_z(p_l, focal_len)
p_r = add_z(p_r, focal_len)
# formulate the linear system and solve for a, b, c
a = [p_l, r_mat.T @ p_r, (np.cross(p_l, r_mat.T @ p_r))]
sol = np.linalg.inv(a).dot(t_vec)
# return the midpoint of line segment joining a * p_l and T + b * R_T @ p_r
return (np.reshape(sol[0] * p_l, 3) + (np.reshape(t_vec, 3) + sol[1] * r_mat.T @ p_r)) / 2
def triang_four(matches):
# takes 4 matches and returns a set of 4 3d points
pts_set = []
for tup in matches[:4]:
p_3d = triang_point(R, t, FOCAL_LENGTH, tup[0], tup[1])
pts_set.append(p_3d)
return pts_set
if __name__ == '__main__':
test_q4 = True
test_q5 = True
test_q6 = True
p1 = (cv.cvtColor(cv.imread("first_pair/p11.jpg"), cv.COLOR_BGR2RGB),
cv.cvtColor(cv.imread("first_pair/p12.jpg"), cv.COLOR_BGR2RGB))
p2 = (cv.cvtColor(cv.imread("second_pair/p21.jpg"), cv.COLOR_BGR2RGB),
cv.cvtColor(cv.imread("second_pair/p22.jpg"), cv.COLOR_BGR2RGB))
p1_matches = load_matches("first_pair/matches.txt")
p2_matches = load_matches("second_pair/matches.txt")
max_dim = max(p1[0].shape[:-1])
if test_q4:
compare_f = True
plot_lst_img1 = []
plot_lst_img2 = []
f1 = eight_point(p1_matches)
ep1 = epipoles_location(f1)
plot_lst_img1.append((f1, ep1, "First Pair, Test F"))
f2 = eight_point(p2_matches)
ep2 = epipoles_location(f2)
plot_lst_img2.append((f2, ep2, "Second Pair, Test F"))
if compare_f:
p1_matches_subset_1 = p1_matches[5:]
p1_matches_subset_2 = p1_matches[:-5]
f3 = eight_point(p1_matches_subset_1)
plot_lst_img1.append((f3, epipoles_location(f3), "First Pair, Test F Less First 5 Matches"))
f4 = eight_point(p1_matches_subset_2)
plot_lst_img1.append((f4, epipoles_location(f4), "First Pair, Test F Less Last 5 Matches"))
for f, ep, name in plot_lst_img1:
plot_epilines(p1[0], p1[1], p1_matches, ep, f, name=name, plot_f=True)
for f, ep, name in plot_lst_img2:
plot_epilines(p2[0], p2[1], p2_matches, ep, f, name=name, plot_f=True)
if test_q5:
p1_matches_arr = np.array(p1_matches)
k1, k2 = p1_matches_arr[:, 0, :], p1_matches_arr[:, 1, :]
f1 = cv.findFundamentalMat(k1, k2)[0]
p2_matches_arr = np.array(p2_matches)
k1, k2 = p2_matches_arr[:, 0, :], p2_matches_arr[:, 1, :]
f2 = cv.findFundamentalMat(k1, k2)[0]
epipoles1 = epipoles_location(f1)
epipoles2 = epipoles_location(f2)
plot_epilines(p1[0], p1[1], p1_matches, epipoles1, f1, name="First Pair, Test Epipoles", plot_f=True)
plot_epilines(p2[0], p2[1], p2_matches, epipoles2, f2, name="Second Pair, Test Epipoles", plot_f=True)
if test_q6:
# first pair
p1_matches_arr = np.array(p1_matches)
k1, k2 = p1_matches_arr[:, 0, :], p1_matches_arr[:, 1, :]
essential = cv.findEssentialMat(k1, k2, M)[0]
points, R, t, mask = cv.recoverPose(essential, k1, k2, M)
s1 = []
for i in range(0, len(p1_matches), 4):
s1.append(triang_four(p1_matches_arr[i:i+4]))
plot_poly_3d(s1, p1_matches_arr, "First Pair", p1[0], p1[1])
# second pair
p2_matches_arr = np.array(p2_matches)
k1, k2 = p2_matches_arr[:, 0, :], p2_matches_arr[:, 1, :]
essential = cv.findEssentialMat(k1, k2, M)[0]
points, R, t, mask = cv.recoverPose(essential, k1, k2, M)
s2 = []
for i in range(0, len(p2_matches), 4):
s2.append(triang_four(p2_matches_arr[i:i + 4]))
plot_poly_3d(s2, p2_matches_arr, "Second Pair", p2[0], p2[1])
```
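A quick way to sanity-check the `get_h` normalisation used by `eight_point` above is to apply the returned matrix to a few points and verify that the result is centred at the origin with a mean distance of roughly sqrt(2). The sketch below restates the normalisation so it runs standalone; the sample points are arbitrary.
```python
import numpy as np

def get_h(point_lst):
    # Same normalisation as in a4p2.py: move the centroid to the origin and
    # scale so the mean distance from the origin becomes sqrt(2).
    pts = np.array([(p[0], p[1], 1.0) for p in point_lst])
    mean = np.mean(pts, axis=0)
    mean_dist = np.mean([np.linalg.norm(p - mean) for p in pts])
    s = np.sqrt(2) / mean_dist
    return np.array([[s, 0, -s * mean[0]],
                     [0, s, -s * mean[1]],
                     [0, 0, 1]])

if __name__ == '__main__':
    points = [(10, 20), (200, 40), (150, 300), (400, 250), (30, 180)]
    h = get_h(points)
    normed = np.array([h @ np.array([x, y, 1.0]) for x, y in points])
    print("centroid:", normed[:, :2].mean(axis=0))                       # ~ (0, 0)
    print("mean dist:", np.mean(np.linalg.norm(normed[:, :2], axis=1)))  # ~ 1.414
```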
|
{
"source": "jenssss/micmon",
"score": 2
}
|
#### File: micmon/audio/device.py
```python
from micmon.audio import AudioSource
class AudioDevice(AudioSource):
def __init__(self, system: str = 'alsa', device: str = 'plughw:0,1', *args, **kwargs):
super().__init__(*args, **kwargs)
self.ffmpeg_args = (
self.ffmpeg_bin, '-f', system, '-i', device, *self.ffmpeg_base_args
)
```
#### File: micmon/audio/segment.py
```python
from typing import Optional
import numpy as np
class AudioSegment:
default_low_freq = 20
default_high_freq = 20000
default_bins = 100
def __init__(self, data: bytes, sample_rate: int = 44100, channels: int = 1, label: Optional[int] = None):
self.data = data
self.audio = np.frombuffer(data, dtype=np.int16)
self.sample_rate = sample_rate
self.channels = channels
self.duration = len(self.audio) / (sample_rate * channels)
self.label = label
def fft(self, low_freq: int = default_low_freq, high_freq: int = default_high_freq) -> np.ndarray:
return np.absolute(np.fft.rfft(self.audio))[low_freq:high_freq]
def spectrum(self, low_freq: int = default_low_freq, high_freq: int = default_high_freq,
bins: int = default_bins) -> np.ndarray:
fft = self.fft(low_freq=low_freq, high_freq=high_freq)
bin_size = int(len(fft) / bins)
return np.array([
np.average(fft[i * bin_size: i * bin_size + bin_size]) / (self.duration * ((1 << 16) - 1))
for i in range(bins)
])
def plot_audio(self):
import matplotlib.pyplot as plt
plt.plot(self.audio)
plt.show()
def plot_spectrum(self, low_freq: int = default_low_freq, high_freq: int = default_high_freq,
bins: int = default_bins):
import matplotlib.pyplot as plt
spectrum = self.spectrum(low_freq=low_freq, high_freq=high_freq, bins=bins)
plt.ylim(0, 1)
plt.bar(range(len(spectrum)), spectrum)
plt.show()
```
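As a rough illustration of the input `AudioSegment` expects, the sketch below builds a segment from a synthetic 440 Hz tone packed as signed 16-bit PCM (the format implied by `np.frombuffer(..., dtype=np.int16)` above) and computes its binned spectrum; it is an illustrative example, not part of micmon itself.
```python
import numpy as np
from micmon.audio import AudioSegment

sample_rate = 44100
duration = 2.0  # seconds
t = np.arange(int(sample_rate * duration)) / sample_rate

# 440 Hz tone scaled into the int16 range and serialised to raw bytes.
tone = (0.5 * np.iinfo(np.int16).max * np.sin(2 * np.pi * 440 * t)).astype(np.int16)
segment = AudioSegment(tone.tobytes(), sample_rate=sample_rate, channels=1)

spectrum = segment.spectrum(low_freq=20, high_freq=2000, bins=50)
print(segment.duration, len(spectrum), spectrum.max())
```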
#### File: micmon/model/__init__.py
```python
import json
import os
import pathlib
import numpy as np
from typing import List, Optional
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Layer
from tensorflow.keras.models import load_model, Model as _Model
from micmon.audio import AudioSegment
from micmon.dataset import Dataset
class Model:
labels_file_name = 'labels.json'
freq_file_name = 'freq.json'
# noinspection PyShadowingNames
def __init__(self, layers: Optional[List[Layer]] = None, labels: Optional[List[str]] = None,
model: Optional[_Model] = None,
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=('accuracy',),
low_freq: int = AudioSegment.default_low_freq,
high_freq: int = AudioSegment.default_high_freq):
assert layers or model
self.label_names = labels
self.cutoff_frequencies = (int(low_freq), int(high_freq))
if layers:
self._model = Sequential(layers)
self._model.compile(optimizer=optimizer, loss=loss, metrics=list(metrics))
else:
self._model = model
def fit(self, dataset: Dataset, *args, **kwargs):
return self._model.fit(dataset.train_samples, dataset.train_classes, *args, **kwargs)
def evaluate(self, dataset: Dataset, *args, **kwargs):
return self._model.evaluate(dataset.validation_samples, dataset.validation_classes, *args, **kwargs)
def predict(self, audio: AudioSegment):
spectrum = audio.spectrum(low_freq=self.cutoff_frequencies[0], high_freq=self.cutoff_frequencies[1])
output = self._model.predict(np.array([spectrum]))
prediction = int(np.argmax(output))
return self.label_names[prediction] if self.label_names else prediction
def save(self, path: str, *args, **kwargs):
path = os.path.abspath(os.path.expanduser(path))
is_file = path.endswith('.h5') or path.endswith('.pb')
if is_file:
model_dir = str(pathlib.Path(path).parent)
else:
model_dir = path
pathlib.Path(model_dir).mkdir(parents=True, exist_ok=True)
self._model.save(path, *args, **kwargs)
if self.label_names:
labels_file = os.path.join(model_dir, self.labels_file_name)
with open(labels_file, 'w') as f:
json.dump(self.label_names, f)
if self.cutoff_frequencies:
freq_file = os.path.join(model_dir, self.freq_file_name)
with open(freq_file, 'w') as f:
json.dump(self.cutoff_frequencies, f)
@classmethod
def load(cls, path: str, *args, **kwargs):
path = os.path.abspath(os.path.expanduser(path))
is_file = path.endswith('.h5') or path.endswith('.pb')
if is_file:
model_dir = str(pathlib.Path(path).parent)
else:
model_dir = path
model = load_model(path, *args, **kwargs)
labels_file = os.path.join(model_dir, cls.labels_file_name)
freq_file = os.path.join(model_dir, cls.freq_file_name)
label_names = []
frequencies = []
if os.path.isfile(labels_file):
with open(labels_file, 'r') as f:
label_names = json.load(f)
if os.path.isfile(freq_file):
with open(freq_file, 'r') as f:
frequencies = json.load(f)
return cls(model=model, labels=label_names, low_freq=frequencies[0], high_freq=frequencies[1])
```
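To show how the `Model` wrapper above is meant to be driven, here is a hedged training sketch. It does not use micmon's real `Dataset` class (which is not shown in this file); instead it substitutes a tiny stand-in exposing the `train_samples`/`train_classes`/`validation_samples`/`validation_classes` attributes that `fit` and `evaluate` read, and feeds it random spectra. Layer sizes and label names are illustrative only.
```python
import numpy as np
from tensorflow.keras.layers import Dense
from micmon.model import Model

class FakeDataset:
    # Minimal stand-in exposing the attributes Model.fit/evaluate use;
    # a real micmon Dataset would be built from the generated .npz files.
    def __init__(self, samples, classes, split=0.7):
        n = int(len(samples) * split)
        self.train_samples, self.train_classes = samples[:n], classes[:n]
        self.validation_samples, self.validation_classes = samples[n:], classes[n:]

samples = np.random.rand(200, 100)           # 100 spectrum bins per sample
classes = np.random.randint(0, 2, size=200)  # two classes, purely for illustration
dataset = FakeDataset(samples, classes)

model = Model(
    layers=[
        Dense(64, activation='relu', input_shape=(100,)),
        Dense(2, activation='softmax'),
    ],
    labels=['negative', 'positive'],
    low_freq=20,
    high_freq=20000,
)

model.fit(dataset, epochs=2)
print(model.evaluate(dataset))
```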
#### File: micmon/utils/datagen.py
```python
import argparse
import logging
import os
import sys
from micmon.audio import AudioDirectory, AudioFile, AudioSegment
from micmon.dataset import DatasetWriter
logger = logging.getLogger(__name__)
defaults = {
'sample_duration': 2.0,
'sample_rate': 44100,
'channels': 1,
'ffmpeg_bin': 'ffmpeg',
}
def create_dataset(audio_dir: str, dataset_dir: str,
low_freq: int = AudioSegment.default_low_freq,
high_freq: int = AudioSegment.default_high_freq,
bins: int = AudioSegment.default_bins,
sample_duration: float = defaults['sample_duration'],
sample_rate: int = defaults['sample_rate'],
channels: int = defaults['channels'],
ffmpeg_bin: str = defaults['ffmpeg_bin']):
audio_dir = os.path.abspath(os.path.expanduser(audio_dir))
dataset_dir = os.path.abspath(os.path.expanduser(dataset_dir))
audio_dirs = AudioDirectory.scan(audio_dir)
for audio_dir in audio_dirs:
dataset_file = os.path.join(dataset_dir, os.path.basename(audio_dir.path) + '.npz')
logger.info(f'Processing audio sample {audio_dir.path}')
with AudioFile(audio_dir.audio_file, audio_dir.labels_file,
sample_duration=sample_duration, sample_rate=sample_rate, channels=channels,
ffmpeg_bin=os.path.expanduser(ffmpeg_bin)) as reader, \
DatasetWriter(dataset_file, low_freq=low_freq, high_freq=high_freq, bins=bins) as writer:
for sample in reader:
writer += sample
def main():
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description='''
Tool to create numpy dataset files with audio spectrum data from a set of labelled raw audio files.''',
epilog='''
- audio_dir should contain a list of sub-directories, each of which represents a labelled audio sample.
audio_dir should have the following structure:
audio_dir/
-> train_sample_1
-> audio.mp3
-> labels.json
-> train_sample_2
-> audio.mp3
-> labels.json
...
- labels.json is a key-value JSON file that contains the labels for each audio segment. Example:
{
"00:00": "negative",
"02:13": "positive",
"04:57": "negative",
"15:41": "positive",
"18:24": "negative"
}
Each entry indicates that all the audio samples between the specified timestamp and the next entry or
the end of the audio file should be applied the specified label.
- dataset_dir is the directory where the generated labelled spectrum dataset in .npz format will be saved.
Each dataset file will be named like its associated audio samples directory.''',
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('audio_dir', help='Directory containing the raw audio samples directories to be scanned.')
parser.add_argument('dataset_dir', help='Destination directory for the compressed .npz files containing the '
'frequency spectrum datasets.')
parser.add_argument('--low', help='Specify the lowest frequency to be considered in the generated frequency '
'spectrum. Default: 20 Hz (lowest possible frequency audible to a human ear).',
required=False, default=AudioSegment.default_low_freq, dest='low_freq', type=int)
parser.add_argument('--high', help='Specify the highest frequency to be considered in the generated frequency '
'spectrum. Default: 20 kHz (highest possible frequency audible to a human ear).',
required=False, default=AudioSegment.default_high_freq, dest='high_freq', type=int)
parser.add_argument('-b', '--bins', help=f'Specify the number of frequency bins to be used for the spectrum '
f'analysis (default: {AudioSegment.default_bins})',
required=False, default=AudioSegment.default_bins, dest='bins', type=int)
parser.add_argument('-d', '--sample-duration', help=f'The script will calculate the spectrum of audio segments of '
f'this specified length in seconds (default: '
f'{defaults["sample_duration"]}).',
required=False, default=defaults['sample_duration'], dest='sample_duration', type=float)
parser.add_argument('-r', '--sample-rate', help=f'Audio sample rate (default: {defaults["sample_rate"]} Hz)',
required=False, default=defaults['sample_rate'], dest='sample_rate', type=int)
parser.add_argument('-c', '--channels', help=f'Number of destination audio channels (default: '
f'{defaults["channels"]})',
required=False, default=defaults['channels'], dest='channels', type=int)
parser.add_argument('--ffmpeg', help=f'Absolute path to the ffmpeg executable (default: {defaults["ffmpeg_bin"]})',
required=False, default=defaults['ffmpeg_bin'], dest='ffmpeg_bin', type=str)
opts, args = parser.parse_known_args(sys.argv[1:])
return create_dataset(audio_dir=opts.audio_dir, dataset_dir=opts.dataset_dir, low_freq=opts.low_freq,
high_freq=opts.high_freq, bins=opts.bins, sample_duration=opts.sample_duration,
sample_rate=opts.sample_rate, channels=opts.channels, ffmpeg_bin=opts.ffmpeg_bin)
if __name__ == '__main__':
main()
```
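The `main` entry point above is only a thin argparse wrapper around `create_dataset`, so the same work can be done programmatically. The snippet below is a sketch with placeholder paths, assuming the module is importable as `micmon.utils.datagen` as suggested by the file path:
```python
from micmon.utils.datagen import create_dataset

# Hypothetical locations for the labelled audio samples and the output datasets.
create_dataset(
    audio_dir='~/datasets/sound-detect/audio',
    dataset_dir='~/datasets/sound-detect/data',
    low_freq=250,
    high_freq=2500,
    bins=100,
    sample_duration=2.0,
    sample_rate=44100,
)
```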
|
{
"source": "jenssss/runana",
"score": 3
}
|
#### File: runana/runana/run.py
```python
from __future__ import print_function
from sys import stdout
from os import path, getcwd, listdir, chdir, makedirs, remove
from subprocess import call
from io import FileIO
from glob import glob
from functools import wraps
from contextlib import contextmanager
from runana import input_file_handling
from operator import add, mul
from runana.read_numbers import ignored
try:
from operator import div
except ImportError:
from operator import truediv as div
try:
basestring # Python 2.x
except NameError:
basestring = str # Python 3.x
OPERATIONS = {'add': add, 'mul': mul, 'div': div}
@contextmanager
def cwd(path):
""" Contextmanager that changes working directory temporarily """
oldpwd = getcwd()
chdir(path)
try:
yield
finally:
chdir(oldpwd)
def generate_seq(start, incr, nvalues=0, incr_func=add):
"""Iterator that returns a sequence of numbers
:param incr_func: function used to increment the return value. Can be one
of the strings 'add', 'mul' or 'div'
:type incr_func: func or str
"""
if isinstance(incr_func, basestring):
incr_func = OPERATIONS[incr_func]
value = start
yield value
for i in range(1, nvalues):
value = incr_func(value, incr)
yield value
def generate_list(*args, **kwargs):
"""Wrap of generate_seq that returns a list instead of an iterator"""
return list(generate_seq(*args, **kwargs))
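# Illustrative examples of the sequence helpers above (the values shown are
# what the calls evaluate to; nothing here is executed at import time):
#   generate_list(1, 2, nvalues=4)                     -> [1, 3, 5, 7]
#   generate_list(1, 2, nvalues=4, incr_func='mul')    -> [1, 2, 4, 8]
#   generate_list(16.0, 2, nvalues=3, incr_func='div') -> [16.0, 8.0, 4.0]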
class Dirs(object):
"""Container class for names of directories
:param str scratch_base: Directory prefix
:param str local_scratch_base: Prefix for directory in which programs
are run. If `None` then `scratch_base` is used
:param list copy_2_scratch: List of strings that are globbed and copied
from the local scratch directory to scratch directory
"""
def __init__(self, scratch_base, local_scratch_base=None,
copy_2_scratch=['*.txt', '*.nml', '*.stdout', '*.dat']):
if local_scratch_base is None:
local_scratch_base = scratch_base
self.scratch_base = scratch_base
self.local_scratch_base = local_scratch_base
self.copy_2_scratch = copy_2_scratch
makedir(self.scratch_base)
def makedir(dir_):
if not path.exists(dir_):
makedirs(dir_)
class OpenWithNone(FileIO):
def __init__(self, file_string, *args, **kwargs):
self.file_string = file_string
if file_string:
super(OpenWithNone, self).__init__(file_string, *args, **kwargs)
def __enter__(self):
handle = None
if self.file_string:
handle = super(OpenWithNone, self).__enter__()
self.fd = handle
return self.fd
def __exit__(self, type, value, traceback):
if self.fd:
super(OpenWithNone, self).__exit__()
def replace_string_in_file(fileName, text_to_search, text_to_replace):
with open(fileName, 'r') as file_handle:
filedata = file_handle.read()
filedata = filedata.replace(text_to_search, text_to_replace)
with open(fileName, 'w') as file_handle:
file_handle.write(filedata)
def run_program(program, cmdargs, stdin_f, stdout_f, stderr_f,
run=True, cmd_prepend="", run_from_cmd=True,
**kwargs):
"""Runs `program` with `cmdargs` using `subprocess.call`.
:param str stdin_f: File from which to take standard input
:param str stdout_f: File in which to put standard output
:param str stderr_f: File in which to put standard error
:param bool run: Whether to actually run `program`.
If `True`, the program's return code is returned.
If `False`, the path of the generated script that would run
the program is returned instead.
:param str cmd_prepend: Put in the beginning of the bash script
:param bool run_from_cmd: Run `program` using the generated bash
script instead of running it directly
"""
time_file_name = '.'.join(stdout_f.split('.')[:-1])+'.time'
cmd_file_name = '.'.join(stdout_f.split('.')[:-1])+'.sh'
with open(cmd_file_name, 'w') as cmd_file:
cmd = ' '.join([program]+cmdargs)
time_cmd = "/usr/bin/time -o {time_file}".format(time_file=time_file_name)
cmd = "{time_cmd} {cmd} 1> {stdout} 2> {stderr} \n".format(time_cmd=time_cmd,
cmd=cmd,
stdout=stdout_f,
stderr=stderr_f)
cmd = cmd_prepend + cmd
cmd_file.write(cmd)
if run:
with OpenWithNone(stdin_f, 'r') as input_file, open(stdout_f, 'w') as stdout_file, open(stderr_f, 'w') as stderr_file:
if run_from_cmd:
retcode = call(["bash", cmd_file_name], **kwargs)
else:
try:
with open(time_file_name, 'w') as time_file:
with print_time(time_file):
retcode = call([program]+cmdargs, stdin=input_file,
stdout=stdout_file, stderr=stderr_file, **kwargs)
except Exception as e:
print(e)
print('program ', program)
print('cmdargs', cmdargs)
print('stdin ', stdin_f)
print('stdout ', stdout_f)
print('stderr ', stderr_f)
# print 'kwargs ', kwargs
print(getcwd())
raise
replace_string_in_file(stdout_f, '\r', '\n')
return retcode
else:
return cmd_file_name
from subprocess import Popen, PIPE
from time import sleep
# from time import time
# from os import fsync
def run_program_print_output(program, cmdargs,
stdin_f, stdout_f, stderr_f, print_output=False,
**kwargs):
with OpenWithNone(stdin_f, 'r') as input_file:
with open(stdout_f, 'w', buffering=0) as stdout_file:
with open(stderr_f, 'w', buffering=0) as stderr_file:
try:
with open(stdout_f+'.sh', 'w') as cmd_file:
cmd_file.write(' '.join([program]+cmdargs))
if print_output:
# start_time = time()
process = Popen([program]+cmdargs, stdin=input_file,
stdout=PIPE, stderr=PIPE, bufsize=0, **kwargs)
# print(time()-start_time)
# print('Right after process call')
while True:
stdout = process.stdout.readline()
stderr = process.stderr.readline()
if stdout == '' and stderr == '' and process.poll() is not None:
break
if stdout:
# print(time()-start_time)
print(stdout, end='')
stdout_file.write(stdout)
# stdout_file.flush()
# fsync(stdout_file.fileno())
if stderr:
print(stderr, end='')
stderr_file.write(stderr)
sleep(0.1)
else:
call([program]+cmdargs, stdin=input_file,
stdout=stdout_file, stderr=stderr_file, bufsize=0, **kwargs)
except Exception as e:
print(e)
print('program ', program)
print('cmdargs', cmdargs)
print('stdin ', stdin_f)
print('stdout ', stdout_f)
print('stderr ', stderr_f)
# print 'kwargs ', kwargs
print(getcwd())
raise
replace_string_in_file(stdout_f, '\r', '\n')
def name_stdout(program, add=''):
if isinstance(program, basestring):
prog = program
else:
prog = program[0]
stdouts = prog.split('/')[-1]
# stdouts = stdouts.split('.')[0].split('_')[0]+add+'.std'
stdouts = stdouts+add+'.std'
return stdouts+'out', stdouts+'err'
def run_prog(program, cmdargs=[], stdin_f=None, add='', **kwargs):
# run_program_print_output(program, cmdargs, stdin_f,
return run_program(program, cmdargs, stdin_f,
*name_stdout(program, add), **kwargs)
def copy_ignore_same(from_file, to_file):
from shutil import copy, Error
try:
copy(from_file, to_file)
except Error as err:
with open('shutil.err', 'a') as f1:
f1.write(str(err)+'\n')
except IOError:
pass
def copy_to_scratch(WorkDir, file_strings):
files = []
for file_string in file_strings:
files = files+glob(file_string)
for fil in files:
copy_ignore_same(fil, WorkDir)
@ignored(OSError)
def get_subdirs(a_dir='./'):
return [name for name in listdir(a_dir)
if path.isdir(path.join(a_dir, name))]
def generate_run_ID(work_dir, invalid_IDs=[], prepend=""):
subdirs = get_subdirs(work_dir)
ID = 1
while True:
strID = str(ID)
if prepend:
strID = prepend + strID
if strID in subdirs or strID in invalid_IDs:
ID = ID+1
else:
return strID
def make_run_dirs(ScratchBase, LScratchBase, **gen_ID_kwargs):
ID = generate_run_ID(ScratchBase, **gen_ID_kwargs)
work_dir = path.join(ScratchBase, ID)
lwork_dir = path.join(LScratchBase, ID)
makedir(work_dir)
makedir(lwork_dir)
return ID, work_dir, lwork_dir
def lock_wrap_retry(Dir, nretries=10, wait=0.1):
def decorate(f):
@wraps(f)
def call(*args, **kwargs):
import fcntl, time
pid_file = path.join(Dir, 'lock_ID_gen.pid')
with open(pid_file, 'w') as lock_fp:
for attempt in range(nretries):
try:
fcntl.lockf(lock_fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
import random
print('Waiting for lock on', Dir, attempt)
time.sleep(wait+0.05*random.random())
continue
break
else:
print('Failed to get ID generation lock')
try:
ret = f(*args, **kwargs)
fcntl.lockf(lock_fp, fcntl.LOCK_UN)
finally:
fcntl.lockf(lock_fp, fcntl.LOCK_UN)
return ret
return call
return decorate
def save_info_in_file(filename, command, copy_back=None):
with open(filename, 'w') as output_file:
call(command, stdout=output_file)
if copy_back:
copy_ignore_same(filename, copy_back)
def calc_all(replacements, dirs, inp_file, programs,
print_finish=True, filter_func='f90nml', use_stdin=False,
use_inp_file=True,
**gen_ID_kwargs):
base_dir = dirs.scratch_base
dirID, work_dir, lwork_dir = lock_wrap_retry(base_dir, nretries=10, wait=0.1)(
make_run_dirs)(base_dir, dirs.local_scratch_base, **gen_ID_kwargs)
inp_file_local = path.join(work_dir, path.basename(inp_file))
input_file_handling.INP_FILE_FILTERS[filter_func](inp_file, inp_file_local, replacements)
with cwd(lwork_dir):
save_info_in_file('hostname.txt', 'hostname', work_dir)
save_info_in_file('started.txt', 'date', work_dir)
inp_file_relative = path.relpath(inp_file_local, lwork_dir)
run_core(programs, inp_file_relative, use_stdin=use_stdin,
use_inp_file=use_inp_file)
copy_to_scratch(work_dir, dirs.copy_2_scratch)
save_info_in_file('ended.txt', 'date', work_dir)
if print_finish:
print('Finished', dict((is_it_tuple(key),elem) for key,elem in replacements.items()))
return dirID
def is_it_tuple(it):
if isinstance(it, tuple):
return it[1]
else:
return it
@contextmanager
def NamedTempFile(name, mode="w", **kwargs):
"""Contextmanager for creating a named temporary file """
try:
with open(name, mode=mode, **kwargs) as f:
yield f
finally:
with ignored(FileNotFoundError):
remove(name)
def run_core(programs, inp_file_relative, use_stdin=False,
use_inp_file=True, add_temp_ignore_file=True):
if add_temp_ignore_file:
with NamedTempFile("ignore"):
run_core_inner(programs, inp_file_relative, use_stdin,
use_inp_file)
else:
run_core_inner(programs, inp_file_relative, use_stdin,
use_inp_file)
def run_core_inner(programs, inp_file_relative, use_stdin=False,
use_inp_file=True):
for program in programs:
if hasattr(program, '__call__'):
if use_inp_file:
program(inp_file_relative)
else:
program()
else:
if use_inp_file:
if use_stdin:
run_prog(program, [], stdin_f=inp_file_relative)
else:
run_prog(program, [inp_file_relative])
else:
run_prog(program)
def add_to_fname(fname, add=''):
fname_list = fname.split('.')
fname_list[-2] = fname_list[-2]+add
return '.'.join(fname_list)
def rerun(replacements, lworkdir, inp_file, programs, filter_func='f90nml'):
inp_file_replace = add_to_fname(inp_file, '_rerun')
with cwd(lworkdir):
input_file_handling.INP_FILE_FILTERS[filter_func](inp_file,
inp_file_replace,
replacements)
save_info_in_file('re_hostname.txt', 'hostname')
save_info_in_file('restarted.txt', 'date')
run_core(programs, inp_file_replace, lworkdir)
save_info_in_file('re_ended.txt', 'date')
def merge_dicts(x, y):
z = x.copy()
z.update(y)
return z
def product_replacements(product_iters):
from itertools import product
for value_set in product(*product_iters.values()):
yield dict(zip(product_iters.keys(), value_set))
def co_replacements(co_iters):
if len(co_iters) >= 1:
for vector in zip(*co_iters.values()):
yield dict(zip(co_iters.keys(), vector))
else:
yield {}
def chain_replacements(chain_iters):
if len(chain_iters) >= 1:
for key, chain_iter in chain_iters.items():
for value in chain_iter:
yield {key: value}
else:
yield {}
def replace_iter_gen(product_iters={}, chain_iters={}, co_iters={},
just_replace={}):
for prod_iter in product_replacements(product_iters):
for chain_iter in chain_replacements(chain_iters):
for co_iter in co_replacements(co_iters):
yield merge_dicts(merge_dicts(merge_dicts(just_replace,
prod_iter),
chain_iter),
co_iter)
def check_dirs(dirs):
if not isinstance(dirs, Dirs):
dirs = Dirs(dirs)
return dirs
from functools import partial
def pick_filter_func(filter_func, calc_all):
return partial(calc_all,filter_func=filter_func)
def execute(programs, input_file, dirs,
chain_iters={}, product_iters={}, co_iters={}, just_replace={},
filter_func='f90nml', use_stdin=False,
calc_all=calc_all, **kwargs):
"""Run sequence of programs with different parameters defined by iters.
:param list programs: List of strings with names of programs. Should
contain absolute paths. Could alternately contain functions
:param str input_file: Input file
:param runana.run.Dirs dirs: Base directory in which programs will be run
:type dirs: str or runana.run.Dirs
:param dict chain_iters: Entries of the form
{'Name of parameter':[*values to replace with*]}
:param dict product_iters: Like `chain_iters`, but runs all combinations
    :param dict co_iters: Runs with several parameters changing simultaneously
:param dict just_replace: Entries of the form
{'Name of parameter':*value to replace with*}
:param str filter_func: Which filter function to use. Options are listed
        as keys in the INP_FILE_FILTERS dictionary
:param bool use_stdin: send in the content of the filtered input file
        through stdin rather than passing the name of the input file as the
first command line argument
:param func calc_all: Hook for the parallel decorator, please ignore
this argument
"""
dirs = check_dirs(dirs)
input_file = path.abspath(input_file)
calc_all = pick_filter_func(filter_func, calc_all)
dir_IDs = []
for replacers in replace_iter_gen(product_iters=product_iters,
chain_iters=chain_iters,
co_iters=co_iters,
just_replace=just_replace):
dir_ID = calc_all(replacers, dirs, input_file, programs,
use_stdin=use_stdin, **kwargs)
dir_IDs.append(dir_ID)
return dir_IDs
def execute_lock_par(lock, parallel, *args, **kwargs):
""" Convenience function for running execute with a lock and/or in parallel """
execute_here = execute
if parallel:
execute_here = parallel_wrap(parallel)(execute_here)
if lock:
execute_here = lock_wrap(lock)(execute_here)
return execute_here(*args, **kwargs)
def common_start(chain_iters, just_replace):
""" Returns modified `chain_iters` and `just_replace` such that the
calculations will start at the first value of each variable in chain_iter
"""
chain_iters_out = chain_iters.copy()
replacers = {}
if len(chain_iters_out) > 0:
for (key, elem) in chain_iters_out.items():
replacers[key] = elem.pop(0)
else:
elem.insert(0, replacers[key])
just_replace = just_replace.copy()
just_replace.update(replacers)
return chain_iters_out, just_replace
class PoolApplyAsyncWrap(object):
def __init__(self, pool):
self.pool = pool
def __call__(self, fun):
def wrapped_f(*args, **kwargs):
import copy
for arg in args:
print(arg)
copy.deepcopy(arg)
args = copy.deepcopy(args)
kwargs = copy.deepcopy(kwargs)
ret = self.pool.apply_async(fun, args, kwargs)
return ret
return wrapped_f
@contextmanager
def multi_stuff(parallel, kwargs):
import multiprocessing
import signal
def initializer():
"""Ignore CTRL+C in the worker process."""
signal.signal(signal.SIGINT, signal.SIG_IGN)
number_of_cpus = multiprocessing.cpu_count()
pool = multiprocessing.Pool(number_of_cpus, initializer=initializer)
if parallel == 'Calc all':
kwargs['calc_all'] = PoolApplyAsyncWrap(pool)(calc_all)
elif parallel == 'auto_converge_var':
if 'auto_converge_var' in kwargs:
kwargs['auto_converge_var'] = PoolApplyAsyncWrap(pool)(kwargs['auto_converge_var'])
else:
kwargs['auto_converge_var'] = PoolApplyAsyncWrap(pool)(auto_converge_var)
try:
yield kwargs
pool.close()
pool.join()
except KeyboardInterrupt:
print("Caught KeyboardInterrupt, terminating workers")
pool.terminate()
pool.join()
raise
import collections
def map_nested_dicts(dict_, func):
if isinstance(dict_, collections.Mapping):
ret = dict((k, map_nested_dicts(v, func)) for k, v in dict_.items())
else:
ret = func(dict_)
return ret
def parallel_wrap(parallel=None):
def decorate(fun):
@wraps(fun)
def call(*args, **kwargs):
if parallel is None:
converged_parameters = fun(*args, **kwargs)
else:
with multi_stuff(parallel, kwargs) as kwargs:
converged_parameters = fun(*args, **kwargs)
if converged_parameters:
converged_parameters = list(map(lambda x: x.get(), converged_parameters))
# converged_parameters = map_nested_dicts(converged_parameters, lambda x: x.get())
# converged_parameters = multi_stuff(parallel, fun, args, kwargs)
return converged_parameters
return call
return decorate
def lock_wrap(dir_):
def decorate(fun):
@wraps(fun)
def call(*args, **kwargs):
import fcntl
makedir(dir_)
pid_file = path.join(dir_, 'lock.pid')
with open(pid_file, 'w') as lock_fp:
try:
fcntl.lockf(lock_fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
print('Another instance is running')
print('Lock file ', pid_file)
raise SystemExit
try:
ret = fun(*args, **kwargs)
fcntl.lockf(lock_fp, fcntl.LOCK_UN)
finally:
fcntl.lockf(lock_fp, fcntl.LOCK_UN)
return ret
return call
return decorate
INTERVALS = (
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
def display_time(seconds, granularity=2, intervals=INTERVALS):
list_ = display_time_list(seconds, granularity=granularity,
intervals=intervals)
return ', '.join(list_)
# result = []
# for name, count in intervals:
# value = seconds // count
# if value:
# seconds -= value * count
# if value == 1:
# name = name.rstrip('s')
# result.append("{0:.0f} {1}".format(value, name))
# return ', '.join(result[:granularity])
def display_time_list(seconds, granularity=2, intervals=INTERVALS):
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{0:.0f} {1}".format(value, name))
return result[:granularity]
@contextmanager
def print_time(file_=stdout):
""" Contextmanager that prints how much time was spent in it"""
import time
start = time.time()
yield
end = time.time()
print(display_time(end-start), file=file_)
def rel_err_rel_var(O1, O2, x1, x2):
""" Estimate of relative error `abs(x2/(x2-x1)*(O1-O2)/O2)` """
return abs(x2/(x2-x1)*(O1-O2)/O2)
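# For instance (illustrative numbers), rel_err_rel_var(1.0, 1.01, 10.0, 20.0)
# is abs(20/(20-10)*(1.0-1.01)/1.01) ~= 0.02, i.e. roughly a 2% estimated error.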
class ConvCrit(object):
""" Contains information on how to check for convergence.
:parameter func data_read: Function which will be executed in the directory in which the programs was run. It should return the observable in terms of which convergence is sought
:parameter float eps: Desired precision
    :parameter func conv_func: Function that calculates convergence criterion. It should take 4 arguments, `f(O1,O2,x1,x2)`, where `x1` and `x2` are the values of the numerical parameter at the current and previous calculation and `O1` and `O2` are the corresponding observable values
"""
def __init__(self, data_read, eps = 1.0e-3, conv_func = rel_err_rel_var, itermax=10, common_start=False):
self.eps = eps
self.conv_func = conv_func
self.data_read = data_read
self.itermax = itermax
self.common_start = common_start
def make_run_string(replacers):
replacers = dict((is_it_tuple(key), elem) for key, elem in replacers.items())
return str(replacers)
def float_also_array(varvalue):
try:
varvalue_f = float(varvalue)
except TypeError:
varvalue_f = varvalue[0]
return varvalue_f
def auto_converge_var(var_name, var, replacements, dirs, inp_file, programs, conv_crit):
data_read = conv_crit.data_read
prevdirID = getattr(var, 'dirID', None)
yield_diff = 1.0
iteration = 1
if prevdirID is None:
iteration = 0
for iteration, varvalue in enumerate(var, start=iteration):
replacers = merge_dicts(replacements,{var_name:varvalue})
dirID = calc_all(replacers, dirs, inp_file, programs, print_finish=False)
if not prevdirID is None:
# print(prevdirID,varvalue_prev,varvalue,data_read(path.join(dirs.local_scratch_base, dirID))
with cwd(path.join(dirs.local_scratch_base, prevdirID)):
O1 = data_read()
with cwd(path.join(dirs.local_scratch_base, dirID)):
O2 = data_read()
yield_diff = conv_crit.conv_func(O1,O2,
float_also_array(varvalue_prev),
float_also_array(varvalue))
try:
print('{: <10.10}\t {:.2f}\t {:.2e}\t {}\t {}'.format(var_name[1], varvalue,
yield_diff, iteration, dirID))
except ValueError:
print('{: <10.10}\t {}\t {:.2e}\t {}\t {}'.format(var_name[1], varvalue,
yield_diff, iteration, dirID))
if (abs(yield_diff) < conv_crit.eps):
break
prevdirID = dirID
varvalue_prev = varvalue
return {'VarValue': varvalue, 'PrevVarValue': varvalue_prev, 'iteration': iteration+1}
def auto_conv_sub(chain_iters,replacers, dirs, inp_file, programs, conv_crit, auto_converge_var):
results={}
for chain_iter in chain_iters:
results[chain_iter] = auto_converge_var(chain_iter, chain_iters[chain_iter],
replacers, dirs, inp_file, programs, conv_crit)
return results
def auto_conv(programs, inp_file, dirs, conv_crit, chain_iters,
product_iters={}, co_iters={}, just_replace={},
auto_converge_var=auto_converge_var, auto_conv_sub=auto_conv_sub):
""" Run programs until converged or chain_iters is exhausted.
:param list programs: List of strings with names of programs. Should contain absolute paths. Could alternately contain functions
:param str input_file: Input file
:param runana.run.Dirs dirs: Base directory in which programs will be run
:type dirs: str or runana.run.Dirs
:param runana.run.ConvCrit conv_crit: Object specifying type of convergence
:param dict chain_iters: Entries of the form {'Name of parameter':[*values to replace with*]}
:param dict product_iters: Like `chain_iters`, but runs all combinations
    :param dict co_iters: Runs with several parameters changing simultaneously
    :param bool use_stdin: send in the content of the filtered input file through stdin rather than passing the name of the input file as the first command line argument
"""
# :param dict just_replace: Entries of the form {'Name of parameter':*value to replace with*}
# :param str filter_func: Which filter function to use. Options are listed as keys in the INPUT_FILE_FILTERS dictionary
dirs = check_dirs(dirs)
inp_file = path.abspath(inp_file)
results = {}
for replacers in replace_iter_gen(product_iters=product_iters,
co_iters=co_iters,
just_replace=just_replace):
run_string = make_run_string(replacers)
results[run_string] = auto_conv_sub(chain_iters,replacers, dirs, inp_file, programs,
conv_crit, auto_converge_var)
return results
def intercept_argument(args, kwargs, iarg, name):
if len(args)>iarg:
ret = args[iarg]
else:
ret = kwargs.get(name)
return ret
def reinsert_argument(args, kwargs, iarg, name, argument):
if len(args)>iarg:
args= args[:iarg] + (argument, ) + args[iarg+1:]
else:
kwargs[name] = argument
return args, kwargs
def inject_status(recursion_dict):
def decorate(fun):
@wraps(fun)
def call(*args, **kwargs):
chain_iters = intercept_argument(args, kwargs, 0, 'chain_iters')
replacements = intercept_argument(args, kwargs, 1, 'replacements')
run_string = make_run_string(replacements)
this_converged = recursion_dict[run_string].get('This Converged', False)
if not this_converged:
replacers = recursion_dict[run_string].get('Replacers', {})
if replacers:
chain_iters_new = {}
for (key,var) in chain_iters.items():
var = var[var.index(replacers[key]):]
chain_iters_new[key] = var
replacements = merge_dicts(replacements, replacers)
args, kwargs = reinsert_argument(args, kwargs, 0, 'chain_iters', chain_iters_new)
args, kwargs = reinsert_argument(args, kwargs, 1, 'replacements', replacements)
return fun(*args, **kwargs)
else:
return {}
return call
return decorate
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
def auto_conv_rerun(fun):
""" Decorator for rerunning :func:`auto_conv`, until convergence is achieved in
the first two calculations for each parameter. This is useful for cases where
parameters are strongly correlated"""
@wraps(fun)
@static_vars(status={})
def call(*args, **kwargs):
converged_parameters = fun(*args, **kwargs)
everything_converged=True
for run_string in converged_parameters:
replacements={}
prevvarvalues={}
call.status[run_string]=call.status.get(run_string, {})
this_converged = call.status[run_string].get('This Converged', False)
if not this_converged:
this_converged = True
for var_name in converged_parameters[run_string]:
replacements[var_name]=converged_parameters[run_string][var_name]['VarValue']
if converged_parameters[run_string][var_name]['iteration']>2:
this_converged = False
everything_converged = False
prevvarvalues[var_name]=converged_parameters[run_string][var_name]['PrevVarValue']
if this_converged:
call.status[run_string]['Final replacements'] = replacements
call.status[run_string]['This Converged'] = this_converged
call.status[run_string]['Replacers'] = prevvarvalues
call.status[run_string]['Run no'] = call.status[run_string].get('Run no', 0)+1
import pprint
pprint.pprint(call.status)
if not everything_converged:
auto_conv_handle = inject_status(call.status)(auto_conv_sub)
kwargs.update({'auto_conv_sub': auto_conv_handle})
converged_parameters=call(*args, **kwargs)
else:
converged_parameters=call.status
return converged_parameters
return call
```
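A short driver script makes the `execute` and `auto_conv` entry points above easier to read. The sketch below is illustrative only: the program path, the namelist file and the `('group', 'variable')` replacement keys are placeholders, and `read_energy` assumes the program writes a single float to an `energy.txt` file in its run directory.
```python
# Hypothetical driver for runana.run; all paths and parameter names are placeholders.
from runana.run import ConvCrit, Dirs, auto_conv, execute, generate_list

def read_energy():
    # Assumed convention: the program writes one float to 'energy.txt'
    # in the directory it was run in.
    with open('energy.txt') as handle:
        return float(handle.read())

dirs = Dirs('/scratch/my_study')

# Run every combination of the two replacements in input.nml.
execute(['/path/to/my_program'], 'input.nml', dirs,
        product_iters={('grid', 'npoints'): generate_list(100, 2, nvalues=3, incr_func='mul')},
        just_replace={('control', 'method'): 'fast'})

# Increase 'npoints' until the observable changes by less than 0.1% between runs.
auto_conv(['/path/to/my_program'], 'input.nml', dirs,
          conv_crit=ConvCrit(data_read=read_energy, eps=1.0e-3),
          chain_iters={('grid', 'npoints'): generate_list(100, 2, nvalues=10, incr_func='mul')})
```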
|
{
"source": "jensstein/mockdock",
"score": 2
}
|
#### File: src/mockdock/server.py
```python
import json
import logging
import os
import multiprocessing
import socket
import ssl
import sys
import typing
from mockdock import dns
CONFIG_PATH = os.getenv("CONFIG_PATH")
CONFIG_DATA = os.getenv("CONFIG_DATA")
TLS_CERTIFICATE = os.getenv("TLS_CERTIFICATE")
TLS_CERTIFICATE_KEY = os.getenv("TLS_CERTIFICATE_KEY")
# This variable is used to indicate if additional ports need to be opened
# and listened to.
EXTRA_PORTS = os.getenv("EXTRA_PORTS")
def setup_logging() -> logging.Logger:
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
'{"name": "%(name)s", "level": "%(levelname)s", "message": "%(message)s"}')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
logger = setup_logging()
class ParseRequestError(Exception):
pass
class HttpResponse(object):
def __init__(self, data: bytes = bytes(), code: int = 404,
content_type = "text/plain") -> None:
# TODO: specify binary data in base64
self.data = data
self.code = code
self.content_type = content_type
def code_to_status(self, code: int) -> str:
if code == 200:
return "OK"
elif code == 404:
return "Not Found"
return "Error"
def to_message(self) -> bytes:
status = self.code_to_status(self.code)
content_length = len(self.data)
header = "HTTP/1.1 {} {}\nContent-Type: {}\nContent-Length: {}\n\n".format(
self.code, status, self.content_type, content_length).encode("utf8")
return header + self.data
class Config(object):
def __init__(self, data: typing.Optional[str] = None,
config_path: typing.Optional[str] = None) -> None:
if data is not None and config_path is not None:
raise ValueError("cannot supply both data and config path")
if config_path is not None:
with open(config_path) as fp:
self.data = json.load(fp)
elif data is not None:
self.data = json.loads(data)
else:
self.data = {}
logger.debug("Using config {}".format(self.data))
def response_for_path(self, path: str) -> HttpResponse:
if path in self.data:
kwargs = {}
if "data" in self.data[path]:
kwargs["data"] = self.data[path]["data"].encode("utf8")
if "code" in self.data[path]:
kwargs["code"] = self.data[path]["code"]
if "content-type" in self.data[path]:
kwargs["content_type"] = self.data[path]["content-type"]
return HttpResponse(**kwargs)
return HttpResponse()
class HttpRequest(object):
def __init__(self, request_bytes: bytes) -> None:
try:
request = request_bytes.decode("utf8")
self.method, self.path, self.headers = self.parse(request)
except (UnicodeDecodeError, ValueError) as e:
raise ParseRequestError(e)
def parse(self, request: str) -> typing.Tuple[str, str, dict]:
parts = request.split("\r\n")
method, path, _ = parts[0].split(" ")
headers = {k.strip(): v.strip() for k, v in [k.split(":", maxsplit=1) for k in
[p for p in parts[1:] if p]]}
return method, path, headers
def __str__(self) -> str:
return "HttpRequest({}, {}, {})".format(self.method, self.path,
self.headers)
class DnsResolverProcess(multiprocessing.Process):
def __init__(self, redirect_ip: str = None, port: int = 53):
multiprocessing.Process.__init__(self, target=self.start_socket,
args=(redirect_ip, port))
def start_socket(self, redirect_ip: str = None, port: int = 53):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", port))
ip = socket.gethostbyname(socket.getfqdn()) if redirect_ip is None \
else redirect_ip
logger.info("Resolving dns to {}".format(ip))
while True:
data, address = s.recvfrom(1024)
packet = dns.build_packet(data, ip)
logger.debug("dns question received from {}: {}. response: {}".format(
address, data, packet))
s.sendto(packet, address)
class ServerProcess(multiprocessing.Process):
def __init__(self, port: int) -> None:
multiprocessing.Process.__init__(self, target=self.start_socket,
args=(port,))
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.config = Config(CONFIG_DATA, CONFIG_PATH)
def start_socket(self, port: int) -> None:
self.socket.bind(("0.0.0.0", port))
self.socket.listen(1)
while True:
try:
conn, address = self.socket.accept()
while True:
data = conn.recv(1024)
logger.debug("conn {} data {} addr {}".format(conn, data, address))
if not data:
break
request = HttpRequest(data)
path = request.headers["Host"] + request.path
response = self.config.response_for_path(path)
message = response.to_message()
conn.send(message)
except (ssl.SSLError, ConnectionResetError, BrokenPipeError, ParseRequestError) as e:
logger.error("Caught error while handling connection: {}"
.format(e))
class TlsServerProcess(ServerProcess):
def __init__(self, ssl_certificate: str, ssl_key: str, port: int = 443)\
-> None:
ServerProcess.__init__(self, port)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain(ssl_certificate, ssl_key)
self.socket = context.wrap_socket(self.socket, server_side=True)
def main():
dns_resolver = DnsResolverProcess()
dns_resolver.start()
server_process = ServerProcess(80)
server_process.start()
if TLS_CERTIFICATE is not None and TLS_CERTIFICATE_KEY is not None:
tls_server_process = TlsServerProcess(TLS_CERTIFICATE,
TLS_CERTIFICATE_KEY)
tls_server_process.start()
if EXTRA_PORTS is not None:
ports = json.loads(EXTRA_PORTS)
for port in ports:
p = ServerProcess(port)
p.start()
if __name__ == "__main__":
main()
```
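A quick way to see how the classes above fit together, without opening any sockets, is to resolve a request path against a `Config` directly. The host and path below are arbitrary examples; only the `Config` and `HttpResponse` behaviour shown in the module is assumed.
```python
# Standalone sketch: resolve paths against Config without starting the server processes.
import json
from mockdock.server import Config

config = Config(data=json.dumps({
    "example.org/status": {"data": "OK\n", "code": 200, "content-type": "text/plain"}
}))

# Configured paths get the configured response...
print(config.response_for_path("example.org/status").to_message())
# b'HTTP/1.1 200 OK\nContent-Type: text/plain\nContent-Length: 3\n\nOK\n'

# ...and anything else falls back to an empty 404.
print(config.response_for_path("example.org/missing").to_message())
```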
#### File: mockdock/tests/integration_test.py
```python
import json
import logging
import os
import unittest
import subprocess
import tempfile
import conu
import mockdock
class IntegrationTest(unittest.TestCase):
def test_server(self):
print("building image tls-test")
conu.DockerImage.build(".", tag="tls-test", dockerfile="Dockerfile-tlstest")
print("building image mockdock")
conu.DockerImage.build(".", tag="mockdock", dockerfile="Dockerfile")
domains = ["google.com", "domain.org"]
        # This dictionary specifies the responses given for different queries.
        # All elements are optional.
config = json.dumps({
"google.com/": {
"data": '{"response": "OK"}\n',
"code": 200,
"content-type": "application/json"
},
"domain.org:1010/path": {
"data": "Okay response\n",
"code": 200
}
})
# Generate a certificate with subject alternative names for the
# specified domains.
cert_file, key_file = mockdock.generate_certificate(domains)
image_name = "mockdock"
image_tag = "latest"
with conu.DockerBackend() as backend:
try:
server_image = backend.ImageClass(image_name, tag=image_tag)
docker_options = mockdock.get_docker_options(cert_file.name,
key_file.name)
server_container = server_image.run_via_binary(
volumes=[(cert_file.name, cert_file.name),
(key_file.name, key_file.name)],
additional_opts=["-e", "CONFIG_DATA={}".format(config),
"-e", "EXTRA_PORTS=[1010]"] +
docker_options)
server_container.wait_for_port(80)
server_container.wait_for_port(1010)
container_ips = server_container.get_IPv4s()
client_image = backend.ImageClass("tls-test")
# Use the docker --dns argument to specify the server
# container as dns resolver.
docker_run = conu.DockerRunBuilder(["bash"], additional_opts=[
"-it", "-u", "root", "--dns", container_ips[0]])
client_container = client_image.run_via_binary(docker_run)
# Install the generated certificate in the server container.
# This method is debian-specific.
mockdock.install_certificate_debian(cert_file.name,
client_container)
result1 = client_container.execute(["curl", "-vi", "google.com"])
result1 = b" ".join(r for r in result1)
print("Testing {} in {}".format(b'{"response": "OK"}', result1))
self.assertEqual(b'{"response": "OK"}' in result1, True)
result2 = client_container.execute(["curl", "-vi", "https://google.com"])
result2 = b" ".join(r for r in result2)
print("Testing {} in {}".format(b'{"response": "OK"}', result2))
self.assertEqual(b'{"response": "OK"}' in result2, True)
result3 = client_container.execute(["curl", "-vi",
"domain.org:1010/path"])
result3 = b" ".join(r for r in result3)
print("Testing {} in {}".format(b"Okay response", result3))
self.assertEqual(b"Okay response" in result3, True)
result4 = client_container.execute(["curl", "-vi", "https://domain.org"])
result4 = b" ".join(r for r in result4)
print("Testing {} in {}".format(b"Not Found", result4))
self.assertEqual(b"Not Found" in result4, True)
finally:
server_container.kill()
server_container.delete()
client_container.kill()
client_container.delete()
def test_install_certificate(self):
print("building image tls-test")
conu.DockerImage.build(".", tag="tls-test", dockerfile="Dockerfile-tlstest")
with conu.DockerBackend() as backend:
try:
image = backend.ImageClass("tls-test")
container = image.run_via_binary(conu.DockerRunBuilder(["bash"],
additional_opts=["-it"]))
cert_file, key_file = mockdock.generate_certificate(["google.com"])
mockdock.install_certificate_debian(cert_file.name, container)
with container.mount() as fs:
certificates_conf = fs.read_file("/etc/ca-certificates.conf")
self.assertEqual(os.path.basename(cert_file.name) in
certificates_conf, True)
self.assertEqual(fs.file_is_present(os.path.join(
"/usr/share/ca-certificates/", os.path.basename(
cert_file.name))), True)
finally:
container.kill()
container.delete()
```
#### File: mockdock/tests/test_dns.py
```python
import unittest
from mockdock import dns
class DNSTest(unittest.TestCase):
def test_build_packet(self):
data = b"^4\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x06google\x03com\x00\x00\x01\x00\x01"
packet = dns.build_packet(data, "192.168.0.1")
        expected_result = b"^4\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00\x06google\x03com\x00\x00\x01\x00\x01\xc0\x0c\x00\x01\x00\x01\x00\x00\x00<\x00\x04\xc0\xa8\x00\x01"
        self.assertEqual(packet, expected_result)
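        # The trailing four bytes \xc0\xa8\x00\x01 are the A-record payload:
        # socket.inet_ntoa(b"\xc0\xa8\x00\x01") == "192.168.0.1", i.e. the
        # redirect address passed to build_packet.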
```
|
{
"source": "jensstutte/relman-auto-nag",
"score": 2
}
|
#### File: auto_nag/scripts/close_intermittents.py
```python
from auto_nag.bzcleaner import BzCleaner
class Intermittents(BzCleaner):
def __init__(self):
super(Intermittents, self).__init__()
def description(self):
return "Intermittent test failure bugs unchanged in 21 days"
def get_bz_params(self, date):
params = {
"email1": "<EMAIL>",
"emailreporter1": "1",
"emailtype1": "exact",
"n1": "1",
"f1": "longdescs.count",
"o1": "changedafter",
"v1": "-3w",
"f2": "blocked",
"o2": "isempty",
"f3": "flagtypes.name",
"o3": "notequals",
"v3": "needinfo?",
"f4": "OP",
"n4": "1",
"f5": "bug_status",
"o5": "changedto",
"v5": "REOPENED",
"f6": "bug_status",
"o6": "changedafter",
"v6": "-7d",
"f7": "CP",
"f8": "bug_severity",
"o8": "notequals",
"v8": "critical",
"f9": "component",
"o9": "nowordssubstr",
"v9": "new tab page, messaging system",
"keywords_type": "nowords",
"keywords": "leave-open",
"priority": "P5",
"resolution": "---",
"status_whiteboard_type": "notregexp",
"status_whiteboard": "(leave open|leave-open|leaveopen|test disabled|test-disabled|testdisabled)",
}
return params
def get_autofix_change(self):
return {
"status": "RESOLVED",
"resolution": "INCOMPLETE",
"comment": {
"body": f"https://wiki.mozilla.org/Bug_Triage#Intermittent_Test_Failure_Cleanup\n{self.get_documentation()}"
},
}
if __name__ == "__main__":
Intermittents().run()
```
|
{
"source": "JenSte/aiotus",
"score": 3
}
|
#### File: aiotus/aiotus/retry.py
```python
from __future__ import annotations
import asyncio
import dataclasses
from types import TracebackType
from typing import (
AsyncContextManager,
BinaryIO,
Callable,
Iterable,
Mapping,
Optional,
Type,
TypeVar,
Union,
)
import aiohttp
import tenacity
import yarl
from . import common, core, creation
from .log import logger
T = TypeVar("T")
class asyncnullcontext(AsyncContextManager[T]): # noqa: N801
"""Asynchronous version of 'contextlib.nullcontext'."""
def __init__(self, aenter_result: T) -> None:
self._aenter_result = aenter_result
async def __aenter__(self) -> T:
return self._aenter_result
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
return None
@dataclasses.dataclass
class RetryConfiguration:
"""Class to hold settings for the functions of this module."""
retry_attempts: int = 10
"""
Number of retry attempts to do when the communication fails.
"""
max_retry_period_seconds: float = 60.0
"""
Maximum time between retries, in seconds.
Exponential backoff is used in case of communication errors,
    but the time between retries is capped by this value.
"""
ssl: Optional[common.SSLArgument] = None
"""
'ssl' argument passed on to the aiohttp calls.
This can be None, False, or an instance of ssl.SSLContext, see
the `aiohttp documentation
<https://docs.aiohttp.org/en/stable/client_advanced.html#ssl-control-for-tcp-sockets>`_
for the different meanings.
""" # noqa: E501
def _make_log_before_function(s: str) -> Callable[[tenacity.RetryCallState], None]:
"""Create a function used to log before a retry attempt."""
def log(retry_state: tenacity.RetryCallState) -> None:
if retry_state.attempt_number > 1:
logger.info(
f"Trying {s} again, attempt number {retry_state.attempt_number}..."
)
return log
def _make_log_before_sleep_function(
s: str,
) -> Callable[[tenacity.RetryCallState], None]:
"""Create a function used when a call made through tenacity fails."""
def log(retry_state: tenacity.RetryCallState) -> None:
if (retry_state.next_action is not None) and (retry_state.outcome is not None):
duration = retry_state.next_action.sleep
if retry_state.outcome.failed:
value = retry_state.outcome.exception()
else:
value = retry_state.outcome.result()
logger.warning(
f"{s.capitalize()} failed, "
f"retrying in {duration:.0f} second(s): {value}"
)
return log
def _make_retrying(
s: str, config: RetryConfiguration
) -> tenacity.AsyncRetrying: # type: ignore
"""Create a tenacity retry object."""
return tenacity.AsyncRetrying( # type: ignore
retry=tenacity.retry_if_exception_type(aiohttp.ClientError), # type: ignore
stop=tenacity.stop_after_attempt(config.retry_attempts), # type: ignore
wait=tenacity.wait_exponential( # type: ignore
max=config.max_retry_period_seconds
),
before=_make_log_before_function(s),
before_sleep=_make_log_before_sleep_function(s),
)
async def upload(
endpoint: Union[str, yarl.URL],
file: BinaryIO,
metadata: Optional[common.Metadata] = None,
client_session: Optional[aiohttp.ClientSession] = None,
config: RetryConfiguration = RetryConfiguration(),
headers: Optional[Mapping[str, str]] = None,
chunksize: int = 4 * 1024 * 1024,
) -> Optional[yarl.URL]:
"""Upload a file to a tus server.
This function creates an upload on the server and then uploads
the data to that location.
In case of a communication error, this function retries the upload.
:param endpoint: The creation endpoint of the server.
:param file: The file to upload.
:param metadata: Additional metadata for the upload.
:param client_session: An aiohttp ClientSession to use.
:param config: Settings to customize the retry behaviour.
:param headers: Optional headers used in the request.
:param chunksize: The size of individual chunks to upload at a time.
:return: The location where the file was uploaded to (if the upload succeeded).
.. # noqa: DAR401 asyncio.CancelledError
"""
url = yarl.URL(endpoint)
if metadata is None:
metadata = {}
retrying_create = _make_retrying("upload creation", config)
retrying_upload_file = _make_retrying("upload", config)
try:
ctx: Union[aiohttp.ClientSession, AsyncContextManager[aiohttp.ClientSession]]
if client_session is None:
ctx = aiohttp.ClientSession()
else:
ctx = asyncnullcontext(client_session)
async with ctx as session:
async for attempt in retrying_create:
with attempt:
location = await creation.create(
session,
url,
file,
metadata,
ssl=config.ssl,
headers=headers,
)
if not location.is_absolute():
location = url / location.path
async for attempt in retrying_upload_file:
with attempt:
await core.upload_buffer(
session,
location,
file,
ssl=config.ssl,
chunksize=chunksize,
headers=headers,
)
return location
except asyncio.CancelledError: # pragma: no cover
raise
except tenacity.RetryError as e:
logger.error(
f"Unable to upload file, even after retrying: {e.last_attempt.exception()}"
)
except Exception as e:
logger.error(f"Unable to upload file: {e}")
return None
async def metadata(
endpoint: Union[str, yarl.URL],
client_session: Optional[aiohttp.ClientSession] = None,
config: RetryConfiguration = RetryConfiguration(),
headers: Optional[Mapping[str, str]] = None,
) -> Optional[common.Metadata]:
"""Read back the metadata of an upload.
See :data:`aiotus.Metadata` for details on how metadata is handled in the
tus protocol.
In case of a communication error, this function retries.
:param endpoint: The location of the upload.
:param client_session: An aiohttp ClientSession to use.
:param config: Settings to customize the retry behaviour.
:param headers: Optional headers used in the request.
:return: The metadata associated with the upload.
.. # noqa: DAR401 asyncio.CancelledError
"""
if isinstance(endpoint, str):
url = yarl.URL(endpoint)
else:
url = endpoint
retrying_metadata = _make_retrying("query metadata", config)
try:
ctx: Union[aiohttp.ClientSession, AsyncContextManager[aiohttp.ClientSession]]
if client_session is None:
ctx = aiohttp.ClientSession()
else:
ctx = asyncnullcontext(client_session)
async with ctx as session:
md: common.Metadata
async for attempt in retrying_metadata:
with attempt:
return await core.metadata(
session, url, ssl=config.ssl, headers=headers
)
except asyncio.CancelledError: # pragma: no cover
raise
except tenacity.RetryError as e:
logger.error(
f"Unable to get metadata, even after retrying: {e.last_attempt.exception()}"
)
return None
async def _upload_partial(
semaphore: asyncio.Semaphore,
endpoint: Union[str, yarl.URL],
file: BinaryIO,
client_session: Optional[aiohttp.ClientSession],
config: RetryConfiguration,
headers: Optional[Mapping[str, str]],
chunksize: int,
) -> str:
"""Helper function for "upload_multiple() to upload a single part."""
tus_headers = dict(headers or {})
tus_headers["Upload-Concat"] = "partial"
async with semaphore:
url = await upload(
endpoint, file, None, client_session, config, tus_headers, chunksize
)
if url is None:
raise RuntimeError("Unable to upload part.")
return url.path
async def upload_multiple(
endpoint: Union[str, yarl.URL],
files: Iterable[BinaryIO],
metadata: Optional[common.Metadata] = None,
client_session: Optional[aiohttp.ClientSession] = None,
config: RetryConfiguration = RetryConfiguration(),
headers: Optional[Mapping[str, str]] = None,
chunksize: int = 4 * 1024 * 1024,
parallel_uploads: int = 3,
) -> Optional[yarl.URL]:
"""Upload multiple files and then use the "concatenation" protocol extension
to combine the parts on the server-side.
:param endpoint: The creation endpoint of the server.
:param files: The files to upload.
:param metadata: Additional metadata for the final upload.
:param client_session: An aiohttp ClientSession to use.
:param config: Settings to customize the retry behaviour.
:param headers: Optional headers used in the request.
:param chunksize: The size of individual chunks to upload at a time.
:param parallel_uploads: The number of parallel uploads to do concurrently.
:return: The location of the final (concatenated) file on the server.
:raises RuntimeError: If the server does not support the "concatenation" extension.
.. # noqa: DAR401 asyncio.CancelledError
"""
url = yarl.URL(endpoint)
if metadata is None:
metadata = {}
retrying_config = _make_retrying("query configuration", config)
retrying_create = _make_retrying("upload creation", config)
try:
ctx: Union[aiohttp.ClientSession, AsyncContextManager[aiohttp.ClientSession]]
if client_session is None:
ctx = aiohttp.ClientSession()
else:
ctx = asyncnullcontext(client_session)
async with ctx as session:
#
# Check if the server supports the "concatenation" extension.
#
async for attempt in retrying_config:
with attempt:
server_config = await core.configuration(
session, url, ssl=config.ssl, headers=headers
)
if "concatenation" not in server_config.protocol_extensions:
raise RuntimeError(
'Server does not support the "concatenation" extension.'
)
#
# Upload the individual parts.
#
# Used to limit the number of coroutines that perform uploads in parallel.
semaphore = asyncio.Semaphore(parallel_uploads)
coros = [
_upload_partial(
semaphore, endpoint, f, session, config, headers, chunksize
)
for f in files
]
tasks = [asyncio.create_task(c) for c in coros]
try:
paths = await asyncio.gather(*tasks)
except asyncio.CancelledError: # pragma: no cover
raise
except Exception as e:
logger.info("Cancelling other uploads...")
for t in tasks:
if not t.done():
t.cancel()
raise RuntimeError(f"Upload of a part failed: {e}")
concat_header = "final;" + " ".join(paths)
#
# Do the final concatenation.
#
final_headers = dict(headers or {})
final_headers.update({"Upload-Concat": concat_header})
async for attempt in retrying_create:
with attempt:
return await creation.create(
session,
url,
None,
metadata,
ssl=config.ssl,
headers=final_headers,
)
except asyncio.CancelledError: # pragma: no cover
raise
except tenacity.RetryError as e:
logger.error(
f"Unable to upload files, even after retrying: {e.last_attempt.exception()}"
)
except Exception as e:
logger.error(f"Unable to upload files: {e}")
return None
```
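For reference, a minimal client built on the retrying helpers above might look like the sketch below. The endpoint URL and file name are placeholders, and the metadata values are assumed to be bytes as expected by `aiotus.Metadata`.
```python
# Usage sketch for aiotus.retry; the endpoint and file name are placeholders.
import asyncio
from aiotus.retry import RetryConfiguration, upload, metadata as read_metadata

async def main():
    config = RetryConfiguration(retry_attempts=5, max_retry_period_seconds=30.0)
    with open("example.bin", "rb") as file:
        location = await upload(
            "https://tus.example.com/files",
            file,
            metadata={"filename": b"example.bin"},
            config=config,
        )
    if location is not None:
        print("uploaded to", location)
        print("server-side metadata:", await read_metadata(location, config=config))

asyncio.run(main())
```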
|
{
"source": "JensTimmerman/django-jquery",
"score": 2
}
|
#### File: django-jquery/jquery/context_processors.py
```python
from . import utils
def jquery(context):
return {'jquery_path': utils.jquery_path}
```
|
{
"source": "JensTimmerman/django-libs",
"score": 2
}
|
#### File: django-libs/django_libs/models.py
```python
from django.core.validators import RegexValidator
from django.db.models import CharField
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([], [r"^django_libs\.models\.ColorField"])
from .widgets import ColorPickerWidget
class ColorField(CharField):
"""Custom color field to display a color picker."""
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 6
super(ColorField, self).__init__(*args, **kwargs)
self.validators.append(RegexValidator(
regex='^([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$',
message='Only RGB color model inputs allowed, like FF00CC',
code='nomatch'))
def formfield(self, **kwargs):
kwargs['widget'] = ColorPickerWidget
return super(ColorField, self).formfield(**kwargs)
```
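A model using this field might look like the sketch below (the model and field names are illustrative). The stored value is a bare hex string such as `FF00CC`, and model forms render it with the `ColorPickerWidget`.
```python
# Illustrative model using ColorField; names are placeholders.
from django.db import models
from django_libs.models import ColorField

class Banner(models.Model):
    background_color = ColorField()  # validated as a 3- or 6-digit hex value, e.g. "FF00CC"
```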
|
{
"source": "JensTimmerman/django-tex",
"score": 2
}
|
#### File: django-tex/tests/tests.py
```python
import datetime
from decimal import Decimal
from django.test import TestCase
from django.test.utils import override_settings
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.template import engines
from django_tex.core import (
run_tex,
compile_template_to_pdf,
render_template_with_context,
)
from django_tex.exceptions import TexError
from django_tex.shortcuts import render_to_pdf
from .models import TemplateFile
class RunningTex(TestCase):
"""
Tests calling latex compiler
"""
def test_run_tex(self):
"""Call different LaTex interpreters with very simple template"""
source = "\
\\documentclass{article}\n\
\\begin{document}\n\
This is a test!\n\
\\end{document}"
interpreters = ["pdflatex", "latexmk -pdf", "lualatex"]
for name in interpreters:
with self.subTest(name=name):
with self.settings(LATEX_INTERPRETER=name):
pdf = run_tex(source)
self.assertIsNotNone(pdf)
@override_settings(LATEX_INTERPRETER="does_not_exist")
def test_wrong_latex_interpreter(self):
"""Using an unknown interpreter raises an Exception"""
source = "\
\\documentclass{article}\n\
\\begin{document}\n\
This is a test!\n\
\\end{document}"
with self.assertRaises(Exception):
run_tex(source) # should raise
class Exceptions(TestCase):
"""
Tests custom exceptions
"""
def test_exception_emergency_stop(self):
source = "\
\\documentclass{article}\n\
\\begin{document}\n\
This is a test!\n"
with self.assertRaises(TexError) as cm:
run_tex(source)
self.assertRegex(cm.exception.log, r"^This is Lua")
self.assertRegex(cm.exception.message, r"^! Emergency stop")
self.assertRegex(
cm.exception.message,
r"(End of file on the terminal\!$)|(job aborted, no legal \\end found)",
) # First alternative applies
# if tex source is given to
# pdflatex via stdin.
# Second, if tex source is
# given as filename
@override_settings(LATEX_INTERPRETER="pdflatex")
def test_pdflatex_exceptions(self):
source = "\
\\documentclass{article}\n\
\\begin{document}\n\
This is a test!\n"
with self.assertRaises(TexError) as cm:
run_tex(source)
self.assertRegex(cm.exception.log, r"^This is pdf")
self.assertRegex(cm.exception.message, r"^! Emergency stop")
self.assertRegex(
cm.exception.message,
r"(End of file on the terminal\!$)|(job aborted, no legal \\end found)",
) # First alternative applies
# if tex source is given to
# pdflatex via stdin.
# Second, if tex source is
# given as filename
def test_exception_unknown_command(self):
source = "\
\\documentclass{article}\n\
\\begin{document}\n\
\\unknown{command}\n\
\\end{document}\n"
with self.assertRaises(TexError) as cm:
run_tex(source)
self.assertRegex(cm.exception.log, r"^This is Lua")
self.assertRegex(cm.exception.message, r"^! Undefined control sequence")
self.assertRegex(cm.exception.message, r"l\.3")
def test_template_debug(self):
source = (
"\\documentclass{article}\n"
"\\begin{document}\n"
"\\unknown{command}\n"
"\\end{document}\n"
)
with self.assertRaises(TexError) as cm:
run_tex(source)
template_debug = cm.exception.template_debug
self.assertEqual(template_debug["during"], "\\unknown{command}")
self.assertEqual(template_debug["line"], 3)
def test_template_error_context(self):
source = (
"\\documentclass{article}\n"
"\n"
"\n"
"\n"
"\n"
"\\begin{document}\n"
"\\unknown{command}\n"
"\n"
"\n"
"\\end{document}\n"
)
with self.assertRaises(TexError) as cm:
run_tex(source)
message = cm.exception.message
expected_context = (
" 2 \n"
" 3 \n"
" 4 \n"
" 5 \n"
" 6 \\begin{document}\n"
" 7 \\unknown{command}\n"
" 8 \n"
" 9 \n"
"10 \\end{document}"
)
self.assertIn(expected_context, message)
class RenderingTemplates(TestCase):
"""
Tests rendering a template file with context to a string
TODO: Add a test for custom template file locations.
"""
def test_render_template(self):
template_name = "tests/test.tex"
context = {
"test": "a simple test",
"number": Decimal("1000.10"),
"date": datetime.date(2017, 10, 25),
"names": ["Arjen", "Robert", "Mats"],
}
output = render_template_with_context(template_name, context)
self.assertIn("\\section{a simple test}", output)
self.assertIn("This is a number: 1000,10.", output)
self.assertIn("And this is a date: 25.10.2017.", output)
self.assertIn("\\item Arjen", output)
def test_render_template_from_custom_directory(self):
template_name = "custom_directory_test.tex"
context = {"foo": "bar"}
output = render_template_with_context(template_name, context)
self.assertIn("bar", output)
class CompilingTemplates(TestCase):
"""
Tests compiling a template file with a context to a pdf file
"""
def test_compile_template_to_pdf(self):
"""test compile_template_to_pdf
- accepts template name and context
- context may contain unicode characters
- produces pdf file
"""
template_name = "tests/test.tex"
context = {
"test": "a simple test",
"number": Decimal("1000.10"),
"date": datetime.date(2017, 10, 25),
"names": ["Arjen", "Robert", "Mats", "รครผรถรรฉรจรดโโ
ง"],
}
pdf = compile_template_to_pdf(template_name, context)
self.assertIsNotNone(pdf)
@override_settings(LATEX_INTERPRETER="pdflatex")
def test_compile_template_with_graphics_pdflatex(self):
template_name = "tests/test_graphics.tex"
context = {}
pdf = compile_template_to_pdf(template_name, context)
self.assertIsNotNone(pdf)
class TemplateLanguage(TestCase):
"""
Tests features such as whitespace control and filters
"""
def render_template(self, template_string, context={}, using="tex"):
engine = engines[using]
template = engine.from_string(template_string)
return template.render(context)
def test_whitespace_control(self):
context = {"foo": "bar"}
template_string = "\\section{ {{- foo -}} }"
output = self.render_template(template_string, context)
self.assertEqual(output, "\\section{bar}")
def test_localization(self):
template_string = "{{ foo|localize }}"
parameters = [
("en", Decimal("1000.10"), "1000.10"),
("de-de", Decimal("1000.10"), "1000,10"),
("de-de", datetime.date(2017, 10, 25), "25.10.2017"),
]
for lang, value, expected in parameters:
with self.subTest(lang=lang, value=value):
with self.settings(LANGUAGE_CODE=lang):
output = self.render_template(template_string, {"foo": value})
self.assertEqual(output, expected)
@override_settings(LANGUAGE_CODE="de-de")
def test_format_long_date(self):
context = {"foo": datetime.date(2017, 10, 25)}
template_string = "{{ foo | date('d. F Y') }}"
# template_string="{{ '{:%d. %B %Y}'.format(foo) }}"
output = self.render_template(template_string, context)
self.assertEqual(output, "25. Oktober 2017")
def test_rendering_unicode(self):
context = {"foo": "รครผรรฉรด"}
template_string = "{{ foo }}"
output = self.render_template(template_string, context)
        self.assertEqual(output, "äüßéô")
def test_escape(self):
template_string = "{{ value | latex_escape }}"
parameters = [
("&", "\\&"),
("%", "\\%"),
("$", "\\$"),
("#", "\\#"),
("_", "\\_"),
("{", "\\{"),
("}", "\\}"),
("~", "\\textasciitilde{}"),
("^", "\\textasciicircum{}"),
("\\", "\\textbackslash{}"),
("\\\\", "\\textbackslash{}\\textbackslash{}"),
("foo", "foo"),
]
for value, expected in parameters:
with self.subTest(value):
output = self.render_template(template_string, {"value": value})
self.assertEqual(output, expected)
def test_linebreaks(self):
context = {
"brecht": "Ich sitze am Straรenhang." + "\nDer Fahrer wechselt das Rad."
}
template_string = "{{ brecht | linebreaks }}"
output = self.render_template(template_string, context)
self.assertEqual(
output, r"Ich sitze am Straรenhang.\\" + "\nDer Fahrer wechselt das Rad."
)
# Render with default django renderer
output = self.render_template(template_string, context, using="django")
self.assertHTMLEqual(
output,
"<p>Ich sitze am Straรenhang.<br>" + "Der Fahrer wechselt das Rad.</p>",
)
@override_settings(
TEMPLATES=[
{
"NAME": "tex",
"BACKEND": "django_tex.engine.TeXEngine",
"OPTIONS": {"environment": "tests.environment.test_environment"},
}
]
)
def test_custom_filters(self):
context = {
"duration": datetime.timedelta(minutes=90),
}
template_string = "{{ duration | hhmm_format }}"
output = self.render_template(template_string, context)
self.assertEqual("1:30", output)
@override_settings(LATEX_GRAPHICSPATH=["c:\\foo\\bar", "c:\\bar baz\\foo"])
def test_graphicspath(self):
template_string = "{% graphicspath %}"
with override_settings(LATEX_INTERPRETER="pdflatex"):
output = self.render_template(template_string)
self.assertEqual(
output, '\\graphicspath{ {c:/foo/bar/} {"c:/bar baz/foo/"} }'
)
with override_settings(LATEX_INTERPRETER="lualatex"):
output = self.render_template(template_string)
self.assertEqual(
output, "\\graphicspath{ {c:/foo/bar/} {c:/bar baz/foo/} }"
)
class Models(TestCase):
"""
TeXTemplateFile contains the relative path to a tex template (e.g. django_tex/test.tex)
    and validates whether this template can be loaded.
Since TeXTemplateFile is an abstract base class, it is used here in a subclassed version 'TemplateFile'
"""
def test_validation(self):
TemplateFile(title="valid", name="tests/test.tex").full_clean()
with self.assertRaises(ValidationError):
TemplateFile(title="invalid", name="template/doesnt.exist").full_clean()
class Views(TestCase):
def test_render_to_pdf(self):
request = None # request is only needed to make the signature of render_to_pdf similar to the signature of django's render function
template_name = "tests/test.tex"
context = {
"test": "a simple test",
"number": Decimal("1000.10"),
"date": datetime.date(2017, 10, 25),
"names": ["Arjen", "Robert", "Mats"],
}
response = render_to_pdf(request, template_name, context, filename="test.pdf")
self.assertIsInstance(response, HttpResponse)
self.assertEqual(response["Content-Type"], "application/pdf")
self.assertEqual(response["Content-Disposition"], 'filename="test.pdf"')
```
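For orientation, the settings these tests toggle via `override_settings` would normally live in the project settings; a rough sketch is shown below. The `DIRS` entry and all paths are assumptions, while the backend name, interpreter values and `LATEX_GRAPHICSPATH` are taken from the overrides above.
```python
# Sketch of project settings exercised by the tests above; paths are placeholders.
TEMPLATES = [
    {
        "NAME": "tex",
        "BACKEND": "django_tex.engine.TeXEngine",
        "DIRS": ["/path/to/latex/templates"],  # assumption: explicit template directory
    },
]
LATEX_INTERPRETER = "lualatex"  # the tests also exercise "pdflatex" and "latexmk -pdf"
LATEX_GRAPHICSPATH = ["/path/to/images"]
```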
|
{
"source": "JensTimmerman/thefuck",
"score": 3
}
|
#### File: tests/rules/test_mkdir_p.py
```python
from thefuck.main import Command
from thefuck.rules.mkdir_p import match, get_new_command
def test_match():
assert match(Command('mkdir foo/bar/baz', '', 'mkdir: foo/bar: No such file or directory'), None)
assert not match(Command('mkdir foo/bar/baz', '', ''), None)
assert not match(Command('mkdir foo/bar/baz', '', 'foo bar baz'), None)
assert not match(Command('', '', ''), None)
def test_get_new_command():
assert get_new_command(Command('mkdir foo/bar/baz', '', ''), None) == 'mkdir -p foo/bar/baz'
```
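The `mkdir_p` rule itself is not part of this excerpt; a minimal sketch consistent with the assertions above (and with the `rm_dir` rule that follows) could look like this, although the exact regex in the real rule may differ.
```python
# Hypothetical sketch of thefuck/rules/mkdir_p.py, reconstructed from the test above.
import re

def match(command, settings):
    return ('mkdir' in command.script
            and 'No such file or directory' in command.stderr)

def get_new_command(command, settings):
    return re.sub('^mkdir (.*)', 'mkdir -p \\1', command.script)
```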
#### File: thefuck/rules/rm_dir.py
```python
import re
def match(command, settings):
return ('rm' in command.script
and 'is a directory' in command.stderr)
def get_new_command(command, settings):
return re.sub('^rm (.*)', 'rm -rf \\1', command.script)
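# For example, a script like 'rm foo' whose stderr reports that foo
# 'is a directory' is rewritten to 'rm -rf foo'.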
```
|
{
"source": "JensTimmerman/whois",
"score": 3
}
|
#### File: whois/test/test_query.py
```python
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import *
import unittest
from whois import whois
class TestQuery(unittest.TestCase):
def test_simple_ascii_domain(self):
domain = 'google.com'
whois(domain)
def test_simple_unicode_domain(self):
        domain = 'нарояти.com'
whois(domain)
def test_unicode_domain_and_tld(self):
        domain = 'россия.рф'
whois(domain)
def test_ipv4(self):
""" Verify ipv4 addresses. """
domain = '172.16.58.3'
whois_results = whois(domain)
if isinstance(whois_results['domain_name'], list):
domain_names = [_.lower() for _ in whois_results['domain_name']]
else:
domain_names = [whois_results['domain_name'].lower()]
self.assertIn('1e100.net', domain_names)
self.assertIn('ns1.google.com', [_.lower() for _ in whois_results['name_servers']])
def test_ipv6(self):
""" Verify ipv6 addresses. """
domain = 'fc00:e968:6179::de52:7100'
whois_results = whois(domain)
if isinstance(whois_results['domain_name'], list):
domain_names = [_.lower() for _ in whois_results['domain_name']]
else:
domain_names = [whois_results['domain_name'].lower()]
self.assertIn('1e100.net', domain_names)
self.assertIn('ns1.google.com', [_.lower() for _ in whois_results['name_servers']])
```
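A minimal interactive session using the same dictionary-style access as the tests above:
```python
# Quick usage sketch of the whois() helper exercised above.
from whois import whois

result = whois("google.com")
print(result["domain_name"])
print(result["name_servers"])
```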
|
{
"source": "JensUweUlrich/ganon",
"score": 2
}
|
#### File: JensUweUlrich/ganon/setup.py
```python
import io
import os
import re
from setuptools import setup
def read(filename):
filename = os.path.join(os.path.dirname(__file__), filename)
text_type = type(u"")
with io.open(filename, mode="r", encoding='utf-8') as fd:
return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read())
setup(
name="ganon",
version="1.1.2",
url="https://www.github.com/pirovc/ganon",
license='MIT',
author="<NAME>",
description="ganon is a k-mer based read classification tool which uses Interleaved Bloom Filters in conjunction with a taxonomic clustering and a k-mer counting-filtering scheme.",
long_description=read("README.md"),
package_dir={'': 'src'},
packages=["ganon"],
entry_points={'console_scripts': ['ganon=ganon.ganon:main_cli']},
scripts=['scripts/ganon-get-seq-info.sh'],
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
```
|
{
"source": "JensUweUlrich/osprey",
"score": 2
}
|
#### File: osprey/python_nets/sbonito_d2s_eval.py
```python
from ont_fast5_api.fast5_interface import get_fast5_file
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
from datetime import datetime
import os
import h5py
import deepnano2
def med_mad(x, factor=1.4826):
"""
Calculate signal median and median absolute deviation
"""
med = np.median(x)
mad = np.median(np.absolute(x - med)) * factor
return med, mad
def rescale_signal(signal):
signal = signal.astype(np.float32)
med, mad = med_mad(signal)
signal -= med
signal /= mad
return np.clip(signal, -2.5, 2.5)
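# Example of the normalization above (illustrative values only): a raw trace is
# shifted to median 0, scaled by its MAD (factor 1.4826 approximates the std of
# a Gaussian), and clipped to [-2.5, 2.5], e.g.
#   raw = np.random.normal(500., 30., 4000).astype(np.float32)
#   norm = rescale_signal(raw)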
from modelsbd2s import Model
from bonito import model as bmodel
import bonito
import toml
# In[5]:
os.listdir()
# In[6]:
cfgx = toml.load(bonito.__path__[0] + "/models/configs/dna_r9.4.1.toml")
cfgx
common = dict(activation="swish", dropout=0.0, dilation=[1])
block = [
#C1
dict(**common, repeat = 1, filters = 80, kernel = [9], stride = [3], residual = False, separable = False,),
dict(**common, repeat = 5, filters = 80, kernel = [11], stride = [1], residual = True, separable = True, type="BlockX", pool=3, inner_size=160),
dict(**common, repeat = 5, filters = 80, kernel = [11], stride = [1], residual = True, separable = True, type="BlockX", pool=3, inner_size=160),
dict(**common, repeat = 5, filters = 80, kernel = [11], stride = [1], residual = True, separable = True, type="BlockX", pool=3, inner_size=160),
dict(**common, repeat = 5, filters = 80, kernel = [11], stride = [1], residual = True, separable = True, type="BlockX", pool=3, inner_size=160),
dict(**common, repeat = 5, filters = 80, kernel = [11], stride = [1], residual = True, separable = True, type="BlockX", pool=3, inner_size=160),
#C2
dict(**common, repeat = 1, filters = 80, kernel = [11], stride = [1], residual = False, separable = True,),
#C3
dict(**common, repeat = 1, filters = 40, kernel = [7], stride = [1], residual = False, separable = False,)
]
cfgx["encoder"]["activation"] = "swish"
cfgx["block"] = block
cfgx["input"]["features"] = 1
cfgx
bmodel.activations["relu6"] = nn.modules.activation.ReLU6
# In[15]:
bmodelx = Model(cfgx)
# In[16]:
C = 5
ls_weights = torch.cat([torch.tensor([0.4]), (0.1 / (C - 1)) * torch.ones(C - 1)]).cuda()
class Net(nn.Module):
def __init__(self, encoder, oks=40):
super(Net, self).__init__()
self.e = encoder
self.out = torch.nn.Linear(oks, 5)
def run_m(self, m):
def run(*x):
return m(*x)
return run
def forward(self, x):
with torch.cuda.amp.autocast(enabled=True):
#print("start size", x.shape)
x = x.permute((0,2,1))
for i, l in enumerate(self.e.encoder):
x = l(x)
#x = self.e(x)
#print("after c", x.shape)
x = x.permute((0,2,1))
out = self.out(x)
out = torch.nn.functional.log_softmax(out, dim=-1)
label_smoothing_loss = -((out * ls_weights.to(out.device)).mean())
return out
torch.set_grad_enabled(True)
bmodelx = Model(cfgx)
model = Net(bmodelx.encoder)
model.cuda()
torch.set_grad_enabled(False)
model.load_state_dict(torch.load(sys.argv[1]))
#step = 500
#pad = 10
step = 400
pad = 100
model.eval()
model.cuda()
torch.set_grad_enabled(False)
def decode(signal_piece):
alph = "NACGT"
base_state = torch.zeros((1, 1, 5))
decoder_state = torch.zeros((1, 1, 32))
decoder_state, _ = model.b.gru(base_state, decoder_state)
s_enc = signal_piece.unsqueeze(0).cpu()
out = []
for i in range(s_enc.shape[1]):
#print(decoder_state[0,0].shape, s_enc[0,i].shape)
base = model.j(s_enc[:1,i:i+1], decoder_state[:1,:1])[0][0][0].detach().numpy().argmax()
if base != 0:
base_state[:,:,:] = 0
base_state[0,0,base] = 1
decoder_state, _ = model.b.gru(base_state, decoder_state)
out.append(alph[base])
return "".join(out)
#dir_name = "../../training-data-nobackup/klebsiela/test_data/"
#dir_name = "../../training-data-nobackup/klebsiela/sample_no_restart/"
dir_name = sys.argv[2]
test_files = [os.path.join(dir_name, fn) for fn in os.listdir(dir_name)]
outg = open("%s-g.fasta" % sys.argv[3], "w")
outb5 = open("%s-b5.fasta" % sys.argv[3], "w")
model.cuda()
STEP = 1500
PAD = 100
for find, fn in enumerate(test_files):
start = datetime.now()
with h5py.File(fn, "r") as f:
key = list(f["Raw/Reads"].keys())[0]
signal_orig = np.array(f["Raw/Reads"][key]["Signal"][()], dtype=np.float32)
signal = rescale_signal(signal_orig)
print("go", find, len(signal_orig), len(signal), signal.dtype, datetime.now() - start)
outputs = []
batch = []
for i in range(0, len(signal), 3*step):
if i + 3*step + 6*pad > len(signal):
break
part = np.array(signal[i:i+3*step+6*pad])
part = np.vstack([part]).T
batch.append(part)
print("b ready", datetime.now() - start)
for i in range(0, len(batch), 100):
net_result = F.softmax(model(torch.Tensor(np.stack(batch[i:i+100])).cuda()).detach().cpu(), dim=-1)
#net_result = model_timed(model, torch.Tensor(batch).cpu()).detach().cpu().numpy()
print("pred read", datetime.now() - start)
for row in net_result:
# decoded = decoder.decode(row.numpy())
# outputs.append(decoded[pad:-pad])
outputs.append(row[pad:-pad].numpy())
# seq = []
# last = 47
# for o in outputs[:5]:
# seq.append(o.replace("N", ""))
# seq = "".join(seq)
seqg = deepnano2.beam_search_py(np.vstack(outputs), 1, 0.1)
seqb5 = deepnano2.beam_search_py(np.vstack(outputs), 5, 0.1)
# seqb10 = deepnano2.beam_search_py(np.vstack(outputs), 10, 0.001)
# seqb20 = deepnano2.beam_search_py(np.vstack(outputs), 20, 0.001)
print("seq ready", datetime.now() - start, len(seqg), len(seqb5))
print(">%d" % find, file=outg)
print(seqg, file=outg)
outg.flush()
print(">%d" % find, file=outb5)
print(seqb5, file=outb5)
outb5.flush()
# print(">%d" % find, file=outb10)
# print(seqb10, file=outb10)
# outb10.flush()
# print(">%d" % find, file=outb20)
# print(seqb20, file=outb20)
# outb20.flush()
print("done", find, fn, len(seqb5), datetime.now() - start)
```
|
{
"source": "JensUweUlrich/seqan",
"score": 3
}
|
#### File: pair_align/tests/run_tests.py
```python
import logging
import os.path
import sys
# Automagically add util/py_lib to PYTHONPATH environment variable.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)
import seqan.app_tests as app_tests
def main(source_base, binary_base):
"""Main entry point of the script."""
print 'Executing test for pair_align'
print '============================='
print
ph = app_tests.TestPathHelper(
source_base, binary_base,
'apps/pair_align/tests') # tests dir
# ============================================================
# Auto-detect the binary path.
# ============================================================
path_to_program = app_tests.autolocateBinary(
binary_base, 'apps/pair_align', 'pair_align')
# ============================================================
# Built TestConf list.
# ============================================================
# Build list with TestConf objects, analoguely to how the output
# was generated in generate_outputs.sh.
conf_list = []
# ============================================================
# Run on Proteins (Balibase).
# ============================================================
# Run with defaults for all non-mandatory options.
for fname in ['1aab', '1ad2', '2trx']:
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('%s.stdout' % fname),
args=['-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.fa' % fname)],
to_diff=[(ph.inFile('%s_out.fa' % fname),
ph.outFile('%s.fa' % fname)),
(ph.inFile('%s.stdout' % fname),
ph.outFile('%s.stdout' % fname))])
conf_list.append(conf)
# Run with explicit alphabet.
for fname in ['1aab', '1ad2', '2trx']:
conf = app_tests.TestConf(
program=path_to_program,
args=['-a', 'protein',
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.protein.fa' % fname)],
redir_stdout=ph.outFile('%s.protein.stdout' % fname),
to_diff=[(ph.inFile('%s.protein_out.fa' % fname),
ph.outFile('%s.protein.fa' % fname)),
(ph.inFile('%s.protein.stdout' % fname),
ph.outFile('%s.protein.stdout' % fname))])
conf_list.append(conf)
# Run with different alignment methods.
for fname in ['1aab', '1ad2', '2trx']:
for m in ['nw', 'gotoh', 'sw', 'lcs']:
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('%s.m%s.stdout' % (fname, m)),
args=['-m', m,
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.m%s.fa' % (fname, m))],
to_diff=[(ph.inFile('%s.m%s_out.fa' % (fname, m)),
ph.outFile('%s.m%s.fa' % (fname, m))),
(ph.inFile('%s.m%s.stdout' % (fname, m)),
ph.outFile('%s.m%s.stdout' % (fname, m)))])
conf_list.append(conf)
# Run with different scoring options.
for fname in ['1aab', '1ad2', '2trx']:
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('%s.g-20.stdout' % fname),
args=['-g', '-20',
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.g-20.fa' % fname)],
to_diff=[(ph.inFile('%s.g-20_out.fa' % fname),
ph.outFile('%s.g-20.fa' % fname)),
(ph.inFile('%s.g-20.stdout' % fname),
ph.outFile('%s.g-20.stdout' % fname))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('%s.e-5.stdout' % fname),
args=['-e', '-5',
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.e-5.fa' % fname)],
to_diff=[(ph.inFile('%s.e-5_out.fa' % fname),
ph.outFile('%s.e-5.fa' % fname)),
(ph.inFile('%s.e-5.stdout' % fname),
ph.outFile('%s.e-5.stdout' % fname))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('%s.ms10.stdout' % fname),
args=['-ms', '10',
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.ms10.fa' % fname)],
to_diff=[(ph.inFile('%s.ms10_out.fa' % fname),
ph.outFile('%s.ms10.fa' % fname)),
(ph.inFile('%s.ms10.stdout' % fname),
ph.outFile('%s.ms10.stdout' % fname))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('%s.mm-8.stdout' % fname),
args=['-mm', '-8',
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.mm-8.fa' % fname)],
to_diff=[(ph.inFile('%s.mm-8_out.fa' % fname),
ph.outFile('%s.mm-8.fa' % fname)),
(ph.inFile('%s.mm-8.stdout' % fname),
ph.outFile('%s.mm-8.stdout' % fname))])
conf_list.append(conf)
# Run with matrix file.
for fname in ['1aab', '1ad2', '2trx']:
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('%s.maVTML200.stdout' % fname),
args=['-ma', ph.inFile('VTML200I'),
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.maVTML200.fa' % fname)],
to_diff=[(ph.inFile('%s.maVTML200_out.fa' % fname),
ph.outFile('%s.maVTML200.fa' % fname)),
(ph.inFile('%s.maVTML200.stdout' % fname),
ph.outFile('%s.maVTML200.stdout' % fname))])
conf_list.append(conf)
# Run with different banded alignment options.
for fname in ['1aab', '1ad2', '2trx']:
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('%s.lo5.stdout' % fname),
args=['-lo', '5',
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.lo5.fa' % fname)],
to_diff=[(ph.inFile('%s.lo5_out.fa' % fname),
ph.outFile('%s.lo5.fa' % fname)),
(ph.inFile('%s.lo5.stdout' % fname),
ph.outFile('%s.lo5.stdout' % fname))])
conf_list.append(conf)
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('%s.hi5.stdout' % fname),
args=['-hi', '5',
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.hi5.fa' % fname)],
to_diff=[(ph.inFile('%s.hi5_out.fa' % fname),
ph.outFile('%s.hi5.fa' % fname)),
(ph.inFile('%s.hi5.stdout' % fname),
ph.outFile('%s.hi5.stdout' % fname))])
conf_list.append(conf)
# Run with different matrix configuraiton options.
for fname in ['1aab', '1ad2', '2trx']:
for c in ['ffff', 'tttt', 'ffft', 'fftf', 'ftff', 'tfff', 'fftt',
'fttf', 'ttff', 'tfft']:
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('%s.c%s.stdout' % (fname, c)),
args=['-c', c,
'-s', ph.inFile('%s.fa' % fname),
'-o', ph.outFile('%s.c%s.fa' % (fname, c))],
to_diff=[(ph.inFile('%s.c%s_out.fa' % (fname, c)),
ph.outFile('%s.c%s.fa' % (fname, c))),
(ph.inFile('%s.c%s.stdout' % (fname, c)),
ph.outFile('%s.c%s.stdout' % (fname, c)))])
conf_list.append(conf)
# ============================================================
# Run on DNA (Adenoviruses).
# ============================================================
# Run with defaults for all non-mandatory options.
for i in [1, 2, 3]:
conf = app_tests.TestConf(
program=path_to_program,
args=['-a', 'dna',
'-s', ph.inFile('adeno%d.fa' % i),
'-o', ph.outFile('adeno%d.fa' % i)],
to_diff=[(ph.inFile('adeno%d_out.fa' % i),
ph.outFile('adeno%d.fa' % i))])
conf_list.append(conf)
# ============================================================
# Run on RNA.
# ============================================================
# Run with defaults for all non-mandatory options.
for i in [1, 2, 3]:
conf = app_tests.TestConf(
program=path_to_program,
args=['-a', 'rna',
'-s', ph.inFile('adeno%d-rna.fa' % i),
'-o', ph.outFile('adeno%d-rna.fa' % i)],
to_diff=[(ph.inFile('adeno%d-rna_out.fa' % i),
ph.outFile('adeno%d-rna.fa' % i))])
conf_list.append(conf)
# Execute the tests.
failures = 0
for conf in conf_list:
res = app_tests.runTest(conf)
# Output to the user.
print ' '.join(['pair_align'] + conf.args),
if res:
print 'OK'
else:
failures += 1
print 'FAILED'
# Cleanup.
ph.deleteTempDir()
print '=============================='
print ' total tests: %d' % len(conf_list)
print ' failed tests: %d' % failures
print 'successful tests: %d' % (len(conf_list) - failures)
print '=============================='
# Compute and return return code.
return failures != 0
if __name__ == '__main__':
sys.exit(app_tests.main(main))
```
#### File: seqan/dddoc/dddoc.py
```python
import os
import os.path
import copy
import pickle
import string
import sys
# Constant for C++ files.
FILETYPE_CPP = 2
# Constant for DDDOC files.
FILETYPE_DDDOC = 1
# Constant for none of the above.
FILETYPE_OTHER = 0
# Extension of C++ files.
CPP_EXTS = ['c', 'C', 'cpp', 'CPP', 'c++', 'C++', 'h', 'H', 'hpp', 'HPP',
'h++', 'H++']
# Extensions of DDDOC files.
DDDOC_EXTS = ['dddoc', 'DDDOC']
# List of ignored directory names.
IGNORED_DIRS = ['CSV', '.svn', 'seeds2', 'find2', 'cmake']
DATA = None
ID = 0
class DddocCache(object):
def __init__(self, path):
self.path = path
self.content = {}
self._tryLoad()
def _tryLoad(self):
try:
with open(self.path, 'rb') as f:
self.content = pickle.load(f)
except:
print >>sys.stderr, 'Could not load cache %s' % self.path
return False
print >>sys.stderr, 'Successfully loaded cache %s' % self.path
return True
def flush(self):
try:
with open(self.path, 'wb') as f:
pickle.dump(self.content, f)
except:
print >>sys.stderr, 'Could not store cache %s' % self.path
return False
print >>sys.stderr, 'Successfully stored cache %s' % self.path
return True
def has_key(self, key):
return self.content.has_key(key)
def isFresh(self, filename):
if not self.has_key(filename):
return False
mtime = os.stat(filename).st_mtime
return mtime >= self.content[filename][0]
def get(self, key, defaultValue=None):
return self.content.get(key, (None, defaultValue))[1]
def set(self, filename, value):
mtime = os.stat(filename).st_mtime
self.content[filename] = (mtime, value)
class App(object):
"""Application object for DDDoc.
Provides a facade to the functionality of the core module.
Usage:
app = App()
app.loadFiles([<files>])
app.loadFiles([<files>])
app.loadingComplete()
Attrs:
data The global state Data object.
"""
def __init__(self):
"""Initialize object members."""
global DATA
DATA = Data([], 0)
self.data = DATA
self.next_id = ID
self.cache = DddocCache('dddoc_cache.bin')
def loadFiles(self, filenames):
"""Load the files with the given file name."""
loadFiles(filenames, self.cache)
def loadingComplete(self):
"""Initialize data object.
This method is called after all calls to LoadFiles().
"""
self.cache.flush()
self.data.init()
def getNextId(self):
"""Returns an identifier.
Each id is only returned once.
"""
assert False, "For future use."
self.next_id += 1
return self.next_id - 1
class Line:
def __init__(self, _nodes, _text, file_name, line_no):
global ID
self.nodes = _nodes
self._text = _text.rstrip()
self.id = ID
self.file_name = file_name
self.line_no = line_no
ID += 1
def __repr__(self):
return 'Line(%s, %s, %s, %s)' % (repr(self.nodes), repr(self._text), repr(self.file_name), repr(self.line_no))
def name(self, index = 0):
if len(self.nodes) > index:
return self.nodes[index]
else:
return '(unknown)'
def text(self):
return self._text
class Data:
def __init__ (self, _lines, _level):
self.lines = _lines
self.level = _level
self.cache = {}
def __repr__(self):
return 'Data(\n %s,\n%d)' % (',\n '.join(map(repr, self.lines)), self.level)
def init(self):
self.lines.sort(sortLineCompare)
relations = self["globals.relations"].at_level(1).lines
for relation in relations:
to = relation.name(2)
arr = splitName(relation.text())
if len(arr) > 0:
findRelation(self, arr, to)
self.lines.sort(sortLineCompare)
def __getitem__(self, str):
return self.find(str)
def find(self, str):
"""Find all lines below the given path.
Args:
str String with a dot separated path to the lines to find.
Returns:
Data object with the lines below the given path.
"""
# If possible, return from cache.
if self.cache.has_key(str):
return self.cache[str]
arr = splitName(str)
lines = []
# TODO(holtgrew): Keeping linear code for linear search to be able to fix things quickly.
if True:
# Use binary search for finding the first hit.
def isHit(line, arr, level):
"""Return True iff arr matches the key of line, from the given level on."""
i = 0
while (i < len(arr)) and (i + level < len(line.nodes)) and (arr[i] == line.nodes[i + level]):
i += 1
return i == len(arr)
# Use binary search to find the first hit.
query = arr
lo = 0
hi = len(self.lines)
while lo < hi:
mid = (lo + hi) // 2
slice = self.lines[mid].nodes[self.level:self.level + len(query)]
if slice < query:
lo = mid + 1
else:
hi = mid
result = lo
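            # `lo` is now the index of the first line whose key slice compares
            # >= `query`, i.e. the same result bisect_left would give on the
            # sliced keys.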
# Output all consecutive hits, if any.
if result < len(self.lines) and isHit(self.lines[result], arr, self.level):
for line in self.lines[result:]:
if isHit(line, arr, self.level):
lines.append(line)
else:
break
else:
# Use linear search for finding the first hit. Deactivated for now.
maxi = 0
for line in self.lines:
i = 0
while (i < len(arr)) and (i + self.level < len(line.nodes)) and (arr[i] == line.nodes[i + self.level]):
i += 1
if i == len(arr):
lines.append(line)
elif maxi > i:
break
maxi = i
data = Data(lines, self.level + len(arr))
# Cache result.
self.cache[str] = data
return data
def at_level(self, level = 0):
lines = []
for line in self.lines:
if len(line.nodes) == self.level + level:
lines.append(line)
data = Data(lines, self.level + level)
return data
def sub_level(self, level = 1):
lines = []
for line in self.lines:
if len(line.nodes) >= self.level + level:
lines.append(line)
data = Data(lines, self.level + level)
return data
def by_occ(self):
lines = copy.copy(self.lines)
lines.sort(sortLinesByOcc)
data = Data(lines, self.level)
return data
def empty(self):
return len(self.lines) == 0
def name(self, index = 0):
if len(self.lines) > 0:
return self.lines[0].name(index)
return '(empty)'
def text(self):
str = ''
for line in self.at_level(0).lines:
if (str != ''): str += '\n'
str += line.text()
return str
def keys(self, level = 0):
dict = {}
for line in self.lines:
if len(line.nodes) > self.level + level:
dict[line.nodes[self.level + level]] = 1
arr = dict.keys()
arr.sort()
return arr
def keys_by_occ(self, level = 0):
dict = {}
for line in self.lines:
if len(line.nodes) > self.level + level:
key = line.nodes[self.level + level]
if not dict.has_key(key) or (dict[key] > line.id):
dict[key] = line.id
dict2 = {}
for key in dict:
dict2[dict[key]] = key
arr2 = dict2.keys()
arr2.sort()
arr = []
for i in arr2:
arr.append(dict2[i])
return arr
################################################################################
def sortLineCompare(left, right):
l = left.nodes
r = right.nodes
i = 0
while (i < len(l)) and (i < len(r)):
ret = cmp(l[i], r[i])
if ret != 0:
return ret
i += 1
if len(l) < len(r): return -1
elif len(l) > len(r): return 1
elif left.id < right.id: return -1
else: return 1
################################################################################
def sortLinesByOcc(left, right):
if left.id < right.id: return -1
else: return 1
################################################################################
def findRelation(data, arr, to):
global DATA
if len(arr) > 0:
if (arr[0] == '*'):
sub_data = data.sub_level(1)
else:
lines = []
for line in data.lines:
if line.name(data.level) == arr[0]:
lines.append(line)
sub_data = Data(lines, data.level + 1)
findRelation(sub_data, arr[1:], to)
else:
for line in data.at_level(0).lines:
text = line.name(0) + '.' + line.name(1)
entry = splitName(line.text())
entry = entry[:2] + [to]
DATA.lines.append(Line(entry, text, '<through-relation>', 0))
################################################################################
def clearData():
global DATA
DATA = Data([], 0)
global ID
ID = 0
################################################################################
def loadDDDOCFile(filename, cache):
if cache.isFresh(filename):
return cache.get(filename)
f = open(filename)
text = f.readlines()
f.close()
cache.set(filename, text)
return text
################################################################################
def loadCPPFile(filename, cache):
if cache.isFresh(filename):
return cache.get(filename)
f = open(filename)
lines = f.readlines()
f.close()
ret = []
#test for SEQAN_NO_DDDOC
for line in lines:
if line.find("SEQAN_NO_DDDOC") >= 0:
cache.set(filename, ret)
return ret;
incomment = False
innextcomment = False
inextract = False
for line in lines:
line = line.rstrip()
str_line = ""
if len(line) == 0:
if not innextcomment and not incomment:
str_line = "."
else:
str_line = " "
while len(line) > 0 :
if innextcomment:
if line[len(line)-1] == "\\" :
if inextract: str_line += line[: len(line)-1]
else:
if inextract: str_line += line
innextcomment = False
break
elif incomment:
pos1 = line.find("*/")
if pos1 < 0:
if inextract: str_line += line;
break;
else:
if inextract:
str_line += line[:pos1];
line = line[pos1 + 3:];
else:
line = line[pos1 + 2:];
incomment = False;
else:
pos1 = line.find("/*")
pos2 = line.find("//")
pos3 = line.find('"')
if (pos1 >= 0) and ((pos2 < 0) or (pos1 < pos2)) and ((pos3 < 0) or (pos1 < pos3)):
pos9 = line.find("*/", pos1 + 2)
if (len(line) > pos1 + 2):
inextract = (line[pos1 + 2] == "/") or (line[pos1 + 2] == "*")
else:
inextract = False
if pos9 < 0 :
if inextract: str_line += line[pos1 + 3:]
incomment = True
break
else:
if inextract:
str_line += line[pos1 + 3: pos3]
line = line[pos9 + 3:]
else:
line = line[pos9 + 2:]
elif (pos2 >= 0) and ((pos3 < 0) or (pos2 < pos3)):
pos2b = pos2 + 2;
while ((pos2b < len(line)) and ((line[pos2b] == "/") or (line[pos2b] == "*"))):
pos2b += 1
inextract = (pos2b > pos2 + 2)
if line[len(line)-1] == "\\" :
if inextract: str_line += line[pos2b: len(line)-1]
innextcomment = True
else:
if inextract: str_line += line[pos2b:]
break
elif pos3 >= 0:
pos9 = line.find('"', pos3 + 2)
if pos9 < 0:
line = line[pos9+1:]
break
else:
break
else:
break
ret = ret + [str_line]
cache.set(filename, ret)
return ret
################################################################################
def getFileType(filename):
"""Determines file type from filename.
Determines the file type from the extension of the given filename.
>>> getFileType('test.cpp') == FILETYPE_CPP
True
>>> getFileType('path/file.h') == FILETYPE_CPP
True
>>> getFileType('test.dddoc') == FILETYPE_DDDOC
True
Args:
filename Filename to parse.
Returns:
One of {FILETYPE_CPP, FILETYPE_DDDOC, FILETYPE_OTHER}, depending
on the extension of filename.
"""
# Get file extension.
base, ext = os.path.splitext(filename)
if ext[1:] in CPP_EXTS:
return FILETYPE_CPP
elif ext[1:] in DDDOC_EXTS:
return FILETYPE_DDDOC
else:
return FILETYPE_OTHER
################################################################################
def loadFile(filename, cache):
file_type = getFileType(filename)
if (file_type == 2): return loadCPPFile(filename, cache)
elif (file_type == 1): return loadDDDOCFile(filename, cache)
else: raise "unknown file type"
################################################################################
def parseFile(filename, cache):
text = loadFile(filename, cache)
text.append('.')
context = [[]]
str = False
line_no = 0
for line in text:
line_no += 1
if line != '':
if line[0] == '.':
parseString(str, context, filename, line_no)
str = line
elif str:
if str[len(str)-1] != '\n': str += '\n'
str += line
################################################################################
def parseString(str, context, file_name, line_no):
global DATA
if not str or (str == '.'):
return [[]]
level = 0
while (level < len(str)) and (str[level] == '.'):
level += 1
str = str[level:]
if (level < len(context)):
del context[level:]
if len(context) > 0:
entry = copy.copy(context[len(context) - 1])
else:
entry = []
key = ''
text = ''
pos = 0
is_escaped = False
c_quoted = ''
while (pos < len(str)):
c = str[pos]
if c == "\x0d":
pos += 1
continue
if c_quoted != "":
if c_quoted == c: c_quoted = ""
else: key += c
elif is_escaped:
key += c
is_escaped = False
else:
if c == '\\': is_escaped = True
elif c in ['"', "'"]: c_quoted = c
elif (c == ':'):
key = str[0:pos]
text = str[pos+1:]
break
else: key += c
pos += 1
entry += splitName(key)
DATA.lines.append(Line(entry, text, file_name, line_no))
context.append(entry)
################################################################################
def splitName(line):
pos = 0
key = ""
c_quoted = ""
is_escaped = False
li = []
while (pos < len(line)):
c = line[pos]
if c_quoted != "":
if c_quoted == c: c_quoted = ""
else: key += c
elif is_escaped:
key += c
is_escaped = False
else:
if c == '\\': is_escaped = True
elif c in ['"', "'"]: c_quoted = c
elif c == '.':
if key != "":
li.append(key)
key = ""
elif c == '|':
if key != "":
li.append(key)
key = ""
rest = line[pos+1:]
if len(rest)>0: li.append(rest)
break;
elif c != '\n':
key += c
pos += 1
if key != "": li.append(key)
return li
def splitUrl(line):
"""Splits a tuple at separator characters.
The separator character is '|'. These characters can be escaped
    using the backslash sign '\'; entries can also be quoted.
>>> splitUrl('a|b|c')
['a', 'b', 'c']
>>> splitUrl('a\|b|c')
['a|b', 'c']
>>> splitUrl('"a|b"|c')
['a|b', 'c']
Args:
line String to split.
Returns
List with strings, split at | symbols, excluding these symbols
themselves.
"""
pos = 0
key = ""
c_quoted = ""
is_escaped = False
li = []
while (pos < len(line)):
c = line[pos]
if c_quoted != "":
if c_quoted == c: c_quoted = ""
else: key += c
elif is_escaped:
key += c
is_escaped = False
else:
if c == '\\': is_escaped = True
elif c in ['"', "'"]: c_quoted = c
elif c == '|':
if key != "":
li.append(key)
key = ""
elif c != '\n':
key += c
pos += 1
if key != "": li.append(key)
return li
def loadFiles(search_path, cache):
"""Call parseFile() on files.
All files below search_path will be searched that have file type
FILETYPE_CPP or FILETYPE_DOC as determined by getFileType().
Directories with names of IGNORED_DIRS are skipped.
Args:
search_path String, path to search files under.
"""
for root, dirs, files in os.walk(search_path):
# Parse all files.
for file in files:
if os.path.basename(file).startswith('.'):
continue # Skipp hidden files.
path = os.path.join(root, file)
if getFileType(path) in [FILETYPE_CPP, FILETYPE_DDDOC]:
parseFile(path, cache)
# Exclude ignored diretories.
for ignored in IGNORED_DIRS:
if ignored in dirs:
dirs.remove(ignored)
```
#### File: dox/test/test_inc_mgr.py
```python
__author__ = '<NAME> <<EMAIL>>'
import os
import os.path
import unittest
import seqan.dox.inc_mgr as inc_mgr
class TestIncludeManager(unittest.TestCase):
def setUp(self):
base_dir = os.path.dirname(os.path.realpath(__file__))
base_dir = os.path.join(base_dir, '../', 'test_src')
self.mgr = inc_mgr.IncludeManager([base_dir])
def testIncludeFile(self):
txt = self.mgr.loadFile('example.cpp')
self.assert_(txt.splitlines()[0].startswith('#include <iostream>'))
self.assert_(txt.splitlines()[-1].endswith('}'))
def testIncludeSnippet(self):
txt = self.mgr.loadSnippet('example.cpp', 'Print to stdout')
self.assertEqual(len(txt.splitlines()), 1)
self.assertEqual(txt.splitlines()[0], r' std::cout << "This is an example.\n";')
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JensVA/algorithms-python-jensva",
"score": 3
}
|
#### File: JensVA/algorithms-python-jensva/riemannsum.py
```python
def RightRiemannSum(a, b, aantal):
    deltaX = (b-a)/aantal  # width of one rectangle
    x = a  # x = current x-value of the rectangle
    opp_totaal = 0.0
    for n in range(0, aantal):  # repeat `aantal` times
        opp_rechthoek = ((x+deltaX)**2 + 1) * deltaX  # f(x) = x² + 1, (x+deltaX) gives the right Riemann sum
        x = x + deltaX  # move to the next rectangle
        opp_totaal = opp_totaal + opp_rechthoek
return opp_totaal
def LeftRiemannSum(a, b, aantal):
    deltaX = (b-a)/aantal  # width of one rectangle
    x = a  # x = current x-value of the rectangle
    opp_totaal = 0.0
    for n in range(0, aantal):  # repeat `aantal` times
        opp_rechthoek = (x**2 + 1) * deltaX  # f(x) = x² + 1
        x = x + deltaX  # move to the next rectangle
        opp_totaal = opp_totaal + opp_rechthoek
return opp_totaal
def MiddleRiemannSum(a, b, aantal):
    deltaX = (b-a)/aantal  # width of one rectangle
    x = a  # x = current x-value of the rectangle
    opp_totaal = 0.0
    for n in range(0, aantal):  # repeat `aantal` times
        opp_rechthoek = ((x+(deltaX/2))**2 + 1) * deltaX  # f(x) = x² + 1, (x+(deltaX/2)) gives the midpoint Riemann sum
        x = x + deltaX  # move to the next rectangle
        opp_totaal = opp_totaal + opp_rechthoek
return opp_totaal
# function: f(x) = x² + 1
a = 0
b = 2
n = 5
print("function: f(x) = x² + 1 Interval: [" + str(a) + ", " + str(b) + "]\tn = " + str(n))
print("Area R: ", RightRiemannSum(a, b, n))
print("Area L: ", LeftRiemannSum(a, b, n))
print("Area M: ", MiddleRiemannSum(a, b, n))
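# Sanity check: the exact integral of x**2 + 1 over [0, 2] is 2**3/3 + 2 = 14/3 ≈ 4.667.
# Since f is increasing on this interval, the left sum underestimates and the right sum
# overestimates; the midpoint sum converges fastest as n grows.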
```
|
{
"source": "JensVanHerck/Adafruit_CircuitPython_RGB_Display",
"score": 2
}
|
#### File: Adafruit_CircuitPython_RGB_Display/adafruit_rgb_display/ssd2119.py
```python
try:
import struct
except ImportError:
import ustruct as struct
from adafruit_rgb_display.rgb import DisplaySPI
__version__ = "0.0.0-auto.0"
class SSD2119(DisplaySPI):
"""
A simple driver for the SSD2119 based displays.
>>> import busio
>>> import digitalio
>>> import board
>>> from adafruit_rgb_display import color565
    >>> import adafruit_rgb_display.ssd2119 as ssd2119
    >>> spi = busio.SPI(clock=board.SCK, MOSI=board.MOSI, MISO=board.MISO)
    >>> display = ssd2119.SSD2119(spi, cs=digitalio.DigitalInOut(board.GPIO0),
    ...                           dc=digitalio.DigitalInOut(board.GPIO15))
>>> display.fill(color565(0xff, 0x11, 0x22))
>>> display.pixel(120, 160, 0)
"""
_COLUMN_SET = 0x004F
_PAGE_SET = 0x004E
_RAM_WRITE = 0x0022
_RAM_READ = 0x0022
_INIT = (
(0x0028, b'\x0006'),
(0x0000, b'\x0001'),
(0x0010, b'\x0000'),
(0x0001, b'\x32EF'),
(0x0002, b'\x0600'),
        (0x0003, b'\x6A38'),  # Power Control 1, VRH[5:0]
        (0x0011, b'\x6870'),
        (0x000F, b'\x0000'),
        (0x000B, b'\x5308'),
        (0x000C, b'\x0003'),  # Power Control 2, SAP[2:0], BT[3:0]
        (0x000D, b'\x000A'),
        (0x000E, b'\x2E00'),
(0x001E, b'\x00BE'),
(0x0025, b'\x8000'),
(0x0026, b'\x7800'),
(0x004E, b'\x0000'),
(0x004F, b'\x0000'),
(0x0012, b'\x08D9'),
(0x0030, b'\x0000'),
(0x0031, b'\x0104'),
(0x0032, b'\x0100'),
(0x0033, b'\x0305'),
(0x0034, b'\x0505'),
(0x0035, b'\x0305'),
(0x0036, b'\x0707'),
(0x0037, b'\x0300'),
        (0x003A, b'\x1200'),
        (0x003B, b'\x0800'),
        (0x0007, b'\x0033'),
(0x0022, None)
)
_ENCODE_PIXEL = ">H"
_ENCODE_POS = ">HH"
_DECODE_PIXEL = ">BBB"
#pylint: disable-msg=too-many-arguments
def __init__(self, spi, dc, cs, rst=None, width=320, height=240,
baudrate=16000000, polarity=0, phase=0, rotation=0):
super().__init__(spi, dc, cs, rst=rst, width=width, height=height,
baudrate=baudrate, polarity=polarity, phase=phase,
rotation=rotation)
self._scroll = 0
#pylint: enable-msg=too-many-arguments
def scroll(self, dy=None): #pylint: disable-msg=invalid-name
"""Scroll the display by delta y"""
if dy is None:
return self._scroll
self._scroll = (self._scroll + dy) % self.height
self.write(0x37, struct.pack('>H', self._scroll))
return None
```
|
{
"source": "JensVanHerck/httpdb-gae",
"score": 2
}
|
#### File: JensVanHerck/httpdb-gae/main.py
```python
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.ext.webapp.util import run_wsgi_app
class Folder(db.Model):
uuidOwner = db.StringProperty(multiline=False)
folderName = db.StringProperty(multiline=False)
class StoredData(db.Model):
uuidOwner = db.StringProperty(multiline=False)
name = db.StringProperty(multiline=False)
content = db.StringProperty(multiline=True)
folder = db.ReferenceProperty(Folder)
class HTTPDB(webapp.RequestHandler):
def _isSl(self):
if self.request.headers.has_key("X-Secondlife-Shard"):
if self.request.headers["X-Secondlife-Shard"] == "Production":
return True
return False
def _fileExists(self, name, uuidOwner, folder):
_files = StoredData.gql("WHERE name = :1 AND uuidOwner = :2 AND folder = :3", name, uuidOwner, folder).fetch(1)
if len(_files) == 1:
self._file = _files[0]
return True
else:
self._file = StoredData()
return False
def _folderExists(self, name, uuidOwner):
_folders = Folder.gql("WHERE folderName = :1 AND uuidOwner = :2", name, uuidOwner).fetch(1)
if len(_folders) == 1:
self._folder = _folders[0]
return True
else:
self._folder = Folder()
return False
def _dbget(self, path, mode, uuidOwner):
if path.startswith("/"): # Ignore a leading /
path = path[1:]
parts = path.split("/")
length = len(parts)
if 1 <= length <= 2: # Either 1 or 2 items in the split string
if mode == "list":
if self._folderExists( parts[0], uuidOwner ):
files = StoredData.gql("WHERE uuidOwner = :1 AND folder = :2", uuidOwner, self._folder)
data = ""
for item in files:
data += item.name + "\n"
return data.strip()
else: return "404: Folder not found"
else:
if length == 1: # No parent folder
data = StoredData.gql("WHERE uuidOwner = :1 AND name = :2 AND folder = :3", uuidOwner, parts[0], None).fetch(1)
if len(data) == 1:
return data[0].content
if length == 2: # With a parent folder
folder = Folder.gql("WHERE uuidOwner = :1 AND folderName = :2", uuidOwner, parts[0]).fetch(1)
if len(folder) != 1:
return "404: Folder not found"
data = StoredData.gql("WHERE uuidOwner = :1 AND name = :2 AND folder = :3", uuidOwner, parts[1], folder[0].key()).fetch(1)
if len(data) == 1:
return data[0].content
else: return "400: Only one parent folder allowed."
return "404: Not found"
def _dbput(self, path, value, uuidOwner):
if path.startswith("/"): # Ignore a leading /
path = path[1:]
parts = path.split("/")
length = len(parts)
if 1 <= length <= 2: # Either 1 or 2 items in the split string
if length == 1: # No parent folder
if self._fileExists(parts[0], uuidOwner, None):
# Get file and replace contents, found file has been stored in self._file
self._file.content = value
else:
# File not found, new StoredData item in self._file
self._file.uuidOwner = uuidOwner
self._file.name = parts[0]
self._file.content = value
self._file.put()
return "201: Successfully created " + path
if length == 2: # With a parent folder
if not self._folderExists(parts[0], uuidOwner): # A new or existing folder is always placed in self._folder
self._folder.uuidOwner = uuidOwner
self._folder.folderName = parts[0]
self._folder.put()
if self._fileExists(parts[1], uuidOwner, self._folder):
# Get file and replace contents, found file has been stored in self._file
self._file.content = value
else:
# File not found, new StoredData item in self._file
self._file.uuidOwner = uuidOwner
self._file.name = parts[1]
self._file.content = value
self._file.folder = self._folder
self._file.put()
return "201: Successfully created " + str(parts)
else: return "400: Only one parent folder allowed."
return "500: You should never encounter this"
def _dbdel(self, path, uuidOwner):
if path.startswith("/"): # Ignore a leading /
path = path[1:]
parts = path.split("/")
length = len(parts)
if 1 <= length <= 2: # Either 1 or 2 items in the split string
if length == 1: # No parent folder
if self._fileExists(parts[0], uuidOwner, None):
self._file.delete()
else:
return "404: Path not found"
if length == 2: # With a parent folder
if not self._folderExists(parts[0], uuidOwner): # A new or existing folder is always placed in self._folder
return "404: Path not found"
if self._fileExists(parts[1], uuidOwner, self._folder):
self._file.delete()
else:
return "404: Path not found"
else: return "400: Only one parent folder allowed."
return "Deleted " + path
def get(self):
path = self.request.path
mode = self.request.get("mode")
body = ""
if mode == "":
mode = self.request.get("m")
if self._isSl():
uuidOwner = self.request.headers["X-Secondlife-Owner-Key"]
body = self._dbget(path, mode, uuidOwner)
self.response.out.write(body)
else:
self.response.out.write("Path: " + path + "\n")
self.response.out.write("Mode: " + mode + "\n")
# self.response.out.write(str(self.request.headers))
self.response.headers['Content-Type'] = 'text/plain'
if body.startswith("201"):
self.response.set_status(201)
if body.startswith("404"):
self.response.set_status(404)
if body.startswith("400"):
self.response.set_status(400)
if body.startswith("500"):
self.response.set_status(500)
def post(self):
        self.response.out.write("POST not implemented")
self.response.set_status(501)
def put(self):
body = ""
path = self.request.path
_body = self.request.body
if self._isSl():
uuidOwner = self.request.headers["X-Secondlife-Owner-Key"]
body = self._dbput(path, _body, uuidOwner)
self.response.out.write(body)
else:
self.response.out.write("You may only put values through SL")
self.response.out.write(str(self.request.headers))
if body.startswith("201"):
self.response.set_status(201)
if body.startswith("404"):
self.response.set_status(404)
if body.startswith("400"):
self.response.set_status(400)
if body.startswith("500"):
self.response.set_status(500)
def delete(self):
body = ""
path = self.request.path
if self._isSl():
uuidOwner = self.request.headers["X-Secondlife-Owner-Key"]
            body = self._dbdel(path, uuidOwner)
            self.response.out.write(body)
else:
self.response.out.write("You may only delete values through SL")
if body.startswith("201"):
self.response.set_status(201)
if body.startswith("404"):
self.response.set_status(404)
if body.startswith("400"):
self.response.set_status(400)
if body.startswith("500"):
self.response.set_status(500)
def main():
application = webapp.WSGIApplication([('.*', HTTPDB)],
debug=True)
run_wsgi_app(application)
if __name__ == '__main__':
main()
```
|
{
"source": "jensv/fluxtubestability",
"score": 2
}
|
#### File: jensv/fluxtubestability/analytic_condition.py
```python
import numpy as np
from scipy.special import kv, kvp
import matplotlib.pyplot as plt
from matplotlib import colors
import seaborn as sns
sns.set_style('ticks')
sns.set_context('poster')
def conditions(k_bar, lambda_bar, epsilon, m, delta):
r"""
Return analytic stability condition.
Parameters
----------
k_bar : float
normalized inverse aspect ratio
lambda_bar : float
normalized current-to-magnetic flux ratio
epsilon : float
core to total current ratio
m : float
azimuthal periodicity number
delta : float
abruptness parameter
Returns
-------
delta_w : float
perturbed potential energy of marginal stability case
"""
term1 = conditions_plasma_term(k_bar, lambda_bar, epsilon, m, delta)
term2 = conditions_interface_term(k_bar, lambda_bar, epsilon, m, delta)
term3 = conditions_vacuum_term(k_bar, lambda_bar, m, delta)
return term1 + term2 - term3
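# Usage sketch (values are illustrative only): a negative return value marks the
# configuration as unstable to the chosen mode, which is how condition_map()
# below shades the sausage (m=0) and kink (m=1) regions, e.g.
#   delta_w = conditions(k_bar=0.5, lambda_bar=1.5, epsilon=0.5, m=1., delta=0.)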
def conditions_without_interface(k_bar, lambda_bar, m, delta):
r"""
Return analytic stability condition minus interface term (term2).
Parameters
----------
k_bar : float
normalized inverse aspect ratio
lambda_bar : float
normalized current-to-magnetic flux ratio
m : float
azimuthal periodicity number
delta : float
abruptness parameter
Returns
-------
delta_w_without_interface : float
perturbed potential energy without interface term.
Notes
-----
    For profiles whose current goes smoothly to zero at the edge, the interface term of delta_w is zero.
"""
term1 = conditions_smooth_plasma_term(k_bar, lambda_bar, m, delta)
term3 = conditions_vacuum_term(k_bar, lambda_bar, m, delta)
return term1 - term3
def conditions_without_interface_wo_sing(k_bar, lambda_bar, m, xi,
xi_der, a):
r"""
Multiply analytic expression with xi squared to avoid singularity.
Parameters
----------
k_bar : float
normalized inverse aspect ratio
lambda_bar : float
normalized current-to-magnetic flux ratio
m : float
azimuthal periodicity number
xi : float
solution to Euler-Lagrange equation at boundary
xi_der : float
derivative of solution to Euler-Lagrange equation at boundary
a : float
radius of current-carrying magnetic flux tube
Returns
-------
delta_w_without_interface_wo_sing : float
perturbed potential energy without interface or singularity
Notes
-----
delta can be singular when xi goes through zero. This form is multiplied
by xi**2 to avoid singularity.
"""
term1 = conditions_smooth_plasma_term_wo_sing(k_bar, lambda_bar,
m, xi, xi_der, a)
term3 = conditions_vacuum_term_wo_sing(k_bar, lambda_bar, m, xi)
return term1 - term3
def conditions_plasma_term(k_bar, lambda_bar, epsilon, m, delta):
r"""
Returns plasma term of analytic stability condition.
Parameters
----------
k_bar : float
normalized inverse aspect ratio
lambda_bar : float
normalized current-to-magnetic flux ratio
epsilon : float
core to total current ratio
m : float
azimuthal periodicity number
delta : float
abruptness parameter
Returns
-------
delta_w_plasma_term : float
perturbed potential energy plasma term due to internal
currents.
"""
term1 = (2.*k_bar - m*epsilon*lambda_bar)*((delta + 1)*2.*k_bar -
(delta - 1)*m*epsilon *
lambda_bar)/(k_bar**2 + m**2)
return term1
def conditions_smooth_plasma_term_wo_sing(k_bar, lambda_bar, m, xi,
xi_der, a):
r"""
Multiply analytic expression with xi squared to avoid singularity.
Parameters
----------
k_bar : float
normalized inverse aspect ratio
lambda_bar : float
normalized current-to-magnetic flux ratio
m : float
azimuthal periodicity number
xi : float
solution to Euler-Lagrange equation at boundary
xi_der : float
derivative of solution to Euler-Lagrange equation at boundary
a : float
radius of current-carrying magnetic flux tube
"""
epsilon = 1.
term1 = (2.*k_bar - m*epsilon*lambda_bar)*((xi_der*a*xi + xi**2)*2.*k_bar -
(xi_der*a*xi - xi**2)*m*epsilon*
lambda_bar)/(k_bar**2 + m**2)
return term1
def conditions_smooth_plasma_term(k_bar, lambda_bar, m, delta):
r"""
Returns plasma term of analytic condition with epsilon set to 1. This
    should be relevant for a profile whose current smoothly goes to zero,
since b_v(a) = b_p(a) in that case.
Parameters
----------
k_bar : float
normalized inverse aspect ratio
lambda_bar : float
normalized current-to-magnetic flux ratio
m : float
azimuthal periodicity number
delta : float
abruptness parameter
"""
epsilon = 1.
term1 = conditions_plasma_term(k_bar, lambda_bar, epsilon, m, delta)
return term1
def conditions_interface_term(k_bar, lambda_bar, epsilon, m, delta):
r"""
Returns interface term of analytic stability condition.
Parameters
----------
k_bar : float
normalized inverse aspect ratio
lambda_bar : float
normalized current-to-magnetic flux ratio
epsilon : float
core to total current ratio
m : float
azimuthal periodicity number
delta : float
abruptness parameter
"""
term2 = (epsilon**2 - 1) * lambda_bar**2
return term2
def conditions_vacuum_term(k_bar, lambda_bar, m, delta):
r"""
Returns vacuum term of analytic stability condition.
Parameters
----------
k_bar : float
normalized inverse aspect ratio
lambda_bar : float
normalized current-to-magnetic flux ratio
m : float
azimuthal periodicity number
delta : float
abruptness parameter
"""
term3 = (m*lambda_bar - 2.*k_bar)**2/k_bar*(kv(m, np.abs(k_bar)) /
kvp(m, np.abs(k_bar)))
return term3
def conditions_vacuum_term_wo_sing(k_bar, lambda_bar, m, xi):
r"""
Multiply analytic expression with xi squared to avoid singularity.
Parameters
----------
k_bar : float
normalized inverse aspect ratio
lambda_bar : float
normalized current-to-magnetic flux ratio
m : float
azimuthal periodicity number
xi : float
Euler-Lagrange solution
"""
term3 = xi**2 * (m*lambda_bar - 2.*k_bar)**2/k_bar*(kv(m, np.abs(k_bar)) /
kvp(m, np.abs(k_bar)))
return term3
def condition_map(epsilon=0.5, delta=0.):
r"""
Draw filled contours of sausage (orange), kink(yellow), and stable (white)
regions for given epsilon and delta values.
Parameters
----------
epsilon : float
core to total current ratio
delta : float
abruptness parameter
"""
fig = plt.figure(figsize=(10,10))
lambda_bar = np.linspace(0., 3., 750)
k_bar = np.linspace(0, 1.5, 750)
lambda_bar_mesh, k_bar_mesh = np.meshgrid(lambda_bar, k_bar)
stability_kink = conditions(k_bar_mesh, lambda_bar_mesh, epsilon, 1., delta)
stability_kink = stability_kink < 0
stability_sausage = conditions(k_bar_mesh, lambda_bar_mesh, epsilon, 0., delta)
stability_sausage = stability_sausage < 0
stability_kink = stability_kink.astype(float)
stability_kink[stability_sausage] = 2
cmap = colors.ListedColormap([sns.xkcd_rgb["white"],
sns.xkcd_rgb["yellow"], sns.xkcd_rgb["orange"]])
plt.contourf(lambda_bar_mesh, k_bar_mesh, stability_kink,
cmap=cmap, levels=[0., 0.5, 1.5, 2.])
plt.contour(lambda_bar_mesh, k_bar_mesh, stability_kink,
levels=[0., 0.5, 1.5, 2.], colors='grey')
plt.plot([0, 3.], [0., 1.5], '--', c='black', lw=5)
axes = plt.gca()
plt.setp(axes.get_xticklabels(), fontsize=40)
plt.setp(axes.get_yticklabels(), fontsize=40)
plt.ylabel(r'$\bar{k}$', fontsize=45, rotation='horizontal', labelpad=25)
plt.xlabel(r'$\bar{\lambda}$', fontsize=45)
sns.despine()
def condition_map_variable_delta(filename, mode=1, epsilon=0.5,
conditions_func=conditions_without_interface):
r"""
Draw filled contours of sausage (orange), kink(yellow), and stable (white)
regions for given epsilon and delta values.
Delta values are loaded from a .npz mesh file.
Parameters
----------
filename : string
filename from which to load lambda_bar, k_bar and delta values.
mode : int
azimuthal mode number 0 or 1
epsilon : float
core current to total current ratio
conditions_func : function
conditions function to use
"""
data_meshes = np.load(filename)
lambda_bar_mesh = data_meshes['lambda_a_mesh']
k_bar_mesh = data_meshes['k_a_mesh']
delta_mesh = data_meshes['delta_m_0']
fig = plt.figure(figsize=(10,10))
cmap = colors.ListedColormap([sns.xkcd_rgb["white"],
sns.xkcd_rgb["yellow"],
sns.xkcd_rgb["orange"]])
stability_kink = conditions_func(k_bar_mesh, lambda_bar_mesh, epsilon, 1.,
delta_mesh)
stability_kink = stability_kink < 0
stability_sausage = conditions_func(k_bar_mesh, lambda_bar_mesh, epsilon, 0.,
delta_mesh)
stability_sausage = stability_sausage < 0
if mode == 0:
stability = stability_sausage
cmap = colors.ListedColormap([sns.xkcd_rgb["white"],
sns.xkcd_rgb["orange"]])
else:
stability = stability_kink
cmap = colors.ListedColormap([sns.xkcd_rgb["white"],
sns.xkcd_rgb["yellow"]])
plt.contourf(lambda_bar_mesh, k_bar_mesh, stability,
cmap=cmap, levels=[0.5, 1.5])
plt.contour(lambda_bar_mesh, k_bar_mesh, stability,
levels=[0.5, 1.5], colors='grey')
plt.plot([0, 3.], [0., 1.5], '--', c='black', lw=5)
axes = plt.gca()
plt.setp(axes.get_xticklabels(), fontsize=40)
plt.setp(axes.get_yticklabels(), fontsize=40)
plt.ylabel(r'$\bar{k}$', fontsize=45, rotation='horizontal', labelpad=25)
plt.xlabel(r'$\bar{\lambda}$', fontsize=45)
sns.despine()
plt.show()
```
|
{
"source": "jensv/relative_canonical_helicity_tools",
"score": 2
}
|
#### File: relative_canonical_helicity_tools/centroid_fitting/gyration_path.py
```python
r""""
Created on Apr 17 2017
@author: <NAME>
Plot field line null gyration of RSX magnetic flux tube.
"""
import numpy as np
from scipy.optimize import leastsq
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
sns.set_context('poster')
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as axisartist
import matplotlib.patches as patches
def gyration_path(axes=None, circles=None, step=25,
start=0, error_test=False,
points=None, errors=None,
field_null_path="/home/jensv/rsx/jens_analysis/output/field_nulls/",
field_null_file="2017-04-13-23-41/field_nulls.txt",
measurement_limits=(-0.022, 0.024, -0.017, 0.026),
bxby_limits = (-0.032, 0.025, -0.072, 0.040),
errorevery=10, params_guess=[0, 0, 0.01],
circle_fit=False, xlim=(-0.03, 0.05),
xticks=None, shift_time=125):
size=5
centroid_file = field_null_path + field_null_file
if not points is None:
field_nulls = np.roll(points, shift_time, axis=0)
else:
field_nulls = np.loadtxt(centroid_file)
x_min, x_max = measurement_limits[0], measurement_limits[1]
y_min, y_max = measurement_limits[2], measurement_limits[3]
if not axes:
fig, axes = plt.subplots(1, 1)
measurement_box = patches.Rectangle((x_min, y_min), x_max-x_min, y_max-y_min,
color='grey', alpha=0.4)
bx_by_x_min, bx_by_x_max = bxby_limits[0:2]
bx_by_y_min, bx_by_y_max = bxby_limits[2:4]
bx_by_measurement_box = patches.Rectangle((bx_by_x_min, bx_by_y_min),
bx_by_x_max - bx_by_x_min,
bx_by_y_max - bx_by_y_min,
color='grey', alpha=0.1)
axes.add_patch(measurement_box)
axes.add_patch(bx_by_measurement_box)
colormap = np.linspace(0, 1, 250-start)
axes.scatter(field_nulls[start:, 0], field_nulls[start:, 1],
c=colormap, zorder=100, s=size)
if not errors is None:
errors = np.roll(errors, shift_time, axis=0)
axes.errorbar(field_nulls[start:, 0], field_nulls[start:, 1],
xerr=errors[start:, 0], yerr=errors[start:, 1],
ecolor='grey',
fmt='none', zorder=0, errorevery=errorevery, alpha=0.5)
#axes.text(-0.008, -0.015, r'$0 \mu s$')
#axes.text(0.03, -0.003, r'$%2.1f \mu s$' % (0.068*56))
#axes.text(-0.03, 0.017, r'$%2.1f \mu s$' % (0.068*208))
if circles:
for i, field_null in enumerate(field_nulls[::step]):
colormap = np.linspace(1, 0, np.round(250./step))
circle = patches.Circle(field_null, radius=0.02, facecolor='none',
edgecolor=str(colormap[i]), alpha=0.5)
axes.scatter(field_null[0], field_null[1], c='red', s=size)
axes.add_patch(circle)
if circle_fit:
circle_params, success = leastsq(to_min, params_guess,
args=np.asarray([field_nulls[:, 0],
field_nulls[:, 1]]))
circle = patches.Circle((circle_params[0], circle_params[1]),
radius=circle_params[2], facecolor='none',
edgecolor='red', lw=5, ls='--', alpha=0.5)
print circle_params[0], circle_params[1], circle_params[2]
print leastsq(to_min, params_guess,
args=np.asarray([field_nulls[:, 0],
field_nulls[:, 1]]))
axes.add_patch(circle)
axes.set_xlabel('x [m]')
axes.set_ylabel('y [m]')
axes.set_xlim(xlim)
if not xticks is None:
axes.xaxis.set_ticks(xticks)
axes.set_aspect('equal')
axes.invert_xaxis()
return axes
def to_min(params, points):
r"""
Returns circle expression to minimize with least squares.
"""
a = 2.*params[0]
b = 2.*params[1]
c = params[2]**2 - params[1]**2 - params[0]**2
return a*points[0] + b*points[1] + c - points[0]**2 - points[1]**2
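# Derivation of the residual above: expanding (x - x0)**2 + (y - y0)**2 = r**2
# gives 2*x0*x + 2*y0*y + (r**2 - x0**2 - y0**2) = x**2 + y**2, so with
# params = [x0, y0, r] the expression returned by to_min() vanishes exactly on
# the fitted circle.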
```
#### File: jensv/relative_canonical_helicity_tools/filter_measurements_on_unstructured_grid.py
```python
import argparse
import numpy as np
from datetime import date
from datetime import datetime
import os
from scipy.interpolate import LinearNDInterpolator
from scipy import ndimage
from write_to_vtk.read_unstructured_vtk import read_unstructured_vtk
from mach_probe_analysis import ion_current_to_mach_number as ic_to_mach
from read_from_sql import read_from_sql
from write_to_vtk import structured_3d_vtk as struc_3d
from write_to_vtk import prepare_measurements as pm
from write_to_vtk import unstructured_grid as ug
def main(args):
r"""
"""
now = datetime.now().strftime("%Y-%m-%d-%H-%M")
out_dir = '../output/filtered_unstructured_measurements/' + now
try:
os.makedirs(out_dir)
except:
pass
planes = [0.249, 0.302, 0.357, 0.416]
bx_measurements = pm.read_idl('bx')
by_measurements = pm.read_idl('by')
bz_measurements = pm.read_idl('bz')
te_measurements = pm.read_idl('te')
n_measurements = pm.read_idl('n')
mach_y_measurements, mach_z_measurements = pm.read_mach_probe_data(args)
if args.bxby_only:
bx_all_planes = pm.cut_and_average_quantity(bx_measurements,
args.bxby_extent, planes)
by_all_planes = pm.cut_and_average_quantity(by_measurements,
args.bxby_extent, planes)
else:
bx_all_planes = pm.cut_and_average_quantity(bx_measurements,
args.bx_extent, planes)
by_all_planes = pm.cut_and_average_quantity(by_measurements,
args.by_extent, planes)
bz_all_planes = pm.cut_and_average_quantity(bz_measurements,
args.bz_extent, planes)
n_all_planes = pm.cut_and_average_quantity(n_measurements,
args.n_extent,
planes,
bounds=args.n_bounds)
te_all_planes = pm.cut_and_average_quantity(te_measurements, args.te_extent,
planes, bounds=args.te_bounds)
mach_y_plane = pm.cut_and_average_quantity(mach_y_measurements, args.mach_y_extent,
[0.416], bounds=args.mach_bounds)
mach_z_plane = pm.cut_and_average_quantity(mach_z_measurements, args.mach_z_extent,
[0.416], bounds=args.mach_bounds)
n_three_planes = pm.remove_plane(0.302, n_all_planes)
te_three_planes = pm.remove_plane(0.302, te_all_planes)
if args.bxby_only:
bxby_grid = make_grid_from_extent(args.bxby_filter_extent,
args.filter_spatial_increment)
single_plane = np.unique(bx_all_planes['z_out'])[0]
bx_filtered = filter_unstructured_data(bxby_grid, bx_all_planes,
single_plane=single_plane,
filter_sigma=args.filter_sigma,
filter_truncate=args.filter_truncate)
by_filtered = filter_unstructured_data(bxby_grid, by_all_planes,
single_plane=single_plane,
filter_sigma=args.filter_sigma,
filter_truncate=args.filter_truncate)
else:
bx_grid = make_grid_from_extent(args.bx_filter_extent,
args.filter_spatial_increment)
bx_filtered = filter_unstructured_data(bx_grid, bx_all_planes,
filter_sigma=args.filter_sigma,
filter_truncate=args.filter_truncate)
by_grid = make_grid_from_extent(args.by_filter_extent,
args.filter_spatial_increment)
by_filtered = filter_unstructured_data(by_grid, by_all_planes,
filter_sigma=args.filter_sigma,
filter_truncate=args.filter_truncate)
bz_grid = make_grid_from_extent(args.bz_filter_extent,
args.filter_spatial_increment)
bz_filtered = filter_unstructured_data(bz_grid, bz_all_planes,
filter_sigma=args.filter_sigma,
filter_truncate=args.filter_truncate)
n_grid = make_grid_from_extent(args.n_filter_extent,
args.filter_spatial_increment)
n_filtered = filter_unstructured_data(n_grid, n_all_planes,
filter_sigma=args.filter_sigma,
filter_truncate=args.filter_truncate)
te_grid = make_grid_from_extent(args.te_filter_extent,
args.filter_spatial_increment)
te_filtered = filter_unstructured_data(te_grid, te_all_planes,
filter_sigma=args.filter_sigma,
filter_truncate=args.filter_truncate)
mach_y_grid = make_grid_from_extent(args.mach_y_extent,
args.filter_spatial_increment)
mach_y_filtered = filter_unstructured_data(mach_y_grid, mach_y_plane,
filter_sigma=args.filter_sigma,
filter_truncate=args.filter_truncate)
mach_z_grid = make_grid_from_extent(args.mach_z_extent,
args.filter_spatial_increment)
mach_z_filtered = filter_unstructured_data(mach_z_grid, mach_z_plane,
filter_sigma=args.filter_sigma,
filter_truncate=args.filter_truncate)
ug.save_to_unstructured_grid(bx_filtered, 'bx', out_dir,
prefix=args.output_prefix)
ug.save_to_unstructured_grid(by_filtered, 'by', out_dir,
prefix=args.output_prefix)
if not args.bxby_only:
ug.save_to_unstructured_grid(bz_filtered, 'bz', out_dir,
prefix=args.output_prefix)
ug.save_to_unstructured_grid(te_filtered, 'te', out_dir,
prefix=args.output_prefix)
ug.save_to_unstructured_grid(n_filtered, 'n', out_dir,
prefix=args.output_prefix)
ug.save_to_unstructured_grid(mach_y_filtered, 'mach_y', out_dir,
prefix=args.output_prefix)
ug.save_to_unstructured_grid(mach_z_filtered, 'mach_z', out_dir,
prefix=args.output_prefix)
def make_grid_from_extent(extent, increment):
r"""
Make rectilinear grid from extent list.
"""
grid_x_points = int((extent[1] - extent[0])/increment)
grid_y_points = int((extent[3] - extent[2])/increment)
grid_z_points = int((extent[5] - extent[4])/increment)
grid = np.meshgrid(np.linspace(extent[0],
extent[1],
grid_x_points),
np.linspace(extent[2],
extent[3],
grid_y_points),
np.linspace(extent[4],
extent[5],
grid_z_points))
return grid
def filter_unstructured_data(grid, measurements, filter_sigma=None,
single_plane=None, filter_truncate=None):
r"""
Filter data on unstructured grid.
Interpolate data to rectilinear grid, filter, resample
onto unstructured grid.
"""
(planes, points_by_plane,
values_by_plane) = extract_planes(measurements)
if single_plane:
planes = [single_plane]
filtered_values_by_plane = []
delays = measurements['delays']
for i, plane in enumerate(planes):
filtered_by_time_point = interpolate_and_filter_data(points_by_plane[i],
values_by_plane[i],
grid, delays,
filter_sigma=filter_sigma,
filter_truncate=filter_truncate)
(points,
values_by_time_point) = resample_to_unstructured_grid(grid,
filtered_by_time_point,
points_by_plane[i], delays)
filtered_values_by_plane.append(values_by_time_point)
filtered_measurements = recombine_planes(planes, points_by_plane,
filtered_values_by_plane,
delays)
return filtered_measurements
def extract_planes(measurements):
r"""
Extract measurement points and values by plane from
measurement dictionaries.
"""
planes = np.unique(measurements['z_out'])
points_by_plane = []
values_by_plane = []
measurements['a_out'] = np.asarray(measurements['a_out'])
for plane in planes:
indexes = np.where(measurements['z_out'] == plane)[0]
points = np.stack((measurements['x_out'][indexes],
measurements['y_out'][indexes]), axis=1)
values = measurements['a_out'][:, indexes]
points_by_plane.append(points)
values_by_plane.append(values)
return planes, points_by_plane, values_by_plane
def interpolate_and_filter_data(points, values, grid, delays, filter_sigma=None,
filter_truncate=None):
r"""
    Interpolate values onto the rectilinear grid and smooth them with a
    Gaussian filter.
"""
filtered_by_time_point = []
for time_point in xrange(delays.size):
print 'filter', time_point
interpolator = struc_3d.get_interpolator(points, values[time_point])
data = interpolator(grid[0], grid[1])
#print data.size, data.shape
#print np.sum(np.isnan(data))
#print 'nan x', np.unique(grid[0][np.isnan(data)])
#print 'nan y', np.unique(grid[1][np.isnan(data)])
#assert np.sum(np.isnan(data)) == 0, 'interpolated data contains nans'
if filter_sigma:
if filter_truncate:
filtered = ndimage.gaussian_filter(data, filter_sigma,
truncate=filter_truncate)
else:
filtered = ndimage.gaussian_filter(data, filter_sigma)
else:
filtered = data
filtered_by_time_point.append(filtered)
return filtered_by_time_point
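# --- Hedged note on the filter settings (added for illustration; not part of
# the original script). With the default 1 mm grid spacing a filter_sigma of 3
# corresponds to a ~3 mm standard deviation, and truncate=3 cuts the Gaussian
# kernel off at 3 sigma. The synthetic data below only demonstrates the call.
def _example_gaussian_smoothing():
    r"""
    Smooth a synthetic 2-D array the same way interpolated planes are smoothed.
    """
    data = np.random.rand(50, 50)
    smoothed = ndimage.gaussian_filter(data, 3, truncate=3)
    assert smoothed.shape == data.shape
    return smoothed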
def resample_to_unstructured_grid(grid, data, points, delays):
r"""
Resample filtered data back to measurement grid.
"""
values_by_time_point = []
grid_points_x = grid[0].ravel()
grid_points_y = grid[1].ravel()
grid_points = np.stack((grid_points_x, grid_points_y), axis=1)
for time_point in xrange(delays.size):
print 'resample', time_point
grid_values = data[time_point].ravel()
interpolator = struc_3d.get_interpolator(grid_points, grid_values)
values = interpolator(points[:, 0], points[:, 1])
values_by_time_point.append(values)
return points, values_by_time_point
def recombine_planes(planes, points_by_plane, values_by_plane, delays):
r"""
Recombine planes so that ug.save_to_unstructured_grid
function can be used.
"""
measurements = {'delays': delays,
'x_out': points_by_plane[0][:, 0],
'y_out': points_by_plane[0][:, 1],
'z_out': np.ones(points_by_plane[0][:, 0].size)*planes[0],
'a_out': values_by_plane[0]}
    for i, plane in enumerate(planes[1:], start=1):
        measurements['x_out'] = np.concatenate((measurements['x_out'],
                                                points_by_plane[i][:, 0]))
        measurements['y_out'] = np.concatenate((measurements['y_out'],
                                                points_by_plane[i][:, 1]))
        measurements['z_out'] = np.concatenate((measurements['z_out'],
                                                np.ones(points_by_plane[i].shape[0])*plane))
        measurements['a_out'] = np.concatenate((measurements['a_out'],
                                                np.asarray(values_by_plane[i])),
                                               axis=1)
return measurements
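# --- Hedged sketch of the recombined layout (added for illustration; not part
# of the original script). For two planes with N1 and N2 points the recombined
# dictionary should hold flat point arrays of length N1 + N2 and an 'a_out'
# array of shape (n_time_points, N1 + N2). The synthetic shapes below are
# assumptions used only to demonstrate the bookkeeping.
def _example_recombine_shapes():
    r"""
    Check the shapes produced by recombine_planes on two synthetic planes.
    """
    delays = np.arange(5)
    points_by_plane = [np.random.rand(4, 2), np.random.rand(6, 2)]
    values_by_plane = [np.random.rand(5, 4), np.random.rand(5, 6)]
    combined = recombine_planes(np.array([0.249, 0.302]), points_by_plane,
                                values_by_plane, delays)
    assert combined['x_out'].size == 10
    assert combined['a_out'].shape == (5, 10)
    return combined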
def parse_args():
r"""
"""
parser = argparse.ArgumentParser(description='Create unstructured VTK from measurements')
parser.add_argument('--bx_extent',
help='spatial extent of Bx measurements',
nargs=6, type=float,
default=[-0.032, 0.028, -0.022, 0.032, 0.249, 0.416])
parser.add_argument('--bx_filter_extent',
help="spatial extent of interpolated grid"
"on which to filter Bx measurements",
                        nargs=6, type=float,
default=[-0.026, 0.025, -0.019, 0.029, 0.249, 0.416])
parser.add_argument('--by_extent',
help='spatial extent of By measurements',
nargs=6, type=float,
default=[-0.032, 0.028, -0.022, 0.032, 0.249, 0.416])
parser.add_argument('--by_filter_extent',
help="spatial extent of interpolated grid"
"on which to filter Bx measurements",
nargs=6, type=float,
default=[-0.032, 0.028, -0.022, 0.032, 0.249, 0.416])
parser.add_argument('--bxby_only',
help='flag to filter Bx and By only.',
default=False,
action='store_true')
parser.add_argument('--bxby_extent',
help='spatial extent of Bx and By measurements',
nargs=6, type=float,
default=[-0.032, 0.026, -0.06, 0.043, 0.249, 0.416])
parser.add_argument('--bxby_filter_extent',
help="spatial extent of interpolated grid"
"on which to filter Bx and By",
                        nargs=6, type=float,
default=[-0.032, 0.026, -0.06, 0.043, 0.249, 0.416])
parser.add_argument('--bz_extent',
help='spatial extent of Bz measurements',
nargs=6, type=float,
default=[-0.032, 0.028, -0.022, 0.032, 0.249, 0.416])
parser.add_argument('--bz_filter_extent',
help="spatial extent of interpolated grid"
"on which to filter Bz measurements",
nargs=6, type=float,
default=[-0.032, 0.028, -0.022, 0.032, 0.249, 0.416])
parser.add_argument('--te_extent',
help='spatial extent of temperature measurements',
nargs=6, type=float,
default=[-0.026, 0.028, -0.03, 0.028, 0.249, 0.416])
parser.add_argument('--te_filter_extent',
help="spatial extent of interpolated grid"
"on which to filter Te measurements",
nargs=6, type=float,
default=[-0.026, 0.028, -0.03, 0.028, 0.249, 0.416])
parser.add_argument('--te_bounds',
help='sensible bounds for temperature measurements',
nargs=2, type=float,
default=[1e-3, 1e3])
parser.add_argument('--n_extent',
help='spatial extent of density measurements',
nargs=6, type=float,
default=[-0.026, 0.028, -0.03, 0.028, 0.249, 0.416])
parser.add_argument('--n_filter_extent',
help="spatial extent of interpolated grid"
"on which to filter n measurements",
nargs=6, type=float,
default=[-0.026, 0.028, -0.03, 0.028, 0.249, 0.416])
parser.add_argument('--n_bounds',
help='sensible bounds for density measurements',
nargs=2, type=float,
default=[1e3, 1e22])
parser.add_argument('--mach_time_steps',
help='# of time steps to extract from one gyration', type=int,
default=250)
parser.add_argument('--shot_database', help='path to shot database',
default='/home/jensv/rsx/jens_analysis/helicity_tools/shots_database/shots.db')
parser.add_argument('--table_name', help='name of sql table',
default='Shots')
parser.add_argument('--min_spectral',
help=("minimum spectral energy around gyration"
"frequency to include shot"),
type=float,
default=1.6e-8)
parser.add_argument('--mach_y_extent',
help='spatial extent of mach measurements to include',
nargs=6, type=float,
default=[-0.052, 0.052, -0.022, 0.032, 0.249, 0.416])
parser.add_argument('--mach_y_filter_extent',
help="spatial extent of interpolated grid"
"on which to filter mach_y measurements",
nargs=6, type=float,
default=[-0.052, 0.052, -0.022, 0.032, 0.249, 0.416])
parser.add_argument('--mach_z_extent',
help='spatial extent of mach measurements to include',
nargs=6, type=float,
default=[-0.032, 0.032, -0.022, 0.032, 0.249, 0.416])
parser.add_argument('--mach_z_filter_extent',
help="spatial extent of interpolated grid"
"on which to filter Bz measurements",
nargs=6, type=float,
default=[-0.032, 0.032, -0.022, 0.032, 0.249, 0.416])
parser.add_argument('--mach_bounds',
help='bounds on mach measurements', nargs=2, type=float,
default=[-10, 10])
parser.add_argument('--output_prefix',
help='prefix of output files',
default='_filtered_unstructured_')
parser.add_argument('--filter_spatial_increment',
help='spatial increment of interpolated grid for filtering',
default=0.001, type=float)
parser.add_argument('--no_filter',
help="run with no filter should return same"
"unstructured grid as write_measurements_to_unstructured_grid",
default=False, action='store_true')
parser.add_argument('--filter_sigma',
help='standard deviation of gaussian filter',
type=float,
default=3)
parser.add_argument('--filter_truncate',
help='truncate Gaussian filter at this multiple of sigma',
type=float,
default=3)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
main(args)
```
#### File: jensv/relative_canonical_helicity_tools/interpolate_measurements.py
```python
import argparse
import numpy as np
from scipy.interpolate import LinearNDInterpolator
from datetime import date
from datetime import datetime
import os
from write_to_vtk.read_unstructured_vtk import read_unstructured_vtk
from write_to_vtk import structured_3d_vtk as struc_3d
def main(args):
r"""
Interpolate unstructured fields to rectilinear grid.
"""
just_magnetic = args.just_magnetic
now = datetime.now().strftime("%Y-%m-%d-%H-%M")
out_dir = '../output/' + args.output_prefix + '/' + now + '/'
try:
os.makedirs(out_dir)
except:
pass
in_dir = args.input_path + args.input_date + '/'
in_file = args.input_file_text
for time_point in xrange(args.time_steps):
print time_point
time_str = str(time_point).zfill(4)
bx_points, bx_values = read_unstructured_vtk(in_dir + 'bx' +
in_file + time_str + '.vtk')
by_points, by_values = read_unstructured_vtk(in_dir + 'by' +
in_file + time_str + '.vtk')
bz_points, bz_values = read_unstructured_vtk(in_dir + 'bz' +
in_file + time_str + '.vtk')
if not just_magnetic:
n_points, n_values = read_unstructured_vtk(in_dir + 'n' +
in_file + time_str + '.vtk')
te_points, te_values = read_unstructured_vtk(in_dir + 'te' +
in_file + time_str + '.vtk')
mach_y_points, mach_y_values = read_unstructured_vtk(in_dir + 'mach_y' +
in_file + time_str + '.vtk')
mach_z_points, mach_z_values = read_unstructured_vtk(in_dir + 'mach_z' +
in_file + time_str + '.vtk')
bx_interpolator = struc_3d.get_interpolator(bx_points, bx_values)
by_interpolator = struc_3d.get_interpolator(by_points, by_values)
bz_interpolator = struc_3d.get_interpolator(bz_points, bz_values)
if not just_magnetic:
te_interpolator = struc_3d.get_interpolator(te_points, te_values)
n_interpolator = struc_3d.get_interpolator(n_points, n_values)
mach_y_interpolator = struc_3d.get_interpolator(mach_y_points[:, :2], mach_y_values)
mach_z_interpolator = struc_3d.get_interpolator(mach_z_points[:, :2], mach_z_values)
(x_min, x_max,
y_min, y_max,
z_min, z_max) = args.joint_extent
        mesh = np.meshgrid(np.linspace(x_min, x_max,
                                       int(np.ceil((x_max-x_min)/
                                                   args.spatial_increment))),
                           np.linspace(y_min, y_max,
                                       int(np.ceil((y_max-y_min)/
                                                   args.spatial_increment))),
                           np.linspace(z_min, z_max,
                                       int(np.ceil((z_max-z_min)/
                                                   args.spatial_increment))))
bx_grad = struc_3d.triangulate_grad(mesh, bx_interpolator,
increment=args.derivative_increment)
by_grad = struc_3d.triangulate_grad(mesh, by_interpolator,
increment=args.derivative_increment)
bz_grad = struc_3d.triangulate_grad(mesh, bz_interpolator,
increment=args.derivative_increment)
if not just_magnetic:
te_grad = struc_3d.triangulate_grad(mesh, te_interpolator,
increment=args.derivative_increment)
n_grad = struc_3d.triangulate_grad(mesh, n_interpolator,
increment=args.derivative_increment)
plane_mesh = [mesh[0][:, :, 0], mesh[1][:, :, 0]]
mach_y_grad_plane = struc_3d.triangulate_grad(plane_mesh, mach_y_interpolator,
increment=args.derivative_increment)
mach_z_grad_plane = struc_3d.triangulate_grad(plane_mesh, mach_z_interpolator,
increment=args.derivative_increment)
bx, by, bz = struc_3d.vector_on_mesh((bx_interpolator,
by_interpolator,
bz_interpolator), mesh)
bx, by, bz = struc_3d.add_vacuum_field([bx, by, bz],
vacuum_field=args.bias_field_magnitude)
if not just_magnetic:
te = struc_3d.scalar_on_mesh(te_interpolator, mesh)
n = struc_3d.scalar_on_mesh(n_interpolator, mesh)
mach_y_plane = struc_3d.scalar_on_mesh(mach_y_interpolator,
plane_mesh)
mach_z_plane= struc_3d.scalar_on_mesh(mach_z_interpolator,
plane_mesh)
mach_y = np.repeat(mach_y_plane[:, :, np.newaxis],
mesh[0].shape[2], axis=2)
mach_z = np.repeat(mach_z_plane[:, :, np.newaxis],
mesh[0].shape[2], axis=2)
mach_y_dx = np.repeat(mach_y_grad_plane[0][:, :, np.newaxis],
mesh[0].shape[2], axis=2)
mach_y_dy = np.repeat(mach_y_grad_plane[1][:, :, np.newaxis],
mesh[0].shape[2], axis=2)
mach_y_dz = np.zeros(mesh[0].shape)
mach_z_dx = np.repeat(mach_z_grad_plane[0][:, :, np.newaxis],
mesh[0].shape[2], axis=2)
            mach_z_dy = np.repeat(mach_z_grad_plane[1][:, :, np.newaxis],
mesh[0].shape[2], axis=2)
mach_z_dz = np.zeros(mesh[0].shape)
fields = ([bx] + [by] + [bz] + [n] + [te] +
[mach_y] + [mach_z] +
list(bx_grad) +
list(by_grad) +
list(bz_grad) +
list(n_grad) +
list(te_grad) +
[mach_y_dx] + [mach_y_dy] + [mach_y_dz] +
[mach_z_dx] + [mach_z_dy] + [mach_z_dz])
quantity_names = ['B_x', 'B_y', 'B_z',
'n', 'Te',
'mach_y', 'mach_z',
'B_x_dx', 'B_x_dy', 'B_x_dz',
'B_y_dx', 'B_y_dy', 'B_y_dz',
'B_z_dx', 'B_z_dy', 'B_z_dz',
'n_dx', 'n_dy', 'n_dz',
'Te_dx', 'Te_dy', 'Te_dz',
'mach_y_dx', 'mach_y_dy', 'mach_y_dz',
'mach_z_dx', 'mach_z_dy', 'mach_z_dz']
else:
fields = ([bx] + [by] + [bz] +
list(bx_grad) +
list(by_grad) +
list(bz_grad))
quantity_names = ['B_x', 'B_y', 'B_z',
'B_x_dx', 'B_x_dy', 'B_x_dz',
'B_y_dx', 'B_y_dy', 'B_y_dz',
'B_z_dx', 'B_z_dy', 'B_z_dz']
x, y, z, variables = struc_3d.prepare_for_rectilinear_grid(mesh, fields,
quantity_names)
vtk_file_path = out_dir + args.output_prefix
struc_3d.write_fields_to_rectilinear_grid(vtk_file_path,
x, y, z, variables,
time_point)
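# --- Hedged sketch of the plane-to-volume replication used above (added for
# illustration; not part of the original script). The Mach probe only covers a
# single z-plane, so its values and in-plane gradients are copied to every z
# level of the 3-D mesh with np.repeat while the z-derivatives are set to
# zero. The shapes below are arbitrary assumptions.
def _example_plane_replication():
    r"""
    Replicate a 2-D plane of values along z and check the resulting shape.
    """
    plane_values = np.random.rand(30, 40)   # (ny, nx) plane of measurements
    n_z = 25                                # number of z levels in the mesh
    volume = np.repeat(plane_values[:, :, np.newaxis], n_z, axis=2)
    assert volume.shape == (30, 40, 25)
    return volume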
def parse_args():
r"""
Read arguments.
"""
parser = argparse.ArgumentParser(description=("Create VTK files of"
"interpolated measurements"))
parser.add_argument('--input_path',
help='path to input files',
default='../output/boxed_unstructured_measurements/')
parser.add_argument('--input_date',
help='time stamp of input files',
default='2017-04-04-13-44')
parser.add_argument('--input_file_text',
help='input file name',
default='_boxed_unstructured_')
parser.add_argument('--spatial_increment',
help='Spatial increment of output file grids',
type=float, default=0.001)
parser.add_argument('--derivative_increment',
help=("spatial increment used to determine"
"tetrahedron derivative of Delaunay"),
type=float, default=0.0001)
parser.add_argument('--joint_extent',
help='overlapping spatial extent of all parameters',
nargs=6, type=float,
default=[-0.022, 0.024, -0.02, 0.029, 0.249, 0.416])
parser.add_argument('--output_prefix',
help='prefix of output files',
default='data_interp_to_rect_grid')
parser.add_argument('--bias_field_magnitude',
help='magnitude of axial bias magnetic field',
type=float,
default=0.02)
parser.add_argument('--time_steps',
help='number of time steps', type=int,
default=250)
parser.add_argument('--just_magnetic',
help='only interpolate bdot measurements',
action='store_true', default=False)
#parser.add_argument('--just_one_time_step',
# help='only interpolate first time step',
# action='store_true', default=False)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
main(args)
```
#### File: relative_canonical_helicity_tools/invert_curl/invert_curl.py
```python
import numpy as np
import scipy.fftpack as fft
import sys
sys.path.append('../laplace_solver/')
import laplace_solver as lsolve
from scipy.integrate import cumtrapz
def fourier_inverse_curl(Bx, By, Bz, x, y, z, method='fourier', pad=True):
r"""
Invert curl with pseudo-spectral method described in MacKay 2006.
"""
shape = Bx.shape
Bx_copy = np.array(Bx)
By_copy = np.array(By)
Bz_copy = np.array(Bz)
if pad:
Bx = np.pad(Bx, pad_width=zip(shape, shape),
mode='reflect')
By = np.pad(By, pad_width=zip(shape, shape),
mode='reflect')
Bz = np.pad(Bz, pad_width=zip(shape, shape),
mode='reflect')
kx1 = np.zeros(Bx[0, :, 0].size)
ky1 = np.zeros(By[:, 0, 0].size)
kz1 = np.zeros(Bz[0, 0, :].size)
dx = x[0, 1, 0] - x[0, 0, 0]
dy = y[1, 0, 0] - y[0, 0, 0]
dz = z[0, 0, 1] - z[0, 0, 0]
nx = kx1.size
ny = ky1.size
nz = kz1.size
kx1 = fft.fftfreq(nx, dx)
ky1 = fft.fftfreq(ny, dy)
kz1 = fft.fftfreq(nz, dz)
kx, ky, kz = np.meshgrid(kx1, ky1, kz1)
if method == 'fourier':
Bx_k = np.fft.fftn(Bx)
By_k = np.fft.fftn(By)
Bz_k = np.fft.fftn(Bz)
if method == 'cosine':
Bx_k = lsolve.dct_3d(shape, Bx)
By_k = lsolve.dct_3d(shape, By)
Bz_k = lsolve.dct_3d(shape, Bz)
k_squared = kx**2. + ky**2. + kz**2.
if method == 'fourier':
Ax_k = 1j*(ky*Bz_k - kz*By_k)/k_squared
Ay_k = 1j*(kz*Bx_k - kx*Bz_k)/k_squared
Az_k = 1j*(kx*By_k - ky*Bx_k)/k_squared
if method == 'cosine':
Ax_k = (ky*Bz_k - kz*By_k)/k_squared
Ay_k = (kz*Bx_k - kx*Bz_k)/k_squared
Az_k = (kx*By_k - ky*Bx_k)/k_squared
Ax_k[0, 0, 0] = 0.
Ay_k[0, 0, 0] = 0.
Az_k[0, 0, 0] = 0.
if method == 'fourier':
Ax = np.real(np.fft.ifftn(Ax_k))
Ay = np.real(np.fft.ifftn(Ay_k))
Az = np.real(np.fft.ifftn(Az_k))
if method == 'cosine':
Ax = lsolve.idct_3d(shape, Ax_k)
Ay = lsolve.idct_3d(shape, Ay_k)
Az = lsolve.idct_3d(shape, Az_k)
if pad:
Ax = Ax[shape[0]:shape[0]*2,
shape[1]:shape[1]*2,
shape[2]:shape[2]*2]
Ay = Ay[shape[0]:shape[0]*2,
shape[1]:shape[1]*2,
shape[2]:shape[2]*2]
Az = Az[shape[0]:shape[0]*2,
shape[1]:shape[1]*2,
shape[2]:shape[2]*2]
B0_x = np.mean(Bx_copy)
B0_y = np.mean(By_copy)
B0_z = np.mean(Bz_copy)
A0_x = -(y*B0_z - z*B0_y)/2.
A0_y = -(z*B0_x - x*B0_z)/2.
A0_z = -(x*B0_y - y*B0_x)/2.
Ax = Ax + A0_x
Ay = Ay + A0_y
Az = Az + A0_z
return [Ax, Ay, Az]
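# --- Hedged usage sketch (added for illustration; not part of the original
# module). For a uniform field the spectral part of the inversion vanishes and
# the returned potential reduces to the A0 = -(r x B0)/2 term added at the
# end, so a uniform Bz field is a convenient sanity check. The grid below is
# an arbitrary assumption.
def _example_fourier_inverse_curl():
    r"""
    Invert a uniform Bz field on a small grid (sketch only).
    """
    grid_1d = np.linspace(-1., 1., 16)
    x, y, z = np.meshgrid(grid_1d, grid_1d, grid_1d)
    bx = np.zeros(x.shape)
    by = np.zeros(x.shape)
    bz = np.ones(x.shape)
    ax, ay, az = fourier_inverse_curl(bx, by, bz, x, y, z,
                                      method='fourier', pad=True)
    # Expect ax ~ -y/2, ay ~ x/2, az ~ 0 up to spectral/padding artifacts.
    return ax, ay, az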
def devore_invert_curl(mesh, b_field, with_z=True):
r"""
"""
b_field = np.asarray(b_field)
dz = mesh[2][0, 0, 1] - mesh[2][0, 0, 0]
z_length = mesh[0].shape[2]
A_0x, A_0y = devore_A_0(mesh, b_field)
A_x = np.expand_dims(A_0x, 2)
A_y = np.expand_dims(A_0y, 2)
A_x = np.repeat(A_x, z_length, axis=2)
A_y = np.repeat(A_y, z_length, axis=2)
A_x += cumtrapz(b_field[1], axis=2, dx=dz, initial=0)
A_y -= cumtrapz(b_field[0], axis=2, dx=dz, initial=0)
if with_z:
A_z = np.zeros(mesh[0].shape)
return A_x, A_y, A_z
else:
return A_x, A_y
def devore_A_0(mesh, b_field):
r"""
"""
dx = mesh[0][0, 1, 0] - mesh[0][0, 0, 0]
dy = mesh[1][1, 0, 0] - mesh[1][0, 0, 0]
A_0x = -0.5*cumtrapz(b_field[2, :, :, 0], axis=0, dx=dy, initial=0)
A_0y = 0.5*cumtrapz(b_field[2, :, :, 0], axis=1, dx=dx, initial=0)
return A_0x, A_0y
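# --- Hedged usage sketch (added for illustration; not part of the original
# module). devore_invert_curl works in the gauge A_z = 0: A_x and A_y come
# from integrating B_y and -B_x along z on top of the in-plane seed potential
# returned by devore_A_0. The grid and field below are arbitrary assumptions.
def _example_devore_invert_curl():
    r"""
    Call the DeVore-style inversion on a synthetic uniform Bz field.
    """
    grid_1d = np.linspace(-1., 1., 16)
    mesh = np.meshgrid(grid_1d, grid_1d, grid_1d)
    b_field = [np.zeros(mesh[0].shape),
               np.zeros(mesh[0].shape),
               np.ones(mesh[0].shape)]
    a_x, a_y, a_z = devore_invert_curl(mesh, b_field, with_z=True)
    assert a_z.shape == mesh[0].shape
    return a_x, a_y, a_z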
```
#### File: relative_canonical_helicity_tools/read_from_shotlog/read_from_shotlog.py
```python
import pandas as pd
import sqlite3
def read_and_store_shotlog(shots, database, start=False):
r"""
"""
path = '/Users/vonderlinden2/rsx_drive/RSX/MDS+/Shotlogs/shotlog_2013-05-30.1.xlsx'
shotlog1 = pd.read_excel(path, sheetname='shot log', index_col=3)
path = '/Users/vonderlinden2/rsx_drive/RSX/MDS+/Shotlogs/shotlog_2013-05-02.8.xlsx'
shotlog2 = pd.read_excel(path, sheetname='shot log', index_col=3)
path = '/Users/vonderlinden2/rsx_drive/RSX/MDS+/Shotlogs/shotlog_2013-03-08.3.xlsx'
shotlog3 = pd.read_excel(path, sheetname='shot log', index_col=3)
path = '/Users/vonderlinden2/rsx_drive/RSX/MDS+/Shotlogs/shotlog_2012-09-27.xlsx'
shotlog4 = pd.read_excel(path, sheetname='shot log', index_col=3)
path = '/Users/vonderlinden2/rsx_drive/RSX/MDS+/Shotlogs/shotlog_2012-09-14_cumulative.xlsx'
shotlog5 = pd.read_excel(path, sheetname='shot log', index_col=3)
shotlogs = [shotlog1, shotlog2, shotlog3, shotlog4, shotlog5]
for shot in shots:
print "Writing Shot Settings %i" % shot
rsx_settings = read_rsx_settings_shotlog(shot, shotlogs)
store_rsx_settings_sql(shot, rsx_settings, database, start=start)
def read_rsx_settings_shotlog(shot, shotlogs):
r"""
Return a dictionary with all mach probe settings from the shotlog for a
specific shot number.
"""
rsx_settings = {}
if 17344 <= shot <= 17622:
shotlog_num = 0
path = '/Users/vonderlinden2/rsx_drive/RSX/MDS+/Shotlogs/shotlog_2013-05-30.1.xlsx'
column = {'mach_orientation': 'Unnamed: 17',
'mach_orientation_read': 'Unnamed: 18',
'mach_x': 'Unnamed: 19',
'mach_y': 'Unnamed: 20',
'mach_y_read': 'Unnamed: 21',
'mach_z': 'Unnamed: 22',
'bdot10_orientation': 'Unnamed: 23',
'bdot10_x': 'Unnamed: 24',
'bdot10_y': 'Unnamed: 25',
'bdot10_z': 'Unnamed: 26',
'tp1_insertion': 'Unnamed: 27',
'tp1_x': 'Unnamed: 28',
'tp1_y': 'Unnamed: 29',
'tp1_z': 'Unnamed: 30',
'shotlog': 'Unnamed: 57'}
elif 16594 <= shot <= 17343:
shotlog_num = 1
path = '/Users/vonderlinden2/rsx_drive/RSX/MDS+/Shotlogs/shotlog_2013-05-02.8.xlsx'
column = {'bdot3a_orientation': 'Unnamed: 17',
'bdot3a_x': 'Unnamed: 18',
'bdot3a_y': 'Unnamed: 19',
'bdot3a_z': 'Unnamed: 20',
'bdot10_orientation': 'Unnamed: 21',
'bdot10_x': 'Unnamed: 22',
'bdot10_y': 'Unnamed: 23',
'bdot10_z': 'Unnamed: 24',
'tp1_insertion': 'Unnamed: 25',
'tp1_x': 'Unnamed: 26',
'tp1_y': 'Unnamed: 27',
'tp1_z': 'Unnamed: 28',
'mach_insertion': 'Unnamed: 37',
'mach_x': 'Unnamed: 38',
'mach_x_read': 'Unnamed: 39',
'mach_orientation': 'Unnamed: 40',
'mach_orientation_read': 'Unnamed: 41',
'mach_y': 'Unnamed: 42',
'mach_z': 'Unnamed: 43',
'shotlog': 'Unnamed: 62'}
elif 16057 <= shot <= 16578:
shotlog_num = 2
path = '/Users/vonderlinden2/rsx_drive/RSX/MDS+/Shotlogs/shotlog_2013-03-08.3.xlsx'
column = {'bdot3a_orientation': 'Unnamed: 17',
'bdot3a_x': 'Unnamed: 18',
'bdot3a_y': 'Unnamed: 19',
'bdot3a_z': 'Unnamed: 20',
'bdot10_orientation': 'Unnamed: 21',
'bdot10_x': 'Unnamed: 22',
'bdot10_y': 'Unnamed: 23',
'bdot10_z': 'Unnamed: 24',
'tp1_insertion': 'Unnamed: 25',
'tp1_x': 'Unnamed: 26',
'tp1_y': 'Unnamed: 27',
'tp1_z': 'Unnamed: 28',
'tp2_insertion': 'Unnamed: 37',
'tp2_x': 'Unnamed: 38',
'tp2_y': 'Unnamed: 39',
'tp2_z': 'Unnamed: 40',
'shotlog': 'Unnamed: 59'}
elif 15927 < shot < 16018:
shotlog_num = 3
path = '/Users/vonderlinden2/rsx_drive/RSX/MDS+/Shotlogs/shotlog_2012-09-27.xlsx'
column = {'bdot3a_orientation': 'Unnamed: 17',
'bdot3a_x': 'Unnamed: 18',
'bdot3a_y': 'Unnamed: 19',
'bdot3a_z': 'Unnamed: 20',
'bdot10_orientation': 'Unnamed: 21',
'bdot10_x': 'Unnamed: 22',
'bdot10_y': 'Unnamed: 23',
'bdot10_z': 'Unnamed: 24',
'mach_orientation': 'Unnamed: 25',
'mach_x': 'Unnamed: 26',
'mach_y': 'Unnamed: 27',
'mach_z': 'Unnamed: 28',
'tp2_insertion': 'Unnamed: 37',
'tp2_x': 'Unnamed: 38',
'tp2_y': 'Unnamed: 39',
'tp2_z': 'Unnamed: 40',
'shotlog': 'Unnamed: 59'}
elif 15249 < shot < 15928:
shotlog_num = 4
path = '/Users/vonderlinden2/rsx_drive/RSX/MDS+/Shotlogs/shotlog_2012-09-14_cumulative.xlsx'
column = {'bdot3a_orientation': 'Unnamed: 17',
'bdot3a_x': 'Unnamed: 18',
'bdot3a_y': 'Unnamed: 19',
'bdot3a_z': 'Unnamed: 20',
'bdot10_orientation': 'Unnamed: 21',
'bdot10_x': 'Unnamed: 22',
'bdot10_y': 'Unnamed: 23',
'bdot10_z': 'Unnamed: 24',
'tp1_insertion': 'Unnamed: 25',
'tp1_x': 'Unnamed: 26',
'tp1_y': 'Unnamed: 27',
'tp1_z': 'Unnamed: 28',
'tp2_insertion': 'Unnamed: 37',
'tp2_x': 'Unnamed: 38',
'tp2_y': 'Unnamed: 39',
'tp2_z': 'Unnamed: 40',
'shotlog': 'Unnamed: 59'}
else:
print 'No column dictionary defined for %i' % shot
return None
if shot in shotlogs[shotlog_num]['Unnamed: 0'].index:
for key in column.keys():
rsx_settings[key] = shotlogs[shotlog_num][column[key]][shot]
rsx_settings['exists_in_shotlog'] = True
else:
print 'Shot %i not in excel spreadsheet.' % shot
rsx_settings['exists_in_shotlog'] = False
return rsx_settings
def store_rsx_settings_sql(shot, rsx_settings, database, start=False):
r"""
Write contents of one row (shot) from shotlog sheet to sql database.
"""
connection = sqlite3.connect(database)
cursor = connection.cursor()
if start:
if rsx_settings:
key_string = '('
keys = rsx_settings.keys()
for key in keys[:-1]:
key_string += ":" + key + ", "
key_string += ":" + keys[-1] + ", :shot)"
rsx_settings.update({'shot': shot})
cursor.execute("INSERT INTO Shots ( " + str(keys + ['shot'])[1:-1].replace("'", "") +
") VALUES " + key_string + ";",
rsx_settings)
else:
cursor.execute("INSERT INTO Shots (shot, exists_in_shotlog) " +
"VALUES (:shot, :exists);", {'shot': shot, 'exists': False})
else:
if rsx_settings:
for key in rsx_settings.keys():
cursor.execute("UPDATE Shots SET " + key + " = :value " +
"WHERE shot = :shot;",
{'value': rsx_settings[key], 'shot': shot})
else:
cursor.execute("UPDATE Shots SET exists_in_shotlog = :exists " +
"WHERE shot = :shot;", {'shot': shot, 'exists': False})
connection.commit()
cursor.close()
connection.close()
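# --- Hedged illustration of the generated SQL (added for clarity; not part of
# the original module). With rsx_settings = {'mach_x': 0.01, 'mach_y': 0.02}
# and shot = 16000 the start=True branch builds (key order follows the
# dictionary's iteration order):
#   INSERT INTO Shots ( mach_x, mach_y, shot) VALUES (:mach_x, :mach_y, :shot);
# and passes the settings dictionary (plus the shot number) as named
# parameters, so the values are bound by sqlite3 rather than formatted into
# the string.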
```
#### File: relative_canonical_helicity_tools/read_from_sql/read_from_sql.py
```python
import sqlite3
def read_all_rows(condition, database, table):
r"""
Return all rows from sql table that match condition.
"""
connection = sqlite3.connect(database)
connection.row_factory = sqlite3.Row
cursor = connection.cursor()
cursor.execute('SELECT * FROM ' + table + ' WHERE ' + condition)
rows = cursor.fetchall()
cursor.close()
connection.close()
return rows
def cursor_with_rows(condition, database, table):
r"""
Return cursor object which can iterate through rows matching condition.
"""
connection = sqlite3.connect(database)
connection.row_factory = sqlite3.Row
cursor = connection.cursor()
cursor.execute('SELECT * FROM ' + table + ' WHERE ' + condition)
return cursor, connection
def close(connection, cursor):
r"""
Close connection and cursor.
"""
cursor.close()
connection.close()
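# --- Hedged usage sketch (added for illustration; not part of the original
# module). The database path and condition below are assumptions chosen only
# to show the intended call pattern; cursor_with_rows leaves the connection
# open, so the caller is responsible for calling close() afterwards.
def _example_iterate_shots():
    r"""
    Iterate over rows matching a condition and clean up afterwards.
    """
    cursor, connection = cursor_with_rows('exists_in_shotlog = 1',
                                          'shots.db', 'Shots')
    shots = [row['shot'] for row in cursor]
    close(connection, cursor)
    return shots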
```
#### File: jensv/relative_canonical_helicity_tools/vis_canonical_fluxtubes.py
```python
from datetime import datetime as date
import numpy as np
import os
from scipy.constants import elementary_charge, proton_mass
from glob import glob
import sys
visit_path1 = "/home/jensv/visit/visit2_10_3.linux-x86_64/2.10.3/linux-x86_64/lib/site-packages"
visit_path2 = "/home/jensv/visit/visit2_10_3.linux-x86_64/bin/"
sys.path.append(visit_path1)
sys.path.append(visit_path2)
os.environ["PATH"] += os.pathsep + visit_path1
os.environ["PATH"] += os.pathsep + visit_path2
import visit
import argparse
tan = (209, 178, 111, 255)
olive = (110, 117, 14, 255)
dim_grey =(105, 105, 105, 255)
black = (0, 0, 0, 255)
dark_grey = (169, 169, 169, 255)
red = (255, 0, 0, 255)
dark_red = (84, 0, 0, 255)
green = (0, 154, 0, 255)
navy = (0, 0, 128, 255)
aqua = (0, 255, 255, 255)
def define_expressions(visit):
r"""
Define Visit expressions.
"""
visit.DefineVectorExpression("B", "{B_x, B_y, B_z}")
visit.DefineVectorExpression("B_norm", "{B_norm_x, B_norm_y, "
"B_norm_z}")
visit.DefineVectorExpression("B_perp", "{B_x, B_y, 0}")
visit.DefineVectorExpression("B_para", "{0, 0, B_z}")
visit.DefineScalarExpression("B_para_scalar", "B_z")
visit.DefineVectorExpression("Omega_e_times_density", "B*n")
visit.DefineVectorExpression("A_vacuum", "{A_vacuum_x, A_vacuum_y, 0}")
visit.DefineVectorExpression("A", "{A_x, A_y, A_z}")
visit.DefineVectorExpression("J_smooth", "{j_x, j_y, j_z}")
visit.DefineScalarExpression("J_smooth_mag", "sqrt(j_x^2 + j_y^2 + j_z^2)")
visit.DefineVectorExpression("J_smooth_perp", "{j_x, j_y, 0}")
visit.DefineVectorExpression("J_smooth_para", "{0, 0, j_z}")
visit.DefineVectorExpression("J_smooth_para_mag", "j_z")
visit.DefineVectorExpression("J_raw", "{j_raw_x, j_raw_y, j_raw_z}")
visit.DefineScalarExpression("J_raw_mag", "sqrt(j_raw_x^2 +" +
"j_raw_y^2 +" +
"j_raw_z^2)")
visit.DefineVectorExpression("J_raw_perp", "{j_raw_x, j_raw_y, 0}")
visit.DefineVectorExpression("J_raw_para", "{0, 0, j_raw_z}")
visit.DefineScalarExpression("divergence_B", "divergence(B)")
visit.DefineScalarExpression("divergence_Omega_i_raw_plus",
"divergence(Omega_i_raw_plus)")
visit.DefineScalarExpression("divergence_Omega_i_plus",
"divergence(Omega_i_plus)")
visit.DefineVectorExpression("J_raw_filtered_by_Te",
"J_raw * Te_raw_normalized")
visit.DefineVectorExpression("J_raw_filtered_by_Te^2",
"J_raw * Te_raw_normalized^2")
visit.DefineVectorExpression("J_raw_filtered_by_Te^3",
"J_raw * Te_raw_normalized^3")
visit.DefineVectorExpression("J_raw_filtered_by_Te^4",
"J_raw * Te_raw_normalized^4")
visit.DefineVectorExpression("J_raw_filtered_by_Te_smooth",
"J_raw * Te_smooth_normalized")
visit.DefineVectorExpression("J_raw_filtered_by_Te_smooth^2",
"J_raw * Te_smooth_normalized^2")
visit.DefineVectorExpression("J_raw_filtered_by_Te_smooth^3",
"J_raw * Te_smooth_normalized^3")
visit.DefineVectorExpression("J_raw_filtered_by_Te_smooth^4",
"J_raw * Te_smooth_normalized^4")
visit.DefineVectorExpression("u_i_plus", "{u_i_x_plus, u_i_y, u_i_z}")
visit.DefineVectorExpression("u_i_plus_perp", "{dot(u_i_plus, {1, 0, 0}), dot(u_i_plus, "
"{0, 1, 0}), 0}")
visit.DefineVectorExpression("u_i_plus_para", "{0, 0, dot(u_i_plus, {0, 0, 1})}")
visit.DefineVectorExpression("omega_i_plus", "{w_i_x_plus, w_i_y_plus, w_i_z_plus}")
visit.DefineVectorExpression("omega_i_plus_perp", "{dot(omega_i_plus, {1, 0, 0}),"
"dot(omega_i_plus, {0, 1, 0}), 0})")
visit.DefineVectorExpression("omega_i_plus_para", "{0, 0, dot(omega_i_plus, "
"{0, 0, 1})}")
visit.DefineVectorExpression("omega_i_raw_plus",
"{w_i_raw_x_plus, w_i_raw_y_plus, w_i_raw_z_plus}")
visit.DefineVectorExpression("omega_i_raw_plus_perp", "{dot(omega_i_raw_plus, {1, 0, 0}),"
"dot(omega_i_raw_plus, {0, 1, 0}), 0})")
visit.DefineVectorExpression("omega_i_raw_plus_para", "{0, 0, dot(omega_i_raw_plus, "
"{0, 0, 1})}")
visit.DefineVectorExpression("Omega_i_plus",
str(elementary_charge) + "*B +" + str(proton_mass) +
"*omega_i_plus")
visit.DefineVectorExpression("Omega_i_plus_perp", "{dot(Omega_i_plus, {1, 0, 0}), "
"dot(Omega_i_plus, {0, 1, 0}), 0}")
visit.DefineVectorExpression("Omega_i_plus_para", "{0, 0, dot(Omega_i_plus, "
"{0, 0, 1})}")
visit.DefineScalarExpression("Omega_i_plus_para_scalar",
"dot(Omega_i_plus, {0, 0, 1})")
visit.DefineVectorExpression("Omega_i_raw_plus",
str(elementary_charge) + "*B +" + str(proton_mass) +
"*omega_i_raw_plus")
visit.DefineVectorExpression("Omega_i_raw_plus_perp", "{dot(Omega_i_raw_plus, {1, 0, 0}), "
"dot(Omega_i_raw_plus, {0, 1, 0}), 0}")
visit.DefineVectorExpression("Omega_i_raw_plus_para", "{0, 0, dot(Omega_i_raw_plus, "
"{0, 0, 1})}")
visit.DefineScalarExpression("Omega_i_raw_plus_para_scalar",
"dot(Omega_i_raw_plus, {0, 0, 1})")
visit.DefineVectorExpression("Omega_i_plus_density_dependence",
"n*(%e *B + %e *omega_i_plus) +"
"cross(gradient(n), %e *A + %e * u_i_plus)"
% (elementary_charge, proton_mass,
elementary_charge, proton_mass))
visit.DefineVectorExpression("Omega_i_plus_density_dependence_perp",
"{dot(Omega_i_plus_density_dependence, {1, 0, 0}), "
"dot(Omega_i_plus_density_dependence, {0, 1, 0}), 0}")
visit.DefineVectorExpression("Omega_i_plus_density_dependence_para",
"{0, 0, dot(Omega_i_plus_density_dependence, "
"{0, 0, 1})}")
visit.DefineScalarExpression("Omega_i_plus_density_dependence_para_scalar",
"dot(Omega_i_plus_density_dependence, {0, 0, 1})")
visit.DefineVectorExpression("Omega_i_raw_plus_density_dependence",
"n*(%e *B + %e *omega_i_raw_plus) +"
" cross(gradient(n), %e *A + %e * u_i_plus)"
% (elementary_charge, proton_mass,
elementary_charge, proton_mass))
visit.DefineVectorExpression("Omega_i_raw_plus_density_dependence_perp",
"{dot(Omega_i_raw_plus_density_dependence, {1, 0, 0}), "
"dot(Omega_i_raw_plus_density_dependence, {0, 1, 0}), 0}")
visit.DefineVectorExpression("Omega_i_raw_plus_density_dependence_para",
"{0, 0, dot(Omega_i_raw_plus_density_dependence, "
"{0, 0, 1})}")
visit.DefineScalarExpression("Omega_i_raw_plus_density_dependence_para_scalar",
"dot(Omega_i_raw_plus_density_dependence, {0, 0, 1})")
visit.DefineVectorExpression("Omega_i_plus_times_density",
"n*(%e *B + %e *omega_i_plus)" %
(elementary_charge, proton_mass))
visit.DefineVectorExpression("Omega_i_plus_times_density_perp",
"{dot(Omega_i_plus_times_density, {1, 0, 0}), "
"dot(Omega_i_plus_times_density, {0, 1, 0}), 0}")
visit.DefineVectorExpression("Omega_i_plus_times_density_para",
"{0, 0, dot(Omega_i_plus_density_dependence, "
"{0, 0, 1})}")
visit.DefineScalarExpression("Omega_i_plus_times_density_para_scalar",
"dot(Omega_i_plus_times_density, {0, 0, 1})")
visit.DefineVectorExpression("Omega_i_raw_plus_times_density",
"n*(%e *B + %e *omega_i_raw_plus)" %
(elementary_charge, proton_mass))
visit.DefineVectorExpression("Omega_i_raw_plus_times_density_perp",
"{dot(Omega_i_raw_plus_density_dependence, {1, 0, 0}), "
"dot(Omega_i_raw_plus_times_density, {0, 1, 0}), 0}")
visit.DefineVectorExpression("Omega_i_raw_plus_times_density_para",
"{0, 0, dot(Omega_i_raw_plus_times_density, "
"{0, 0, 1})}")
visit.DefineScalarExpression("Omega_i_raw_plus_times_density_para_scalar",
"dot(Omega_i_raw_plus_times_density, {0, 0, 1})")
## Omega_e density dependence
##
visit.DefineVectorExpression("Omega_e_density_dependence",
"n*B + cross(gradient(n), A)")
visit.DefineVectorExpression("Omega_e_density_dependence_perp",
"{dot(Omega_e_density_dependence, {1, 0, 0}), "
"dot(Omega_e_density_dependence, {0, 1, 0}), 0}")
visit.DefineVectorExpression("Omega_e_density_dependence_para",
"{0, 0, dot(Omega_e_plus_density_dependence, "
"{0, 0, 1})}")
visit.DefineScalarExpression("Omega_e_density_dependence_para_scalar",
"dot(Omega_e_density_dependence, {0, 0, 1})")
## u_i_x(t) = u_i_y(t MINUS tau*0.25)
##
visit.DefineVectorExpression("u_i_minus",
"{u_i_x_minus, u_i_y, u_i_z}")
visit.DefineVectorExpression("u_i_minus_perp",
"{dot(u_i_minus, {1, 0, 0}), dot(u_i_minus, "
"{0, 1, 0}), 0}")
visit.DefineVectorExpression("u_i_minus_para", "{0, 0, dot(u_i_minus, {0, 0, 1})}")
visit.DefineScalarExpression("u_i_minus_para_scalar", "dot(u_i_minus, {0, 0, 1})")
visit.DefineVectorExpression("omega_i_minus", "{w_i_minus_x, w_i_minus_y, w_i_minus_z}")
visit.DefineVectorExpression("omega_i_minus_perp", "{dot(omega_i_minus, {1, 0, 0}),"
"dot(omega_i_minus, {0, 1, 0}), 0})")
visit.DefineVectorExpression("omega_i_minus_para", "{0, 0, dot(omega_i_minus, "
"{0, 0, 1})}")
visit.DefineScalarExpression("omega_i_minus_para_scalar", "dot(omega_i_minus, "
"{0, 0, 1})")
visit.DefineVectorExpression("omega_i_raw_minus",
"{w_i_raw_x_minus, w_i_raw_y_minus, w_i_raw_z_minus}")
visit.DefineVectorExpression("omega_i_minus_raw_perp",
"{dot(omega_i_raw_minus, {1, 0, 0}),"
"dot(omega_i_raw_minus, {0, 1, 0}), 0})")
visit.DefineVectorExpression("omega_i_minus_raw_para",
"{0, 0, dot(omega_i_raw_minus, "
"{0, 0, 1})}")
visit.DefineScalarExpression("omega_i_raw_minus_para_scalar",
"dot(omega_i_raw_minus, "
"{0, 0, 1})")
visit.DefineVectorExpression("Omega_i_minus", str(elementary_charge) +
"*B +" + str(proton_mass) +
"*omega_i_minus")
visit.DefineVectorExpression("Omega_i_minus_perp", "{dot(Omega_i_minus, {1, 0, 0}), "
"dot(Omega_i_minus, {0, 1, 0}), 0}")
visit.DefineVectorExpression("Omega_i_minus_para", "{0, 0, dot(Omega_i_minus, "
"{0, 0, 1})}")
visit.DefineScalarExpression("Omega_i_minus_para_scalar",
"dot(Omega_i_minus, {0, 0, 1})")
visit.DefineVectorExpression("Omega_i_raw_minus",
str(elementary_charge) + "*B +" + str(proton_mass) +
"*omega_i_raw_minus")
visit.DefineVectorExpression("Omega_i_raw_minus_perp", "{dot(Omega_i_raw_minus, {1, 0, 0}), "
"dot(Omega_i_raw_minus, {0, 1, 0}), 0}")
visit.DefineVectorExpression("Omega_i_raw_minus_para", "{0, 0, dot(Omega_i_raw_minus, "
"{0, 0, 1})}")
visit.DefineScalarExpression("Omega_i_raw_minus_para_scalar",
"dot(Omega_i_raw_minus, {0, 0, 1})")
visit.DefineVectorExpression("Omega_i_minus_density_dependence",
"n*(%e *B + %e *omega_i_minus) +"
" cross(gradient(n), %e *A + %e * u_i_minus)" %
(elementary_charge, proton_mass,
elementary_charge, proton_mass))
visit.DefineVectorExpression("Omega_i_minus_density_dependence_perp",
"{dot(Omega_i_minus_density_dependence, {1, 0, 0}), "
"dot(Omega_i_minus_density_dependence, {0, 1, 0}), 0}")
visit.DefineVectorExpression("Omega_i_minus_density_dependence_para",
"{0, 0, dot(Omega_i_minus_density_dependence, "
"{0, 0, 1})}")
visit.DefineScalarExpression("Omega_i_minus_density_dependence_para_scalar",
"dot(Omega_i_minus_density_dependence, {0, 0, 1})")
visit.DefineVectorExpression("Omega_i_raw_minus_density_dependence",
"n*(%e *B + %e *omega_i_raw_minus) +"
" cross(gradient(n), %e *A + %e * u_i_minus)" %
(elementary_charge, proton_mass,
elementary_charge, proton_mass))
visit.DefineVectorExpression("Omega_i_raw_minus_density_dependence_perp",
"{dot(Omega_i_raw_minus_density_dependence, {1, 0, 0}), "
"dot(Omega_i_raw_minus_density_dependence, {0, 1, 0}), 0}")
visit.DefineVectorExpression("Omega_i_raw_minus_density_dependence_para",
"{0, 0, dot(Omega_i_raw_minus_density_dependence, "
"{0, 0, 1})}")
visit.DefineScalarExpression("Omega_i_raw_minus_density_dependence_para_scalar",
"dot(Omega_i_raw_minus_density_dependence, {0, 0, 1})")
visit.DefineVectorExpression("Omega_i_minus_times_density",
"n*(%e *B + %e *omega_i_minus)" %
(elementary_charge, proton_mass))
visit.DefineVectorExpression("Omega_i_minus_times_density_perp",
"{dot(Omega_i_plus_times_density, {1, 0, 0}), "
"dot(Omega_i_minus_times_density, {0, 1, 0}), 0}")
visit.DefineVectorExpression("Omega_i_minus_times_density_para",
"{0, 0, dot(Omega_i_plus_density_dependence, "
"{0, 0, 1})}")
visit.DefineScalarExpression("Omega_i_minus_times_density_para_scalar",
"dot(Omega_i_minus_times_density, {0, 0, 1})")
visit.DefineVectorExpression("Omega_i_raw_minus_times_density",
"n*(%e *B + %e *omega_i_raw_minus)" %
(elementary_charge, proton_mass))
visit.DefineVectorExpression("Omega_i_raw_minus_times_density_perp",
"{dot(Omega_i_raw_minus_density_dependence, {1, 0, 0}), "
"dot(Omega_i_raw_minus_times_density, {0, 1, 0}), 0}")
visit.DefineVectorExpression("Omega_i_raw_minus_times_density_para",
"{0, 0, dot(Omega_i_raw_minus_times_density, "
"{0, 0, 1})}")
visit.DefineScalarExpression("Omega_i_raw_minus_times_density_para_scalar",
"dot(Omega_i_raw_minus_times_density, {0, 0, 1})")
## Canonical momentum fields
visit.DefineVectorExpression("P_i", "%e*A + %e*u_i_plus" %
(elementary_charge, proton_mass))
visit.DefineVectorExpression("P_i_times_density", "n*P_i")
## Reference fields
visit.DefineVectorExpression("B_ref", "{B_ref_x, B_ref_y, B_ref_z}")
visit.DefineVectorExpression("A_ref", "{A_ref_x, A_ref_y, A_ref_z}")
visit.DefineVectorExpression("u_i_ref",
"{u_i_ref_x, u_i_ref_y, u_i_ref_z}")
visit.DefineVectorExpression("omega_i_ref",
"{omega_i_ref_x, omega_i_ref_y, omega_i_ref_z}")
#visit.DefineVectorExpression("u_i_ref_raw_vort",
# "{u_i_raw_ref_x, u_i_raw_ref_y, u_i_raw_ref_z}")
visit.DefineVectorExpression("omega_i_ref_raw",
"{w_i_raw_ref_x, w_i_raw_ref_y, w_i_raw_ref_z}")
visit.DefineVectorExpression("P_i_ref", "%e*A_ref + %e*u_i_ref" %
(elementary_charge, proton_mass))
visit.DefineVectorExpression("P_i_ref_times_density", "n*P_i_ref")
visit.DefineVectorExpression("P_i_ref_raw_vort", "%e*A_ref + %e*u_i_ref_raw_vort" %
(elementary_charge, proton_mass))
visit.DefineVectorExpression("P_i_ref_raw_vort_times_density", "n*P_i_ref_raw_vort")
visit.DefineVectorExpression("Omega_i_ref", "%e*B_ref + %e*omega_i_ref_raw" %
(elementary_charge, proton_mass))
visit.DefineVectorExpression("Omega_i_ref_times_density", "n*Omega_i_ref")
visit.DefineVectorExpression("Omega_i_ref_density_dependence",
"n*(%e *B_ref + %e *omega_i_ref) +"
"cross(gradient(n), %e *A_ref + %e * u_i_ref)"
% (elementary_charge,proton_mass,
elementary_charge, proton_mass))
visit.DefineVectorExpression("Omega_i_ref_raw_vort",
"%e*B_ref + %e*omega_i_ref_raw" %
(elementary_charge, proton_mass))
visit.DefineVectorExpression("Omega_i_ref_raw_vort_time_density",
"n*Omega_i_ref_raw_vort")
visit.DefineVectorExpression("Omega_i_ref_raw_vort_density_dependence",
"n*(%e *B_ref + %e *omega_i_ref_raw) +"
"cross(gradient(n), %e *A_ref + %e * u_i_ref_raw_vort)"
% (elementary_charge,proton_mass,
elementary_charge, proton_mass))
## Relative fields
visit.DefineVectorExpression("B_rel", "B + B_ref")
visit.DefineVectorExpression("B_rel_minus", "B - B_ref")
visit.DefineVectorExpression("A_rel", "A - A_ref")
visit.DefineVectorExpression("u_i_rel", "u_i_plus - u_i_ref")
visit.DefineVectorExpression("u_i_rel_plus", "u_i_plus + u_i_ref")
#visit.DefineVectorExpression("u_i_rel_raw_vort", "u_i_plus - u_i_ref_raw_vort")
#visit.DefineVectorExpression("u_i_rel_plus_raw_vort", "u_i_plus + u_i_ref_raw_vort")
visit.DefineVectorExpression("omega_i_rel", "omega_i_plus + omega_i_ref")
visit.DefineVectorExpression("omega_i_rel_raw", "omega_i_raw_plus + omega_i_ref_raw")
visit.DefineVectorExpression("P_i_rel", "P_i - P_i_ref")
visit.DefineVectorExpression("P_i_rel_times_density",
"P_i_times_density - P_i_ref_times_density")
visit.DefineVectorExpression("P_i_rel_raw_vort", "P_i - P_i_ref_raw_vort")
visit.DefineVectorExpression("P_i_rel_raw_vort_times_density",
"P_i_rel_raw_vort_times_density - P_i_ref_raw_vort_times_density")
visit.DefineVectorExpression("Omega_i_rel", "Omega_i_plus + Omega_i_ref")
visit.DefineVectorExpression("Omega_i_rel_times_density",
"Omega_i_plus_times_density + Omega_i_ref_times_density")
visit.DefineVectorExpression("Omega_i_rel_density_dependence",
"Omega_i_plus_density_dependence + Omega_i_ref_density_dependence")
visit.DefineVectorExpression("Omega_i_raw_rel",
"Omega_i_raw_plus + Omega_i_ref_raw_vort")
visit.DefineVectorExpression("Omega_i_raw_rel_times_density",
"Omega_i_raw_plus_times_density +"
"Omega_i_ref_raw_vort_times_density")
visit.DefineVectorExpression("Omega_i_ref_raw_density_dependence",
"Omega_i_raw_plus_density_dependence +"
"Omega_i_ref_raw_vort_density_dependence")
## Dynamic fields
visit.DefineVectorExpression("B_dynamic", "{B_dynamic_x, B_dynamic_y, B_dynamic_z}")
visit.DefineVectorExpression("A_dynamic", "{A_dynamic_x, A_dynamic_y, A_dynamic_z}")
visit.DefineVectorExpression("B_dynamic_ref", "{B_dynamic_ref_x,"
"B_dynamic_ref_y, B_dynamic_ref_z}")
visit.DefineVectorExpression("A_dynamic_ref", "{A_dynamic_ref_x,"
"A_dynamic_ref_y, A_dynamic_ref_z}")
## Helicity density
visit.DefineScalarExpression("mag_helicity_density", "dot(A, B)")
visit.DefineScalarExpression("mag_ref_helicity_density", "dot(A_ref, B_ref)")
visit.DefineScalarExpression("mag_rel_helicity_density", "dot(A-A_ref, B+B_ref)")
visit.DefineScalarExpression("mag_dynamic_helicity_density", "dot(A_dynamic, B_dynamic)")
visit.DefineScalarExpression("mag_dynamic_ref_helicity_density",
"dot(A_dynamic_ref, B_dynamic_ref)")
visit.DefineScalarExpression("mag_dynamic_rel_helicity_density",
"dot(A_dynamic - A_dynamic_ref, B_dynamic + B_dynamic_ref)")
visit.DefineScalarExpression("kin_helicity_density",
"dot(u_i_plus, omega_i_raw_plus)")
visit.DefineScalarExpression("kin_ref_helicity_density",
"dot(u_i_ref_raw, omega_i_ref_raw)")
visit.DefineScalarExpression("kin_rel_helicity_density",
"dot(u_i_plus - u_i_ref_raw, omega_i_raw_plus + omega_i_ref_raw)")
visit.DefineScalarExpression("cross_helicity_density",
"2.*dot(B, u_i_plus)")
visit.DefineScalarExpression("cross_ref_helicity_density",
"2.*dot(B_ref, u_i_ref)")
visit.DefineScalarExpression("cross_rel_helicity_density",
"(dot(u_i_plus - u_i_ref, B + B_ref)"
"+ dot(u_i_plus + u_i_ref, B - B_ref))")
def normalize_scalar(visit, scalar_name,
normalized_scalar_name):
r"""
    Determine the max of a scalar and define a new expression with that
    max subtracted.
"""
visit.AddPlot("Pseudocolor", scalar_name)
visit.DrawPlots()
visit.Query("Max")
max = visit.GetQueryOutputValue()
visit.DefineScalarExpression(normalized_scalar_name,
"%s - %g" % (scalar_name, max))
visit.DeleteActivePlots()
def launch_points_inner_outer(center, plane=0.249,
radius_inner=0.001, radius_outer=0.005,
num_inner=80, num_outer=60,
return_cut_point=False):
r"""
Calculate points on a circle outline for a given center point.
"""
thetas = circle_with_cut_thetas(num_outer)
points_outer = launch_points(center, thetas, radius=radius_outer,
plane=plane)
thetas = full_circle_thetas(num_inner)
points_inner = launch_points(center, thetas, radius=radius_inner,
plane=plane)
cut_point_x = points_outer[(num_outer-1)*3]
cut_point_y = points_outer[(num_outer-1)*3+1]
cut_point = [cut_point_x, cut_point_y]
if return_cut_point:
return points_outer, points_inner, cut_point
else:
return points_outer, points_inner
def full_circle_thetas(num_points):
r"""
Return a linear space of angles.
"""
thetas = np.linspace(0, 2.*np.pi, num_points)
return thetas
def circle_with_cut_thetas(num_points):
r"""
Return a linear space of angles with a cut from 3/4pi to 5/4pi.
"""
thetas = np.linspace(0, 3./4.*np.pi, num_points)
thetas = np.concatenate((thetas, np.linspace(5./4.*np.pi, 2.*np.pi,
num_points)))
return thetas
def launch_points(center, thetas, radius=0.003,
plane=0.249):
r"""
Return launch points for field lines.
"""
x_points = radius * np.cos(thetas) + center[0]
y_points = radius * np.sin(thetas) + center[1]
z_points = plane * np.ones(x_points.size)
points = np.empty((x_points.size + y_points.size + z_points.size))
points[0::3] = x_points
points[1::3] = y_points
points[2::3] = z_points
points = tuple(points)
return points
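# --- Hedged note on the point layout (added for illustration; not part of the
# original script). The streamline seed lists built here are flat tuples
# (x0, y0, z0, x1, y1, z1, ...), which is why launch_points interleaves the
# coordinates with strided assignment before converting to a tuple.
def _example_launch_points_layout():
    r"""
    Show the interleaved (x, y, z, x, y, z, ...) layout for two seed points.
    """
    points = launch_points((0.0, 0.0), np.array([0., np.pi]),
                           radius=0.003, plane=0.249)
    assert len(points) == 6
    assert points[2] == 0.249 and points[5] == 0.249
    return points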
def setup_scalar_isosurface(visit, quantity,
colortable="PuRd", max_val=1., min_val=0.9):
r"""
Setup iso_surface. Works best if quantity is plane normalized.
"""
visit.AddPlot("Pseudocolor", quantity, 1, 0)
PseudocolorAtts = visit.PseudocolorAttributes()
PseudocolorAtts.colorTableName = colortable
PseudocolorAtts.opacityType = PseudocolorAtts.Constant
PseudocolorAtts.opacity = 0.25
PseudocolorAtts.smoothingLevel = 0
PseudocolorAtts.legendFlag = 0
visit.SetPlotOptions(PseudocolorAtts)
visit.AddOperator("Isosurface", 0)
IsosurfaceAtts = visit.IsosurfaceAttributes()
IsosurfaceAtts.contourNLevels = 5
IsosurfaceAtts.contourValue = ()
IsosurfaceAtts.contourPercent = ()
IsosurfaceAtts.contourMethod = IsosurfaceAtts.Level
IsosurfaceAtts.minFlag = 1
IsosurfaceAtts.min = min_val
IsosurfaceAtts.maxFlag = 1
IsosurfaceAtts.max = max_val
visit.SetOperatorOptions(IsosurfaceAtts, 0)
return PseudocolorAtts, IsosurfaceAtts
def setup_current_pseudocolor(visit, current_to_use,
colortable="PRGn_Stepped", max_val=1e6,
min_val=-1e6, invert=True, horizontal=True):
r"""
Setup pseudocolor current plot.
"""
visit.AddPlot("Pseudocolor", current_to_use, 1, 0)
PseudocolorAtts = visit.PseudocolorAttributes()
PseudocolorAtts.scaling = PseudocolorAtts.Linear
PseudocolorAtts.limitsMode = PseudocolorAtts.OriginalData
PseudocolorAtts.colorTableName = colortable
PseudocolorAtts.invertColorTable = invert
if max_val:
PseudocolorAtts.maxFlag = 1
PseudocolorAtts.max = max_val
if min_val:
PseudocolorAtts.minFlag = 1
PseudocolorAtts.min = min_val
visit.SetPlotOptions(PseudocolorAtts)
visit.AddOperator("Slice", 0)
SliceAtts = visit.SliceAttributes()
SliceAtts.originType = SliceAtts.Intercept
SliceAtts.originIntercept = 0.249
SliceAtts.axisType = SliceAtts.ZAxis
SliceAtts.project2d = 0
visit.SetOperatorOptions(SliceAtts, 0)
colorbar = visit.GetAnnotationObject('Plot0000')
colorbar.SetDrawMinMax(0)
if horizontal:
colorbar.SetOrientation("HorizontalBottom")
colorbar.SetFontHeight(0.017)
colorbar.SetNumberFormat('%#3.1g')
colorbar.SetManagePosition(0)
colorbar.SetPosition((0.05, 0.99))
return PseudocolorAtts, SliceAtts
def setup_massless_electron_canonical_flux_tubes(visit, points_outer,
points_inner):
r"""
Setup two massless electron canonical flux tubes i.e. magnetic flux tubes.
Intended to be inner and outer flux tubes.
"""
visit.AddPlot("Streamline", "Omega_e_times_density", 1, 0)
StreamlineAtts_outer = visit.StreamlineAttributes()
StreamlineAtts_outer.sourceType = StreamlineAtts_outer.SpecifiedPointList
StreamlineAtts_outer.SetPointList(points_outer)
StreamlineAtts_outer.coloringMethod = StreamlineAtts_outer.Solid
StreamlineAtts_outer.colorTableName = "Default"
StreamlineAtts_outer.singleColor = (255, 0, 0, 255)
StreamlineAtts_outer.legendFlag = 0
visit.SetPlotOptions(StreamlineAtts_outer)
visit.AddPlot("Streamline", "Omega_e_times_density", 1, 0)
StreamlineAtts_inner = visit.StreamlineAttributes()
StreamlineAtts_inner.sourceType = StreamlineAtts_inner.SpecifiedPointList
StreamlineAtts_inner.SetPointList(points_inner)
StreamlineAtts_inner.coloringMethod = StreamlineAtts_inner.Solid
StreamlineAtts_inner.colorTableName = "Default"
StreamlineAtts_inner.singleColor = (190, 64, 0, 255)
StreamlineAtts_inner.legendFlag = 0
visit.SetPlotOptions(StreamlineAtts_inner)
return StreamlineAtts_outer, StreamlineAtts_inner
def setup_outer_inner_ion_canonical_flux_tubes(visit, quantity, points_outer,
points_inner,
outer_color=dark_grey,
inner_color=black):
r"""
Setup two ion canonical flux tubes.
    Intended to be inner and outer flux tubes.
"""
visit.AddPlot("Streamline", quantity, 1, 0)
StreamlineAtts_outer = visit.StreamlineAttributes()
StreamlineAtts_outer.sourceType = StreamlineAtts_outer.SpecifiedPointList
StreamlineAtts_outer.SetPointList(points_outer)
StreamlineAtts_outer.coloringMethod = StreamlineAtts_outer.Solid
StreamlineAtts_outer.colorTableName = "Default"
StreamlineAtts_outer.singleColor = outer_color
StreamlineAtts_outer.legendFlag = 0
visit.SetPlotOptions(StreamlineAtts_outer)
visit.AddPlot("Streamline", quantity, 1, 0)
StreamlineAtts_inner = visit.StreamlineAttributes()
StreamlineAtts_inner.sourceType = StreamlineAtts_inner.SpecifiedPointList
StreamlineAtts_inner.SetPointList(points_inner)
StreamlineAtts_inner.coloringMethod = StreamlineAtts_inner.Solid
StreamlineAtts_inner.colorTableName = "Default"
StreamlineAtts_inner.singleColor = inner_color
StreamlineAtts_inner.legendFlag = 0
visit.SetPlotOptions(StreamlineAtts_inner)
return StreamlineAtts_outer, StreamlineAtts_inner
def setup_forward_backward_ion_canonical_flux_tubes(visit, points_forward,
points_backward,
forward_color=tan,
backward_color=olive):
r"""
Setup two ion canonical flux tubes, one integrating in the forward
direction, one integrating in the backward direction.
"""
visit.AddPlot("Streamline", "Omega_i", 1, 0)
StreamlineAtts_forward = visit.StreamlineAttributes()
StreamlineAtts_forward.sourceType = StreamlineAtts_forward.SpecifiedPointList
    StreamlineAtts_forward.SetPointList(points_forward)
StreamlineAtts_forward.coloringMethod = StreamlineAtts_forward.Solid
StreamlineAtts_forward.colorTableName = "Default"
StreamlineAtts_forward.singleColor = forward_color
StreamlineAtts_forward.integrationDirection = StreamlineAtts_forward.Forward
StreamlineAtts_forward.legendFlag = 0
visit.SetPlotOptions(StreamlineAtts_forward)
visit.AddPlot("Streamline", "Omega_i", 1, 0)
StreamlineAtts_backward = visit.StreamlineAttributes()
StreamlineAtts_backward.sourceType = StreamlineAtts_backward.SpecifiedPointList
StreamlineAtts_backward.SetPointList(points_backward)
StreamlineAtts_backward.coloringMethod = StreamlineAtts_backward.Solid
StreamlineAtts_backward.colorTableName = "Default"
StreamlineAtts_backward.singleColor = backward_color
StreamlineAtts_backward.integrationDirection = StreamlineAtts_backward.Backward
StreamlineAtts_backward.legendFlag = 0
visit.SetPlotOptions(StreamlineAtts_backward)
return StreamlineAtts_forward, StreamlineAtts_backward
def setup_backward_and_B_stream(visit, name, launch_points,
B_launch_points, color=green, B_color=red):
r"""
    Setup field lines for a magnetic flux tube and for a backward-integrated quantity.
"""
visit.AddPlot("Streamline", 'B', 1, 0)
StreamlineAtts_B = visit.StreamlineAttributes()
StreamlineAtts_B.sourceType = StreamlineAtts_B.SpecifiedPointList
StreamlineAtts_B.SetPointList(B_launch_points)
StreamlineAtts_B.coloringMethod = StreamlineAtts_B.Solid
StreamlineAtts_B.colorTableName = "Default"
StreamlineAtts_B.singleColor = B_color
StreamlineAtts_B.integrationDirection = StreamlineAtts_B.Forward
StreamlineAtts_B.legendFlag = 0
visit.SetPlotOptions(StreamlineAtts_B)
visit.AddPlot("Streamline", name, 1, 0)
StreamlineAtts_backward = visit.StreamlineAttributes()
StreamlineAtts_backward.sourceType = StreamlineAtts_backward.SpecifiedPointList
StreamlineAtts_backward.SetPointList(launch_points)
StreamlineAtts_backward.coloringMethod = StreamlineAtts_backward.Solid
StreamlineAtts_backward.colorTableName = "Default"
StreamlineAtts_backward.singleColor = color
StreamlineAtts_backward.integrationDirection = StreamlineAtts_backward.Backward
StreamlineAtts_backward.legendFlag = 0
visit.SetPlotOptions(StreamlineAtts_backward)
return StreamlineAtts_B, StreamlineAtts_backward
def setup_field_line(visit, quantity,
launch_point=(0.01, 0.01), launch_z=0.249,
color=dark_red):
r"""
Setup single field line plot to better see twistedness.
"""
visit.AddPlot("Streamline", quantity, 1, 0)
StreamlineAtts_line = visit.StreamlineAttributes()
StreamlineAtts_line.sourceType = StreamlineAtts_line.SpecifiedPoint
StreamlineAtts_line.pointSource = (launch_point[0], launch_point[1], launch_z)
StreamlineAtts_line.coloringMethod = StreamlineAtts_line.Solid
StreamlineAtts_line.singleColor = color
StreamlineAtts_line.legendFlag = 0
StreamlineAtts_line.showSeeds = 0
StreamlineAtts_line.lineWidth = 8
visit.SetPlotOptions(StreamlineAtts_line)
return StreamlineAtts_line
def setup_annotations(visit, time_scale=1, time_offset=0):
r"""
Setup Annotations: scale tick font size, label font size,
    hide unnecessary text.
"""
AnnotationAtts = visit.AnnotationAttributes()
AnnotationAtts.axes3D.autoSetScaling = 0
AnnotationAtts.axes3D.xAxis.title.visible = 0
AnnotationAtts.axes3D.yAxis.title.visible = 0
AnnotationAtts.axes3D.zAxis.title.visible = 0
AnnotationAtts.axes3D.xAxis.label.font.scale = 3
AnnotationAtts.axes3D.xAxis.label.scaling = -2
AnnotationAtts.axes3D.yAxis.label.font.scale = 3
AnnotationAtts.axes3D.yAxis.label.scaling = -2
AnnotationAtts.axes3D.zAxis.label.font.scale = 3
AnnotationAtts.axes3D.zAxis.label.scaling = -2
AnnotationAtts.userInfoFlag = 0
AnnotationAtts.databaseInfoFlag = 0
AnnotationAtts.databaseInfoTimeScale = time_scale
AnnotationAtts.databaseInfoTimeOffset = time_offset
visit.SetAnnotationAttributes(AnnotationAtts)
return AnnotationAtts
def set_default_view(visit):
r"""
Set default view for viewing fluxtubes.
If view needs to be modified it is best to align visit with gui and save
parameters from visit.GetView3D().
"""
view = visit.GetView3D()
view.SetViewNormal((-0.731293, 0.40847, 0.546227))
view.SetFocus((0.00202222, 0.000976744, 0.331997))
view.SetViewUp((0.322268, 0.91274, -0.251095))
view.SetViewAngle(30)
view.SetParallelScale(0.088383)
view.SetNearPlane(-0.176766)
view.SetImagePan((0, 0))
view.SetImageZoom(1.5)
view.SetPerspective(1)
view.SetEyeAngle(2)
view.SetCenterOfRotationSet(0)
view.SetCenterOfRotation((0.00202222, 0.000976744, 0.331997))
view.SetAxis3DScaleFlag(0)
view.SetAxis3DScales((1, 1, 1))
view.SetShear((0, 0, 1))
view.SetWindowValid(0)
visit.SetView3D(view)
def set_default_lighting(visit):
r"""
    Set lighting to light up the contour plot in the z=0.249 plane
when default view is used.
"""
light0 = visit.LightAttributes()
light0.enabledFlag = 1
light0.type = light0.Camera
light0.direction = (-0.5, 0, -0.5)
light0.color = (255, 255, 255, 255)
light0.brightness = 1
visit.SetLight(0, light0)
def set_default_view_thesis(visit):
r"""
Set default view for viewing fluxtubes.
If view needs to be modified it is best to align visit with gui and save
parameters from visit.GetView3D().
"""
View3DAtts = visit.View3DAttributes()
View3DAtts.viewNormal = (-0.652048, 0.487146, 0.580966)
View3DAtts.focus = (0.00151111, 0.0045, 0.331997)
View3DAtts.viewUp = (0.365672, 0.873317, -0.321872)
View3DAtts.viewAngle = 30
View3DAtts.parallelScale = 0.0883679
View3DAtts.nearPlane = -0.176736
View3DAtts.farPlane = 0.176736
View3DAtts.imagePan = (-0.0429026, -0.00832011)
View3DAtts.imageZoom = 0.95
View3DAtts.perspective = 1
View3DAtts.eyeAngle = 2
View3DAtts.centerOfRotationSet = 0
View3DAtts.centerOfRotation = (0.00151111, 0.0045, 0.331997)
View3DAtts.axis3DScaleFlag = 0
View3DAtts.axis3DScales = (1, 1, 1)
View3DAtts.shear = (0, 0, 1)
View3DAtts.windowValid = 0
visit.SetView3D(View3DAtts)
def set_default_view_lower_angle(visit):
r"""
Set view with lower angle for viewing fluxtubes.
If view needs to be modified it is best to align visit with gui and save
parameters from visit.GetView3D().
"""
view = visit.GetView3D()
    view.SetViewNormal((-0.776189, 0.193398, 0.600106))
    view.SetFocus((0.00202222, 0.000976744, 0.331997))
    view.SetViewUp((0.138771, 0.980856, -0.136615))
    view.SetViewAngle(30)
    view.SetParallelScale(0.088383)
    view.SetNearPlane(-0.176766)
    view.SetFarPlane(0.175437)
    view.SetImagePan((0, 0))
    view.SetImageZoom(1)
    view.SetPerspective(1)
    view.SetEyeAngle(2)
    view.SetCenterOfRotationSet(0)
    view.SetCenterOfRotation((0.00202222, 0.000976744, 0.331997))
    view.SetAxis3DScaleFlag(0)
    view.SetAxis3DScales((1, 1, 1))
    view.SetShear((0, 0, 1))
    view.SetWindowValid(0)
visit.SetView3D(view)
def set_positive_x_view(visit):
r"""
Set view along positive x for viewing fluxtubes.
If view needs to be modified it is best to align visit with gui and save
parameters from visit.GetView3D().
"""
view = visit.GetView3D()
view.SetViewNormal((-0.00997631, 0.0600385, 0.0335938))
view.SetFocus((0.00202222, -0.00202703, 0.331997))
view.SetViewUp((0.0598395, 0.998184, 0.00689852))
view.SetViewAngle(30)
view.SetParallelScale(0.0877186)
view.SetNearPlane(-0.175437)
view.SetImagePan((0, 0))
view.SetImageZoom(1)
view.SetPerspective(1)
view.SetEyeAngle(2)
view.SetCenterOfRotationSet(0)
view.SetCenterOfRotation((0.00202222, -0.00202703, 0.331997))
view.SetAxis3DScaleFlag(0)
view.SetAxis3DScales((1, 1, 1))
view.SetShear((0, 0, 1))
view.SetWindowValid(1)
visit.SetView3D(view)
def set_postive_z_view(visit):
r"""
Set view along positive z for viewing fluxtubes.
If view needs to be modified it is best to align visit with gui and save
parameters from visit.GetView3D().
"""
view = visit.GetView3D()
view.SetViewNormal((0.00944856, 0.0379894, 0.999233))
view.SetFocus((0.00202222, -0.00202703, 0.331997))
view.SetViewUp((-0.00367716, 0.9999274, 0.0037961))
view.SetViewAngle(30)
view.SetParallelScale(0.0877186)
view.SetNearPlane(-0.175437)
view.SetImagePan((0, 0))
view.SetImageZoom(2.14359)
view.SetPerspective(1)
view.SetEyeAngle(2)
view.SetCenterOfRotationSet(0)
view.SetCenterOfRotation((0.00202222, -0.00202703, 0.331997))
view.SetAxis3DScaleFlag(0)
view.SetAxis3DScales((1, 1, 1))
view.SetShear((0, 0, 1))
view.SetWindowValid(1)
visit.SetView3D(view)
def set_negative_z_view(visit):
r"""
Set view along negative z for viewing fluxtubes.
If view needs to be modified it is best to align visit with gui and save
parameters from visit.GetView3D().
"""
view = visit.GetView3D()
view.SetViewNormal((-0.00894299, -0.00985814, 0.999911))
view.SetFocus((0.00202222, 0.000976744, 0.331997))
view.SetViewUp((0.00367716, 0.999944, 0.00989136))
view.SetViewAngle(30)
view.SetParallelScale(0.0877186)
view.SetNearPlane(-0.175437)
view.SetImagePan((0, 0))
view.SetImageZoom(2.14359)
view.SetPerspective(1)
view.SetEyeAngle(2)
view.SetCenterOfRotationSet(0)
view.SetCenterOfRotation((0.00202222, -0.00202703, 0.331997))
view.SetAxis3DScaleFlag(0)
view.SetAxis3DScales((1, 1, 1))
view.SetShear((0, 0, 1))
view.SetWindowValid(1)
visit.SetView3D(view)
def determine_j_mag_extrema(database_path, plane_num=0):
r"""
Determine extrema over time of current across all shots.
    Can be used to set min and max values for colormaps.
"""
numpy_archives = glob(database_path + '*.npz')
data = np.load(numpy_archives[0])
j_x = data['j_x'][:, :, plane_num]
j_y = data['j_y'][:, :, plane_num]
j_z = data['j_z'][:, :, plane_num]
j_mag = np.sqrt(j_x**2. + j_y**2. + j_z**2.)
j_mag_max = np.nanmax(j_mag)
j_mag_min = np.nanmin(j_mag)
for archive in numpy_archives[1:]:
data = np.load(archive)
j_x = data['j_x'][:, :, plane_num]
j_y = data['j_y'][:, :, plane_num]
j_z = data['j_z'][:, :, plane_num]
j_mag = np.sqrt(j_x**2. + j_y**2. + j_z**2.)
j_mag_max = np.nanmax(j_mag) if np.nanmax(j_mag) > j_mag_max else j_mag_max
j_mag_min = np.nanmin(j_mag) if np.nanmin(j_mag) < j_mag_min else j_mag_min
return j_mag_max, j_mag_min
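# Illustrative use of the extrema to pin the colormap range for a whole
# animation (the archive path is only an example; the keyword names follow
# setup_current_pseudocolor as it is called in main below):
#
#     j_max, j_min = determine_j_mag_extrema('../output/canonical_quantities/')
#     setup_current_pseudocolor(visit, 'j_z', max_val=j_max, min_val=j_min)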
def set_save_settings(visit):
r"""
Set and return save_atts.
"""
save_atts = visit.SaveWindowAttributes()
save_atts.format = save_atts.PNG
save_atts.height = 1080
save_atts.width = 1920
save_atts.family = 0
visit.SetSaveWindowAttributes(save_atts)
return save_atts
def setup_slider(visit):
r"""
Add a time slider with us text label.
"""
slider = visit.CreateAnnotationObject("TimeSlider")
slider.SetText("$time us")
slider.SetRounded(0)
slider.SetVisible(1)
def main():
r"""
Plot frames of canonical flux tube animations.
"""
args = parse_args()
database_prefix = args.database_prefix + args.database_date
visit.Launch()
today = date.now().strftime('%Y-%m-%d-%H-%M')
out_dir = '../output/canonical_flux_tubes/' + today
try:
os.makedirs(out_dir)
except:
pass
if args.interactive_session:
visit.OpenDatabase(database_prefix + args.database_postfix + "*.vtk database")
define_expressions(visit)
visit.OpenGUI()
return
output_path = out_dir + '/' + args.output_prefix
print 'data_path', database_prefix + args.database_postfix
visit.OpenDatabase(database_prefix + args.database_postfix + "*.vtk database")
define_expressions(visit)
field_nulls = np.loadtxt(args.field_nulls)
field_nulls = np.roll(field_nulls, args.time_shift, axis=0)
time_points = np.arange(args.start_time_point, args.end_time_point)
#time_points = np.roll(time_points, args.time_shift, axis=0)
AnnotationAtts = setup_annotations(visit, time_scale=args.time_scale,
time_offset=0)
plot_count = 0
if args.current_plane:
PseudocolorAtts, SliceAtts = setup_current_pseudocolor(visit,
args.current_to_use,
max_val=args.current_max,
min_val=args.current_min)
plot_count += 1
if args.temperature_tubes:
setup_scalar_isosurface(visit, "Te_plane_normalized", colortable="PuRd")
plot_count += 1
if args.density_tubes:
setup_scalar_isosurface(visit, "n_plane_normalized", colortable="Greys")
plot_count += 1
if args.stationary_tube:
(points_outer, points_inner,
cut_point) = launch_points_inner_outer(field_nulls[0],
return_cut_point=True)
else:
(points_outer, points_inner,
cut_point) = launch_points_inner_outer(args.stationary_center,
return_cut_point=True)
if args.ion:
(StreamlineAtts_ion_outer,
StreamlineAtts_ion_inner) = setup_outer_inner_ion_canonical_flux_tubes(visit,
args.omega_to_use,
points_outer,
points_inner)
plot_count += 2
if args.electron:
stream_line_func = setup_massless_electron_canonical_flux_tubes
(StreamlineAtts_electron_outer,
StreamlineAtts_electron_inner) = setup_massless_electron_canonical_flux_tubes(visit,
points_outer,
points_inner)
plot_count += 2
if not args.ion:
cut_point[1] += 0.0005
FieldLineAtts = setup_field_line(visit, 'Omega_e_times_density',
launch_point=cut_point)
plot_count += 1
if args.velocity:
(velocity_stream_1,
velocity_stream_2) = setup_outer_inner_ion_canonical_flux_tubes(visit,
'u_i_plus',
points_inner,
points_outer,
outer_color=aqua,
inner_color=navy)
plot_count += 2
if args.view == 'default':
set_default_view_thesis(visit)
elif args.view == 'default_lower_angle':
set_default_view_lower_angle(visit)
elif args.view == 'positive_z':
        set_positive_z_view(visit)
elif args.view == 'negative_z':
set_negative_z_view(visit)
elif args.view == 'positive_x':
set_positive_x_view(visit)
set_default_lighting(visit)
setup_slider(visit)
if args.double_stream:
stream_launch_point = (field_nulls[args.start_time_point][0] + args.x_offset,
field_nulls[args.start_time_point][1] + args.y_offset)
setup_field_line(visit, args.double_stream,
launch_point=(stream_launch_point[0] + 0.001,
stream_launch_point[1] + 0.))
setup_field_line(visit, args.double_stream,
launch_point=(stream_launch_point[0] + 0.,
stream_launch_point[1] + 0.001))
setup_field_line(visit, args.double_stream,
launch_point=(stream_launch_point[0] - 0.001,
stream_launch_point[1] + 0.))
setup_field_line(visit, args.double_stream,
launch_point=(stream_launch_point[0] + 0.,
stream_launch_point[1] - 0.001))
setup_field_line(visit, args.double_stream,
launch_point=(stream_launch_point[0] + 0.005,
stream_launch_point[1] + 0.))
setup_field_line(visit, args.double_stream,
launch_point=(stream_launch_point[0] + 0.,
stream_launch_point[1] + 0.005))
setup_field_line(visit, args.double_stream,
launch_point=(stream_launch_point[0] - 0.005,
stream_launch_point[1] + 0.))
setup_field_line(visit, args.double_stream,
launch_point=(stream_launch_point[0] + 0.,
stream_launch_point[1] - 0.005))
setup_field_line(visit, args.double_stream,
launch_point=stream_launch_point)
setup_field_line(visit, 'B',
launch_point=stream_launch_point, color=red)
visit.DrawPlots()
save_atts = set_save_settings(visit)
ending = '.png'
visit.SetTimeSliderState(time_points[0])
visit.ResizeWindow(1, 960, 1000)
if args.wait_for_manual_settings:
visit.OpenGUI()
comment = raw_input()
for index, time_point in enumerate(time_points):
print time_point
plot_number = 0
save_atts.fileName = output_path + str(index + args.file_number_offset).zfill(4) + ending
visit.SetSaveWindowAttributes(save_atts)
if args.current_plane:
plot_number += 1
if args.temperature_tubes:
plot_number += 1
if args.density_tubes:
plot_number += 1
(points_outer,
points_inner,
cut_point) = launch_points_inner_outer(field_nulls[index],
return_cut_point=True)
if args.stationary_tube:
(points_outer,
points_inner,
cut_point) = launch_points_inner_outer(args.stationary_center,
return_cut_point=True)
if args.ion:
visit.SetActivePlots(plot_number)
StreamlineAtts_ion_outer.SetPointList(points_outer)
visit.SetPlotOptions(StreamlineAtts_ion_outer)
plot_number += 1
visit.SetActivePlots(plot_number)
StreamlineAtts_ion_inner.SetPointList(points_inner)
visit.SetPlotOptions(StreamlineAtts_ion_inner)
plot_number += 1
if args.electron:
visit.SetActivePlots(plot_number)
StreamlineAtts_electron_outer.SetPointList(points_outer)
visit.SetPlotOptions(StreamlineAtts_electron_outer)
plot_number += 1
visit.SetActivePlots(plot_number)
StreamlineAtts_electron_inner.SetPointList(points_inner)
visit.SetPlotOptions(StreamlineAtts_electron_inner)
plot_number +=1
if not args.ion:
cut_point[1] += 0.0005
visit.SetActivePlots(plot_number)
FieldLineAtts.SetPointSource(cut_point[0],
cut_point[1],
0.249)
visit.SetPlotOptions(FieldLineAtts)
plot_number += 1
if args.velocity:
visit.SetActivePlots(plot_number)
velocity_stream_1.SetPointList(points_outer)
visit.SetPlotOptions(velocity_stream_1)
plot_number +=1
visit.SetActivePlots(plot_number)
velocity_stream_2.SetPointList(points_inner)
visit.SetPlotOptions(velocity_stream_2)
plot_number +=1
visit.SetTimeSliderState(time_point)
name = visit.SaveWindow()
def parse_args():
parser = argparse.ArgumentParser(description="Generate time step plots of canonical flux tubes.")
parser.add_argument('--database_prefix', help='path to visit database i.e. vtk files',
default='/home/jensv/rsx/jens_analysis/output/canonical_quantities/')
parser.add_argument('--database_postfix', help='path to visit database i.e. vtk files',
default='/canonical_quantities')
parser.add_argument('database_date', help='date of data run YYYY-MM-DD-mm-ss')
parser.add_argument('--output_prefix', help='output_file_prefix',
default='canonical_flux_tubes_')
parser.add_argument('--current_min', help='minimum for current color map', default=-3.5e5)
parser.add_argument('--current_max', help='maximum for current color map', default=3.5e5)
parser.add_argument('--start_time_point', help='time point of first output frame', type=int, default=0)
parser.add_argument('--end_time_point', help='time point of last output frame', type=int, default=250)
parser.add_argument('--field_nulls', help='path to file listing field_nulls (launching centers)',
default='/home/jensv/rsx/jens_analysis/output/field_nulls/2017-05-05/averaged_nulls.txt')
parser.add_argument('--time_scale', help='time scale of time steps', default=0.068)
parser.add_argument('--current_plane', help='plot temperature contours',
action='store_true', default=False)
parser.add_argument('--temperature_tubes', help='plot temperature isosurfaces',
action='store_true', default=False)
parser.add_argument('--density_tubes', help='plot density isosurfaces',
action='store_true', default=False)
parser.add_argument('--electron', help='plot canonical electron flux tubes',
action='store_true', default=False)
parser.add_argument('--ion', help='plot canonical ion flux tubes', action='store_true', default=False)
parser.add_argument('--current',
help='plot thin current flux tube surrounded by electron / magnetic flux tube',
action='store_true', default=False)
parser.add_argument('--interactive_session', action='store_true', default=False)
parser.add_argument('--current_to_use', default='j_z')
parser.add_argument('--omega_to_use', default='Omega_i_raw_plus_times_density')
parser.add_argument('--view', help='pre-configured_views: default, default_lower_angle, positive_z, negative_z, positive_x', default='default')
parser.add_argument('--wait_for_manual_settings',
help='flag makes program wait for input before rendering time series.',
default=False, action='store_true')
parser.add_argument('--double_stream', help='plot canonical streamline and magnetic of given variable', default=None)
parser.add_argument('--x_offset', help='x offset of single streamline', default=0, type=int)
parser.add_argument('--y_offset', help='y offset of single streamline', default=0, type=int)
parser.add_argument('--file_number_offset', help='offset in file numbering', default=0, type=int)
parser.add_argument('--turn_off_density_start', help='time step at which to start turning off density cloud.', type=int, default=None)
parser.add_argument('--turn_off_density_end', help='time step at which to end turning off density cloud', type=int, default=None)
parser.add_argument('--velocity', action='store_true', default=False)
parser.add_argument('--stationary_tube', help="flag to hold flux tube launch point"
"stationary",
action='store_true', default=False)
parser.add_argument('--stationary_center',
help='launch_point of stationary tube',
nargs=2,
type=float,
default = [0, 0])
parser.add_argument('--time_shift',
help='shift gyration phase',
type=int, default=125)
args = parser.parse_args()
return args
if __name__ == '__main__':
main()
```
#### File: relative_canonical_helicity_tools/write_rsx_planes_to_vtk/idl_sav_to_vtk_unstructured.py
```python
import numpy as np
from pyvisfile.vtk import (write_structured_grid,
UnstructuredGrid,
DataArray,
AppendedDataXMLGenerator,
VTK_VERTEX, VF_LIST_OF_VECTORS,
VF_LIST_OF_COMPONENTS)
from pytools.obj_array import make_obj_array
from os.path import exists
import scipy.io.idl as idl
from scipy.interpolate import griddata
def plane_points_3d(data, z_position):
r"""
Returns 3d points from dictionary of a 2D scan of RSX.
"""
assert ('x_out' in data.keys() and
'y_out' in data.keys()), 'No x_out and y_out keys in data '
points_3d = np.dstack((data['x_out'],
data['y_out'],
np.ones(data['x_out'].size)*z_position))[0]
return points_3d
def write_scalar_data_to_vtk(file_name, time_point, z_position, labels,
data_dicts):
r"""
    Writes scalars to an unstructured grid VTK file.
"""
data_points = plane_points_3d(data_dicts[0], z_position)
if len(data_dicts) > 1:
msg = 'scalars not measured at same points'
for data_dict in data_dicts:
assert np.allclose(data_points,
plane_points_3d(data_dict, z_position)), msg
data = [(labels[i],
data_dict['a_out'][time_point]) for i, data_dict in enumerate(data_dicts)]
write_data_to_unstructured_vtk(file_name, data, data_points)
def write_data_to_unstructured_vtk(file_name, data, points):
r"""
Writes data to an unstructured grid VTK file.
    Data is a list of (name, values) pairs; the values can be scalar or 3d vectors.
"""
n_points = points.shape[0]
grid = UnstructuredGrid((n_points, DataArray("points",
points,
vector_format=VF_LIST_OF_VECTORS)),
cells=np.arange(n_points, dtype=np.uint32),
cell_types=np.asarray([VTK_VERTEX] * n_points,
dtype=np.uint8))
for name, field in data:
print 'number of nans', np.sum(np.isnan(field))
grid.add_pointdata(DataArray(name, field.astype('float64'),
vector_format=VF_LIST_OF_COMPONENTS))
if exists(file_name):
raise RuntimeError("output file '%s' already exists" % file_name)
outf = open(file_name, "w")
compressor = None
AppendedDataXMLGenerator(compressor)(grid).write(outf)
outf.close()
def write_vector_data_to_vtk(file_name, time_point, z_position, labels,
data_dicts):
r"""
Writes a vector to an unstructured grid VTK file.
"""
data_points_x = plane_points_3d(data_dicts[0], z_position)
data_points_y = plane_points_3d(data_dicts[1], z_position)
data_points_z = plane_points_3d(data_dicts[2], z_position)
    x_min = np.nanmin(np.concatenate((data_points_x[:, 0],
                                      data_points_y[:, 0],
                                      data_points_z[:, 0])))
    x_max = np.nanmax(np.concatenate((data_points_x[:, 0],
                                      data_points_y[:, 0],
                                      data_points_z[:, 0])))
    y_min = np.nanmin(np.concatenate((data_points_x[:, 1],
                                      data_points_y[:, 1],
                                      data_points_z[:, 1])))
    y_max = np.nanmax(np.concatenate((data_points_x[:, 1],
                                      data_points_y[:, 1],
                                      data_points_z[:, 1])))
    x_points = np.linspace(x_min, x_max, 100)
    y_points = np.linspace(y_min, y_max, 100)
    gridx, gridy = np.meshgrid(x_points, y_points)
    # 3D coordinates of the interpolation grid points on the measurement plane.
    data_points = np.dstack((gridx.ravel(),
                             gridy.ravel(),
                             np.ones(gridx.size) * z_position))[0]
interpolated_x = griddata(np.dstack((data_dicts[0]['x_out'],
data_dicts[0]['y_out']))[0],
data_dicts[0]['a_out'][time_point],
(gridx, gridy))
interpolated_y = griddata(np.dstack((data_dicts[1]['x_out'],
data_dicts[1]['y_out']))[0],
data_dicts[1]['a_out'][time_point],
(gridx, gridy))
interpolated_z = griddata(np.dstack((data_dicts[2]['x_out'],
data_dicts[2]['y_out']))[0],
data_dicts[2]['a_out'][time_point],
(gridx, gridy))
    interpolated_field = np.dstack((interpolated_x.ravel(),
                                    interpolated_y.ravel(),
                                    interpolated_z.ravel()))[0]
data = [(labels[0],
interpolated_field)]
write_data_to_unstructured_vtk(file_name, data, data_points)
```
#### File: relative_canonical_helicity_tools/write_to_vtk/maximum_extents.py
```python
def maximum_b_extent():
extent_0249_dict = {'x_min': -0.028, 'x_max': 0.025,
'y_min': -0.043, 'y_max': 0.039,
'z_min': 0.249, 'z_max': 0.249}
extent_0302_dict = {'x_min': -0.022, 'x_max': 0.021,
'y_min': -0.038, 'y_max': 0.04,
'z_min': 0.302, 'z_max': 0.302}
extent_0357_dict = {'x_min': -0.041, 'x_max': 0.030,
'y_min': -0.019, 'y_max': 0.0255,
'z_min': 0.357, 'z_max': 0.357}
extent_0416_dict = {'x_min': -0.044, 'x_max': 0.031,
'y_min': -0.022, 'y_max': 0.027,
'z_min': 0.416, 'z_max': 0.416}
extent_dicts = {0.249: extent_0249_dict,
0.302: extent_0302_dict,
0.357: extent_0357_dict,
0.416: extent_0416_dict}
return extent_dicts
def maximum_all_probe_extent(plane):
pass
```
#### File: relative_canonical_helicity_tools/write_to_vtk/reference_field.py
```python
import numpy as np
from invert_curl.invert_curl import devore_invert_curl
from laplace_solver.laplace_solver import laplace_3d_dct_fd
import vector_calculus.vector_calculus as vc
def determine_reference_fields(mesh, circulation,
return_scalar_ref=False):
r"""
Return reference fields used for relative helicity.
Reference fields consist of a circulation and the general momentum vector
of which the curl gives the circulation.
Parameters
----------
mesh: list of ndarray
3D mesh
circulation: list of ndarray
3D vector which is the curl quantity.
e.g. magnetic field B or flow vorticity omega.
Returns
-------
momentum_ref: list of ndarray
reference general momentum field e.g.
reference magnetic vector potential or
reference flow
circulation_ref: list of ndarray
curl of reference field e.g.
reference magnetic field, reference flow vorticity.
Notes
-----
Circulation reference dotted with surface normal should be
the negative of the real circulation dotted with the surface
normal.
    .. math::

        -\vec{Circ}_{ref} \cdot \hat{n} = \vec{Circ} \cdot \hat{n}
"""
boundary = make_boundary(circulation)
scalar_potential_ref = laplace_3d_dct_fd(mesh, boundary)
circulation_ref = vc.gradient(scalar_potential_ref, mesh=mesh)
momentum_ref = devore_invert_curl(mesh,
circulation_ref)
if return_scalar_ref:
return momentum_ref, circulation_ref, scalar_potential_ref
else:
return momentum_ref, circulation_ref
def make_boundary(field):
r"""
Return boundary conditions for circulation reference.
    .. math::

        -\vec{Circ}_{ref} \cdot \hat{n} = \vec{Circ} \cdot \hat{n}
"""
boundary = np.zeros(field[0].shape)
boundary[:, 0, :] = -field[0][:, 0, :]
boundary[:, -1, :] = -field[0][:, -1, :]
boundary[0, :, :] = -field[1][0, :, :]
boundary[-1, :, :] = -field[1][-1, :, :]
boundary[:, :, 0] = -field[2][:, :, 0]
boundary[:, :, -1] = -field[2][:, :, -1]
return boundary
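# Minimal usage sketch (assumes `mesh` is the 3D mesh and `b_field` a measured
# circulation such as the magnetic field; the names are illustrative only):
#
#     a_ref, b_ref = determine_reference_fields(mesh, b_field)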
```
|
{
"source": "jensweissflog/olervaningen",
"score": 3
}
|
#### File: app/models/brewfather.py
```python
from app import app
import requests
from datetime import datetime
import json
now=datetime.now().timestamp()*1000
def time_(timestamp):
return(datetime.fromtimestamp(timestamp/1000))
def get_request(url,params):
url=app.config['BREWFATHER_URL']+url
return requests.get(
url,
headers={
"Authorization": app.config['BREWFATHER_TOKEN'],
"Content-Type": "application/json",
},
params=params
).json()
def get_batches():
params={
"include": "recipe.fermentation,batchNotes",
"complete": "True",
"status": "Fermenting",
}
return(get_request("batches",params))
def get_target_temp(fridge):
batches=get_batches()
stepTemp=""
for b in batches:
try:
bnote=b["batchNotes"]
except KeyError:
bnote=""
if fridge in bnote and b["status"]=="Fermenting":
try:
steps=b["recipe"]["fermentation"]["steps"]
except KeyError:
print("No fermentation steps found")
steps=[]
for s in steps:
if now > s["actualTime"]:
stepTemp=s["stepTemp"]
else:
break
if stepTemp=="":
#set last temperature in list as target
stepTemp=steps[len(steps)-1]["stepTemp"]
if stepTemp != "":
return(stepTemp)
else:
return(app.config['DEFAULT_TEMP'])
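# Illustrative call: the string passed in must appear in the batch notes of a
# fermenting batch in Brewfather for that batch to be matched (the fridge name
# below is only an example):
#
#     target_temp = get_target_temp("fridge_1")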
```
|
{
"source": "jenswi-linaro/hafnium",
"score": 2
}
|
#### File: build/image/generate_initrd.py
```python
import argparse
import os
import shutil
import subprocess
import sys
def Main():
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file",
action="append", nargs=2,
metavar=("NAME", "PATH"),
help="File at host location PATH to be added to the RAM disk as NAME")
parser.add_argument("-s", "--staging", required=True)
parser.add_argument("-o", "--output", required=True)
args = parser.parse_args()
# Create staging folder if needed.
if not os.path.isdir(args.staging):
os.makedirs(args.staging)
# Copy files into the staging folder.
staged_files = []
for name, path in args.file:
shutil.copyfile(path, os.path.join(args.staging, name))
assert name not in staged_files
staged_files.append(name)
# Package files into an initial RAM disk.
with open(args.output, "w") as initrd:
# Move into the staging directory so the file names taken by cpio don't
# include the path.
os.chdir(args.staging)
cpio = subprocess.Popen(
["cpio", "--create"],
stdin=subprocess.PIPE,
stdout=initrd,
stderr=subprocess.PIPE)
cpio.communicate(input="\n".join(staged_files).encode("utf-8"))
return 0
if __name__ == "__main__":
sys.exit(Main())
```
#### File: hafnium/build/make.py
```python
import argparse
import os
import shutil
import subprocess
import sys
def Main():
parser = argparse.ArgumentParser()
parser.add_argument("--directory", required=True)
parser.add_argument("--copy_out_file", nargs=2,
help="Copy file after building. Takes two params: <src> <dest>")
args, make_args = parser.parse_known_args()
os.chdir(args.directory)
os.environ["PWD"] = args.directory
status = subprocess.call(["make"] + make_args)
if status != 0:
return status
if args.copy_out_file is not None:
shutil.copyfile(args.copy_out_file[0], args.copy_out_file[1])
return 0
if __name__ == "__main__":
sys.exit(Main())
```
|
{
"source": "Jental/find_answer",
"score": 3
}
|
#### File: Jental/find_answer/deptree.py
```python
import itertools
import urllib
import word2vec
# MSD: http://corpus.leeds.ac.uk/mocky/ru-table.tab
# Universal: http://universaldependencies.org/ru/pos/index.html
def convert_pos_MSD_to_Universal(pos):
if pos.startswith('A'):
return 'ADJ'
elif pos.startswith('C'):
return 'CCONJ'
elif pos.startswith('I'):
return 'INTJ'
elif pos.startswith('M'):
return 'NUM'
elif pos.startswith('Nc'):
return 'NOUN'
elif pos.startswith('Np'):
return 'PROPN'
elif pos.startswith('N'):
return 'NOUN'
    elif pos.startswith('SENT') or pos.startswith('PUNC'):
        # punctuation tags must be checked before the 'P' (pronoun) and
        # 'S' (preposition) prefixes, which would otherwise match them first
        return 'PUNCT'
    elif pos.startswith('P'):
        return 'PRON' # TODO: or DET
    elif pos.startswith('Q'):
        return 'PART'
    elif pos.startswith('R'):
        return 'ADV'
    elif pos.startswith('S'):
        return 'ADP'
    elif pos.startswith('V'):
        return 'VERB' # TODO: or AUX
    else:
        return 'X'
# ------------------
# get_dep_tree(sentence)
# ---
# Creates a word dependency tree from a sentence.
# Returns: deptree=(node, [deptree])
# Creates a deptree from the webservice response dictionary
def make_dep_tree(respDict, idx):
if idx == 0:
el = None
else:
el = respDict[idx]
children = [(k, respDict[k]) for k in respDict if int(respDict[k][6]) == idx]
childTrees = [ make_dep_tree(respDict, k) for (k, c) in children ]
return (el, childTrees)
def get_dep_tree(sentence):
url = 'http://deptree.jental.name/parse?' + urllib.parse.urlencode({'text': sentence})
respRaw = urllib.request.urlopen(url)
resp = respRaw.read()
respStr = resp.decode('utf-8')
respList = [ r[1:-1].split('\\t') for r in respStr[1:-1].split(',') ]
respDict = dict([(int(r[0]), r + [convert_pos_MSD_to_Universal(r[5])]) for r in respList])
(root, trees) = make_dep_tree(respDict, 0)
if len(trees) == 0:
print('No tree', sentence, trees)
return None
else:
return trees[0]
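# Illustrative shape of the tree returned by get_dep_tree (the exact token
# fields depend on the parser webservice response):
#
#     (root_token, [(child_token, [...]), ...])
#
# where each token is the tab-separated row returned by the service with its
# Universal POS tag appended by convert_pos_MSD_to_Universal.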
# ------------------
# filter_dep_tree(tree)
# ---
# Filters out invaluable parts of speech.
# Returns: deptree=(node, [deptree])
def filter_dep_tree(tree):
root, children = tree
posp = convert_pos_MSD_to_Universal(root[3])
if (posp == 'ADJ' or posp == 'NUM' or posp == 'NOUN' or posp == 'PROPN' or posp == 'ADV' or posp == 'VERB'):
res = [ (root, list(itertools.chain.from_iterable([ filter_dep_tree(c) for c in children ]))) ]
else:
cd = [ filter_dep_tree(c) for c in children ]
if len(cd) > 0:
res = list(itertools.chain.from_iterable(cd))
else:
res = []
return res
# ------------------
# filter_dep_tree(tree)
# ---
# Prints a word dependency tree
def print_dep_tree(tree):
def pdt(t, offset):
root, children = t
print(''.join([ ' ' for i in range(0, offset) ]), root[1], root[3])
for c in children:
pdt(c, offset + 1)
pdt(tree, 0)
```
#### File: Jental/find_answer/similarity.py
```python
import itertools
import scipy
import numpy as np
import util
import word2vec
import sentence2vec
import jsm
DEBUG = False
# ------------------
# sentence_similarity_samewords(sentence0, sentence1)
# ---
# Similarity based on number of identical words. Second element - if one sentence is part of another
# Returns: (double, bool)
def sentence_similarity_samewords(sentence0, sentence1):
words0 = list([ w for (w, prob, poss) in util.extract_nfs(sentence0) if len(w) > 3 ])
words1 = list([ w for (w, prob, poss) in util.extract_nfs(sentence1) if len(w) > 3 ])
if DEBUG:
print("sentence_similarity_samewords: words0: {0}".format(words0))
print("sentence_similarity_samewords: words1: {0}".format(words1))
if len(words0) <= 1 or len(words1) <= 1:
return 0.0, False
elif len(words0) <= 3 or len(words1) <= 3:
if set(words0) <= set(words1):
return len(words0) / len(words1), True
elif set(words1) <= set(words0):
return len(words1) / len(words0), True
else:
return 0.0, False
cnt = 0
for w0 in words0:
for w1 in words1:
if w0 == w1:
if DEBUG:
print("sentence_similarity_samewords: pair: {0}, {1}".format(w0, w1))
cnt = cnt + 1
return cnt / min(len(words0), len(words1)), set(words1) <= set(words0) or set(words0) <= set(words1)
# ------------------
# sentence_similarity_wordvectors(sentence0, sentence1)
# ---
# Similarity based on top vector-similar word pairs
# Returns: double
def words_similarity (words0, words1):
for word0 in words0:
for word1 in words1:
if len(word1) > 2:
try:
similarity = word2vec.w2v.similarity(word0, word1)
yield (word0, word1, similarity)
except Exception as err:
# print(err)
pass
def sentence_similarity_wordvectors(sentence0, sentence1):
words0 = list([ w for (w, prob, poss) in util.extract_nfs(sentence0)])
words1 = list([ w for (w, prob, poss) in util.extract_nfs(sentence1)])
if DEBUG:
print("sentence_similarity_wordvectors: words0: {0}".format(words0))
print("sentence_similarity_wordvectors: words1: {0}".format(words1))
pairs = words_similarity(words0, words1)
sortedPairs = sorted(pairs, key=lambda tup: tup[2], reverse=True)
addedPairs = []
for p in sortedPairs:
found = [pa for pa in addedPairs if pa[0] == p[0] or pa[1] == p[1]]
if len(found) == 0:
addedPairs.append(p)
simSum = 0.0
for p in addedPairs:
if DEBUG:
print("sentence_similarity_wordvectors: pair: {0}".format(p))
simSum += p[2]
# sum(sims in addedPairs) / len(addedPairs) * (2 * len(addedPairs)) / (len(words0) + len(words1)) = 2 * sum(sims in addedPairs) / (len(words0) + len(words1))
return simSum * 2.0 / ( len(words0) + len(words1) )
# return simSum * 2.0 / ( len(words0) + len(words1) ) + 0.5
# ------------------
# sentence_similarity_jsm(sentence0, sentence1, mode=0)
# ---
# Similarity based on top vector-similar word pairs. JSM generalization
# https://en.wikipedia.org/wiki/Jaccard_index
# mode: 0 - basic jsm; 1 - jsm with smaller union size; 2 - not jsm, but vec average
# Returns: double
def pair_similarity (words0, words1):
pairs0 = list(zip(words0, words0[1:]))
pairs1 = list(zip(words1, words1[1:]))
for w00, w01 in pairs0:
for w10, w11 in pairs1:
try:
vec00 = word2vec.w2v[w00]
vec01 = word2vec.w2v[w01]
vec10 = word2vec.w2v[w10]
vec11 = word2vec.w2v[w11]
vec0 = np.add(vec00, vec01)
vec1 = np.add(vec10, vec11)
similarity = 1 - scipy.spatial.distance.cosine(vec0, vec1)
yield ((w00, w01), (w10, w11), similarity)
except Exception as err:
print(err)
pass
def pair_similarity_allpairs (words0, words1):
pairs0 = list(itertools.combinations(words0, 2))
pairs1 = list(itertools.combinations(words1, 2))
for w00, w01 in pairs0:
for w10, w11 in pairs1:
try:
vec00 = word2vec.w2v[w00]
vec01 = word2vec.w2v[w01]
vec10 = word2vec.w2v[w10]
vec11 = word2vec.w2v[w11]
vec0 = np.add(vec00, vec01)
vec1 = np.add(vec10, vec11)
similarity = 1 - scipy.spatial.distance.cosine(vec0, vec1)
yield ((w00, w01), (w10, w11), similarity)
except Exception as err:
print(err)
pass
def sentence_similarity_jsm(sentence0, sentence1, mode=0):
words0 = list([ w for (w, prob, poss) in util.extract_nfs(sentence0)])
words1 = list([ w for (w, prob, poss) in util.extract_nfs(sentence1)])
if DEBUG:
print("sentence_similarity_jsm: words0: {0}".format(words0))
print("sentence_similarity_jsm: words1: {0}".format(words1))
pairs = words_similarity(words0, words1)
matrix = util.tranform_triples_to_matrix(pairs)
if mode == 0:
return jsm.basic(matrix)
elif mode == 1:
return jsm.smallerunion(matrix)
elif mode == 2:
return jsm.average(matrix)
else:
return jsm.basic(matrix)
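# Rough usage sketch (the sentences are placeholders); the jsm module imported
# above is assumed to expose basic(), smallerunion() and average(), selected by
# mode 0/1/2 as described in the header comment:
#
#     score = sentence_similarity_jsm('first sentence', 'second sentence', mode=1)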
def sentence_similarity_jsm_pairs(sentence0, sentence1, mode=0):
words0 = list([ w for (w, prob, poss) in util.extract_nfs(sentence0)])
words1 = list([ w for (w, prob, poss) in util.extract_nfs(sentence1)])
if DEBUG:
print("sentence_similarity_jsm: words0: {0}".format(words0))
print("sentence_similarity_jsm: words1: {0}".format(words1))
pairs = pair_similarity(words0, words1)
matrix = util.tranform_triples_to_matrix(pairs)
if mode == 0:
return jsm.basic(matrix)
elif mode == 1:
return jsm.smallerunion(matrix)
elif mode == 2:
return jsm.average(matrix)
else:
return jsm.basic(matrix)
def sentence_similarity_jsm_allpairs(sentence0, sentence1, mode=0):
words0 = list([ w for (w, prob, poss) in util.extract_nfs(sentence0)])
words1 = list([ w for (w, prob, poss) in util.extract_nfs(sentence1)])
if DEBUG:
print("sentence_similarity_jsm: words0: {0}".format(words0))
print("sentence_similarity_jsm: words1: {0}".format(words1))
pairs = pair_similarity_allpairs(words0, words1)
matrix = util.tranform_triples_to_matrix(pairs)
if mode == 0:
return jsm.basic(matrix)
elif mode == 1:
return jsm.smallerunion(matrix)
elif mode == 2:
return jsm.average(matrix)
else:
return jsm.basic(matrix)
# ------------------
# sentence_similarity_vec(sentence0, sentence1)
# ---
# Similarity based on vector sentence representation.
# Returns: double
def sentence_similarity_vec(sentence0, sentence1):
sv0 = sentence2vec.sentence2vec(sentence0)
sv1 = sentence2vec.sentence2vec(sentence1)
if sv0 is None or sv1 is None:
return 0.0
else:
return 1 - scipy.spatial.distance.cosine(sv0, sv1)
def sentence_similarity_vec2(sentence0, sentence1):
sv0 = sentence2vec.sentence2vec2(sentence0)
sv1 = sentence2vec.sentence2vec2(sentence1)
if sv0 is None or sv1 is None:
return 0.0
else:
return 1 - scipy.spatial.distance.cosine(sv0, sv1)
# -------
# sentence0 = 'ะะฐะนัะบะธะผ ัััะพะผ ะบะพัะพะฒะฐ ัะธะฟะฐะปะฐ ััะฐะฒั'
# sentences = [
# 'ะะฒัะฐ ัะฑะตะถะฐะปะฐ ะฒ ะปะตั',
# 'ะะตัะตั - ะฒัะตะผั ะฟะธัั ัะฐะน',
# 'ะกะผะพััะธั, ะบะฐะบ ะฑะฐัะฐะฝ ะฝะฐ ะฝะพะฒัะต ะฒะพัะพัะฐ',
# 'ะะฐะฝั ะฝะฐ ะฒะพัั
ะพะดะต ะตะปะฐ ะทะตะปะตะฝั'
# ]
# for sentence1 in sentences:
# print(sentence0, ' ;-; ', sentence1);
# print('similarity (words) :', sentence_similarity_samewords(sentence0, sentence1))[0]
# print('similarity (wordvec) :', sentence_similarity_wordvectors(sentence0, sentence1))
# print('similarity (jsm) :', sentence_similarity_jsm(sentence0, sentence1))
# print('similarity (jsm, su) :', sentence_similarity_jsm(sentence0, sentence1, mode=1))
# print('similarity (jsm, avg) :', sentence_similarity_jsm(sentence0, sentence1, mode=2))
# print('similarity (jsm, pairs) :', sentence_similarity_jsm_pairs(sentence0, sentence1))
# print('similarity (jsm, pairs, su) :', sentence_similarity_jsm_pairs(sentence0, sentence1, mode=1))
# print('similarity (jsm, pairs, avg) :', sentence_similarity_jsm_pairs(sentence0, sentence1, mode=2))
# print('similarity (jsm, allpairs) :', sentence_similarity_jsm_pairs(sentence0, sentence1))
# print('similarity (jsm, allpairs, su) :', sentence_similarity_jsm_pairs(sentence0, sentence1, mode=1))
# print('similarity (jsm, allpairs, avg):', sentence_similarity_jsm_pairs(sentence0, sentence1, mode=2))
# print('similarity (vec) :', sentence_similarity_vec(sentence0, sentence1))
# print('similarity (vec-2) :', sentence_similarity_vec2(sentence0, sentence1))
# words0 = list([ w for (w, prob, poss) in util.extract_nfs(sentence0) if len(w) > 3 ])
# words1 = list([ w for (w, prob, poss) in util.extract_nfs(sentences[2]) if len(w) > 3 ])
# pairs = pair_similarity_allpairs(words0, words1)
# for p in pairs:
# print(p)
```
#### File: Jental/find_answer/top.py
```python
import sys
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import RegexpTokenizer
import pymorphy2
import util
# stemmer = SnowballStemmer("russian")
morph = pymorphy2.MorphAnalyzer()
def substrings(s1, minSize):
return [s1[i:i+ms] for ms in range(minSize, len(s1) + 1) for i in range(len(s1) - ms + 1)]
def intersections(s1, s2, minSize):
ss1 = substrings(s1, minSize)
ss2 = substrings(s2, minSize)
iss = list(set(ss1) & set(ss2))
iss.sort(key=len, reverse=True)
res = []
for ss in iss:
found = [r for r in res if r.find(ss) >= 0]
if not found:
res.append(ss)
return res
# with open("unigrams.cyr.lc") as f:
# reader = csv.reader(f, delimiter='\t')
# for row in reader:
# parsed = morph.parse(row[0])
# if len(parsed) > 0:
# nf = parsed[0].normal_form
# iss = intersections(nf, norm, 5)
# if len(iss) > 0:
# print(row[0], nf, iss)
print(util.extract_keywords(sys.argv[1]))
```
#### File: Jental/find_answer/util.py
```python
import itertools
import csv
import re
import pymorphy2
from nltk.tokenize import RegexpTokenizer
import numpy as np
print("Initializing pymorphy2")
morph = pymorphy2.MorphAnalyzer()
print("Initializing pymorphy2. Done")
print("Initializing tokenizer")
tokenizer = RegexpTokenizer(r'\w+')
print("Initializing tokenizer. Done")
# ------------------
# get_normal_forms(word)
# ---
# Returns the normal forms of a word with their probabilities
# Returns: [(NF, prob, [POS])]
def get_normal_forms(word):
normalForms = {}
parsed = morph.parse(word)
for p in parsed:
if p.normal_form in normalForms:
prob, pos = normalForms[p.normal_form]
normalForms[p.normal_form] = (prob + p.score, pos + [ p.tag.POS ])
else:
normalForms[p.normal_form] = (p.score, [ p.tag.POS ])
    # sort so that the most probable normal form comes first
    return sorted([ (k, v, poss) for k, (v, poss) in normalForms.items() ], key=lambda tup: tup[1], reverse=True)
# ------------------
# get_normal_form(word)
# ---
# Returns the normal form of a word (or the original word if no NF is found)
# Returns: (NF, prob, [POS]) or None
def get_normal_form(word):
lword = word.lower().strip()
if len(word) > 2:
try:
nfs = get_normal_forms(lword)
nf, score, poss = nfs[0]
if (score <= 0.5):
return (lword, 0.0, poss)
else:
return (nf, score, poss)
except:
return None
else:
return None
# ------------------
# extract_nfs(sent)
# ---
# Retrieves list of NFs from a sentence
def extract_nfs(sentence):
for w in tokenizer.tokenize(sentence):
w2 = get_normal_form(w)
if w2 is not None:
yield w2
# ------------------
# extract_keywords(sentence)
# ---
# Extracts key sentence words based on their rarity
NORMAL_FORM_THRESHOLD = 0.5
FREQUENCY_THRESHOLDS = [ 8, 64, 256, 512, 1024, 2048 ] # of million
frequencyPairs = list(zip([0] + FREQUENCY_THRESHOLDS, FREQUENCY_THRESHOLDS))
with open('normal_forms.lc') as f:
reader = list(csv.reader(f, delimiter='\t'))
freqWords = [ (thre, [ row[0] for row in reader if int(row[1]) < thre and int(row[1]) >= thrb ]) for (thrb, thre) in frequencyPairs ]
def extract_keywords(sentence):
words = tokenizer.tokenize(sentence);
words_nf_h = [ [ nf for (nf, score, poss) in get_normal_forms(word) if score >= NORMAL_FORM_THRESHOLD ] for word in words ]
words_nf = list(itertools.chain(*words_nf_h))
freqSentenceNFs = [ (freq, [ word for word in words_nf if word in group ]) for (freq, group) in freqWords ]
return [ group for (freq, group) in freqSentenceNFs]
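# Illustrative result: one list of normal forms per frequency band defined in
# FREQUENCY_THRESHOLDS, rarest band first (the words shown are placeholders):
#
#     extract_keywords(sentence) -> [['rare_word'], [], ['common_word'], [], [], []]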
# ------------------
# tranform_triples_to_matrix(triples)
# ---
# Transforms [(<key>, <key>, <value>)] to a numpy matrix
def tranform_triples_to_matrix(triples):
return np.matrix([[v[2] for v in group] for key, group in itertools.groupby(triples, key=lambda e: e[0])])
def apply_text_fixes(text):
text0 = text.replace('\xa0', ' ').replace('โฆ', '.').replace('ยซ', '"').replace('ยป', '"').replace('\'', '').replace('..', '.')
text1 = re.sub(r'\[\d+\]', '', text0.replace('ะฐฬ', 'ะฐ').replace('ัฬ', 'ั').replace('ั', 'ะต').replace('ั', 'ะต'))
return text1
def strip_syntax(sent):
words = [ nf for (nf, score, poss) in extract_nfs(sent) ]
res = ' '.join(words)
return res
```
|
{
"source": "jentenma/pfsimg-cfg",
"score": 2
}
|
#### File: aptype/sfv2/sysdiag.py
```python
import json
import pprint
import telnetlib
import logging
import os
import errno
import atexit
import subprocess
class SysDiagBase(object):
def __init__(self, svrcfg, setup_info, logger ):
self.svrcfg = svrcfg
self.setup_info = setup_info
self.logger = logger
#print ("name = %s"% (__name__, ))
def testNetwork(self, system):
self.logger.info('testNetwork')
return 0
def testMemory(self,system):
self.logger.info('testMemory')
return 0
def testCpu(self,system):
self.logger.info('testCpu')
return 0
def testStorage(self,system):
self.logger.info('testStorage')
return 0
def __str__(self):
return "svrcfg type is %s" % (self.svrcfg)
```
|
{
"source": "jenterkin/selenium-page-elements",
"score": 2
}
|
#### File: tests/fixtures/driver.py
```python
import pytest
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
@pytest.fixture(scope='session')
def driver():
driver = webdriver.Remote(
command_executor='http://localhost:4444/wd/hub',
desired_capabilities=DesiredCapabilities.CHROME)
driver.get('file:///Users/jenterkin/repos/page_elements/tests/index.html')
yield driver
driver.close()
```
|
{
"source": "jen-thomas/wildlife-observations",
"score": 2
}
|
#### File: WildlifeObservations/observations/admin.py
```python
from django.contrib import admin
from django import forms
from . import models
# Register your models here.
from .models import VegetationStructure, Survey
class SourceAdmin(admin.ModelAdmin):
list_display = ('name',)
ordering = ('name',)
search_fields = ('name',)
class SiteAdmin(admin.ModelAdmin):
list_display = (
'area', 'site_name', 'altitude_band', 'latitude_start', 'longitude_start', 'altitude_start',
'gps_aspect_start', 'latitude_end', 'longitude_end', 'altitude_end', 'gps_aspect_end',
'transect_length',)
ordering = (
'site_name', 'altitude_band', 'altitude_start', 'altitude_end', 'gps_aspect_start', 'gps_aspect_end',
'transect_length',)
search_fields = ('area', 'site_name', 'altitude_band', 'transect_length',)
class VisitAdmin(admin.ModelAdmin):
list_display = ('site', 'date',)
ordering = ('site', 'date',)
search_fields = ('site__site_name', 'date',)
class SurveyForm(forms.ModelForm):
class Meta:
model = Survey
fields = "__all__"
def clean(self):
if self.cleaned_data['repeat'] == Survey.Repeat.TWO:
exists_repeat1 = Survey.objects. \
filter(visit=self.cleaned_data['visit']). \
filter(method=self.cleaned_data['method']). \
filter(repeat=Survey.Repeat.ONE). \
exists()
if not exists_repeat1:
raise forms.ValidationError("Check there is an earlier repeat using this survey method")
class SurveyAdmin(admin.ModelAdmin):
form = SurveyForm
list_display = ('visit', 'start_time', 'end_time', 'method', 'repeat', 'observer',)
ordering = ('visit', 'start_time', 'end_time', 'method', 'repeat',)
search_fields = ('visit__site__site_name', 'start_time', 'end_time', 'method', 'observer',)
class ObservationAdmin(admin.ModelAdmin):
list_display = (
'specimen_label', 'survey', 'status', 'length_head_abdomen', 'length_head_tegmina', 'original_preservation',
'current_preservation', 'notes',)
ordering = ('specimen_label', 'survey', 'status', 'original_preservation', 'current_preservation',)
search_fields = ('specimen_label', 'survey__visit__site__site_name', 'status', 'length_head_abdomen',)
class PhotographAdmin(admin.ModelAdmin):
list_display = ('filepath', 'observation_list', 'created_on')
ordering = ('filepath', 'observation', 'created_on',)
search_fields = ('filepath', 'observation__specimen_label', 'created_on',)
filter_horizontal = ('observation',)
def observation_list(self, obj):
return ', '.join([str(observation) for observation in obj.observation.all()])
class IdentificationAdmin(admin.ModelAdmin):
list_display = (
'observation', 'species', 'genus', 'subfamily', 'family', 'suborder', 'specimen_status', 'identification_guide',
'sex',
'stage', 'confidence', 'confidence_reason', 'notebook', 'date_of_identification', 'comments',)
ordering = (
'observation', 'species', 'genus', 'subfamily', 'family', 'suborder', 'identification_guide', 'sex', 'stage',
'confidence', 'confidence_reason',)
search_fields = (
'observation__specimen_label', 'species__latin_name', 'genus__genus', 'subfamily__subfamily', 'family__family',
'suborder__suborder',
'identification_guide__title', 'sex', 'stage', 'confidence', 'confidence_reason',)
raw_id_fields = ('observation',)
def specimen_status(self, obj):
return "{}".format(obj.observation.status)
class TaxonomyClassAdmin(admin.ModelAdmin):
list_display = ('taxclass',)
ordering = ('taxclass',)
search_fields = ('taxclass',)
class TaxonomyOrderAdmin(admin.ModelAdmin):
list_display = ('taxclass', 'order',)
ordering = ('taxclass', 'order',)
search_fields = ('taxclass__taxclass', 'order',)
class TaxonomySuborderAdmin(admin.ModelAdmin):
list_display = ('order', 'suborder',)
ordering = ('order', 'suborder',)
search_fields = ('order__order', 'suborder',)
class TaxonomyFamilyAdmin(admin.ModelAdmin):
list_display = ('suborder', 'family',)
ordering = ('suborder', 'family',)
search_fields = ('suborder__suborder', 'family',)
class TaxonomySubfamilyAdmin(admin.ModelAdmin):
list_display = ('family', 'subfamily',)
ordering = ('family', 'subfamily',)
search_fields = ('family__family', 'subfamily')
class TaxonomyGenusAdmin(admin.ModelAdmin):
list_display = ('subfamily', 'genus',)
ordering = ('subfamily', 'genus',)
search_fields = ('subfamily__subfamily', 'genus',)
class TaxonomySpeciesAdmin(admin.ModelAdmin):
list_display = ('genus', 'latin_name', 'common_name_english', 'common_name_catalan', 'common_name_spanish',)
ordering = ('genus', 'latin_name', 'common_name_english', 'common_name_catalan', 'common_name_spanish',)
search_fields = (
'genus__genus', 'latin_name', 'common_name_english', 'common_name_catalan', 'common_name_spanish',)
class IdentificationGuideAdmin(admin.ModelAdmin):
list_display = ('author', 'title',)
ordering = ('author', 'title',)
search_fields = ('author', 'title',)
class MeteorologyConditionsAdmin(admin.ModelAdmin):
list_display = (
'survey', 'cloud_coverage_start', 'wind_start', 'rain_start', 'cloud_coverage_end', 'wind_end', 'rain_end',
'notes',)
ordering = (
'survey', 'cloud_coverage_start', 'wind_start', 'rain_start', 'cloud_coverage_end', 'wind_end', 'rain_end',)
search_fields = (
'survey__visit', 'cloud_coverage_start', 'wind_start', 'rain_start', 'cloud_coverage_end', 'wind_end',
'rain_end',)
class PlotAdmin(admin.ModelAdmin):
list_display = ('visit', 'position',)
ordering = ('visit', 'position',)
search_fields = ('visit__visit', 'position',)
class VegetationStructureForm(forms.ModelForm):
class Meta:
model = VegetationStructure
fields = "__all__"
def clean(self):
if self.cleaned_data['percentage_rock'] + self.cleaned_data['percentage_bare_ground'] + self.cleaned_data[
'percentage_vegetation_cover'] != 100:
raise forms.ValidationError("Ground cover percentages do not add up to 100")
return self.cleaned_data
class VegetationStructureAdmin(admin.ModelAdmin):
form = VegetationStructureForm
list_display = (
'plot', 'percentage_vegetation_cover', 'percentage_bare_ground', 'percentage_rock', 'height_75percent',
'max_height', 'density_01', 'density_02', 'density_03', 'density_04', 'density_05',)
ordering = ('plot', 'percentage_vegetation_cover', 'percentage_bare_ground', 'percentage_rock', 'height_75percent',
'max_height', 'density_01', 'density_02', 'density_03', 'density_04', 'density_05',)
search_fields = (
'plot__plot', 'percentage_vegetation_cover', 'percentage_bare_ground', 'percentage_rock', 'height_75percent',
'max_height', 'density_01', 'density_02', 'density_03', 'density_04', 'density_05',)
admin.site.register(models.Source, SourceAdmin)
admin.site.register(models.Site, SiteAdmin)
admin.site.register(models.Visit, VisitAdmin)
admin.site.register(models.Observation, ObservationAdmin)
admin.site.register(models.Photograph, PhotographAdmin)
admin.site.register(models.Identification, IdentificationAdmin)
admin.site.register(models.TaxonomyClass, TaxonomyClassAdmin)
admin.site.register(models.TaxonomyOrder, TaxonomyOrderAdmin)
admin.site.register(models.TaxonomySuborder, TaxonomySuborderAdmin)
admin.site.register(models.TaxonomyFamily, TaxonomyFamilyAdmin)
admin.site.register(models.TaxonomySubfamily, TaxonomySubfamilyAdmin)
admin.site.register(models.TaxonomyGenus, TaxonomyGenusAdmin)
admin.site.register(models.TaxonomySpecies, TaxonomySpeciesAdmin)
admin.site.register(models.IdentificationGuide, IdentificationGuideAdmin)
admin.site.register(models.MeteorologyConditions, MeteorologyConditionsAdmin)
admin.site.register(models.Plot, PlotAdmin)
admin.site.register(models.VegetationStructure, VegetationStructureAdmin)
admin.site.register(models.Survey, SurveyAdmin)
```
#### File: WildlifeObservations/observations/data_integrity_checks.py
```python
from django.db.models import Q, Count
from .models import Identification, Observation, Survey, MeteorologyConditions
class IdentificationDataChecks:
def __init__(self):
pass
def check_identification_has_sex_adults_only(self):
"""
Returns list of dictionaries of the identifications that do not have a sex, only for the specimens that have
been noted as adults.
Note that whilst it might not be possible to determine the sex of an observation if it is not an adult,
these identifications should still have sex=UNKNOWN, therefore this query considers all identifications,
not just the adults.
e.g. [{"specimen_label": TOR08 20211005 H1 C001},
{"specimen_label": TAV09 20211006 N1 C008}]
"""
identifications = Identification.objects.filter(stage=Identification.Stage.ADULT, sex__isnull=True)
identifications_missing_sex = []
for identification in identifications:
identifications_missing_sex.append({"specimen_label": identification.observation.specimen_label})
return identifications_missing_sex
def check_identification_has_sex(self):
"""
Returns list of dictionaries of the identifications that do not have a sex.
Note that whilst it might not be possible to determine the sex of an observation if it is not an adult,
these identifications should still have sex=UNKNOWN, therefore this query considers all identifications,
not just the adults.
e.g. [{"specimen_label": TOR08 20211005 H1 C001},
{"specimen_label": TAV09 20211006 N1 C008}]
"""
identifications = Identification.objects.filter(sex__isnull=True)
identifications_missing_sex = []
for identification in identifications:
identifications_missing_sex.append({"specimen_label": identification.observation.specimen_label})
return identifications_missing_sex
def check_identification_has_stage(self):
"""
Returns list of dictionaries of the identifications that do not have a stage.
e.g. [{"specimen_label": TOR08 20211005 H1 C001},
{"specimen_label": TAV09 20211006 N1 C008}]
"""
identifications = Identification.objects.filter(stage__isnull=True)
identifications_missing_stage = []
for identification in identifications:
identifications_missing_stage.append({"specimen_label": identification.observation.specimen_label})
return identifications_missing_stage
def check_identification_has_confidence(self):
"""
Returns list of dictionaries of the identifications that do not have a confidence.
e.g. [{"specimen_label": TOR08 20211005 H1 C001},
{"specimen_label": TAV09 20211006 N1 C008}]
"""
identifications = Identification.objects.filter(confidence__isnull=True)
identifications_missing_confidence = []
for identification in identifications:
identifications_missing_confidence.append({"specimen_label": identification.observation.specimen_label})
return identifications_missing_confidence
def find_observations_without_identification(self):
"""
Returns a set of the observations that do not have any identifications.
"""
observations = Observation.objects.all().values_list('specimen_label', flat=True)
identifications = Identification.objects.all().values_list('observation__specimen_label', flat=True)
observations_set = set()
for observation in observations:
observation: Observation
observations_set.add(observation)
identifications_set = set()
for identification in identifications:
identification: Identification
identifications_set.add(identification) # creating a set of the identifications, deals with the duplicates
# get all of the observations that do not have the specimen label in the identifications
observations_without_identifications = observations_set - identifications_set
return observations_without_identifications
    def add_identifications_to_set(self, identification_set, qs):
        """
        Add identifications from a queryset into a set. Return the set.
        """
        for identification in qs:
            identification_set.add(identification)
        return identification_set
def find_observations_without_confirmed_or_finalised_identification(self):
"""
Returns a set of the observations that do not have any identifications that have a confidence
that is confirmed or finalised. This query will only consider observations that have at least one
identification.
"""
all_identifications = Identification.objects.all().values_list('observation__specimen_label', flat=True)
finalised_and_confirmed_identifications = self.get_all_finalised_and_confirmed_identifications()
finalised_and_confirmed_identifications_qs = finalised_and_confirmed_identifications.values_list(
'observation__specimen_label', flat=True)
all_identifications_set = set()
finalised_and_confirmed_identifications_set = set()
all_identifications_set = self.add_identifications_to_set(all_identifications_set, all_identifications)
finalised_and_confirmed_identifications_set = self.add_identifications_to_set(
finalised_and_confirmed_identifications_set,
finalised_and_confirmed_identifications_qs)
observations_without_confirmation_or_finalisation = \
all_identifications_set - finalised_and_confirmed_identifications_set # as this is
# reduced to distinct specimen labels, these are equivalent to the observations
return observations_without_confirmation_or_finalisation
def check_finalised_confirmed_identifications_sex(self):
"""
Returns a set of identifications that have confirmed or finalised identifications but the sex in these confirmed
or finalised identifications differs.
"""
finalised_and_confirmed_identifications = self.get_all_finalised_and_confirmed_identifications()
finalised_and_confirmed_identifications_qs = finalised_and_confirmed_identifications.values_list(
'observation__specimen_label', flat=True)
finalised_confirmed_identifications_different_sex = set()
for finalised_confirmed_identification in finalised_and_confirmed_identifications_qs:
distinct_sexes = finalised_and_confirmed_identifications.filter(
observation__specimen_label=finalised_confirmed_identification).values_list('sex').distinct()
if len(distinct_sexes) > 1:
finalised_confirmed_identifications_different_sex.add(finalised_confirmed_identification)
return finalised_confirmed_identifications_different_sex
def check_finalised_confirmed_identifications_stage(self):
"""
Returns a set of identifications that have confirmed or finalised identifications but the stage in these
confirmed or finalised identifications differs.
"""
finalised_and_confirmed_identifications = self.get_all_finalised_and_confirmed_identifications()
finalised_and_confirmed_identifications_qs = finalised_and_confirmed_identifications.values_list(
'observation__specimen_label', flat=True)
finalised_and_confirmed_identifications_different_stage = set()
for finalised_confirmed_identification in finalised_and_confirmed_identifications_qs:
distinct_stages = finalised_and_confirmed_identifications.filter(
observation__specimen_label=finalised_confirmed_identification).values_list('stage').distinct()
if len(distinct_stages) > 1:
finalised_and_confirmed_identifications_different_stage.add(finalised_confirmed_identification)
return finalised_and_confirmed_identifications_different_stage
def identification_inconsistency(self, identification1, identification2):
"""
        Compares two identifications at each level of taxonomy (species, genus, subfamily, family, suborder).
        Returns a dictionary with the observation specimen label and the first field that is inconsistent,
        or None if the identifications agree at every level.
A non-empty field and null are considered to be inconsistent for the purposes of this function.
"""
inconsistent_identification = {}
for field in ['species', 'genus', 'subfamily', 'family', 'suborder']:
if getattr(identification1, field) != getattr(identification2, field):
inconsistent_identification['specimen_label'] = identification1.observation.specimen_label
inconsistent_identification['field'] = field
return inconsistent_identification
return None
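    # Illustrative return value when two identifications disagree at, for example,
    # the genus level (the specimen label format follows the docstring examples above):
    #
    #     {'specimen_label': 'TOR08 20211005 H1 C001', 'field': 'genus'}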
def get_qs_confirmed_identifications(self):
"""
Returns a queryset of all confirmed identifications.
"""
confirmed_identifications = Identification.objects.filter(confidence=Identification.Confidence.CONFIRMED)
return confirmed_identifications
def get_qs_finalised_identifications(self):
"""
Returns a queryset of all finalised identifications.
"""
finalised_identifications = Identification.objects.filter(confidence=Identification.Confidence.FINALISED)
return finalised_identifications
def get_all_finalised_and_confirmed_identifications(self):
"""
Returns a queryset of all finalised and confirmed identifications.
"""
finalised_and_confirmed_identifications = self.get_qs_finalised_identifications() | self.get_qs_confirmed_identifications()
return finalised_and_confirmed_identifications
def get_confirmed_identifications_to_check_taxonomy(self, confirmed_identifications):
"""
        Returns a list of querysets, one per observation that has more than one confirmed identification.
        These are the identifications that need to be compared to check whether their taxonomy is consistent.
"""
multiple_confirmed_identifications_for_observation = confirmed_identifications.values(
'observation__specimen_label').annotate(number_ids=Count('observation__specimen_label')).filter(
number_ids__gt=1)
identifications_to_check = []
for observation in multiple_confirmed_identifications_for_observation:
identifications_to_check.append(confirmed_identifications.filter(
observation__specimen_label=observation['observation__specimen_label']))
return identifications_to_check
def check_confirmed_identifications_taxonomy(self):
"""
Returns a list of dictionaries of the specimen labels which have inconsistent confirmed identifications.
"""
confirmed_identifications = self.get_qs_confirmed_identifications()
identifications_to_check = self.get_confirmed_identifications_to_check_taxonomy(confirmed_identifications)
inconsistent_identifications = []
for qs_of_identifications in identifications_to_check:
inconsistent_identification = self.identification_inconsistency(qs_of_identifications[0],
qs_of_identifications[1])
if inconsistent_identification != None:
inconsistent_identifications.append(inconsistent_identification)
return inconsistent_identifications
def observations_with_confirmed_and_finalised_identifications(self):
"""
Return a list of observations which have both confirmed and finalised identifications.
These can then be sorted through manually to ensure that there are none with this case, thereby ensuring that
observations do not have conflicting identifications and are not used incorrectly in the analysis.
"""
confirmed_identifications_qs = self.get_qs_confirmed_identifications()
finalised_identifications_qs = self.get_qs_finalised_identifications()
observations_with_confirmed_and_finalised_identifications = confirmed_identifications_qs.intersection(
finalised_identifications_qs).values_list('observation__specimen_label')
return observations_with_confirmed_and_finalised_identifications
class SurveyDataChecks:
def __init__(self):
pass
def find_surveys_without_met_conditions(self):
"""
Returns a set of surveys for which there is no meteorological data.
"""
surveys = Survey.objects.all().values_list('visit__site__site_name', 'visit__date', 'method', 'repeat')
met_conditions = MeteorologyConditions.objects.all().values_list('survey__visit__site__site_name',
'survey__visit__date', 'survey__method',
'survey__repeat')
surveys_set = set()
for survey in surveys:
survey: Survey
surveys_set.add(survey)
met_conditions_surveys_set = set()
for surveyed_met_conditions in met_conditions:
surveyed_met_conditions: MeteorologyConditions
met_conditions_surveys_set.add(surveyed_met_conditions)
# get all of the surveys that do not have meteorological condition data
surveys_without_met_conditions = surveys_set - met_conditions_surveys_set
return surveys_without_met_conditions
class ObservationDataChecks:
def __init__(self):
pass
def find_observations_without_suborder(self):
"""
Get all observations that do not yet have a suborder. Return a set.
"""
all_identifications_with_suborder = Identification.objects.filter(suborder__isnull=False).values(
'observation__specimen_label')
all_observations = Observation.objects.all().values('specimen_label')
all_identifications_with_suborder_set = set()
for identification in all_identifications_with_suborder:
all_identifications_with_suborder_set.add(identification['observation__specimen_label'])
all_observations_set = set()
for observation in all_observations:
all_observations_set.add(observation['specimen_label'])
observations_without_suborder = all_observations_set - all_identifications_with_suborder_set
return observations_without_suborder
```
#### File: management/commands/data_checks_finalised_identifications.py
```python
from django.core.management.base import BaseCommand
from ...data_integrity_checks import IdentificationDataChecks
class Command(BaseCommand):
help = 'Check finalised identification data.'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
identification_checks = IdentificationDataChecks()
```
#### File: management/commands/data_checks_identifications.py
```python
from django.core.management.base import BaseCommand
from ...data_integrity_checks import IdentificationDataChecks
class Command(BaseCommand):
help = 'Check identification data.'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
identification_checks = IdentificationDataChecks()
print("***** Identifications without a sex (adults only) *****")
print(len(identification_checks.check_identification_has_sex_adults_only()), "results:\n")
for identification in identification_checks.check_identification_has_sex_adults_only():
print(identification['specimen_label'])
print("\n***** Identifications without a sex (all stages) *****")
print(len(identification_checks.check_identification_has_sex()), "results:\n")
for identification in identification_checks.check_identification_has_sex():
print(identification['specimen_label'])
print("\n***** Identifications without a stage *****")
print(len(identification_checks.check_identification_has_stage()), "results:\n")
for identification in identification_checks.check_identification_has_stage():
print(identification['specimen_label'])
print("\n***** Identifications without a confidence *****")
print(len(identification_checks.check_identification_has_confidence()), "results:\n")
for identification in identification_checks.check_identification_has_confidence():
print(identification['specimen_label'])
print("\n***** Observations without an identification *****")
print(len(identification_checks.find_observations_without_identification()), "results:\n")
for observation in identification_checks.find_observations_without_identification():
print(observation)
print("\n***** Observations without a confirmed or finalised identification *****")
print(len(identification_checks.find_observations_without_confirmed_or_finalised_identification()), "results:\n")
for observation in identification_checks.find_observations_without_confirmed_or_finalised_identification():
print(observation)
print("\n***** Finalised/confirmed identifications with different sexes *****")
print(len(identification_checks.check_finalised_confirmed_identifications_sex()), "results:\n")
for identification in identification_checks.check_finalised_confirmed_identifications_sex():
print(identification)
print("\n***** Finalised/confirmed identifications with different stages *****")
print(len(identification_checks.check_finalised_confirmed_identifications_stage()), "results:\n")
for identification in identification_checks.check_finalised_confirmed_identifications_stage():
print(identification)
print("\n***** Confirmed identifications with different taxonomy *****")
print(len(identification_checks.check_confirmed_identifications_taxonomy()), "results:\n")
for identification in identification_checks.check_confirmed_identifications_taxonomy():
print(identification)
print("\n***** Observations with confirmed and finalised identifications *****")
print(len(identification_checks.observations_with_confirmed_and_finalised_identifications()), "results:\n")
for identification in identification_checks.observations_with_confirmed_and_finalised_identifications():
print(identification)
```
#### File: management/commands/data_checks_observations.py
```python
from django.core.management.base import BaseCommand
from ...data_integrity_checks import ObservationDataChecks
class Command(BaseCommand):
help = 'Check observation data.'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
observation_checks = ObservationDataChecks()
print("***** Observations without a suborder *****")
# print(len(observation_checks.find_observations_without_suborder()), "results:\n")
for observation in observation_checks.find_observations_without_suborder():
print(observation)
```
#### File: management/commands/export_observations_csv.py
```python
import argparse
import csv
from django.core.management.base import BaseCommand
from ...models import Identification
from ...utils import field_or_empty_string
header_observations = ['specimen_label', 'site_name', 'date_cest', 'method', 'repeat', 'sex', 'stage', 'id_confidence',
'suborder', 'family', 'subfamily', 'genus', 'species']
def get_row_for_identification(identification):
row = {}
row['specimen_label'] = identification.observation.specimen_label
print(row['specimen_label'])
row['site_name'] = identification.observation.survey.visit.site.site_name
row['date_cest'] = identification.observation.survey.visit.date
row['method'] = identification.observation.survey.method
row['repeat'] = identification.observation.survey.repeat
row['sex'] = identification.sex # shouldn't be null
row['stage'] = identification.stage # shouldn't be null
row['id_confidence'] = identification.confidence # shouldn't be null
row['suborder'] = identification.suborder.suborder # shouldn't be null
row['family'] = field_or_empty_string(identification.family, 'family') # can be null if the identification cannot
# be determined to this taxonomic level
row['subfamily'] = field_or_empty_string(identification.subfamily,
'subfamily') # can be null if the identification cannot be determined to
# this taxonomic level
row['genus'] = field_or_empty_string(identification.genus, 'genus') # can be null if the identification cannot be
# determined to this taxonomic level
row['species'] = field_or_empty_string(identification.species,
'latin_name') # can be null if the identification cannot be determined to
# this taxonomic level
return row
def export_csv(output_file, practice_sites):
"""
Export data from a query into a CSV file which has a specified output file.
Using an ORM query, get some data from the database and export specified fields into a CSV file which uses a set
of headers.
If all observations have been identified, then the export of observations and identifications can consider just the
confirmed and finalised identifications.
- Observations should not have both confirmed and finalised identifications. Such cases should be caught
by the data integrity checks.
- Observations that have not yet been identified should be caught by the data integrity checks. These will
not be exported.
- Observations whose identification has not been confirmed or finalised should also be caught by the data
integrity checks. These will not be exported.
- Where there is more than one confirmed identification for a particular observation, only one should be selected
for the output. Where there is more than one, the data integrity checks will ensure the confirmed identifications
are for the same taxa.
- Where an observation has finalised identifications, data integrity checks will ensure there are at least two.
All finalised identifications for an observation will be exported.
Observations from 'practice' sites are excluded from the export. These were sites that were only visited once
during the surveys and were not appropriate for visiting again.
"""
headers = header_observations
csv_writer = csv.DictWriter(output_file, headers)
csv_writer.writeheader()
# There must only be one identification exported for each observation, where the observation has a confirmed
# identification. Note that this can be to any taxonomic level.
confirmed_identifications = Identification.objects.exclude(
observation__survey__visit__site__site_name__in=practice_sites).filter(
confidence=Identification.Confidence.CONFIRMED)
# Creating a set of the specimen labels ensures that only one confirmed identification for the same observation
# should be exported. Data integrity checks will ensure that if there is more than one confirmed identification
# for an observation, then it is for the same taxa. It is then also used as an extra check to make sure that no
# finalised identifications can be exported if a confirmed identification for the same observation has been
# exported. This case should be accounted for though in the data integrity checks.
selected_identification_specimen_label = set()
for confirmed_identification in confirmed_identifications:
if confirmed_identification.observation.specimen_label not in selected_identification_specimen_label:
row = get_row_for_identification(confirmed_identification)
selected_identification_specimen_label.add(confirmed_identification.observation.specimen_label)
csv_writer.writerow(row)
print("Number of specimen labels after confirmed ids: ", len(selected_identification_specimen_label))
# There could be more than one finalised identification that should be exported, so allow for more than one with
# the same specimen label, but check that they are not in the set of observations that have confirmed
# identifications.
finalised_identifications = Identification.objects.exclude(
observation__survey__visit__site__site_name__in=practice_sites).filter(
confidence=Identification.Confidence.FINALISED)
print("Number of finalised ids:", finalised_identifications.count())
for finalised_identification in finalised_identifications:
if finalised_identification.observation.specimen_label not in selected_identification_specimen_label:
row = get_row_for_identification(finalised_identification)
csv_writer.writerow(row)
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('output_file', type=argparse.FileType('w'), help='Path to the file or - for stdout')
parser.add_argument('--practice_sites', type=str, nargs="*",
help='Site names of the practice sites to exclude from the export')
def handle(self, *args, **options):
export_csv(options['output_file'], options['practice_sites'])
```
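For reference, a minimal sketch of calling export_csv directly rather than through manage.py. The dotted import path and the practice-site name are assumptions, and Django must already be configured (for example inside a manage.py shell session).
```python
# Usage sketch only; "TES01" is a made-up practice-site name and the dotted
# module path is an assumption about the project layout.
from io import StringIO

from observations.management.commands.export_observations_csv import export_csv

buffer = StringIO()
export_csv(buffer, practice_sites=["TES01"])

lines = buffer.getvalue().splitlines()
print(lines[0])        # header row, as defined by header_observations
print(len(lines) - 1)  # number of exported identification rows
```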
#### File: management/commands/export_vegetation_surveys_csv.py
```python
import argparse
import csv
from django.core.management.base import BaseCommand
from ...models import VegetationStructure, Plot
header_vegetation_survey = ['site_name', 'date_cest', 'plot_distance_from_start_m', 'percentage_vegetation_cover',
'percentage_bare_ground', 'percentage_rock', 'height_75percent', 'max_height',
'density_01', 'density_02', 'density_03', 'density_04', 'density_05']
def export_csv(output_file):
"""
Export data from a query into a CSV file which has a specified output file.
Using an ORM query, get some data from the database and export specified fields into a CSV file which uses a set
of headers.
"""
headers = header_vegetation_survey
csv_writer = csv.DictWriter(output_file, headers)
csv_writer.writeheader()
vegetation_surveys = VegetationStructure.objects.all()
for vegetation_survey in vegetation_surveys:
row = {}
row['site_name'] = vegetation_survey.plot.visit.site.site_name
row['date_cest'] = vegetation_survey.plot.visit.date
row['plot_distance_from_start_m'] = vegetation_survey.plot.position
row['percentage_vegetation_cover'] = vegetation_survey.percentage_vegetation_cover
row['percentage_bare_ground'] = vegetation_survey.percentage_bare_ground
row['percentage_rock'] = vegetation_survey.percentage_rock
row['height_75percent'] = vegetation_survey.height_75percent
row['max_height'] = vegetation_survey.max_height
row['density_01'] = vegetation_survey.density_01
row['density_02'] = vegetation_survey.density_02
row['density_03'] = vegetation_survey.density_03
row['density_04'] = vegetation_survey.density_04
row['density_05'] = vegetation_survey.density_05
csv_writer.writerow(row)
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('output_file', type=argparse.FileType('w'), help='Path to the file or - for stdout')
def handle(self, *args, **options):
export_csv(options['output_file'])
```
#### File: management/commands/import_sites.py
```python
from django.core.management.base import BaseCommand
from django.db import transaction
from ...models import Site, Source
import csv
class Command(BaseCommand):
help = 'Adds sites'
def add_arguments(self, parser):
parser.add_argument('filename', type=str)
@transaction.atomic
def handle(self, *args, **options):
print(options['filename'])
self.import_sources()
self.import_data_from_csv(options['filename'])
def import_sources(self):
Source.objects.get_or_create(name=Source.PositionSource.GPS)
Source.objects.get_or_create(name=Source.PositionSource.OSMAND)
Source.objects.get_or_create(name=Source.PositionSource.DEM)
Source.objects.get_or_create(name=Source.PositionSource.VIKINGTOPO)
def source_string_to_choice(self, source_string):
if source_string == 'GPS':
name = Source.PositionSource.GPS
elif source_string == 'Osmand':
name = Source.PositionSource.OSMAND
elif source_string == 'DEM':
name = Source.PositionSource.DEM
elif source_string == 'Viking':
name = Source.PositionSource.VIKINGTOPO
else:
assert False
return Source.objects.get(name=name)
def import_data_from_csv(self, filename):
with open(filename) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
site = Site()
site.area = row['area']
site.site_name = row['sitename']
site.altitude_band = row['altitude_band']
site.transect_length = row['transect_length']
site.transect_description = row['transect_description']
site.transect_length_source = self.source_string_to_choice(row['transect_length_source'])
site.notes = row['notes']
site.latitude_start = row['start_latitude']
site.longitude_start = row['start_longitude']
site.altitude_start = row['start_altitude']
site.latitude_start_source = self.source_string_to_choice(row['start_latitude_source'])
site.longitude_start_source = self.source_string_to_choice(row['start_longitude_source'])
site.altitude_start_source = self.source_string_to_choice(row['start_altitude_source'])
if row['start_number_satellites'] != '':
site.gps_number_satellites_start = row['start_number_satellites']
if row['start_gps_accuracy'] != '':
site.gps_accuracy_start = row['start_gps_accuracy']
if row['start_orientation'] != '':
site.gps_aspect_start = row['start_orientation']
site.latitude_end = row['end_latitude']
site.longitude_end = row['end_longitude']
site.altitude_end = row['end_altitude']
site.latitude_end_source = self.source_string_to_choice(row['end_latitude_source'])
site.longitude_end_source = self.source_string_to_choice(row['end_longitude_source'])
site.altitude_end_source = self.source_string_to_choice(row['end_altitude_source'])
if row['end_number_satellites'] != '':
site.gps_number_satellites_end = row['end_number_satellites']
if row['end_gps_accuracy'] != '':
site.gps_accuracy_end = row['end_gps_accuracy']
if row['end_orientation'] != '':
site.gps_aspect_end = row['end_orientation']
site.save()
```
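The importer looks up CSV columns by name. The sketch below writes a one-row example file containing exactly the headers the command reads; all values are illustrative, and the source columns must be one of GPS, Osmand, DEM or Viking.
```python
# Builds a one-row CSV with exactly the column names import_sites reads.
# All values are illustrative; empty optional fields are left as ''.
import csv

headers = [
    'area', 'sitename', 'altitude_band', 'transect_length', 'transect_description',
    'transect_length_source', 'notes',
    'start_latitude', 'start_longitude', 'start_altitude',
    'start_latitude_source', 'start_longitude_source', 'start_altitude_source',
    'start_number_satellites', 'start_gps_accuracy', 'start_orientation',
    'end_latitude', 'end_longitude', 'end_altitude',
    'end_latitude_source', 'end_longitude_source', 'end_altitude_source',
    'end_number_satellites', 'end_gps_accuracy', 'end_orientation',
]

row = dict.fromkeys(headers, '')
row.update({'area': 'ExampleArea', 'sitename': 'TES01', 'altitude_band': '1700',
            'transect_length': '100', 'transect_length_source': 'GPS',
            'start_latitude': '42.0', 'start_longitude': '1.0', 'start_altitude': '1700',
            'start_latitude_source': 'GPS', 'start_longitude_source': 'GPS',
            'start_altitude_source': 'GPS',
            'end_latitude': '42.001', 'end_longitude': '1.001', 'end_altitude': '1710',
            'end_latitude_source': 'GPS', 'end_longitude_source': 'GPS',
            'end_altitude_source': 'GPS'})

with open('sites_example.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=headers)
    writer.writeheader()
    writer.writerow(row)
```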
#### File: management/commands/report_identifications.py
```python
from django.core.management.base import BaseCommand
from ...reports import SpeciesReport
class Command(BaseCommand):
help = 'Print reports about observations and identifications'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
species_reports = SpeciesReport()
print("---------- Observations ----------")
counting_observations = species_reports.observations_count()
print("Total number of observations:", len(counting_observations))
counting_suborders = species_reports.observations_suborder()
print("Caelifera:", len(counting_suborders['Caelifera']), "=",
100 * (len(counting_suborders['Caelifera']) / len(counting_observations)).__round__(3), "%")
print("Ensifera:", len(counting_suborders['Ensifera']), "=",
100 * (len(counting_suborders['Ensifera']) / len(counting_observations)).__round__(3), "%")
print("Number of observations without an identification:",
len(counting_observations) - len(counting_suborders['Caelifera']) - len(
counting_suborders['Ensifera']) - len(counting_suborders['todo']))
print("\n---------- Observations identified ----------")
print(
"\nTotal number of observations with finalised identifications (yes, confirmed, cannot identify further, "
"small nymphs hard to ID):",
species_reports.identified_observations_finalised_count())
counting_species_identified = species_reports.identified_observations_to_species()
total_identifications_species_unique = species_reports.unique_observations_identified_to_species(counting_species_identified)
identifications_to_check = species_reports.get_species_from_specimen_label_confidence_set(counting_species_identified, 'Check')
list_identifications_to_check = sorted(identifications_to_check, key=lambda x: x[1])
print("Total number of observations identified to species:", len(total_identifications_species_unique))
print("\nNumber of unique observations identified to species, identification CONFIRMED:",
len(counting_species_identified['Confirmed']))
print("\nNumber of unique observations identified to species, identification FINALISED:",
len(counting_species_identified['Finalised']))
print("Number of unique observations identified to species, identification to REVIEW:",
len(counting_species_identified['Review']))
print("Number of unique observations identified to species, identification to CHECK AFTER MUSEUM:",
len(counting_species_identified['CheckMuseum']))
print("Number of unique observations identified to species, identification to CHECK:",
len(counting_species_identified['Check']))
print("Number of unique observations identified to species, identification to REDO / IN PROGRESS:",
len(counting_species_identified['Redo']))
print("Number of unique observations identified to species, identification IN PROGRESS:",
len(counting_species_identified['InProgress']))
print("Number of unique observations identified to species, identification MISSING CONFIRMATION:",
len(counting_species_identified['NoConfirmation']))
counting_genus_identified = species_reports.identified_observations_to_genus_not_species()
print("\nNumber of unique observations only identified to genus:", counting_genus_identified['Total'])
print("\nNumber of unique observations only identified to genus, identification CONFIRMED:",
len(counting_genus_identified['Confirmed']))
print("\nNumber of unique observations only identified to genus, identification FINALISED:",
len(counting_genus_identified['Finalised']))
print("Number of unique observations only identified to genus, identification to REVIEW:",
len(counting_genus_identified['Review']))
print("Number of unique observations only identified to genus, identification to CHECK AFTER MUSEUM:",
len(counting_genus_identified['CheckMuseum']))
print("Number of unique observations only identified to genus, identification to CHECK:",
len(counting_genus_identified['Check']))
print("Number of unique observations only identified to genus, identification to REDO:",
len(counting_genus_identified['Redo']))
print("Number of unique observations only identified to genus, identification IN PROGRESS:",
len(counting_genus_identified['InProgress']))
print("Number of unique observations only identified to genus, identification NO CONFIRMATION:",
len(counting_genus_identified['NoConfirmation']))
print("\n---------- Number of each stage identified ----------")
print("\nStages identified:")
for identification in species_reports.identifications_stage_count():
print(identification["stage"], identification["count"])
print("\nStage with confidence:")
for identification in species_reports.identifications_stage_confidence_count():
if identification["stage"] == "Adult":
print(identification["stage"], identification["confidence"], identification["count"])
elif identification["stage"] == "Nymph":
print(identification["stage"], identification["confidence"], identification["count"])
print("\n-----THINGS TO CHECK-----")
print("\n-Number of observations without an identification:",
len(counting_observations) - len(counting_suborders['Caelifera']) - len(
counting_suborders['Ensifera']) - len(counting_suborders['todo']), ":",
counting_observations - counting_suborders['Caelifera'] - counting_suborders['Ensifera'] -
counting_suborders['todo'])
print("\n-Number of identifications without a suborder:", len(counting_suborders['todo']), ":",
counting_suborders['todo'])
print("\n-ID'd to species to REVIEW:", len(counting_species_identified['Review']), ":",
counting_species_identified['Review'])
print("\n-ID'd to species to CHECK:")
for element in list_identifications_to_check:
print(element)
print("\n-ID'd to species to REDO:", len(counting_species_identified['Redo']), ":",
counting_species_identified['Redo'])
```
#### File: management/commands/report_survey_summary.py
```python
import datetime
from django.core.management.base import BaseCommand
from ...reports import SurveyReport
class Command(BaseCommand):
help = 'Print summary reports about a specified survey'
def add_arguments(self, parser):
parser.add_argument('site_name', type=str,
help="Site name")
parser.add_argument('date', type=datetime.date.fromisoformat, help='Date of survey')
parser.add_argument('method', type=str, help='Survey method')
parser.add_argument('repeat', type=int, help='Survey method repeat')
def handle(self, *args, **options):
survey_reports = SurveyReport()
survey = survey_reports.get_survey_object(options['site_name'], options['date'], options['method'],
options['repeat'])
print("\nSummary of suborders observed during this survey (from all identifications):", survey)
for suborder, observations in survey_reports.summarise_survey_suborder(survey).items():
print(suborder, len(observations))
print("\nSummary of confirmed or finalised taxa during this survey:", survey)
for taxa, count in survey_reports.summarise_survey_confirmed_finalised_taxa(survey):
print(taxa, count)
print("\nObservations for this survey (number in brackets: number of "
"identifications for the observation):", survey)
print("Total:", len(survey_reports.list_survey_observations(survey)))
for observation_id_summary in survey_reports.list_observations_count_identifications(survey):
print(observation_id_summary['observation'], "(", observation_id_summary['count'], ")")
print("\nList of identifications for this survey:", survey)
print("Total:", len(survey_reports.list_survey_identifications(survey)))
for row in survey_reports.list_survey_identifications(survey):
print(row)
```
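Because the date argument uses datetime.date.fromisoformat, it has to be supplied as YYYY-MM-DD on the command line. A small sketch of the conversions the parser applies; the site name and values are made up.
```python
# Argument conversion as performed by the command's parser; values are made up.
import datetime

site_name = 'TES01'
date = datetime.date.fromisoformat('2021-07-15')  # must be YYYY-MM-DD
method = 'Net'     # must match Survey.Method choices ('Net' or 'Hand')
repeat = int('1')  # must match Survey.Repeat choices (1 or 2)

print(site_name, date, method, repeat)
```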
#### File: management/commands/update_sex_small_nymphs.py
```python
from django.core.management.base import BaseCommand
from django.db import transaction
from ...models import Identification
def get_small_nymph_no_sex_identifications():
"""Get the identifications of nymphs that have been marked as being confirmed because they are too small to identify
further.
Return the queryset of objects.
"""
rows_to_update = Identification.objects.filter(stage=Identification.Stage.NYMPH).filter(
confidence_reason=Identification.ConfidenceReason.SMALL_NYMPH_HARD_TO_ID).filter(sex__isnull=True)
return rows_to_update
def update_sex_unknown_small_nymphs():
"""Update the objects with unknown sex."""
rows_to_update = get_small_nymph_no_sex_identifications()
rows_to_update.update(sex=Identification.Sex.UNKNOWN)
class Command(BaseCommand):
help = 'Updates identifications of small nymphs that do not currently have a sex defined.'
@transaction.atomic
def handle(self, *args, **options):
update_sex_unknown_small_nymphs()
```
#### File: WildlifeObservations/observations/models.py
```python
from django.core.validators import MinValueValidator, MaxValueValidator, RegexValidator
from django.db import models
from django.conf import settings
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
class Source(models.Model):
class PositionSource(models.TextChoices):
VIKINGTOPO = 'Viking Topo', _('Viking Topo')
GPS = 'GPS', _('GPS')
DEM = 'DEM', _('DEM')
OSMAND = 'OsmAnd', _('OsmAnd')
name = models.CharField(max_length=20, choices=PositionSource.choices)
def __str__(self):
return "{}".format(self.name)
class Site(models.Model):
area = models.CharField(max_length=30)
site_name = models.CharField(max_length=5, unique=True)
altitude_band = models.IntegerField(validators=[MinValueValidator(0)])
latitude_start = models.FloatField(validators=[MinValueValidator(-90), MaxValueValidator(90)])
latitude_start_source = models.ForeignKey(Source, on_delete=models.PROTECT, related_name='+')
longitude_start = models.FloatField(validators=[MinValueValidator(-180), MaxValueValidator(180)])
longitude_start_source = models.ForeignKey(Source, on_delete=models.PROTECT, related_name='+')
altitude_start = models.FloatField(validators=[MinValueValidator(0)])
altitude_start_source = models.ForeignKey(Source, on_delete=models.PROTECT, related_name='+')
gps_number_satellites_start = models.IntegerField(null=True, blank=True, validators=[MinValueValidator(0)])
gps_accuracy_start = models.IntegerField(null=True, blank=True)
gps_aspect_start = models.FloatField(null=True, blank=True)
latitude_end = models.FloatField(validators=[MinValueValidator(-90), MaxValueValidator(90)])
latitude_end_source = models.ForeignKey(Source, on_delete=models.PROTECT, related_name='+')
longitude_end = models.FloatField(validators=[MinValueValidator(-180), MaxValueValidator(180)])
longitude_end_source = models.ForeignKey(Source, on_delete=models.PROTECT, related_name='+')
altitude_end = models.FloatField(validators=[MinValueValidator(0)])
altitude_end_source = models.ForeignKey(Source, on_delete=models.PROTECT, related_name='+')
gps_number_satellites_end = models.IntegerField(null=True, blank=True, validators=[MinValueValidator(0)])
gps_accuracy_end = models.IntegerField(null=True, blank=True)
gps_aspect_end = models.FloatField(null=True, blank=True)
transect_length = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(100)])
transect_length_source = models.ForeignKey(Source, on_delete=models.PROTECT)
transect_description = models.TextField(max_length=2048, default='', blank=True)
notes = models.TextField(max_length=2048, default='', blank=True)
created_on = models.DateTimeField(default=timezone.now)
def __str__(self):
return "{} ({}m)".format(self.site_name, self.altitude_band)
class Visit(models.Model):
site = models.ForeignKey(Site, on_delete=models.PROTECT)
date = models.DateField()
created_on = models.DateTimeField(default=timezone.now)
class Meta:
constraints = [models.UniqueConstraint(
name="%(app_label)s_%(class)s_site_date_unique_relationships",
fields=['site', 'date'])]
def __str__(self):
return "{} {}".format(self.site, self.date)
class Survey(models.Model):
class Method(models.TextChoices):
NET = 'Net', _('Net')
HAND = 'Hand', _('Hand')
class Repeat(models.IntegerChoices):
ONE = 1
TWO = 2
visit = models.ForeignKey(Visit, on_delete=models.PROTECT)
start_time = models.TimeField()
end_time = models.TimeField()
method = models.CharField(max_length=5, choices=Method.choices)
repeat = models.IntegerField(choices=Repeat.choices)
observer = models.CharField(max_length=100)
created_on = models.DateTimeField(default=timezone.now)
def __str__(self):
return "{} {} {}".format(self.visit, self.method, self.repeat)
class Meta:
constraints = [
models.UniqueConstraint(name="%(app_label)s_%(class)s_visit_method_repeat_unique_relationships",
fields=['visit', 'method', 'repeat']),
models.UniqueConstraint(name="%(app_label)s_%(class)s_visit_start_unique_relationships",
fields=['visit', 'start_time'])]
class MeteorologyConditions(models.Model):
survey = models.OneToOneField(Survey, on_delete=models.PROTECT, unique=True)
cloud_coverage_start = models.IntegerField(null=True, blank=True,
validators=[MinValueValidator(0), MaxValueValidator(8)])
wind_start = models.IntegerField(null=True, blank=True, validators=[MinValueValidator(0)])
rain_start = models.IntegerField(null=True, blank=True, validators=[MinValueValidator(0)])
cloud_coverage_end = models.IntegerField(null=True, blank=True,
validators=[MinValueValidator(0), MaxValueValidator(8)])
wind_end = models.IntegerField(null=True, blank=True, validators=[MinValueValidator(0)])
rain_end = models.IntegerField(null=True, blank=True, validators=[MinValueValidator(0)])
notes = models.TextField(max_length=2048, default='', blank=True)
created_on = models.DateTimeField(default=timezone.now)
def __str__(self):
return "{}".format(self.survey)
class Meta:
verbose_name_plural = 'Meteorological conditions'
class Observation(models.Model):
class Status(models.TextChoices):
OBSERVED = 'Observed', _('Observed')
SPECIMEN = 'Specimen', _('Specimen')
LOST = 'Lost', _('Lost')
class PreservationType(models.TextChoices):
FROZEN = 'Frozen', _('Frozen')
ALCOHOL = 'Alcohol', _('Alcohol')
PINNED = 'Pinned', _('Pinned')
NA = 'NA', _('NA')
specimen_label = models.CharField(max_length=22, unique=True, validators=[
RegexValidator(regex='^[A-Z]{3}[0-9]{2} [0-9]{8} [A-Z]{1}[0-9]{1} [A-Z]{1}[0-9]{3}$',
message='Format is sitename yyyymmdd methodrepeat specimen',
code='Invalid format')])
survey = models.ForeignKey(Survey, on_delete=models.PROTECT)
length_head_abdomen = models.FloatField(null=True, blank=True,
validators=[MinValueValidator(0), MaxValueValidator(100)])
length_head_tegmina = models.FloatField(null=True, blank=True,
validators=[MinValueValidator(0), MaxValueValidator(100)])
original_preservation = models.CharField(max_length=10, choices=PreservationType.choices,
default=PreservationType.FROZEN)
current_preservation = models.CharField(max_length=10, choices=PreservationType.choices,
default=PreservationType.FROZEN)
status = models.CharField(max_length=10, choices=Status.choices)
notes = models.TextField(max_length=1024, null=True, blank=True)
created_on = models.DateTimeField(default=timezone.now)
def __str__(self):
return "{}".format(self.specimen_label)
class Photograph(models.Model):
filepath = models.CharField(max_length=300, unique=True)
observation = models.ManyToManyField(Observation)
created_on = models.DateTimeField(default=timezone.now)
def __str__(self):
return "{}".format(self.filepath)
class TaxonomyClass(models.Model):
taxclass = models.CharField(max_length=255, unique=True)
def __str__(self):
return "{}".format(self.taxclass)
class Meta:
verbose_name_plural = 'Taxonomy classes'
class TaxonomyOrder(models.Model):
order = models.CharField(max_length=255, unique=True)
taxclass = models.ForeignKey(TaxonomyClass, on_delete=models.PROTECT)
def __str__(self):
return "{}".format(self.order)
class Meta:
verbose_name_plural = 'Taxonomy orders'
class TaxonomySuborder(models.Model):
suborder = models.CharField(max_length=255, unique=True)
order = models.ForeignKey(TaxonomyOrder, on_delete=models.PROTECT)
def __str__(self):
return "{}".format(self.suborder)
class Meta:
verbose_name_plural = 'Taxonomy sub-orders'
class TaxonomyFamily(models.Model):
family = models.CharField(max_length=255, unique=True)
suborder = models.ForeignKey(TaxonomySuborder, on_delete=models.PROTECT)
def __str__(self):
return "{}".format(self.family)
class Meta:
verbose_name_plural = 'Taxonomy families'
class TaxonomySubfamily(models.Model):
subfamily = models.CharField(max_length=255, unique=True)
family = models.ForeignKey(TaxonomyFamily, on_delete=models.PROTECT)
def __str__(self):
return "{}".format(self.subfamily)
class Meta:
verbose_name_plural = 'Taxonomy subfamilies'
class TaxonomyGenus(models.Model):
genus = models.CharField(max_length=255, unique=True)
subfamily = models.ForeignKey(TaxonomySubfamily, on_delete=models.PROTECT, null=True, blank=True)
def __str__(self):
return "{}".format(self.genus)
class Meta:
verbose_name_plural = 'Taxonomy genera'
ordering = ['genus']
class TaxonomySpecies(models.Model):
latin_name = models.CharField(max_length=255, unique=True)
genus = models.ForeignKey(TaxonomyGenus, on_delete=models.PROTECT)
common_name_english = models.CharField(max_length=100, null=True, blank=True)
common_name_catalan = models.CharField(max_length=100, null=True, blank=True)
common_name_spanish = models.CharField(max_length=100, null=True, blank=True)
def __str__(self):
return "{}".format(self.latin_name)
class Meta:
verbose_name_plural = 'Taxonomy species'
class IdentificationGuide(models.Model):
title = models.CharField(max_length=150, unique=True)
author = models.CharField(max_length=1024)
def __str__(self):
return "{} - {}".format(self.author, self.title)
class Identification(models.Model):
class Sex(models.TextChoices):
MALE = 'Male', _('Male')
FEMALE = 'Female', _('Female')
UNKNOWN = 'Unknown', _('Unknown')
class Stage(models.TextChoices):
ADULT = 'Adult', _('Adult')
NYMPH = 'Nymph', _('Nymph')
UNKNOWN = 'Unknown', _('Unknown')
class Confidence(models.TextChoices):
IN_PROGRESS = 'In_progress', _('In progress')
CHECK = 'Check', _('Check')
CHECK_IN_MUSEUM = 'Check_in_museum', _('Check in museum')
CONFIRMED = 'Confirmed', _('Confirmed')
REDO = 'Redo', _('Redo')
REVIEW = 'Review', _('Review')
FINALISED = 'Finalised', _('Finalised')
class ConfidenceReason(models.TextChoices):
ID_CERTAIN = 'ID_certain', _('ID certain')
ID_UNCERTAIN = 'ID_uncertain', _('ID uncertain')
ID_INCOMPLETE = 'ID_incomplete', _('ID incomplete')
ID_NEEDS_CONFIRMATION = 'ID_needs_confirmation', _('ID needs confirmation')
ID_INCORRECT = 'ID_incorrect', _('ID incorrect')
CANNOT_DETERMINE_FURTHER = 'Cannot_determine_further', _('Cannot determine further')
SMALL_NYMPH_HARD_TO_ID = 'Small_nymph_hard_to_ID', _('Small nymph hard to ID')
CANNOT_SPLIT_FURTHER = 'Cannot_split_further', _('Cannot split further')
observation = models.ForeignKey(Observation, on_delete=models.PROTECT)
species = models.ForeignKey(TaxonomySpecies, on_delete=models.PROTECT, null=True, blank=True)
genus = models.ForeignKey(TaxonomyGenus, on_delete=models.PROTECT, null=True, blank=True)
subfamily = models.ForeignKey(TaxonomySubfamily, on_delete=models.PROTECT, null=True, blank=True)
family = models.ForeignKey(TaxonomyFamily, on_delete=models.PROTECT, null=True, blank=True)
suborder = models.ForeignKey(TaxonomySuborder, on_delete=models.PROTECT, null=True, blank=True)
identification_notes = models.TextField(max_length=2048, null=True, blank=True)
identification_guide = models.ForeignKey(IdentificationGuide, on_delete=models.PROTECT, null=True, blank=True)
sex = models.CharField(max_length=7, choices=Sex.choices, null=True, blank=True)
stage = models.CharField(max_length=7, choices=Stage.choices, null=True, blank=True)
confidence = models.CharField(max_length=30, choices=Confidence.choices, null=True, blank=True)
confidence_reason = models.CharField(max_length=30, choices=ConfidenceReason.choices, null=True, blank=True)
date_of_identification = models.DateField(null=True, blank=True)
notebook = models.CharField(max_length=10)
comments = models.TextField(max_length=1000, null=True, blank=True)
created_on = models.DateTimeField(default=timezone.now)
def save(self, *args, **kwargs):
if self.species is not None:
self.genus = self.species.genus
if self.genus is not None:
self.subfamily = self.genus.subfamily
if self.subfamily is not None:
self.family = self.subfamily.family
if self.family is not None:
self.suborder = self.family.suborder
return super().save(*args, **kwargs)
def __str__(self):
return "{} - {} [{}]".format(self.observation, self.species, self.confidence)
class Meta:
constraints = [models.UniqueConstraint(
name="%(app_label)s_%(class)s_specimen_guide_species_date_unique_relationships",
fields=['observation', 'identification_guide', 'species', 'date_of_identification']),
# add constraints to ensure only allowed combinations of confidence and confidence reason
models.CheckConstraint(name="%(app_label)s_%(class)s_check_confidence_reasons",
check=Q(Q(confidence='Confirmed') & Q(confidence_reason__in=(
'Small_nymph_hard_to_ID', 'Cannot_determine_further', 'ID_certain')))
| Q(Q(confidence__in=('Check', 'Check_in_museum')) &
Q(confidence_reason='ID_needs_confirmation'))
| Q(Q(confidence='In_progress') & Q(confidence_reason='ID_incomplete'))
| Q(Q(confidence='Review') & Q(confidence_reason='ID_uncertain'))
| Q(Q(confidence='Redo') & Q(confidence_reason='ID_incorrect'))
| Q(Q(confidence='Finalised') & Q(confidence_reason='Cannot_split_further'))
)]
class Plot(models.Model):
visit = models.ForeignKey(Visit, on_delete=models.PROTECT)
position = models.IntegerField(validators=[MinValueValidator(0), MaxValueValidator(100)])
created_on = models.DateTimeField(default=timezone.now)
def __str__(self):
return "{} ({}m)".format(self.visit, self.position)
class Meta:
constraints = [models.UniqueConstraint(
name="%(app_label)s_%(class)s_visit_position_unique_relationships",
fields=['visit', 'position'])]
class VegetationStructure(models.Model):
plot = models.OneToOneField(Plot, on_delete=models.PROTECT)
percentage_vegetation_cover = models.IntegerField(validators=[MinValueValidator(0), MaxValueValidator(100)])
percentage_bare_ground = models.IntegerField(validators=[MinValueValidator(0), MaxValueValidator(100)])
percentage_rock = models.IntegerField(validators=[MinValueValidator(0), MaxValueValidator(100)])
height_75percent = models.IntegerField(validators=[MinValueValidator(0)])
max_height = models.IntegerField(validators=[MinValueValidator(0)])
density_01 = models.IntegerField(validators=[MinValueValidator(0)])
density_02 = models.IntegerField(validators=[MinValueValidator(0)])
density_03 = models.IntegerField(validators=[MinValueValidator(0)])
density_04 = models.IntegerField(validators=[MinValueValidator(0)])
density_05 = models.IntegerField(validators=[MinValueValidator(0)])
notes = models.TextField(max_length=2048, default='', blank=True)
created_on = models.DateTimeField(default=timezone.now)
def __str__(self):
return "{}".format(self.plot)
```
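The CheckConstraint on Identification only accepts particular confidence / confidence_reason pairings. For readability, the same rules written out as a plain dictionary, derived from the constraint above; this is not part of the model's API.
```python
# Allowed confidence -> confidence_reason pairings, read off the CheckConstraint above.
ALLOWED_CONFIDENCE_REASONS = {
    'Confirmed': {'ID_certain', 'Cannot_determine_further', 'Small_nymph_hard_to_ID'},
    'Check': {'ID_needs_confirmation'},
    'Check_in_museum': {'ID_needs_confirmation'},
    'In_progress': {'ID_incomplete'},
    'Review': {'ID_uncertain'},
    'Redo': {'ID_incorrect'},
    'Finalised': {'Cannot_split_further'},
}


def is_allowed(confidence, reason):
    """Return True if the pairing would satisfy the database constraint."""
    return reason in ALLOWED_CONFIDENCE_REASONS.get(confidence, set())


assert is_allowed('Confirmed', 'ID_certain')
assert not is_allowed('Finalised', 'ID_certain')
```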
|
{
"source": "jentiai/Korean-Light-OCR-API",
"score": 3
}
|
#### File: jentiai/Korean-Light-OCR-API/tools.py
```python
import os
import re
import cv2
import torch
import numpy as np
import Polygon as plg
from PIL import Image
from collections import OrderedDict
from matplotlib import patches
from matplotlib import font_manager as fm
from matplotlib import pyplot as plt
def read_txt(txt_path):
txt_contents = []
f = open(txt_path, 'r')
while True:
line = f.readline().strip('\n').strip(' ')
if not line:
break
x1, y1, x2, y2, x3, y3, x4, y4 = line.split(',')[:8]
label = line.split(',')[8].strip(' ')
if line.endswith(','):
label += ','
x1, y1, x2, y2, x3, y3, x4, y4 = map(int, [x1, y1, x2, y2, x3, y3, x4, y4])
polygon = polygon_from_points([x1, y1, x2, y2, x3, y3, x4, y4])
txt_contents.append([polygon, label, f"{x1}, {y1}, {x2}, {y2}, {x3}, {y3}, {x4}, {y4}"])
f.close()
return txt_contents
def polygon_from_points(points):
"""
Returns a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4
"""
resBoxes = np.empty([1, 8], dtype='int32')
resBoxes[0, 0] = int(points[0])
resBoxes[0, 4] = int(points[1])
resBoxes[0, 1] = int(points[2])
resBoxes[0, 5] = int(points[3])
resBoxes[0, 2] = int(points[4])
resBoxes[0, 6] = int(points[5])
resBoxes[0, 3] = int(points[6])
resBoxes[0, 7] = int(points[7])
pointMat = resBoxes[0].reshape([2, 4]).T
return plg.Polygon(pointMat)
def get_union(pD, pG):
areaA = pD.area()
areaB = pG.area()
return areaA + areaB - get_intersection(pD, pG)
def get_intersection(pD, pG):
pInt = pD & pG
if len(pInt) == 0:
return 0
return pInt.area()
def get_intersection_over_union(pD, pG):
try:
return get_intersection(pD, pG) / get_union(pD, pG)
except:
return 0
def make_recognition_pred_gt(detection_output_dir, detection_data_dir, iou_threshold, cropped_data_dir):
count_dict = dict()
if not os.path.isdir(cropped_data_dir):
os.makedirs(cropped_data_dir, exist_ok = True)
for detection_result in os.listdir(detection_output_dir):
if 'txt' in detection_result:
detection_path = os.path.join(detection_output_dir, detection_result)
gt_path = os.path.join(detection_data_dir, detection_result)
eval_data_path = os.path.join(cropped_data_dir, detection_result)
gt_polygon_label_coordinate = read_txt(gt_path)
pred_polygon_confidence_coordinate = read_txt(detection_path)
img_label_count = 0
img_det_count = 0
img_name = detection_result.split('.txt')[0]
for gt_polygon, gt_label, gt_coordinate in gt_polygon_label_coordinate:
if gt_label not in ['*', '###', 'syeom']:
img_label_count += 1
f = open(eval_data_path, 'w')
for pred_polygon, pred_confidence, pred_coordinate in pred_polygon_confidence_coordinate:
keep_info = []
for gt_polygon, gt_label, gt_coordinate in gt_polygon_label_coordinate:
if gt_label in ['*', '###', 'syeom']:
pass
else:
intersection_over_union = get_intersection_over_union(gt_polygon, pred_polygon)
if intersection_over_union > iou_threshold:
keep_info.append([pred_coordinate, gt_label, intersection_over_union])
if keep_info:
img_det_count += 1
keep_info = sorted(keep_info, key = lambda x : -x[2])
data = f"{keep_info[0][0]}, {keep_info[0][1]}\n"
f.write(data)
count_dict[img_name] =[img_det_count, img_label_count]
f.close()
return count_dict
def coordinate_process(points_confidence):
x1, y1, x2, y2, x3, y3, x4, y4, confidence = points_confidence
x_points = [int(x1), int(x2), int(x3), int(x4)]
y_points = [int(y1), int(y2), int(y3), int(y4)]
min_x = int(min(x_points))
max_x = int(max(x_points))
min_y = int(min(y_points))
max_y = int(max(y_points))
return min_x, max_x, min_y, max_y
def coordinate_process_pred(points_confidence):
x1, y1, x2, y2, x3, y3, x4, y4, label = points_confidence
x_points = [int(x1), int(x2), int(x3), int(x4)]
y_points = [int(y1), int(y2), int(y3), int(y4)]
min_x = int(min(x_points))
max_x = int(max(x_points))
min_y = int(min(y_points))
max_y = int(max(y_points))
label = label.split( )
return min_x, max_x, min_y, max_y, label
def polygon_crop(src_img, points_confidence, args):
is_vertical = False
x1, y1, x2, y2, x3, y3, x4, y4, label = points_confidence
x1, y1, x2, y2, x3, y3, x4, y4 = map(int, [x1, y1, x2, y2, x3, y3, x4, y4])
min_x, max_x, min_y, max_y = coordinate_process(points_confidence)
if (max_y - min_y) / (max_x - min_x) >= 1.5:
is_vertical = True
temp_points = np.float32([ [x1, y1], [x2, y2], [x3, y3], [x4, y4]])
sm = temp_points.sum(axis = 1)
dif = np.diff(temp_points, axis = 1)
tl = temp_points[np.argmin(sm)]
br = temp_points[np.argmax(sm)]
tr = temp_points[np.argmin(dif)]
bl = temp_points[np.argmax(dif)]
if is_vertical:
H, W = args.imgW, args.imgH
else:
H, W = args.imgH, args.imgW
pts1 = np.float32([tl, tr, bl, br])
pts2 = np.float32([[0,0], [W, 0], [0, H], [W, H]])
M = cv2.getPerspectiveTransform(pts1, pts2)
res = cv2.warpPerspective(src_img, M, (W, H))
if is_vertical:
res = cv2.rotate(res, cv2.ROTATE_90_COUNTERCLOCKWISE)
return res, [x1, y1, x2, y2, x3, y3, x4, y4], label.split( )[0], is_vertical
def crop_polygon_with_persp(detection_data_dir, detection_output_dir, args):
detection_results = []
for detection_result in os.listdir(detection_output_dir):
if detection_result.endswith('.txt'):
detection_results.append([os.path.join(detection_output_dir, detection_result), os.path.join(detection_data_dir, os.path.splitext(detection_result)[0] + '.jpg')])
crop_imgs = dict()
for txt_path, img_path in detection_results:
if not os.path.isfile(img_path):
continue
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
src_img = img
img_name = os.path.splitext(os.path.basename(img_path))[0]
crop_imgs[img_name] = []
crop_imgs_h = []
crop_imgs_v = []
with open(txt_path, 'r') as f:
count = 1
while True:
line = f.readline().strip('\n').strip(' ')
if not line:
break
rec = ''.join(line.split(',')[8:]).strip(' ')
splited_line = [t.strip(' ') for t in line.split(',')[:8]] + [rec]
res, coordinate, label, is_vertical = polygon_crop(src_img, splited_line, args)
if is_vertical:
crop_imgs_v.append([res, coordinate, label])
else:
crop_imgs_h.append([res, coordinate, label])
crop_imgs[img_name].append([crop_imgs_h, crop_imgs_v])
return crop_imgs
def crop_polygon_with_persp_from_gt(gt_data_dir, args):
gt_file_names = []
for gt_file_name in os.listdir(gt_data_dir):
if gt_file_name.endswith('.txt'):
gt_file_names.append([os.path.join(gt_data_dir, gt_file_name), os.path.join(gt_data_dir, os.path.splitext(gt_file_name)[0]+'.jpg')])
crop_imgs = dict()
for txt_path, img_path in gt_file_names:
if not os.path.isfile(img_path):
continue
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
src_img = img
img_name = os.path.splitext(os.path.basename(img_path))[0]
crop_imgs[img_name] = []
with open(txt_path, 'r') as f:
while True:
line = f.readline().strip('\n').strip(' ')
if not line:
break
rec = ''.join(line.split(',')[8:]).strip(' ')
splited_line = [t.strip(' ') for t in line.split(',')[:8]] + [rec]
res, coordinate, label, is_vertical = polygon_crop(src_img, splited_line, args)
if label in ['*', '###', 'syeom']:
continue
crop_imgs[img_name].append([res, coordinate, label])
return crop_imgs
def crop_img(detection_data_dir, detection_output_dir, hv_ratio):
detection_results = []
for detection_result in os.listdir(detection_output_dir):
if 'txt' in detection_result:
detection_results.append([os.path.join(detection_output_dir, detection_result), os.path.join(detection_data_dir, detection_result.split('.txt')[0] + '.jpg')])
crop_imgs = dict()
for txt_path, img_path in detection_results:
if not os.path.isfile(img_path):
continue
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_name = img_path.split('/')[-1].split('.')[0]
crop_imgs[img_name] = []
f = open(txt_path, 'r')
while True:
line = f.readline()
if not line: break
min_x, max_x, min_y, max_y= coordinate_process(line.split(','))
crop = img[min_y:max_y, min_x:max_x]
crop_imgs[img_name].append([crop, [min_x, min_y, max_x, max_y]])
f.close()
return crop_imgs
def crop_img_pred(detection_data_dir, recognition_eval_data_dir, hv_ratio):
detection_results = []
for detection_result in os.listdir(recognition_eval_data_dir):
if detection_result.endswith('.txt'):
# append a [txt_path, img_path] pair so it can be unpacked in the loop below
detection_results.append([os.path.join(recognition_eval_data_dir, detection_result), os.path.join(detection_data_dir, os.path.splitext(detection_result)[0] + '.jpg')])
crop_imgs = dict()
for txt_path, img_path in detection_results:
if not os.path.isfile(img_path):
continue
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_name = img_path.split('/')[-1].split('.')[0]
crop_imgs[img_name] = []
f = open(txt_path, 'r')
while True:
line = f.readline()
if not line: break
min_x, max_x, min_y, max_y, label = coordinate_process_pred(line.split(','))
crop = img[min_y:max_y, min_x:max_x]
crop_imgs[img_name].append([crop, [min_x, min_y, max_x, max_y], label])
f.close()
return crop_imgs
def change_checkpoint(detection_checkpoint):
config = torch.load('./detection/output/DBNet_MobileNetV3_FPN_DBHead/checkpoint/model_best.pth')
torch.save(config['state_dict'], detection_checkpoint)
def visualization_poly(ocr_output_dir, detection_data_dir):
fm.get_fontconfig_fonts()
font_path = "./NanumFont/NanumGothicBold.ttf"
font_prop = fm.FontProperties(fname=font_path)
for txt_name in os.listdir(ocr_output_dir):
if 'txt' in txt_name:
img_name = f"{txt_name.split('.txt')[0]}.jpg"
img_path = os.path.join(detection_data_dir, img_name)
txt_path = os.path.join(ocr_output_dir, txt_name)
img = Image.open(img_path)
plt.imshow(img)
ax = plt.gca()
with open(txt_path, 'r') as f:
while True:
line = f.readline().strip('\n').strip(' ')
if not line:
break
else:
p4 = re.compile(r"([\d]+)\, ([\d]+)\, ([\d]+)\, ([\d]+)\, (.*)")
p8 = re.compile(r"([\d]+)\, ([\d]+)\, ([\d]+)\, ([\d]+)\, ([\d]+)\, ([\d]+)\, ([\d]+)\, ([\d]+)\, (.*)")
if m:= p8.search(line):
det = [int(m.group(i)) for i in range(1, 9)]
rec = m.group(9)
elif m := p4.search(line):
# four-point format: groups 1-4 are the coordinates, group 5 is the recognised text
det = [int(m.group(i)) for i in range(1, 5)]
rec = m.group(5)
x = [det[i] for i in range(len(det)) if i % 2 == 0]
y = [det[i] for i in range(len(det)) if i % 2 == 1]
xy = np.array([ [x_i, y_i] for x_i, y_i in zip(x, y)])
poly = patches.Polygon(xy = xy,
fill = False,
linewidth = 2,
edgecolor = 'cyan')
ax.add_patch(poly)
plt.text(min(x), min(y), rec, fontproperties=font_prop)
plt.axis('off')
plt.savefig(os.path.join(ocr_output_dir, img_name), bbox_inches = 'tight', pad_inches = 0)
plt.clf()
def to_tensor(src):
src = src.transpose((2, 0 ,1))
src = src/255.
src = src[np.newaxis, :, :, :]
src_tensor = torch.Tensor(src)
src_tensor.sub_(0.5).div_(0.5)
return src_tensor
def copyStateDict(state_dict):
if list(state_dict.keys())[0].startswith("module"):
start_idx = 1
else:
start_idx = 0
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = ".".join(k.split(".")[start_idx:])
new_state_dict[name] = v
return new_state_dict
def gt_list_num_except_ignore(gt_list, ignore):
gt_list_num = 0
for gt in gt_list:
_, _, _, _, _, _, _, _, gt_label = gt
if gt_label not in ignore:
gt_list_num += 1
return gt_list_num
def evaluation(pred_list, gt_list, iou_threshold):
ignore = ['*', '###']
gt_num = gt_list_num_except_ignore(gt_list, ignore)
if gt_num == 0:
return 0, 0
correct_num = 0
for pred in pred_list:
x1, y1, x2, y2, x3, y3, x4, y4, pred_label = pred
pred_polygon = polygon_from_points([x1, y1, x2, y2, x3, y3, x4, y4])
keep_info = []
for gt in gt_list:
x1, y1, x2, y2, x3, y3, x4, y4, gt_label = gt
if gt_label not in ignore:
gt_polygon = polygon_from_points([x1, y1, x2, y2, x3, y3, x4, y4])
intersection_over_union = get_intersection_over_union(pred_polygon, gt_polygon)
if intersection_over_union > iou_threshold:
keep_info.append([gt_label, intersection_over_union])
if keep_info:
# compare against the label of the best-matching (highest IoU) ground truth,
# not the gt_label left over from the last inner-loop iteration
best_gt_label, _ = sorted(keep_info, key=lambda x: -x[1])[0]
if pred_label == best_gt_label:
correct_num += 1
return correct_num, gt_num
def visualization(img_path, predict, output_dir):
fm.get_fontconfig_fonts()
font_path = "./NanumFont/NanumGothicBold.ttf"
font_prop = fm.FontProperties(fname=font_path)
img_name = img_path.split('/')[-1]
img = Image.open(img_path)
plt.imshow(img)
ax = plt.gca()
for label, coordinate in predict.items():
x = [coordinate[i] for i in range(len(coordinate)) if i % 2 == 0]
y = [coordinate[i] for i in range(len(coordinate)) if i % 2 == 1]
xy = np.array([ [x_i, y_i] for x_i, y_i in zip(x, y)])
poly = patches.Polygon(xy = xy,
fill = False,
linewidth = 2,
edgecolor = 'cyan')
ax.add_patch(poly)
plt.text(min(x), min(y), label, fontproperties=font_prop)
plt.axis('off')
plt.savefig(os.path.join(output_dir, img_name), bbox_inches = 'tight', pad_inches = 0)
plt.clf()
```
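A short sketch of the IoU helpers above on two axis-aligned boxes; it assumes tools.py is importable and the Polygon dependency is installed.
```python
# Two 10x10 boxes offset by 5 in x overlap in a 5x10 strip, so IoU = 50 / 150.
from tools import polygon_from_points, get_intersection_over_union  # assumes tools.py is on the path

box_a = polygon_from_points([0, 0, 10, 0, 10, 10, 0, 10])
box_b = polygon_from_points([5, 0, 15, 0, 15, 10, 5, 10])

print(round(get_intersection_over_union(box_a, box_b), 3))  # expected: 0.333
```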
|
{
"source": "jentiai/Korean-Light-OCR-Data",
"score": 2
}
|
#### File: jentiai/Korean-Light-OCR-Data/jenti.py
```python
import os
import sys
import json
import requests
from tqdm import tqdm
def main():
url = "http://172.16.31.10:5000/evaluation"
img_dir = sys.argv[1]
with open('./jenti.json', 'w', encoding = 'UTF-8-sig') as json_res:
res_dict = {}
for img_name in tqdm(list(filter(lambda x: x.find('.jpg') != -1 or x.find('.png') != -1, os.listdir(img_dir)))):
img_path = os.path.join(img_dir, img_name)
files = {'file': open(img_path, 'rb').read()}
r = requests.post(url, files = files)
res_dict[os.path.splitext(img_name)[0]] = r.json()
json.dump(res_dict, json_res, ensure_ascii = False, indent = '\t')
if __name__ == '__main__':
main()
```
#### File: Korean-Light-OCR-Data/utils/post_processor.py
```python
import os
def remove_labels_no_image(image_files, label_path):
image_list = list(image_files.keys())
for (path, dir, files) in os.walk(label_path):
files = list(files)
for file in files:
if file.replace(".txt", ".jpg") not in image_list:
os.remove(os.path.join(path, file))
def remove_no_gt(parent_dir):
deleted_gt = []
for path, dir, files in os.walk(parent_dir):
if not files:
continue
for name in files:
gt_path = os.path.join(path, name)
# remove duplicated name
if (
name == "00C6DD18C320D0A8E8E26AFA84AB5555.txt"
or name == "00F3E524ED9EC2FFD20BC6156EDF5BE3.txt"
):
deleted_gt.append(gt_path)
os.remove(gt_path)
continue
# remove empty gt
with open(gt_path, "r", encoding="utf-8") as f:
lines = f.read()
if not lines:
deleted_gt.append(gt_path)
os.remove(gt_path)
return deleted_gt
```
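remove_labels_no_image only inspects the keys of image_files, which are expected to be image file names ending in .jpg that correspond to the label .txt names. A hypothetical call is shown below; the module path and directory paths are made up.
```python
# Hypothetical call; with non-existent paths os.walk yields nothing, so this is a no-op.
from utils.post_processor import remove_labels_no_image  # assumed module path

image_files = {
    '00001.jpg': '/data/images/00001.jpg',
    '00002.jpg': '/data/images/00002.jpg',
}
remove_labels_no_image(image_files, label_path='/data/labels')
```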
|
{
"source": "jentjr/envirobase",
"score": 3
}
|
#### File: app/api/waste_units.py
```python
from flask import jsonify, request, current_app, url_for
from . import api
from .. import db
from ..models import WasteUnit
@api.route("/waste-units/", methods=["GET"])
def get_waste_units():
waste_units = WasteUnit.query.all()
data = [
{
"type": "Feature",
"properties": {
"facility": waste_unit.facility.name,
"name": waste_unit.name,
"constructed_date": waste_unit.constructed_date,
"unit_type": waste_unit.unit_type,
},
"geometry": {
"type": "MultiPolygon",
"coordinates": [waste_unit.longitude, waste_unit.latitude],
},
}
for waste_unit in waste_units
]
return jsonify({"type": "FeatureCollection", "features": data})
```
#### File: envirobase/app/models.py
```python
import json
from . import db
import pandas as pd
from datetime import datetime
from geoalchemy2 import functions
from geoalchemy2.types import Geometry
from flask import current_app, request, url_for
from .errors import AlreadyExistsError, ValidationError  # ValidationError is raised in the from_json helpers below (assumed to live in .errors)
class BaseExtension(db.MapperExtension):
"""Base extension for all entities."""
def before_insert(self, mapper, connection, instance):
instance.created_on = datetime.now()
def before_update(self, mapper, connection, instance):
instance.updated_on = datetime.now()
class BaseEntity(object):
__mapper_args__ = {"extension": BaseExtension()}
created_on = db.Column(db.DateTime)
updated_on = db.Column(db.DateTime)
class Facility(db.Model, BaseEntity):
__tablename__ = "facility"
facility_id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text, nullable=False, unique=True)
type = db.Column(db.Text)
address = db.Column(db.Text)
city = db.Column(db.Text)
state = db.Column(db.CHAR(2))
zipcode = db.Column(db.String)
longitude = db.Column(db.Float, nullable=True)
latitude = db.Column(db.Float, nullable=True)
geometry = db.Column(Geometry(geometry_type="POINT", srid=4326))
storage_tank = db.relationship("StorageTank", back_populates="facility")
waste_unit = db.relationship("WasteUnit", back_populates="facility")
def __repr__(self):
return f"Facility('{self.facility_id}','{self.name}', '{self.address}', '{self.city}','{self.state}', '{self.zipcode}')"
@classmethod
def add_facility(cls, name, address, city, state, zipcode, longitude, latitude):
"""Add a new facility in the database."""
geometry = "POINT({} {})".format(longitude, latitude)
facility = Facility(
name=name,
address=address,
city=city,
state=state,
zipcode=zipcode,
longitude=longitude,
latitude=latitude,
geometry=geometry,
)
db.session.add(facility)
db.session.commit()
@classmethod
def update_geometries(cls):
"""Using each facility's longitude and latitude, add geometry data to db."""
facilities = Facility.query.all()
for facility in facilities:
point = "POINT({} {})".format(facility.longitude, facility.latitude)
facility.geometry = point
db.session.commit()
def to_json(self):
json_facility = {
"url": url_for("api.get_facility", facility_id=self.facility_id),
"name": self.name,
"address": self.address,
"city": self.city,
"state": self.state,
"zipcode": self.zipcode,
"longitude": self.longitude,
"latitude": self.latitude,
}
return json_facility
@staticmethod
def from_json(json_facility):
name = json_facility.get("name")
address = json_facility.get("address")
city = json_facility.get("city")
state = json_facility.get("state")
zipcode = json_facility.get("zipcode")
longitude = json_facility.get("longitude")
latitude = json_facility.get("latitude")
if name is None or name == "":
raise ValidationError("Facility must have a name")
return Facility(
name=name,
address=address,
city=city,
state=state,
zipcode=zipcode,
longitude=longitude,
latitude=latitude,
created_on=datetime.utcnow()
# geometry = "POINT({} {})".format(longitude, latitude)
)
class WasteUnit(db.Model, BaseEntity):
__tablename__ = "waste_unit"
__table_args__ = (db.UniqueConstraint("name", "facility_id"),)
unit_id = db.Column(db.Integer, primary_key=True)
facility_id = db.Column(db.Integer, db.ForeignKey("facility.facility_id"))
name = db.Column(db.String(64), nullable=False)
constructed_date = db.Column(db.Date)
geometry = db.Column(Geometry(geometry_type="POLYGON", srid=4326))
unit_type = db.Column(db.String(12), nullable=False)
facility = db.relationship("Facility", back_populates="waste_unit")
__mapper_args__ = {
"polymorphic_identity": "waste_unit",
"polymorphic_on": unit_type,
}
def __repr__(self):
return f"WasteUnit('{self.name}')"
def to_json(self):
json_waste_unit = {
"url": url_for("api.get_waste_unit", unit_id=self.unit_id),
"name": self.name,
"constructed_date": self.constructed_date,
"unit_type": self.unit_type,
}
return json_waste_unit
class Landfill(WasteUnit, BaseEntity):
__tablename__ = "landfill"
permit_id = db.Column(db.String(24))
__mapper_args__ = {"polymorphic_identity": "landfill"}
def __repr__(self):
return f"Landfill('{self.name}')"
def to_json(self):
json_landfill = {
"url": url_for("api.get_landfill", unit_id=self.unit_id),
"name": self.name,
}
return json_landfill
class Impoundment(WasteUnit, BaseEntity):
__tablename__ = "impoundment"
dam_id = db.Column(db.String(24))
hazard_class = db.Column(db.Text)
__mapper_args__ = {"polymorphic_identity": "impoundment"}
def __repr__(self):
return f"Impoundment('{self.dam_id}', '{self.name}', '{self.hazard_class}')"
def to_json(self):
json_impoundment = {
"url": url_for("api.get_impoundment", unit_id=self.unit_id),
"name": self.name,
}
return json_impoundment
class StorageTank(db.Model, BaseEntity):
"""Base class for UndergroundStorageTank and AbovegroundStorageTank classes using Joined Table Inheritance. When StorageTank is queried only columns in this class are returned."""
__tablename__ = "storage_tank"
__table_args__ = (db.UniqueConstraint("tank_registration_id", "facility_id"),)
tank_id = db.Column(db.Integer, primary_key=True)
tank_registration_id = db.Column(db.String(12))
facility_id = db.Column(db.Integer, db.ForeignKey("facility.facility_id"))
date_installed = db.Column(db.Date)
date_removed = db.Column(db.Date)
capacity = db.Column(db.Integer)
stored_substance = db.Column(db.String(64))
status = db.Column(db.String(10))
longitude = db.Column(db.Float)
latitude = db.Column(db.Float)
geometry = db.Column(Geometry(geometry_type="POINT", srid=4326))
tank_type = db.Column(db.String(3), nullable=False)
facility = db.relationship("Facility", back_populates="storage_tank")
__mapper_args__ = {
"polymorphic_identity": "storage_tank",
"polymorphic_on": tank_type,
}
def __repr__(self):
return f"StorageTank('{self.tank_id}', '{self.tank_type}', '{self.stored_substance}', '{self.status}')"
def to_json(self):
json_storage_tank = {
"url": url_for("api.get_storage_tank", tank_id=self.tank_id),
"facility": self.facility.name,
"tank_registration_id": self.tank_registration_id,
"capacity": self.capacity,
"stored_substance": self.stored_substance,
"status": self.status,
"tank_type": self.tank_type,
"longitude": self.longitude,
"latitude": self.latitude,
}
return json_storage_tank
@staticmethod
def from_json(json_storage_tank):
facility_id = json_storage_tank.get("facility_id")
tank_registration_id = json_storage_tank.get("tank_registration_id")
capacity = json_storage_tank.get("capacity")
stored_substance = json_storage_tank.get("stored_substance")
status = json_storage_tank.get("status")
tank_type = json_storage_tank.get("tank_type")
longitude = json_storage_tank.get("longitude")
latitude = json_storage_tank.get("latitude")
if facility_id is None or facility_id == "":
raise ValidationError("Tank must be associated with a Facility")
return StorageTank(
facility_id=facility_id,
tank_registration_id=tank_registration_id,
capacity=capacity,
stored_substance=stored_substance,
status=status,
tank_type=tank_type,
longitude=longitude,
latitude=latitude,
created_on=datetime.utcnow()
# geometry = "POINT({} {})".format(longitude, latitude)
)
class UndergroundStorageTank(StorageTank, BaseEntity):
"""Subclass to StorageTank with Joined Table Inheritance. When UndergroundStorageTank is queried all columns from StorageTank are inherited."""
__tablename__ = "ust"
__mapper_args__ = {"polymorphic_identity": "ust"}
tank_double_wall = db.Column(db.Boolean)
inner_tank_material = db.Column(db.Text)
outer_tank_material = db.Column(db.Text)
tank_leak_detection = db.Column(db.Text)
tank_corrosion_protection = db.Column(db.Text)
tank_monitoring_system = db.Column(db.Text)
piping_double_wall = db.Column(db.Boolean)
piping_type = db.Column(db.Text) # Pressurized or suction
inner_pipe_material = db.Column(db.Text)
outer_pipe_material = db.Column(db.Text)
piping_corrosion_protection = db.Column(db.Text)
spill_protection = db.Column(db.Text)
overflow_protection = db.Column(db.Text)
def __repr__(self):
return f"UndergroundStorageTank('{self.tank_id}', '{self.tank_type}', '{self.stored_substance}', '{self.status}')"
def to_json(self):
json_ust = {
"url": url_for("api.get_ust", tank_id=self.tank_id),
"capacity": self.capacity,
"stored_substance": self.stored_substance,
}
return json_ust
class AbovegroundStorageTank(StorageTank, BaseEntity):
"""Subclass to StorageTank with Joined Table Inheritance. When AbovegroundStorageTank is queried all columns from StorageTank are inherited."""
__tablename__ = "ast"
__mapper_args__ = {"polymorphic_identity": "ast"}
def __repr__(self):
return f"AbovegroundStorageTank('{self.tank_id}', '{self.tank_type}', '{self.stored_substance}', '{self.status}')"
def to_json(self):
json_ast = {
"url": url_for("api.get_ast", tank_id=self.tank_id),
"capacity": self.capacity,
"stored_substance": self.stored_substance,
}
return json_ast
class MediumCode(db.Model, BaseEntity):
__tablename__ = "medium_code"
medium_cd = db.Column(db.String(3), primary_key=True)
medium_name = db.Column(db.String(64))
medium_description = db.Column(db.Text)
legacy_cd = db.Column(db.CHAR(1))
def __init__(self, **kwargs):
super(MediumCode, self).__init__(**kwargs)
def _insert_medium_codes():
"""Inserts USGS Medium Codes. If the codes have already been entered, an error is thrown."""
if MediumCode.query.first():
raise AlreadyExistsError("Medium Codes have already been entered.")
else:
url = "https://help.waterdata.usgs.gov/medium_cd"
df = pd.read_html(url, header=0, converters={0: str})[0]
df.rename(
index=str,
columns={
"Medium Code": "medium_cd",
"Medium Name": "medium_name",
"Medium Description": "medium_description",
"Medium Legacy Code": "legacy_cd",
},
inplace=True,
)
df.to_sql("medium_code", con=db.engine, if_exists="append", index=False)
class SampleParameter(db.Model, BaseEntity):
__tablename__ = "sample_parameter"
__table_args__ = (
db.CheckConstraint(
"param_cd ~ similar_escape('[[:digit:]]{5}'::text, NULL::text)"
),
)
param_cd = db.Column(db.CHAR(5), primary_key=True)
group_name = db.Column(db.Text)
description = db.Column(db.Text)
epa_equivalence = db.Column(db.Text)
statistical_basis = db.Column(db.Text)
time_basis = db.Column(db.Text)
weight_basis = db.Column(db.Text)
particle_size_basis = db.Column(db.Text)
sample_fraction = db.Column(db.Text)
temperature_basis = db.Column(db.Text)
casrn = db.Column(db.Text)
srsname = db.Column(db.Text)
parameter_unit = db.Column(db.Text)
def __init__(self, **kwargs):
super(SampleParameter, self).__init__(**kwargs)
def _insert_param_codes():
"""Inserts USGS Parameter Codes. If the codes have already been entered, an error is thrown."""
if SampleParameter.query.first():
raise AlreadyExistsError("Parameter Codes have already been entered.")
else:
url = "https://help.waterdata.usgs.gov/parameter_cd?group_cd=%"
df = pd.read_html(url, header=0, converters={0: str})[0]
df.rename(
index=str,
columns={
"Parameter Code": "param_cd",
"Group Name": "group_name",
"Parameter Name/Description": "description",
"Epa equivalence": "epa_equivalence",
"Result Statistical Basis": "statistical_basis",
"Result Time Basis": "time_basis",
"Result Weight Basis": "weight_basis",
"Result Particle Size Basis": "particle_size_basis",
"Result Sample Fraction": "sample_fraction",
"Result Temperature Basis": "temperature_basis",
"CASRN": "casrn",
"SRSName": "srsname",
"Parameter Unit": "parameter_unit",
},
inplace=True,
)
df.to_sql(
"sample_parameter", con=db.engine, if_exists="append", index=False
)
class SampleId(db.Model, BaseEntity):
__tablename__ = "sample_id"
__table_args__ = (db.UniqueConstraint("sample_id", "facility_id"),)
sample_id = db.Column(db.Integer, primary_key=True)
facility_id = db.Column(db.Integer, db.ForeignKey("facility.facility_id"))
sample_name = db.Column(db.Text)
description = db.Column(db.Text)
longitude = db.Column(db.Float, nullable=True)
latitude = db.Column(db.Float, nullable=True)
geometry = db.Column(Geometry(geometry_type="POINT", srid=4326))
sample_type = db.Column(db.String(24))
facility = db.relationship("Facility")
__mapper_args__ = {
"polymorphic_identity": "sample_id",
"polymorphic_on": sample_type,
}
def __repr__(self):
return f"SampleId('{self.sample_id}', '{self.facility.name}', '{self.sample_type}')"
def to_json(self):
        json_sample_id = {
"url": url_for("api.get_sample_id", sample_id_id=self.sample_id),
"facility": self.facility.name,
"sample_id": self.sample_id,
"sample_type": self.sample_type,
}
return json_sample_id
@staticmethod
def from_json(json_sample_location):
facility = json_sample_location.get("facility.name")
sample_id = json_sample_location.get("sample_id")
sample_type = json_sample_location.get("sample_type")
        if sample_id is None or sample_id == "":
raise ValidationError("Sample does not have an ID")
return SampleId(sample_id=sample_id, sample_type=sample_type)
class Boring(db.Model, BaseEntity):
__tablename__ = "boring"
boring_id = db.Column(db.Text, primary_key=True)
start_date = db.Column(db.Date)
end_date = db.Column(db.Date)
class Well(SampleId, BaseEntity):
__tablename__ = "well"
__mapper_args__ = {"polymorphic_identity": "monitoring_well"}
well_id = db.Column(db.Text)
boring_id = db.Column(db.Text, db.ForeignKey("boring.boring_id"))
well_type = db.Column(db.String(10))
installation_date = db.Column(db.Date)
abandoned_date = db.Column(db.Date)
top_riser = db.Column(db.Float)
top_bent_seal = db.Column(db.Float)
top_gravel_pack = db.Column(db.Float)
top_screen = db.Column(db.Float)
bottom_screen = db.Column(db.Float)
bottom_well = db.Column(db.Float)
bottom_gravel_pack = db.Column(db.Float)
bottom_boring = db.Column(db.Float)
grout_seal_desc = db.Column(db.Text)
bent_seal_desc = db.Column(db.Text)
screen_type = db.Column(db.Text)
gravel_pack_desc = db.Column(db.Text)
riser_pipe_desc = db.Column(db.Text)
spacer_depths = db.Column(db.Text)
notes = db.Column(db.Text)
boring = db.relationship("Boring")
def __repr__(self):
return f"MonitoringWell('{self.well_id}')"
def to_json(self):
json_monitoring_well = {
"url": url_for("api.get_monitoring_well", well_id=self.well_id),
"top_screen": self.top_screen,
"bottom_screen": self.bottom_screen,
}
return json_monitoring_well
class SampleResult(db.Model, BaseEntity):
__tablename__ = "sample_result"
__table_args__ = (
db.UniqueConstraint(
"lab_id", "sample_id", "sample_date", "param_cd", "analysis_result"
),
db.CheckConstraint(
"param_cd ~ similar_escape('[[:digit:]]{5}'::text, NULL::text)"
),
)
result_id = db.Column(db.Integer, primary_key=True)
lab_id = db.Column(db.Text)
facility_id = db.Column(db.Integer, db.ForeignKey("facility.facility_id"))
sample_id = db.Column(db.Integer, db.ForeignKey("sample_id.sample_id"))
param_cd = db.Column(db.CHAR(5), db.ForeignKey("sample_parameter.param_cd"))
medium_cd = db.Column(db.String(3), db.ForeignKey("medium_code.medium_cd"))
sample_date = db.Column(db.Date, nullable=False)
sample_time = db.Column(db.Time, nullable=True)
prep_method = db.Column(db.Text)
analysis_method = db.Column(db.Text, nullable=True)
analysis_flag = db.Column(db.CHAR(1), nullable=True)
analysis_result = db.Column(db.Float, nullable=True)
analysis_unit = db.Column(db.Text, nullable=False)
detection_limit = db.Column(db.Float)
reporting_limit = db.Column(db.Float)
analysis_qualifier = db.Column(db.CHAR(1))
disclaimer = db.Column(db.Text)
analysis_date = db.Column(db.DateTime)
order_comment = db.Column(db.Text)
analysis_comment = db.Column(db.Text)
sample = db.relationship("SampleId")
medium_code = db.relationship("MediumCode")
sample_parameter = db.relationship("SampleParameter")
facility = db.relationship("Facility")
def __repr__(self):
return f"SampleResult('{self.result_id}')"
def to_json(self):
json_sample_result = {
"url": url_for("api.get_sample_result", result_id=self.result_id),
"lab_id": self.lab_id,
}
return json_sample_result
```
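A minimal usage sketch for the models above. It assumes an active Flask application context with `db` initialized and the tables created; the facility name, address, and tank details are illustrative placeholders only.
```python
from datetime import datetime

# Create a facility through the provided classmethod (values are placeholders).
Facility.add_facility(
    name="Example Station", address="100 Example Rd.", city="Columbus",
    state="OH", zipcode="43215", longitude=-83.0, latitude=40.0,
)
facility = Facility.query.filter_by(name="Example Station").first()

# Joined-table inheritance: saving an UndergroundStorageTank also writes a storage_tank row,
# and the tank_type discriminator is filled in automatically.
ust = UndergroundStorageTank(
    facility_id=facility.facility_id,
    capacity=10000,
    stored_substance="diesel",
    status="active",
    created_on=datetime.utcnow(),
)
db.session.add(ust)
db.session.commit()

# Querying the base class returns USTs and ASTs alike, discriminated by tank_type.
tanks = StorageTank.query.filter_by(facility_id=facility.facility_id).all()
```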
#### File: envirobase/external/manages.py
```python
import pyodbc
import pandas
__all__ = ["read_manages3"]
def _list_or_tuple(x):
return isinstance(x, (list, tuple))
def _flatten(sequence, to_expand=_list_or_tuple):
for item in sequence:
if to_expand(item):
for subitem in _flatten(item, to_expand):
yield subitem
else:
yield item
def read_manages3(mdb_path):
"""
Function to read a MANAGES 3.x database and return
the data in a pandas DataFrame for analysis.
Parameters
----------
mdb_path : str
The path to the MANAGES 3.x Site.mdb file.
Returns
-------
DataFrame : pandas DataFrame
returns a pandas DataFrame.
Examples
--------
>>> from enviropy.external import read_manages3
    >>> data = read_manages3(r'H:\INTERNAL\MANAGES_DATA\Cardinal\Cardinal\Site.mdb')
"""
driver = "{Microsoft Access Driver (*.mdb, *.accdb)}"
database = mdb_path
conxn = pyodbc.connect("DRIVER={0};DBQ={1}".format(driver, database))
query = """
SELECT sample_results.lab_id, sample_results.location_id,
sample_results.sample_date, site_parameters.param_name,
sample_results.lt_measure, sample_results.analysis_result,
site_parameters.default_unit
FROM sample_results LEFT JOIN site_parameters
ON sample_results.storet_code = site_parameters.storet_code
"""
data = pandas.read_sql(query, conxn)
conxn.close()
return data
class Manages(object):
"""
Function to read a MANAGES 4.x database and return
the data in a pandas DataFrame for analysis.
Parameters
----------
server : str
The name of the server.
database : str
The name of the database
Returns
-------
DataFrame : pandas DataFrame
returns a pandas DataFrame.
Examples
--------
>>> from enviropy.external import manages
>>> db = manages.Manages()
"""
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = object.__new__(cls)
try:
print("connecting to manages database...")
params = config.config(filename="database.ini", section="manages")
_conxn = Manages._instance._conxn = pyodbc.connect(**params)
except (Exception, pyodbc.DatabaseError) as error:
print(error)
Manages._instance = None
else:
print("connection established")
return cls._instance
def __init__(self):
self._conxn = self._instance._conxn
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._conxn.close()
def site_names(self):
return pandas.read_sql("SELECT NAME FROM SITE", self._conxn)
def get_results(self, site=None):
"""
query Manages database by Site
"""
query = """
SELECT site.site_id, site.name,
sample_results.lab_id, sample_results.location_id,
sample_results.sample_date, site_parameters.param_name,
sample_results.lt_measure, sample_results.analysis_result,
sample_results.detection_limit, sample_results.RL,
sample_results.flags, site_parameters.default_unit
FROM sample_results
LEFT JOIN site_parameters
ON sample_results.storet_code = site_parameters.storet_code AND sample_results.site_id = site_parameters.site_id
LEFT JOIN locations
ON locations.site_id = sample_results.site_id AND locations.location_id = sample_results.location_id
LEFT JOIN site
ON site.site_id = locations.site_id
WHERE (name in ({0}))
"""
if site is not None:
query = query.format(",".join("?" * len(site)))
query_params = tuple(_flatten(site))
else:
all_sites = self.site_names()["NAME"]
query = query.format(",".join("?" * len(all_sites)))
query_params = tuple(_flatten(all_sites))
return pandas.read_sql(query, self._conxn, params=query_params)
```
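A brief usage sketch for the readers above. The Access file path and site name are placeholders, and it assumes the Microsoft Access ODBC driver plus a database.ini with a [manages] section are available.
```python
# Read a MANAGES 3.x Access database into a DataFrame (the path is a placeholder).
df3 = read_manages3(r"C:\data\Cardinal\Site.mdb")

# Query a MANAGES 4.x server. Manages() is a singleton and doubles as a context manager.
with Manages() as db:
    sites = db.site_names()
    results = db.get_results(site=["Cardinal"])
```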
#### File: tests/unit/test_models.py
```python
def test_new_facility(new_facility):
"""Test Facility model when a new Facility is created"""
assert new_facility.name == "test site"
assert new_facility.address == "1234 test rd."
assert new_facility.city == "Test"
assert new_facility.state == "OH"
assert new_facility.zipcode == "12345"
assert new_facility.longitude == -80.0
assert new_facility.latitude == 40.0
```
|
{
"source": "jentjr/enviropy",
"score": 3
}
|
#### File: enviropy/io/file.py
```python
import pandas as pd
from enviropy import Enviropy
def read_csv(fname):
df = pd.read_csv(fname)
return Enviropy(df)
```
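A one-line usage sketch for the reader above; the CSV filename is a placeholder.
```python
# Wrap a CSV of results in an Enviropy frame (filename is hypothetical).
data = read_csv("sample_results.csv")
```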
|
{
"source": "jentjr/observations",
"score": 3
}
|
#### File: observations/io/io_knmi.py
```python
import os
import re
from io import StringIO
import numpy as np
import pandas as pd
import requests
def get_stations(variable='RD'):
"""get knmi stations from json files according to variable
Parameters
----------
variable : str, optional
        measurement variable, e.g. 'RD' or 'EV24', by default 'RD'
Returns
-------
pandas DataFrame with stations, names and coordinates (Lat/Lon & RD)
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
if variable == "RD":
fname = "../../data/knmi_neerslagstation.json"
else:
fname = "../../data/knmi_meteostation.json"
stations = pd.read_json(os.path.join(dir_path, fname))
if variable == 'PG':
        # no air pressure is measured in Ell
stations.drop(377, inplace=True)
if variable == 'EV24':
        # no evaporation is measured in Woensdrecht
stations.drop(340, inplace=True)
return stations
def get_nearest_stations_xy(x, y, variable, n=1, stations=None, ignore=None):
"""find the KNMI stations that measure 'variable' closest to the
x, y coordinates
Parameters
----------
x : int or float
x coordinate in RD
y : int or float
        y coordinate in RD
variable : str
measurement variable e.g. 'RD' or 'EV24'
n : int, optional
number of stations you want to return. The default is 1.
stations : pd.DataFrame, optional
if None stations will be obtained using the get_stations function.
The default is None.
ignore : list, optional
list of stations to ignore. The default is None.
Returns
-------
list
station numbers.
"""
if stations is None:
stations = get_stations(variable=variable)
if ignore is not None:
stations.drop(ignore, inplace=True)
if stations.empty:
return None
d = np.sqrt((stations.x - x)**2 + (stations.y - y)**2)
return d.nsmallest(n).index.to_list()
def get_nearest_station_df(locations, stations=None, variable="RD", ignore=None):
"""find the KNMI stations that measure 'variable' closest to the
stations in 'locations'.
Parameters
----------
locations : pd.DataFrame
station number, x and y coordinates
stations : pd.DataFrame, optional
if None stations will be obtained using the get_stations function.
The default is None.
variable : str
measurement variable e.g. 'RD' or 'EV24'
ignore : list, optional
list of stations to ignore. The default is None.
Returns
-------
stns : list
station numbers.
"""
if stations is None:
stations = get_stations(variable=variable)
if ignore is not None:
stations.drop(ignore, inplace=True)
if stations.empty:
return None
xo = pd.to_numeric(locations.x)
xt = pd.to_numeric(stations.x)
yo = pd.to_numeric(locations.y)
yt = pd.to_numeric(stations.y)
xh, xi = np.meshgrid(xt, xo)
yh, yi = np.meshgrid(yt, yo)
distances = pd.DataFrame(np.sqrt((xh - xi) ** 2 + (yh - yi) ** 2),
index=locations.index,
columns=stations.index)
stns = distances.idxmin(axis=1).unique()
if np.any(np.isnan(stns)):
stns = stns[~np.isnan(stns)].astype(int)
return stns
def _start_end_to_datetime(start, end):
"""convert start and endtime to datetime
Parameters
----------
start : str, datetime, None
start time
end : str, datetime, None
start time
Returns
-------
start : pd.TimeStamp
start time
end : pd.TimeStamp
end time
"""
if start is None:
start = pd.Timestamp(pd.Timestamp.today().year, 1, 1)
else:
start = pd.to_datetime(start)
# start date one day before because later the datetime index is modified
start = start - pd.Timedelta(1, 'D')
if end is None:
end = pd.Timestamp.today()
else:
end = pd.to_datetime(end)
return start, end
def download_knmi_data(stn, var='RD', start=None, end=None, interval='daily',
inseason=False, raise_exceptions=True, verbose=False):
"""download knmi data of a measurements station for certain observation
type
Parameters
----------
stn : int or str
number of measurements station
var : str, optional
measurement type 'RD' or 'EV24'. The default is 'RD'.
start : str, datetime or None, optional
start date of observations. The default is None.
end : str, datetime or None, optional
end date of observations. The default is None.
interval : str, optional
time interval of observations. The default is 'daily'.
inseason : bool, optional
passed to the knmi api. The default is False.
raise_exceptions : bool, optional
if True you get errors when no data is returned. The default is True.
verbose : boolean, optional
Print additional information to the screen (default is False).
Raises
------
NotImplementedError
different time intervals and inseason data is not yet working.
ValueError
if the data from knmi cannot not be read a ValueError is raised.
Unless raise_exceptions is False
Returns
-------
knmi_df : pd.DataFrame
data from one station from one type of observation
variables : dictionary
        information about the observed variables
stations : pd.DataFrame
information about the measurement station.
"""
# checks
if interval.startswith('hour') and var == 'RD':
message = 'Interval can not be hourly for rainfall-stations'
raise (ValueError(message))
if interval != 'daily':
raise NotImplementedError('only daily intervals are working now')
if inseason:
raise NotImplementedError('season stuff not implemented')
start, end = _start_end_to_datetime(start, end)
# convert possible integer to string
stn = str(stn)
# define variables
knmi_df = pd.DataFrame()
variables = {}
stations = pd.DataFrame()
# download and read data
try:
if interval.startswith('hour'):
# hourly data from meteorological stations
url = 'http://projects.knmi.nl/klimatologie/uurgegevens/getdata_uur.cgi'
knmi_df = get_knmi_hourly(url, stn, var, start, end)
elif var == 'RD':
# daily data from rainfall-stations
url = 'http://projects.knmi.nl/klimatologie/monv/reeksen/getdata_rr.cgi'
knmi_df, variables = get_knmi_daily_rainfall(
url, stn, var, start, end, inseason, verbose)
else:
# daily data from meteorological stations
url = 'http://projects.knmi.nl/klimatologie/daggegevens/getdata_dag.cgi'
knmi_df, variables, stations = get_knmi_daily_meteo(
url, stn, var, start, end, inseason, verbose)
except ValueError as e:
if verbose:
print(e)
if raise_exceptions:
raise ValueError(e)
return knmi_df, variables, stations
def get_knmi_daily_rainfall(url, stn, var, start, end, inseason, verbose=False):
"""download and read knmi daily rainfall
Parameters
----------
url : str
download url.
stn : str
station number.
var : str
must be 'RD'.
start : pd.TimeStamp
start time of observations.
end : pd.TimeStamp
end time of observations.
inseason : boolean
flag to obtain inseason data.
verbose : boolean, optional
Print additional information to the screen (default is False).
Raises
------
ValueError
if there is no data for the provided stn an error is raised.
Returns
-------
pd.DataFrame
measurements.
variables : dictionary
additional information about the variables
"""
data = {
'start': start.strftime('%Y%m%d'),
'end': end.strftime('%Y%m%d'),
'inseason': str(int(inseason)),
'vars': var,
'stns': stn,
}
result = requests.get(url, params=data).text
f = StringIO(result)
knmi_df, variables = read_knmi_daily_rainfall(f, var, verbose=verbose)
if int(stn) not in knmi_df.STN.unique():
raise ValueError(f'KNMI station {stn} not recognized please provide '
'valid rainfall station number')
return knmi_df[[var]], variables
def _read_knmi_header(f, verbose=False):
variables = {}
line = f.readline()
if 'DOCTYPE HTML PUBLIC' in line:
if verbose:
print(f.read())
raise ValueError('Internal Server Error')
for iline in range(500):
if ' = ' in line:
line = line.lstrip(' #').strip('\n')
varDes = line.split(' = ')
variables[varDes[0].strip()] = varDes[1].strip()
if 'STN,YY' in line:
header = line.split(',')
header = [item.lstrip().rstrip() for item in header]
break
line = f.readline()
if iline > 498:
raise ValueError('cannot read measurements from file')
return f, variables, header
def _transform_variables(df, variables, verbose=False):
    # iterate over a copy since keys may be added or removed during transformation
    for key, value in list(variables.items()):
        # test if key exists in data
if key not in df.keys():
if key == 'YYYYMMDD' or key == 'HH':
pass
elif key == 'T10N':
variables.pop(key)
key = 'T10'
else:
raise NameError(key + ' does not exist in data')
if '0.1 ' in value:
if verbose:
print(f'transform {key}, {value} from 0.1 to 1')
# transform 0.1 to 1
df[key] = df[key] * 0.1
value = value.replace('0.1 ', '')
if ' tiende ' in value:
if verbose:
print(f'transform {key}, {value} from 0.1 to 1')
# transform 0.1 to 1
df[key] = df[key] * 0.1
value = value.replace(' tiende ', ' ')
if ' mm' in value:
if verbose:
print(f'transform {key}, {value} from mm to m')
# transform mm to m
df[key] = df[key] * 0.001
value = value.replace(' mm', ' m')
if ' millimeters' in value:
if verbose:
print(f'transform {key}, {value} from mm to m')
# transform mm to m
df[key] = df[key] * 0.001
value = value.replace(' millimeters', ' m')
# Store new variable
variables[key] = value
return df, variables
def read_knmi_daily_rainfall(f, var, verbose=False):
f, variables, header = _read_knmi_header(f)
df = pd.read_csv(f, header=None, names=header, na_values=' ')
f.close()
df.set_index(pd.to_datetime(df.YYYYMMDD, format='%Y%m%d'),
inplace=True)
df = df.drop('YYYYMMDD', axis=1)
# sometimes the last row is messed up, check for that and remove it
if df.iloc[-1].isna().any():
if verbose:
print('last row contains no data, remove last row')
df = df.drop(index=df.index[-1])
df.loc[:, var] = df[var].astype(float)
# daily precipitation amount in 0.1 mm over the period 08.00
# preceding day - 08.00 UTC present day
df.index = df.index + pd.to_timedelta(8, unit='h')
# from UT to UT+1 (standard-time in the Netherlands)
df.index = df.index + pd.to_timedelta(1, unit='h')
df, variables = _transform_variables(df, variables, verbose)
return df, variables
def _read_station_location(f, verbose=False):
stations = None
line = f.readline()
for iline in range(30):
if 'STN' in line:
titels = line.strip('# ').split()
titels = [x.replace('(', '_') for x in titels]
titels = [x.replace(r')', '') for x in titels]
values = f.readline().strip('# ').strip().replace(':', '')
values = re.split(r'\s{2,}', values)
# Create pd.DataFrame for station data
stations = pd.DataFrame(columns=titels, data=[values])
stations.set_index(['STN'], inplace=True)
for col in stations.columns:
try:
stations.loc[:, col] = stations[col].astype(float)
except ValueError:
pass
if ':' in f.readline():
raise ValueError(
'KNMI station number not recognized please provide '
'valid meteo station number')
break
line = f.readline()
if stations is None:
if verbose:
print('could not find stations')
return f, stations
def get_knmi_daily_meteo(url, stn, var, start, end, inseason, verbose=False):
"""download and read knmi daily meteo data
Parameters
----------
url : str
download url.
stn : str
station number.
var : str
e.g. 'EV24'.
start : pd.TimeStamp
start time of observations.
end : pd.TimeStamp
end time of observations.
inseason : boolean
flag to obtain inseason data.
verbose : boolean, optional
Print additional information to the screen (default is False).
Returns
-------
pd.DataFrame
measurements.
variables : dictionary
additional information about the variables
stations : pd.DataFrame
additional data about the measurement station
"""
data = {
'start': start.strftime('%Y%m%d'),
'end': end.strftime('%Y%m%d'),
'inseason': str(int(inseason)),
'vars': var,
'stns': stn,
}
result = requests.get(url, params=data).text
f = StringIO(result)
knmi_df, variables, stations = read_knmi_daily_meteo(f)
return knmi_df[[var]], variables, stations
def read_knmi_daily_meteo(f, verbose=False):
f, stations = _read_station_location(f, verbose)
f, variables, header = _read_knmi_header(f)
header[0] = header[0].lstrip('# ')
df = pd.read_csv(f, header=None, names=header, na_values=' ')
f.close()
df.set_index(pd.to_datetime(df.YYYYMMDD, format='%Y%m%d'),
inplace=True)
df = df.drop('YYYYMMDD', axis=1)
df = df.loc[df.index.notnull(), :]
# add a full day for meteorological data, so that the
# timestamp is at the end of the period in the data
df.index = df.index + pd.to_timedelta(1, unit='d')
# from UT to UT+1 (standard-time in the Netherlands)
df.index = df.index + pd.to_timedelta(1, unit='h')
df, variables = _transform_variables(df, variables, verbose)
return df, variables, stations
# return knmi_series
def get_knmi_hourly(url, stn, var, start, end):
data = {
'start': start.strftime('%Y%m%d') + '01',
'end': end.strftime('%Y%m%d') + '24',
'vars': var,
'stns': stn,
}
result = requests.get(url, params=data).text
f = StringIO(result)
knmi_series = read_knmi_hourly(f)
return knmi_series
def read_knmi_hourly(f):
raise NotImplementedError('work in progress')
knmi_df = pd.DataFrame()
return knmi_df
def get_knmi_timeseries_xy(x, y, var, start, end, fill_missing_obs=True,
interval='daily', inseason=False,
raise_exceptions=False,
verbose=False):
# get station
stations = get_stations(variable=var)
stn = get_nearest_stations_xy(x, y, var, stations=stations)[0]
# download data
if fill_missing_obs:
knmi_df, variables, station_meta = \
fill_missing_measurements(stn, var, start, end,
interval, raise_exceptions,
verbose=verbose)
else:
knmi_df, variables, station_meta = \
download_knmi_data(stn, var, start, end,
interval, inseason, raise_exceptions,
verbose=verbose)
meta = station_meta.to_dict()
meta.update(variables)
# set metadata
name = var + ' ' + stations.loc[stn, 'naam']
x = stations.loc[stn, 'x']
y = stations.loc[stn, 'y']
meta.update({'x': x, 'y': y, 'station': stn, 'name': name})
return knmi_df, meta
def get_knmi_timeseries_stn(stn, var, start, end,
fill_missing_obs=True, interval='daily',
inseason=False, raise_exceptions=False,
verbose=False):
# get station
stations = get_stations(variable=var)
# download data
if fill_missing_obs:
knmi_df, variables, station_meta = \
fill_missing_measurements(stn, var, start, end,
interval, raise_exceptions,
verbose=verbose)
else:
knmi_df, variables, station_meta = \
download_knmi_data(stn, var, start, end,
interval, inseason, raise_exceptions,
verbose=verbose)
meta = station_meta.to_dict()
meta.update(variables)
# set metadata
name = var + ' ' + stations.loc[stn, 'naam']
x = stations.loc[stn, 'x']
y = stations.loc[stn, 'y']
meta.update({'x': x, 'y': y, 'station': stn, 'name': name})
return knmi_df, meta
def add_missing_indices(knmi_df, stn, start, end, verbose=False):
"""when downloading KNMI data you don't always get a DataFrame with the
periods that you provided in your request. Thus the index does not cover
the complete period that you are interested in. This function adds the
missing period to the index of the DataFrame.
Parameters
----------
knmi_df : pd.DataFrame
data from one station from one type of observation, with additional
column to see which station is used to fill the value
stn : int or str
measurement station.
start : pd.TimeStamp
start time of observations.
end : pd.TimeStamp
end time of observations.
verbose : boolean, optional
Print additional information to the screen (default is False).
Returns
-------
knmi_df : pd.DataFrame
data from one station from one type of observation
"""
    # check whether the given start and end dates roughly match the measurement dates
if (knmi_df.index[0] - start).days < 2:
new_start = knmi_df.index[0]
else:
new_start = pd.Timestamp(year=start.year, month=start.month,
day=start.day, hour=knmi_df.index[0].hour,
minute=knmi_df.index[0].minute,
second=knmi_df.index[0].second)
if verbose:
print(
f'station {stn} has no measurements before {knmi_df.index[0]}')
if (knmi_df.index[-1] - end).days < 2:
new_end = knmi_df.index[-1]
else:
new_end = pd.Timestamp(year=end.year, month=end.month, day=end.day,
hour=knmi_df.index[-1].hour,
minute=knmi_df.index[-1].minute,
second=knmi_df.index[-1].second)
if verbose:
print(
f'station {stn} has no measurements after {knmi_df.index[-1]}')
# add missing indices
new_index = pd.date_range(new_start, new_end, freq='D')
knmi_df = knmi_df.reindex(new_index)
return knmi_df
def fill_missing_measurements(stn, var='RD', start=None, end=None,
interval='daily',
raise_exceptions=False, verbose=False):
"""fill missing measurements in knmi data
Parameters
----------
stn : int or str
measurement station.
interval : str, optional
desired time interval for observations. The default is 'daily'.
var : str, optional
observation type. The default is 'RD'.
start : str, datetime or None, optional
start date of observations. The default is None.
end : str, datetime or None, optional
end date of observations. The default is None.
raise_exceptions : bool, optional
        if True you get errors when no data is returned. The default is False.
verbose : boolean, optional
Print additional information to the screen (default is False).
Returns
-------
knmi_df : pd.DataFrame
data from one station from one type of observation, with additional
column to see which station is used to fill the value
variables : dictionary
        information about the observed variables
stations : pd.DataFrame
information about the measurement station.
"""
if type(var) is not str:
raise (TypeError('Only one variable supported for now'))
# get the location of the stations
stations = get_stations(variable=var)
if start is None:
start = pd.Timestamp(pd.Timestamp.today().year, 1, 1)
else:
start = pd.to_datetime(start)
if end is None:
end = pd.Timestamp.today()
else:
end = pd.to_datetime(end)
if verbose:
print('Download ' + var + ' from ' +
str(stn) + ' ' + stations.loc[stn, 'naam'])
knmi_df, variables, station_meta = \
download_knmi_data(stn, var, start=start,
end=end, interval=interval,
inseason=False,
raise_exceptions=raise_exceptions,
verbose=verbose)
# if the first station cannot be read, read another station as the first
ignore = [stn]
while knmi_df.empty:
stn = get_nearest_station_df(
stations.loc[[stn]], variable=var, ignore=ignore)[0]
if verbose:
print('Download ' + var + ' from ' +
str(stn) + ' ' + stations.loc[stn, 'naam'])
knmi_df, variables, station_meta = \
download_knmi_data(stn, var, start=start,
end=end, interval=interval,
inseason=False,
raise_exceptions=raise_exceptions,
verbose=verbose)
ignore.append(stn)
# find missing values
knmi_df = add_missing_indices(knmi_df, stn, start, end, verbose)
missing = knmi_df[var].isna()
if verbose:
print(f'station {stn} has {missing.sum()} missing measurements')
# fill missing values
while np.any(missing) and not np.all(missing):
stn_comp = get_nearest_station_df(
stations.loc[[stn]], variable=var, ignore=ignore)
if verbose:
print(f'trying to fill {missing.sum()} '
f'measurements with station {stn_comp}')
if stn_comp is None:
if verbose:
print('could not fill all missing measurements there are '
'no stations left to check')
missing[:] = False
break
else:
stn_comp = stn_comp[0]
knmi_df_comp, _, __ = \
download_knmi_data(stn_comp, var,
start=start, end=end,
interval=interval,
inseason=False,
raise_exceptions=raise_exceptions,
verbose=verbose)
if knmi_df_comp.empty:
if verbose:
print(f'station {stn_comp} cannot be downloaded')
else:
# dropnans from new data
knmi_df_comp = knmi_df_comp.loc[~knmi_df_comp[var].isna(), :]
# get index of missing data in original timeseries
missing_idx = missing.loc[missing].index
# if any missing are in the new data, update
if missing_idx.isin(knmi_df_comp.index).any():
# index for missing but in newly downloaded data
ix_idx = missing_idx.intersection(knmi_df_comp.index)
# update missing data
knmi_df.loc[ix_idx, var] = knmi_df_comp.loc[ix_idx, var]
# add source station number
knmi_df.loc[ix_idx, 'station_opvulwaarde'] = str(stn_comp)
missing = knmi_df[var].isna()
ignore.append(stn_comp)
return knmi_df, variables, station_meta
```
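A short usage sketch of the KNMI helpers above. The station number, coordinates, and date range are arbitrary examples, and a network connection to the KNMI service is assumed.
```python
# Daily precipitation for a rainfall station, with gaps filled from nearby stations.
knmi_df, variables, station_meta = fill_missing_measurements(
    550, var="RD", start="2019-01-01", end="2019-12-31", verbose=True
)

# Or fetch the series plus metadata for the station nearest to an RD coordinate pair.
ts, meta = get_knmi_timeseries_xy(
    x=100000, y=400000, var="RD", start="2019-01-01", end="2019-12-31"
)
```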
#### File: observations/observations/observation.py
```python
import warnings
import numpy as np
from pandas import DataFrame
class Obs(DataFrame):
"""class for point observations.
An Obs object is a subclass of a pandas.DataFrame and allows for additional
attributes and methods.
    More information on subclassing pandas objects can be found here:
http://pandas.pydata.org/pandas-docs/stable/development/extending.html#extending-subclassing-pandas
Parameters
----------
x : int or float
x coordinate of observation point
y : int or float
y coordinate of observation point
name : str
name
meta : dictionary
metadata
filename : str
filename with data of observation point
"""
# temporary properties
_internal_names = DataFrame._internal_names + ['none']
_internal_names_set = set(_internal_names)
# normal properties
_metadata = ['x', 'y', 'name', 'meta', 'filename']
def __init__(self, *args, **kwargs):
""" constructor of Obs class
*args must be input for the pandas.DataFrame constructor,
**kwargs can be one of the attributes listed in _metadata or
keyword arguments for the constructor of a pandas.DataFrame.
"""
self.x = kwargs.pop('x', np.nan)
self.y = kwargs.pop('y', np.nan)
self.name = kwargs.pop('name', '')
self.meta = kwargs.pop('meta', {})
self.filename = kwargs.pop('filename', '')
super(Obs, self).__init__(*args, **kwargs)
@property
def _constructor(self):
return Obs
def to_collection_dict(self, include_meta=False):
"""get dictionary with registered attributes and their values
of an Obs object.
This method can be used to create a dataframe from a collection
of Obs objects.
Parameters
----------
include_meta : boolean, optional
include the meta dictionary in the collection dictionary,
default is false
Returns
-------
d : dictionary
dictionary with Obs information
"""
attrs = self._metadata.copy()
if not include_meta:
attrs.remove('meta')
d = {}
for att in attrs:
d[att] = getattr(self, att)
d['obs'] = self
return d
class GroundwaterObs(Obs):
"""class for groundwater quantity point observations
Subclass of the Obs class. Can have the following attributes:
- locatie: 2 filters at one piezometer should have the same 'locatie'
- filternr: 2 filters at one piezometer should have a different 'filternr'.
a higher filter number is preferably deeper than a lower filter number.
- bovenkant_filter: top op the filter in m NAP
- onderkant_filter: bottom of the filter in m NAP
- maaiveld: surface level in m NAP
- meetpunt: ? in m NAP
- metadata_available: boolean indicating if metadata is available for
the measurement point.
"""
_metadata = Obs._metadata + \
['locatie', 'filternr',
'bovenkant_filter', 'onderkant_filter',
'maaiveld', 'meetpunt', 'metadata_available'
]
def __init__(self, *args, **kwargs):
"""
*args must be input for the pandas.DataFrame constructor,
**kwargs can be one of the attributes listed in _metadata or
keyword arguments for the constructor of a pandas.DataFrame.
if the pandas.DataFrame has a column 'stand_m_tov_nap' a lot of
plotting and other methods will work automatically without changing
the default arguments.
"""
self.locatie = kwargs.pop('locatie', '')
self.filternr = kwargs.pop('filternr', '')
self.maaiveld = kwargs.pop('maaiveld', np.nan)
self.meetpunt = kwargs.pop('meetpunt', np.nan)
self.bovenkant_filter = kwargs.pop('bovenkant_filter', np.nan)
self.onderkant_filter = kwargs.pop('onderkant_filter', np.nan)
self.metadata_available = kwargs.pop('metadata_available', np.nan)
super(GroundwaterObs, self).__init__(*args, **kwargs)
@property
def _constructor(self):
return GroundwaterObs
@classmethod
def from_dino(cls, fname=None, location=None, filternr=1.,
tmin="1900-01-01", tmax="2040-01-01",
**kwargs):
"""download dino data from the server.
Parameters
----------
fname : str, optional
dino csv filename
location : str, optional
            location of the piezometer, i.e. B57F0077
        filternr : float, optional
            filter number of the piezometer, i.e. 1.
tmin : str
start date in format YYYY-MM-DD
tmax : str
end date in format YYYY-MM-DD
kwargs : key-word arguments
these arguments are passed to io_dino.read_dino_groundwater_csv if
fname is not None and otherwise to io_dino.findMeetreeks
"""
from .io import io_dino
if fname is not None:
# read dino csv file
measurements, meta = io_dino.read_dino_groundwater_csv(
fname, **kwargs)
return cls(measurements, meta=meta, **meta)
elif location is not None:
measurements, meta = io_dino.download_dino_groundwater(location,
filternr,
tmin, tmax,
**kwargs)
if meta['metadata_available']:
return cls(measurements, meta=meta,
x=meta.pop('x'), y=meta.pop('y'),
onderkant_filter=meta.pop('onderkant_filter'),
bovenkant_filter=meta.pop('bovenkant_filter'),
name=meta.pop('name'),
metadata_available=meta.pop('metadata_available'),
locatie=meta.pop('locatie'),
maaiveld=meta.pop('maaiveld'),
filternr=meta.pop('filternr'))
else:
return cls(measurements, meta=meta)
else:
raise ValueError(
'specify fname or location to obtain groundwater heads')
@classmethod
def from_dino_server(cls, location, filternr=1.,
tmin="1900-01-01", tmax="2040-01-01",
**kwargs):
"""download dino data from the server.
Parameters
----------
location : str
            location of the piezometer, i.e. B57F0077
        filternr : float
            filter number of the piezometer, i.e. 1.0
tmin : str
start date in format YYYY-MM-DD
tmax : str
end date in format YYYY-MM-DD
kwargs : key-word arguments
            these arguments are passed to the dino.findMeetreeks function
"""
warnings.warn(
"this method will be removed in future versions, use from_dino instead", DeprecationWarning)
from .io import io_dino
measurements, meta = io_dino.download_dino_groundwater(location,
filternr,
tmin, tmax,
**kwargs)
if meta['metadata_available']:
return cls(measurements, meta=meta,
x=meta.pop('x'), y=meta.pop('y'),
onderkant_filter=meta.pop('onderkant_filter'),
bovenkant_filter=meta.pop('bovenkant_filter'),
name=meta.pop('name'),
locatie=meta.pop('locatie'),
maaiveld=meta.pop('maaiveld'),
filternr=meta.pop('filternr'))
else:
return cls(measurements, meta=meta)
@classmethod
def from_dino_file(cls, fname=None, **kwargs):
"""read a dino csv file.
Parameters
----------
name : str, optional
            name of the piezometer, i.e. B57F0077
fname : str, optional
dino csv filename
kwargs : key-word arguments
these arguments are passed to io_dino.read_dino_groundwater_csv
"""
warnings.warn(
"this method will be removed in future versions, use from_dino instead", DeprecationWarning)
from .io import io_dino
if fname is not None:
# read dino csv file
measurements, meta = io_dino.read_dino_groundwater_csv(
fname, **kwargs)
return cls(measurements, meta=meta, **meta)
else:
raise ValueError(
'specify either the name or the filename of the measurement point')
@classmethod
def from_artdino_file(cls, fname=None, **kwargs):
"""read a dino csv file (artdiver style).
Parameters
----------
name : str, optional
            name of the piezometer, i.e. B57F0077
fname : str, optional
dino csv filename
kwargs : key-word arguments
these arguments are passed to io_dino.read_dino_groundwater_csv
"""
from .io import io_dino
if fname is not None:
# read dino csv file
measurements, meta = io_dino.read_artdino_groundwater_csv(
fname, **kwargs)
return cls(measurements, meta=meta, **meta)
else:
raise ValueError('specify either the name or the filename of the '
'measurement point!')
@classmethod
def from_wiski(cls, fname, **kwargs):
"""[summary]
Parameters
----------
fname : [type]
[description]
Returns
-------
[type]
[description]
"""
from .io import io_wiski
data, metadata = io_wiski.read_wiski_file(fname, **kwargs)
return cls(data, meta=metadata, **metadata)
@classmethod
def from_pystore_item(cls, item):
"""Create GroundwaterObs DataFrame from Pystore item
Parameters
----------
item : pystore.item.Item
Pystore item
Returns
-------
GroundwaterObs
GroundwaterObs DataFrame
"""
df = item.to_pandas()
try:
x = item.metadata["x"]
y = item.metadata["y"]
except KeyError:
x = np.nan
y = np.nan
item.metadata["datastore"] = item.datastore
return cls(df, x=x, y=y, meta=item.metadata)
class GroundwaterQualityObs(Obs):
"""class for groundwater quality (grondwatersamenstelling)
point observations.
Subclass of the Obs class
"""
_metadata = Obs._metadata + \
['locatie', 'filternr', 'maaiveld', 'metadata_available']
def __init__(self, *args, **kwargs):
self.locatie = kwargs.pop('locatie', '')
self.filternr = kwargs.pop('filternr', '')
self.maaiveld = kwargs.pop('maaiveld', np.nan)
self.metadata_available = kwargs.pop('metadata_available', np.nan)
super(GroundwaterQualityObs, self).__init__(*args, **kwargs)
@property
def _constructor(self):
return GroundwaterQualityObs
@classmethod
def from_dino(cls, fname, **kwargs):
"""read ad dino file with groundwater quality data
Parameters
----------
fname : str
dino txt filename
kwargs : key-word arguments
these arguments are passed to io_dino.read_dino_groundwater_quality_txt
"""
from .io import io_dino
measurements, meta = io_dino.read_dino_groundwater_quality_txt(
fname, **kwargs)
return cls(measurements, meta=meta, **meta)
class WaterlvlObs(Obs):
"""class for water level point observations.
Subclass of the Obs class
"""
_metadata = Obs._metadata + ['locatie', 'metadata_available']
def __init__(self, *args, **kwargs):
self.locatie = kwargs.pop('locatie', '')
self.metadata_available = kwargs.pop('metadata_available', np.nan)
super(WaterlvlObs, self).__init__(*args, **kwargs)
@property
def _constructor(self):
return WaterlvlObs
@classmethod
def from_dino(cls, fname, **kwargs):
"""read a dino file with waterlvl data
Parameters
----------
fname : str
dino csv filename
kwargs : key-word arguments
these arguments are passed to io_dino.read_dino_waterlvl_csv
"""
from .io import io_dino
measurements, meta = io_dino.read_dino_waterlvl_csv(fname, **kwargs)
return cls(measurements, meta=meta, **meta)
@classmethod
def from_waterinfo(cls, fname, **kwargs):
"""
Read data from waterinfo csv-file or zip.
Parameters
----------
fname : str
path to file (file can zip or csv)
Returns
-------
df : WaterlvlObs
WaterlvlObs object
Raises
------
ValueError
if file contains data for more than one location
"""
from .io import io_waterinfo
df, metadata = io_waterinfo.read_waterinfo_file(fname,
return_metadata=True)
return cls(df, meta=metadata, **metadata)
class ModelObs(Obs):
"""class for model point results.
Subclass of the Obs class
"""
_metadata = Obs._metadata + ['model']
def __init__(self, *args, **kwargs):
self.model = kwargs.pop('model', '')
super(ModelObs, self).__init__(*args, **kwargs)
@property
def _constructor(self):
return ModelObs
class KnmiObs(Obs):
"""class for KNMI timeseries.
Subclass of the Obs class
"""
_metadata = Obs._metadata + ['station']
def __init__(self, *args, **kwargs):
self.station = kwargs.pop('station', np.nan)
super(KnmiObs, self).__init__(*args, **kwargs)
@property
def _constructor(self):
return KnmiObs
@classmethod
def from_knmi(cls, stn, variable, startdate=None, enddate=None,
fill_missing_obs=True, verbose=False):
from .io import io_knmi
ts, meta = io_knmi.get_knmi_timeseries_stn(stn, variable,
startdate, enddate,
fill_missing_obs,
verbose=verbose)
return cls(ts, meta=meta, station=meta['station'], x=meta['x'],
y=meta['y'], name=meta['name'])
@classmethod
def from_nearest_xy(cls, x, y, variable, startdate=None, enddate=None,
fill_missing_obs=True, verbose=False):
from .io import io_knmi
ts, meta = io_knmi.get_knmi_timeseries_xy(x, y, variable,
startdate, enddate,
fill_missing_obs,
verbose=verbose)
return cls(ts, meta=meta, station=meta['station'], x=meta['x'],
y=meta['y'], name=meta['name'])
@classmethod
def from_obs(cls, obs, variable, startdate=None, enddate=None,
fill_missing_obs=True, verbose=False):
from .io import io_knmi
x = obs.x
y = obs.y
if startdate is None:
startdate = obs.index[0]
if enddate is None:
enddate = obs.index[-1]
ts, meta = io_knmi.get_knmi_timeseries_xy(x, y, variable,
startdate, enddate,
fill_missing_obs,
verbose=verbose)
return cls(ts, meta=meta, station=meta['station'], x=meta['x'],
y=meta['y'], name=meta['name'])
```
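A usage sketch for the observation classes above. The DINO location and filter number come from the docstring examples, the date range is arbitrary, and a network connection is assumed.
```python
# Download groundwater heads for a DINO piezometer.
gw = GroundwaterObs.from_dino(location="B57F0077", filternr=1.0,
                              tmin="2000-01-01", tmax="2020-01-01")

# Fetch the nearest KNMI precipitation series covering the same period and location.
rd = KnmiObs.from_obs(gw, "RD")

# Collect the registered attributes (x, y, name, ...) for building a collection DataFrame.
info = gw.to_collection_dict()
```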
|
{
"source": "jent-ly/summarizer_server",
"score": 3
}
|
#### File: summarizer_server/summarizer_server/account_service.py
```python
from models import database, Account
from serializers import AccountSchema
class AccountService:
def __init__(self):
self.anonymous_email = "<EMAIL>"
self.anonymous_gaia = "ew<PASSWORD>"
self.user_single = AccountSchema()
self.user_multiple = AccountSchema(many=True)
def serialize_single(self, account):
return self.user_single.dump(account)
def serialize_multiple(self, users):
return self.user_multiple.dump(users)
def get_or_create(self, email, gaia):
account = self.get(email)
if account is not None:
print("Account <'email={0} gaia={1}'> already exists.".format(email, gaia))
return account
account = Account(email=email, gaia=gaia)
database.session.add(account)
database.session.commit()
return account
def get(self, email):
account = Account.query.filter_by(email=email).first()
if account is None:
print("Account <'email={0}'> does not exist yet.".format(email))
return None
return account
def get_all(self):
return Account.query.order_by(Account.create_time.desc()).all()
def get_anonymous(self):
return self.get_or_create(self.anonymous_email, self.anonymous_gaia)
```
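A short sketch of how the service above might be used, assuming an active Flask application context with the `database` session configured; the e-mail and gaia values are placeholders.
```python
# Create (or fetch) an account and serialize it for an API response.
service = AccountService()
account = service.get_or_create("user@example.com", "gaia-123")
payload = service.serialize_single(account)

# List all accounts, newest first.
accounts = service.serialize_multiple(service.get_all())
```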
#### File: summarizer_server/tests/test_text_rank.py
```python
import unittest
import tests.utils as utils
import math
import pdb
from nltk import tokenize
from text_rank import TextRank
class TestTextRank(unittest.TestCase):
def setUp(self):
self.text_rank = TextRank()
def test_process_html(self):
article_html = utils.get_article_contents("article1.html")
expected_article_text = utils.get_article_contents("article1.txt")
article = self.text_rank.process_html(article_html)
self.assertEqual(
"Poll finds Raptorsโ playoff run has attracted new fans across Canada",
article.title,
)
self.assertEqual(expected_article_text, article.text)
self.assertEqual("en", article.config.get_language())
def test_summarize_from_html(self):
article_html = utils.get_article_contents("article2.html")
summary = self.text_rank.summarize_from_html(article_html, 15)
self.assertTrue(summary)
def test_evaluate_newspaper_summary_deterministic(self):
article = utils.get_article_contents("article2.txt")
sentences = tokenize.sent_tokenize(article)
scores = self.text_rank.evaluate_newspaper_summary(
"What's inside the Barcode?", article, sentences, "en"
)
ranked_sentences = sorted(((v, k[1]) for k, v in scores.items()), reverse=True)
top_sentences = list(
score_sentence_tuple[1] for score_sentence_tuple in ranked_sentences[:3]
)
self.assertListEqual(
[
"If the Scanner doesnโt find it, it will not acknowledge the EAN13 barcode.",
"In this article, weโre gonna take an example of the EAN13 barcode.",
"Whatโs inside the Barcode?",
],
top_sentences,
)
def test_evaluate_newspaper_summary_returns_normalized_scores(self):
article = utils.get_article_contents("article2.txt")
sentences = tokenize.sent_tokenize(article)
scores = self.text_rank.evaluate_newspaper_summary(
"What's inside the Barcode?", article, sentences, "en"
)
score_sum = sum(scores.values())
self.assertEqual(1, score_sum)
def test_evaluate_textrank_summary_returns_normalized_scores(self):
# evaluate_textrank_summary depends heavily on word vectorizations
# which are impractical to load on every test run, so this is all we can do
article = utils.get_article_contents("article1.txt")
sentences = tokenize.sent_tokenize(article)
scores = self.text_rank.evaluate_textrank_summary(sentences)
score_sum = sum(scores.values())
self.assertEqual(1, score_sum)
def test_summarize_returns_15_percent_of_sentences(self):
article = utils.get_article_contents("article1.txt")
sentences = tokenize.sent_tokenize(article)
all_top_sentences = self.text_rank.summarize("test title", article, "en", 100)
top_15p_sentences = self.text_rank.summarize("test title", article, "en", 15)
self.assertEqual(len(sentences), len(all_top_sentences))
self.assertEqual(
math.ceil(len(all_top_sentences) * 15 / 100), len(top_15p_sentences)
)
def test_summarize_one_sentence(self):
summary = self.text_rank.summarize("Hello world!", "Hello world!", "en", 100)
self.assertListEqual([], summary)
def test_summarize_default_language(self):
summary = self.text_rank.summarize(
"Hello world!", "Hello world! Welcome.", None, 100
)
self.assertListEqual(["Welcome."], summary)
if __name__ == "__main__":
unittest.main()
```
#### File: summarizer_server/tests/utils.py
```python
def get_article_contents(article_name):
article_file_name = "summarizer_server/tests/fixtures/" + article_name
with open(article_file_name, "r") as f:
return f.read()
```
#### File: summarizer_server/summarizer_server/text_rank.py
```python
import logging
import pickle
import math
import os
import re
import pdb
import networkx as nx
import nltk
import numpy as np
import pandas as pd
from collections import Counter
from nltk import tokenize
from nltk import corpus
from sklearn.metrics.pairwise import cosine_similarity
from newspaper import Article
from newspaper import nlp
from image_setup import WORD_EMBEDDINGS_FILE
log = logging.getLogger("summarizer_server")
class TextRank:
def __init__(self):
self.word_embeddings = {}
self.stop_words = set()
def setup(self):
# TODO: consider how to handle languages other than English
self.stop_words = set(corpus.stopwords.words("english"))
with open(WORD_EMBEDDINGS_FILE, "rb") as handle:
self.word_embeddings = pickle.load(handle)
def _remove_stopwords(self, sen):
sen_new = " ".join([i for i in sen if i not in self.stop_words])
return sen_new
def process_html(self, html):
# fetch page content and parse html using newspaper
article = Article(url="")
article.set_html(html)
article.parse()
return article
def normalize_scores(self, scores):
total = sum(scores.values())
return {key: score / total for key, score in scores.items()}
# Implemented following:
# https://www.analyticsvidhya.com/blog/2018/11/introduction-text-summarization-textrank-python/
def evaluate_textrank_summary(self, sentences):
# remove punctuations, numbers and special characters
        clean_sentences = pd.Series(sentences).str.replace("[^a-zA-Z]", " ", regex=True)
clean_sentences = [s.lower() for s in clean_sentences]
clean_sentences = [self._remove_stopwords(r.split()) for r in clean_sentences]
# create sentence vectors
sentence_vectors = []
for i in clean_sentences:
if len(i) != 0:
v = sum(
[self.word_embeddings.get(w, np.zeros((300,))) for w in i.split()]
) / (len(i.split()) + 0.001)
else:
v = np.zeros((300,))
sentence_vectors.append(v)
# similarity matrix
sim_mat = np.zeros([len(sentences), len(sentences)])
# initialize matrix
for i in range(len(sentences)):
for j in range(len(sentences)):
if i != j:
sim_mat[i][j] = cosine_similarity(
sentence_vectors[i].reshape(1, 300),
sentence_vectors[j].reshape(1, 300),
)[0, 0]
# convert matrix into graph
nx_graph = nx.from_numpy_array(sim_mat)
textrank_scores = self.normalize_scores(nx.pagerank(nx_graph))
# return a dictionary of index to sentences and their scores
# ie. { 0: 0.145, 1: 0.105, 2: 0.127, 3: 0.123, 4: 0.120, 5: 0.154, 6: 0.101, 7: 0.125 }
return textrank_scores
def evaluate_newspaper_summary(self, title, text, sentences, language):
# get newspaper's nlp scores
# https://github.com/codelucas/newspaper/blob/master/newspaper/article.py#L372
nlp.load_stopwords(language)
# call to: nlp.summarize(title=article.title, text=article.text, max_sents=max_sents)
# https://github.com/codelucas/newspaper/blob/master/newspaper/nlp.py#L40
title_words = nlp.split_words(title)
most_frequent = nlp.keywords(text)
nlp_scores = self.normalize_scores(
nlp.score(sentences, title_words, most_frequent)
)
# Return a dictionary of tuple<sentence index, setence text> to score
# ie. { (0, 'A new poll suggests that the Toronto Raptors...') : 0.144, ... }
return nlp_scores
def summarize_from_html(self, html, percent_sentences):
# Use newspaper3k's clean text extraction and parsing
article = self.process_html(html)
return self.summarize(
article.title,
article.text,
article.config.get_language(),
percent_sentences,
)
def summarize(self, title, text, language, percent_sentences):
# remove title from the text, if it appears in the text
if text.startswith(title):
text = text[len(title) :]
if not text:
return []
if not language:
language = "en"
text = text.lstrip()
sentences = tokenize.sent_tokenize(text)
textrank_scores = self.evaluate_textrank_summary(sentences)
newspaper_scores = self.evaluate_newspaper_summary(
title, text, sentences, language
)
totalled_scores = Counter()
for key, value in newspaper_scores.items():
totalled_scores[key[0]] += value
for key, value in textrank_scores.items():
totalled_scores[key] += value
num_sentences = int(math.ceil(len(sentences) * percent_sentences / 100))
sentence_indices = list(
map(lambda x: x[0], totalled_scores.most_common(num_sentences))
)
return list(map(lambda x: sentences[x], sentence_indices))
```
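A usage sketch of the summarizer above. `setup()` assumes the pickled word-embeddings file referenced by image_setup is present on disk, and the HTML and text inputs are placeholders.
```python
# Build the ranker once (loads stopwords and the 300-d word embeddings), then summarize.
tr = TextRank()
tr.setup()

html = "<html>...</html>"  # placeholder: a full article page fetched elsewhere
top_sentences = tr.summarize_from_html(html, percent_sentences=15)

# Plain text can be summarized directly when the title and language are already known.
summary = tr.summarize(
    "Example title",
    "First sentence here. Second sentence here. Third sentence here.",
    "en",
    50,
)
```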
|
{
"source": "jentron/Blender-PT2",
"score": 3
}
|
#### File: Blender-PT2/libs/Material.py
```python
import bpy
class Material:
def __init__(self, name='Material', overwrite=True):
self.diffuse_color = (1.0, 1.0, 1.0, 1.0)
self.specular_color = (1.0, 1.0, 1.0, 1.0)
self.ambient_color = (1.0, 1.0, 1.0, 1.0)
self.texture_color = (1.0, 1.0, 1.0, 1.0)
self.reflection_color= (1.0, 1.0, 1.0, 1.0)
self.reflect_factor = 0.5
self.ns_exponent = 50
self.tMax = 0
self.tMin = 0
self.tExpo = 0.6
self.bumpStrength = 1.0
self.ks_ignore_texture = 0
self.reflect_thru_lights = 0
self.reflect_thru_kd = 0
self.diffuse_texture =False #FIXME: Map to a texture class object
self.bump_texture =False #FIXME: Map to a texture class object
self.transparent_texture=False #FIXME: Map to a texture class object
self.reflection_texture =False #FIXME: Map to a texture class object
self.use_transparency=False # set for transparent_texture or alpha value < 1.0
self.specular = 0.5
self.roughness = 0.5
self.alpha = 1.0
if(overwrite is True) and ( name in bpy.data.materials ):
self.name=name
self.mat = bpy.data.materials[self.name]
else:
# get the material
self.mat = bpy.data.materials.new(name)
self.name = self.mat.name
# get the nodes
self.mat.use_nodes=True
self.nodes = self.mat.node_tree.nodes
# clear all nodes to start clean
for node in self.nodes:
self.nodes.remove(node)
# link nodes
self.links = self.mat.node_tree.links
def createBlenderMaterial(self):
#lazy typist
links=self.links
nodes=self.nodes
# Set custom values
self.alpha = 1 - self.tMax
if(self.alpha < 1):
self.use_transparency = True
#create the basic material nodes
node_output = self.nodes.new(type='ShaderNodeOutputMaterial')
node_output.location = 400,0
node_pbsdf = self.nodes.new(type='ShaderNodeBsdfPrincipled')
node_pbsdf.location = 0,0
node_pbsdf.inputs['Base Color'].default_value = self.diffuse_color
node_pbsdf.inputs['Alpha'].default_value = self.alpha
link = links.new(node_pbsdf.outputs['BSDF'], node_output.inputs['Surface'])
# create the texture mapping nodes
if( self.diffuse_texture or self.bump_texture or self.transparent_texture):
node_mapping = nodes.new(type='ShaderNodeMapping')
node_mapping.location = -1000, 0
node_texcoord = nodes.new(type='ShaderNodeTexCoord')
node_texcoord.location = -1200, 0
link = links.new(node_texcoord.outputs['UV'], node_mapping.inputs['Vector'])
# create diffuse texture
if(self.diffuse_texture):
node_texture = nodes.new(type='ShaderNodeTexImage')
node_texture.location = -600,0
node_texture.label = 'Diffuse Map'
node_texture.image = self.diffuse_texture
node_diffuseColor = nodes.new(type='ShaderNodeRGB')
node_diffuseColor.location = -500,250
node_diffuseColor.outputs['Color'].default_value=self.diffuse_color
node_diffuseMix = nodes.new(type='ShaderNodeMixRGB')
node_diffuseMix.location = -300, 25
node_diffuseMix.blend_type='MULTIPLY'
node_diffuseMix.inputs['Fac'].default_value = 1
link = links.new(node_diffuseColor.outputs['Color'], node_diffuseMix.inputs['Color1'])
link = links.new(node_texture.outputs['Color'], node_diffuseMix.inputs['Color2'])
link = links.new(node_diffuseMix.outputs['Color'], node_pbsdf.inputs['Base Color'])
link = links.new(node_mapping.outputs['Vector'], node_texture.inputs['Vector'])
# create transparent texture
if(self.transparent_texture):
self.use_transparency = True
node_trtext = nodes.new(type='ShaderNodeTexImage')
node_trtext.location = -600,-250
node_trtext.label = 'Transparent Map'
node_trtext.image = self.transparent_texture
node_trtext.image.colorspace_settings.name = 'Non-Color'
link = links.new(node_mapping.outputs['Vector'], node_trtext.inputs['Vector'])
link = links.new(node_trtext.outputs['Color'], node_pbsdf.inputs['Alpha'])
# create bump texture
if(self.bump_texture):
node_bump = nodes.new(type='ShaderNodeBump')
node_bump.location = -300,-500
node_bump.inputs['Strength'].default_value = self.bumpStrength
node_bumptext= nodes.new(type='ShaderNodeTexImage')
node_bumptext.location = -600,-500
node_bumptext.label = 'Bump Map'
node_bumptext.image = self.bump_texture
node_bumptext.image.colorspace_settings.name = 'Non-Color'
link = links.new(node_mapping.outputs['Vector'], node_bumptext.inputs['Vector'])
link = links.new(node_bumptext.outputs['Color'], node_bump.inputs['Height'])
link = links.new(node_bump.outputs['Normal'], node_pbsdf.inputs['Normal'])
if(self.reflection_texture):
print('I don\'t do reflective textures')
if(self.use_transparency):
self.mat.blend_method = 'HASHED'
self.mat.shadow_method = 'HASHED'
# return the material
return( self.mat )
if __name__ == "__main__":
myMat = Material(name='3 mat2', overwrite=True)
# myMat.diffuse_color = (.757, .757,.757, 1)
myMat.diffuse_color = (.455,.455,.455,1)
myMat.transparent_texture=False
myMat.bump_texture=True
myMat.createBlenderMaterial()
```
#### File: Blender-PT2/libs/PT2_open.py
```python
import gzip
from pathlib import Path
import re
import errno
import os
def PT2_open(name, mode):
is_gzip=True
myfile=Path(name)
#print("Test Existance")
if( myfile.exists() ):
        print(f"{myfile} exists!")
new_file=myfile
else:
stem = list(myfile.suffix)
print(stem)
stem[-1] = 'z'
new_file = Path(myfile.parents[0], myfile.stem + "".join(stem))
if( new_file.exists()):
pass
else:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), new_file)
with gzip.open(new_file, mode) as fh:
try:
fh.read(1)
except OSError:
            print(f'{new_file} is not a gzip file by OSError')
is_gzip=False
if(is_gzip):
return( gzip.open(new_file, mode))
return( open(new_file, mode))
def namecheck01(input):
output = re.sub(r' *: *[0-9]+', '', input)
return( output )
```
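A minimal usage sketch of the helpers above; the import path, the file name and the `rt` mode are assumptions for illustration, not values taken from the repository:
```python
# Sketch only: import path and file name are assumptions.
from libs.PT2_open import PT2_open, namecheck01
# Opens runtime/example.cr2 if it exists, otherwise falls back to the
# compressed runtime/example.crz and reads it transparently through gzip.
with PT2_open("runtime/example.cr2", "rt") as fh:
    for line in fh:
        print(namecheck01(line.rstrip()))
```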
#### File: Blender-PT2/libs/ReadPZMD.py
```python
import struct
import os
import logging
logger = logging.getLogger(__name__)
class Morph:
def __init__(self):
self.deltas=[]
self.min = 0
self.max = 1
self.trackingScale = 0.02
self.value = 0
self.name = 'shape'
self.group = ''
self.indexes = -1 # this is the number of vertexes in the morph! ITS BACKWARDS
self.numbDeltas = -1 # this is the number of vertexes in target!
self.fileOffset = -1
def print(self):
print('Morph:', self.name,
'Target:', self.group,
'Indexes: ', self.indexes,
'numbDeltas:', self.numbDeltas )
def log(self, level=logging.INFO):
logger.log(level, 'Morph: %s\tTarget: %s\tIndexes: %s\tnumbDeltas: %s'%
(self.name, self.group, self.indexes, self.numbDeltas ) )
def readPZMD(filename):
filesize=os.stat(filename).st_size
file=open(filename, 'rb')
## 4bytes PZMD
foo=file.read(4)
if (foo==b'PZMD'):
logger.info("%s is a Poser Morph File!"%filename)
else:
logger.critical("%s is not a PZMD Morph File!" % filename)
raise ValueError("%s is not a PZMD Morph File!" % filename)
## 4bytes Number of morphs, or version?
foo=int.from_bytes(file.read(4), byteorder='big', signed=False)
logger.debug('Version: %d' % foo)
if(foo > 1):
logger.warning('Unexpected version %d' % foo )
## 4bytes pad? or should header size by 64 bits?
foo=int.from_bytes(file.read(4), byteorder='big', signed=False)
if(foo==0):
logger.debug('Always 0?\t%d' % foo)
else:
logger.critical('Pad is not zero! Contact Developer!')
## 4bytes Size of morph header
header_length=int.from_bytes(file.read(4), byteorder='big', signed=False)
logger.debug('Header Length?\t%d'%header_length)
## 4bytes Number of morphs, or version?
morphs_count=int.from_bytes(file.read(4), byteorder='big', signed=False)
logger.debug('Number of morphs?\t%d' % morphs_count)
morphs=[]
for m in range(morphs_count):
morph=Morph()
## bpl string [1 byte length, followed by ascii chars]
bpl_length=int.from_bytes(file.read(1), byteorder='big', signed=False)
morph.name=(file.read(bpl_length)).decode("ascii")
## bpl string [1 byte length, followed by ascii chars]
bpl_length=int.from_bytes(file.read(1), byteorder='big', signed=False)
morph.group=(file.read(bpl_length)).decode("ascii")
## 4 bytes number of verts in group
morph.numbDeltas=int.from_bytes(file.read(4), byteorder='big', signed=False)
## 4 bytes numDeltas
morph.fileOffset=int.from_bytes(file.read(4), byteorder='big', signed=False)
morphs.append( morph )
# Calculate number of indexes by finding the file offset of the next morph, subtract
# the file offset of this morph, subtract 4 for the size, then divide by 16
for i in range(morphs_count-1):
morphs[i].indexes=int( (morphs[i+1].fileOffset - morphs[i].fileOffset - 4) / 16 )
morphs[i].log()
# handle last morph separately
morphs[-1].indexes=int( (filesize - morphs[-1].fileOffset - 4) / 16 )
morphs[-1].log()
for morph in morphs:
file.seek(morph.fileOffset)
indexes = int.from_bytes(file.read(4), byteorder='big', signed=False)
if indexes == morph.indexes:
logger.debug('%s indexes count matched!' % morph.name)
## ## 4 bytes index 3x4 bytes delta
for i in range(morph.indexes):
delta = file.read(16)
idx=int.from_bytes(delta[0:4], byteorder='big', signed=False)
vect=struct.unpack('>fff', delta[4:17])
morph.deltas.append( { int(idx) : vect } )
# print(idx, vect)
if indexes == len(morph.deltas):
logger.debug('%s all delta indexes read!'%morph.name )
else:
logger.critical('Expected %d deltas but found %d' %(morph.indexes, indexes) )
raise ValueError('Morphs were not parsed correctly!')
file.close()
logger.info('Read %d morphs, expected %d' % (len(morphs), morphs_count) )
return(morphs)
if __name__ == '__main__':
import sys
print (sys.argv[1])
logging.basicConfig(level=logging.DEBUG)
morphs = readPZMD(filename=sys.argv[1])
```
|
{
"source": "jenuk/clip-bb",
"score": 2
}
|
#### File: jenuk/clip-bb/config2object.py
```python
import importlib
def get_obj_from_str(string, reload=False):
module, cls = string.rsplit(".", 1)
if reload:
module_imp = importlib.import_module(module)
importlib.reload(module_imp)
return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
if not "target" in config:
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", dict()))
```
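A quick usage sketch of `instantiate_from_config`; the `target` here points at a standard-library class purely so the example has no external dependencies, and the flat import assumes `config2object.py` is on the path:
```python
from config2object import instantiate_from_config
# Any importable "module.Class" path works as a target.
config = {"target": "collections.OrderedDict", "params": {}}
obj = instantiate_from_config(config)
print(type(obj))  # <class 'collections.OrderedDict'>
```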
|
{
"source": "jenwich/practicum_game_project",
"score": 3
}
|
#### File: jenwich/practicum_game_project/bear.py
```python
import pygame, random
bears = []
bear_img = pygame.image.load("src/images/Bear.png")
class Bear:
width = 64
height = 136
hp = 1
def __init__(self, screen, gameMap, moveDir, speed):
self.screen = screen
self.gameMap = gameMap
self.moveDir = moveDir
self.speed = speed * 0.3 + 0.5
if moveDir == 1:
self.x = self.width/2
elif moveDir == 2:
self.speed *= -1
self.x = screen.get_width() - self.width/2
self.y = self.gameMap.getY(self.x)
self.xi = self.x
self.yi = self.y
self.rect = pygame.Rect(self.x - self.width/2, self.y - self.height, self.width, self.height)
self.discard = 0
def getXY(self):
return (self.x, self.y)
def getSize(self):
return (self.width, self.height)
def isOut(self, x):
if self.moveDir == 1:
return x + self.width/2 >= self.screen.get_width()
elif self.moveDir == 2:
return x - self.width/2 <= 0
def move(self):
x = self.x + self.speed
if not self.isOut(x):
y = self.gameMap.getY(int(x))
self.rect = self.rect.move(int(x - self.xi) - int(self.x - self.xi), y - self.y)
self.x = x
self.y = y
else:
self.discard = 1
def isDiscarded(self):
return self.discard
def draw(self):
# pygame.draw.rect(self.screen, (0, 0, 255), self.rect)
img_scaled = pygame.transform.scale(bear_img, (self.width, self.height))
if self.moveDir == 1:
img_scaled = pygame.transform.flip(img_scaled, 1, 0)
self.screen.blit(img_scaled, self.rect)
def isHitPlayer(self, player):
x, y = int(self.x), self.y
w, h = self.width, self.height
px, py = player.getXY()
pw, ph = player.getSize()
return ((x-w/2 >= px-pw/2) and (x-w/2 <= px+pw/2)) or ((x+w/2 >= px-pw/2) and (x+w/2 <= px+pw/2))
def hitted(self):
self.hp = self.hp - 1
if self.hp == 0:
self.discard = 1
def spawnBear(screen, gameMap):
moveDir = random.randint(1, 2)
speed = random.randint(0, 2)
bear = Bear(screen, gameMap, moveDir, speed)
bears.append(bear)
def bearsExec():
for bear in bears:
bear.move()
bear.draw()
if bear.isDiscarded():
bears.remove(bear)
def clearBears():
del bears[:]
```
#### File: jenwich/practicum_game_project/player.py
```python
import pygame, math
from arrow import Arrow
pygame.mixer.init()
player_img = pygame.image.load("src/images/Player.png")
arrows = []
counter = { "score": 0, "allBear": 0, "latestBear": 0, "comboBear": 0 }
bear_sound = pygame.mixer.Sound("src/sounds/Bear.wav")
class Player:
def __init__(self, screen, gameMap):
self.screen = screen
self.screenWidth = screen.get_width()
self.screenHeight = screen.get_height()
self.gameMap = gameMap
self.moveDir = 0
self.currentDir = 1
self.speed = 2
self.dead = 0
def set(self, x, width, height):
self.x = x
self.y = self.gameMap.getY(self.x)
self.width = width
self.height = height
self.rect = pygame.Rect(self.x - self.width/2, self.y - self.height, width, height)
def getXY(self):
return (self.x, self.y)
def getSize(self):
return (self.width, self.height)
def getAngle(self, x, y):
mx = self.x
my = self.y - self.height/2
if x == mx:
return math.pi / 2
elif my - y < 0:
return 0
else:
return math.atan(math.fabs(float(y - my)/(x - mx)))
def isOut(self, x):
return x - self.width/2 <= 0 or x + self.width/2 >= self.screenWidth
def isDead(self):
return self.dead
def move(self):
x = self.x
if self.moveDir == 1:
x = x - self.speed
elif self.moveDir == 2:
x = x + self.speed
if self.moveDir != 0:
if not self.isOut(x):
y = self.gameMap.getY(x)
self.rect = self.rect.move(x-self.x, y-self.y)
self.x = x
self.y = y
def setDir(self, moveDir):
self.moveDir = moveDir
if moveDir != 0:
self.currentDir = moveDir
def draw(self):
# pygame.draw.rect(self.screen, (225, 0, 0), self.rect)
img_scaled = pygame.transform.scale(player_img, (self.width, self.height))
if self.currentDir == 1:
img_scaled = pygame.transform.flip(img_scaled, 1, 0)
self.screen.blit(img_scaled, self.rect)
def calculateU(self, t):
if t < 300:
return 3
elif t < 1500:
return t / 100
else:
return 15
def setupArrow(self, dt, angle):
u = self.calculateU(dt)
ux = u * math.cos(angle)
uy = u * math.sin(angle)
if self.currentDir == 1:
startX = self.x - self.width / 2
if self.currentDir == 2:
startX = self.x + self.width / 2
ux = ux * -1
arrow = Arrow(self.screen, self.gameMap, startX, self.y - self.height*0.75, ux, uy)
arrows.append(arrow)
def arrowsExec(self):
for arrow in arrows:
arrow.move()
arrow.draw()
if arrow.isDiscarded():
arrows.remove(arrow)
def hitBear(self, bears):
for bear in bears:
if bear.isHitPlayer(self):
self.dead = 1
def arrowHitBear(self, bears):
for bear in bears:
for arrow in arrows:
if arrow.isHitBear(bear):
arrow.discard = 1
bear.hitted()
for bear in bears:
if bear.isDiscarded():
global counter
counter["allBear"] += 1
counter["latestBear"] += 1
counter["comboBear"] += 1
bearPos = bear.getXY()
dist = math.sqrt((self.x-bearPos[0])**2 + (self.y-bearPos[1])**2)
sc = int(10 + dist * 0.1)
counter["score"] += sc
bear_sound.play()
bears.remove(bear)
for arrow in arrows:
if arrow.isDiscarded():
arrows.remove(arrow)
def clearArrows():
del arrows[:]
def resetCounter():
counter["score"] = 0
counter["allBear"] = 0
counter["latestBear"] = 0
counter["comboBear"] = 0
```
|
{
"source": "Jenya26/MLP",
"score": 2
}
|
#### File: MLP/assertion/fail.py
```python
from .assertion_fail import AssertionFail
__all__ = ['fail']
def fail(message="Assertion fail"):
raise AssertionFail(message)
```
#### File: MLP/assertion/ok.py
```python
from .equals import equals
__all__ = ['ok']
def ok():
equals(True, True)
```
#### File: functions/tests/linear_test.py
```python
import numpy as np
from assertion import equals
from functions import LinearFunction
__all__ = ['should_calculate_single_input', 'should_calculate_multiple_input']
def should_calculate_single_input():
linear = LinearFunction()
equals(12., linear(12.))
def should_calculate_multiple_input():
linear = LinearFunction()
equals([12., 8.], linear(np.asarray([12., 8.])))
```
#### File: MLP/gradients/gradient.py
```python
import numpy as np
__all__ = ['Gradient']
class Gradient:
@staticmethod
def _forward(network, x, y):
layer_inputs = [x]
layer_outputs = [network.layers[0](x)]
for i, layer in enumerate(network.layers[1:]):
layer_inputs += [layer_outputs[i]]
layer_outputs += [layer(layer_inputs[i + 1])]
return layer_inputs, layer_outputs
@staticmethod
def _normalize_vectors(vectors):
if isinstance(vectors, float):
return np.asarray([vectors])
return vectors
@staticmethod
def _calculate_layer_gradient(gradient, layer, inputs, outputs, outputs_gradient):
activation_function_gradient = outputs_gradient * layer.activation_function.derivative(inputs, outputs)
weights_gradient = np.zeros((layer.input_dimension, layer.output_dimension))
for i in range(inputs.shape[0]):
current_input = inputs[i].reshape((layer.input_dimension, 1))
current_output = activation_function_gradient[i].reshape((1, layer.output_dimension))
weights_gradient += np.dot(current_input, current_output)
biases_gradient = np.sum(activation_function_gradient, axis=0)
gradient += [[
weights_gradient,
biases_gradient
]]
return activation_function_gradient.dot(layer.weights.T)
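    # __call__ ties the pieces together: run one forward pass, evaluate the
    # error gradient at the final layer, then walk the layers in reverse,
    # collecting [weights_gradient, biases_gradient] per layer while the
    # output gradient is propagated back through each layer's weights.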
def __call__(self, network, inputs, outputs, error):
inputs = Gradient._normalize_vectors(inputs)
outputs = Gradient._normalize_vectors(outputs)
layer_inputs, layer_outputs = Gradient._forward(network, inputs, outputs)
outputs_gradient = error(outputs, layer_outputs[-1], 1)
gradient = []
for i, layer in enumerate(network.layers[::-1]):
outputs_gradient = Gradient._calculate_layer_gradient(
gradient,
layer,
layer_inputs[-1 - i],
layer_outputs[-1 - i],
outputs_gradient
)
return np.asarray(gradient[::-1])
```
#### File: MLP/graphics/neural_network_model_controller_widget.py
```python
from PyQt5.QtWidgets import QWidget, QPushButton, QVBoxLayout, QHBoxLayout, QLineEdit, QLabel, QComboBox
from PyQt5.QtCore import Qt
from functions import SigmoidFunction, LinearFunction
__all__ = ['NeuralNetworkModelControllerWidget']
def get_current_function_index(function):
if isinstance(function, LinearFunction):
return 0
if isinstance(function, SigmoidFunction):
return 1
return -1
class NeuralNetworkModelControllerWidget(QWidget):
def __init__(self,
model,
parent=None):
super(NeuralNetworkModelControllerWidget, self).__init__(parent=parent)
self._model = model
self._layout = QVBoxLayout(self)
self._layers_layout = QVBoxLayout()
self.__update_layers_info()
add_layout_button = QPushButton("Add new layer")
add_layout_button.clicked.connect(self._add_layer)
self._layout.addLayout(self._layers_layout, 1)
self._layout.addWidget(add_layout_button, alignment=Qt.AlignBottom)
@property
def model(self):
return self._model
@model.setter
def model(self, model):
self._model = model
self.__update_layers_info()
def __update_layers_info(self):
for i in range(self._layers_layout.count()):
child = self._layers_layout.itemAt(i)
widget = child.widget()
widget.setVisible(False)
for i, layer in enumerate(self._model.layers[:-1]):
self.__add_layer_info(i, layer)
index = len(self._model.layers) - 1
self.__add_layer_info(index, self._model.layers[-1], False)
self._layers_layout.setAlignment(Qt.AlignTop)
def __create_new_layer(self, i, layer, enabled=True):
widget = QWidget()
layout = QVBoxLayout(widget)
layer_count_line_edit = QLineEdit(str(layer.output_dimension))
layer_count_line_edit.setFixedWidth(120)
layer_count_line_edit.textChanged[str].connect(
self.__on_change_layer_count_maker(layer_count_line_edit, i)
)
layer_count_line_edit.setEnabled(enabled)
remove_layout_button = QPushButton("X")
remove_layout_button.clicked.connect(self._remove_layout_maker(i))
remove_layout_button.setVisible(enabled)
label = QLabel("Layer %d:" % (i + 1))
line = QHBoxLayout()
line.addWidget(label, alignment=Qt.AlignTop)
line.addWidget(remove_layout_button, alignment=Qt.AlignTop)
activation_functions = QComboBox()
activation_functions.addItem("Linear function")
activation_functions.addItem("Sigmoid function")
activation_functions.setCurrentIndex(get_current_function_index(layer.activation_function))
activation_functions.currentIndexChanged.connect(self.__change_activation_function_maker(i))
layout.addLayout(line)
layout.addWidget(layer_count_line_edit, alignment=Qt.AlignTop)
layout.addWidget(activation_functions, alignment=Qt.AlignTop)
return widget
def __add_layer_info(self, i, layer, enabled=True):
item = self._layers_layout.itemAt(i)
if item is not None:
widget = item.widget()
widget_layout = widget.layout()
line_item = widget_layout.itemAt(0)
line = line_item.layout()
remove_layout_button_item = line.itemAt(1)
remove_layout_button = remove_layout_button_item.widget()
remove_layout_button.setVisible(enabled)
layer_count_line_edit_item = widget_layout.itemAt(1)
layer_count_line_edit = layer_count_line_edit_item.widget()
layer_count_line_edit.setText(str(layer.output_dimension))
layer_count_line_edit.setEnabled(enabled)
activation_function_item = widget_layout.itemAt(2)
activation_function = activation_function_item.widget()
activation_function.setCurrentIndex(get_current_function_index(layer.activation_function))
widget.setVisible(True)
else:
widget = self.__create_new_layer(i, layer, enabled)
self._layers_layout.addWidget(widget, alignment=Qt.AlignTop)
def __change_activation_function_maker(self, index):
def __change_activation_function(selected):
function = None
if selected == 0:
function = LinearFunction()
if selected == 1:
function = SigmoidFunction()
if function is not None:
self._model.change_activation_function(index, function)
return __change_activation_function
def _remove_layout_maker(self, index):
def _remove_layout():
self._model.remove_layer(index)
self.__update_layers_info()
self.repaint()
return _remove_layout
def _add_layer(self):
self._model.add_layer()
self.__update_layers_info()
def __on_change_layer_count_maker(self, layer_count_line_edit, index):
def __on_change_layer_count(text):
layer_count = 0
for ch in text:
if ord('0') <= ord(ch) <= ord('9'):
layer_count = 10 * layer_count + ord(ch) - ord('0')
layer_count_line_edit.setText(str(layer_count))
layer_count = max(layer_count, 1)
self._model.change_layer_count(index, layer_count)
return __on_change_layer_count
```
#### File: MLP/graphics/neural_network_teaching_service.py
```python
from PyQt5.QtCore import QThread
__all__ = ['NeuralNetworkTeachingService']
class NeuralNetworkTeachingService(QThread):
def __init__(self,
model,
teacher,
gradient,
error,
train_data_store,
batch=10,
learning_rate=1e-3,
iterations=1):
QThread.__init__(self)
self._teacher = teacher
self._model = model
self._gradient = gradient
self._error = error
self._train_data_store = train_data_store
self._batch = batch
self._learning_rate = learning_rate
self._iterations = iterations
self._on_update_model = None
self._start_callback = None
self._stop_callback = None
self._is_stop = False
def __del__(self):
self.wait()
@property
def network(self):
return self._model
@network.setter
def network(self, network):
self._model = network
@property
def gradient(self):
return self._gradient
@gradient.setter
def gradient(self, gradient):
self._gradient = gradient
@property
def error(self):
return self._error
@error.setter
def error(self, error):
self._error = error
@property
def train_data_store(self):
return self._train_data_store
@train_data_store.setter
def train_data_store(self, train_data_store):
self._train_data_store = train_data_store
@property
def teacher(self):
return self._teacher
@teacher.setter
def teacher(self, teacher):
self._teacher = teacher
@property
def learning_rate(self):
return self._learning_rate
@learning_rate.setter
def learning_rate(self, learning_rate):
self._learning_rate = learning_rate
@property
def iterations(self):
return self._iterations
@iterations.setter
def iterations(self, iterations):
self._iterations = iterations
@property
def on_update_model(self):
return self._on_update_model
@on_update_model.setter
def on_update_model(self, on_update_model):
if not callable(on_update_model):
raise ValueError('on_update_model should be callable')
self._on_update_model = on_update_model
@property
def start_callback(self):
return self._start_callback
@start_callback.setter
def start_callback(self, start_callback):
if not callable(start_callback):
            raise ValueError('Start callback should be callable')
self._start_callback = start_callback
@property
def stop_callback(self):
return self._stop_callback
@stop_callback.setter
def stop_callback(self, stop_callback):
if not callable(stop_callback):
raise ValueError('Stop callback should be callable')
self._stop_callback = stop_callback
def stop(self, stop_callback=None):
self._is_stop = True
if callable(stop_callback):
self._stop_callback = stop_callback
def run(self):
if self._start_callback is not None:
self._start_callback()
self._is_stop = False
model = self._model
while self._iterations > 0 and not self._is_stop:
model = model.copy()
self._teacher(
model=model,
gradient=self._gradient,
error=self._error,
data_store=self._train_data_store,
max_iterations=1,
batch=self._batch,
learning_rate=self._learning_rate
)
self._model = model
if self._on_update_model is not None:
self._on_update_model(model)
self._iterations -= 1
if self._stop_callback is not None:
self._stop_callback(self._iterations, model)
```
|
{
"source": "Jenyay/py_cache_manager",
"score": 2
}
|
#### File: py_cache_manager/cacheman/csvcache.py
```python
import os
from .registers import *
from .cachewrap import CacheWrap
from .autosync import AutoSyncCacheBase
class CSVCache(CacheWrap):
def __init__(self, cache_name, row_builder=None, row_reader=None, **kwargs):
self.row_builder = row_builder
self.row_reader = row_reader
CacheWrap.__init__(self, cache_name, **kwargs)
def saver(self, name, contents):
return csv_saver(self.manager.cache_directory, name, contents, self.row_builder)
def loader(self, name):
return csv_loader(self.manager.cache_directory, name, self.row_reader)
def deleter(self, name):
try:
os.remove(generate_csv_path(self.manager.cache_directory, name))
except OSError:
pass
def async_presaver(self, name, contents, extensions):
return csv_pre_saver(self.manager.cache_directory, name, contents, extensions, self.row_builder)
def async_saver(self, name, contents, extensions):
return csv_mover(self.manager.cache_directory, name, contents, extensions)
def async_cleaner(self, name, extensions):
return csv_cleaner(self.manager.cache_directory, name, extensions)
class AutoSyncCSVCache(AutoSyncCacheBase, CSVCache):
'''
AutoSyncCSVCache defaults to a csv basis.
'''
def __init__(self, cache_name, **kwargs):
AutoSyncCacheBase.__init__(self, CSVCache, cache_name, **kwargs)
```
|
{
"source": "jenyu7/hw9",
"score": 3
}
|
#### File: hw9/hw9/mle.py
```python
import scipy.stats as st
import scipy.optimize
import warnings
import numpy as np
def log_like_iid_gamma(params, n):
"""Log likelihood for i.i.d. Gamma measurements, parametrized
    by beta, alpha"""
beta, alpha = params
    if np.any(n <= 0):
return -np.inf
if beta <= 0:
return -np.inf
if alpha<=0:
return -np.inf
return st.gamma.logpdf(n , alpha, loc=0, scale=1/beta).sum()
#Code based on Bois (2020)
def mle_iid_gamma(n):
"""Perform maximum likelihood estimates for parameters for i.i.d.
    Gamma measurements, parametrized by beta, alpha"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = scipy.optimize.minimize(
fun=lambda params, n: -log_like_iid_gamma(params, n),
x0=np.array([2.00, 0.005]),
args=(n,),
method='Powell'
)
if res.success:
return res.x
else:
raise RuntimeError('Convergence failed with message', res.message)
def log_like_iid_bespoke(params, n):
beta, dbeta = params
    if np.any(n <= 0):
return -np.inf
if beta <= 0:
return -np.inf
if dbeta <= 0:
return -np.inf
return np.sum(np.log(beta)+np.log(beta+dbeta)-np.log(dbeta)-beta*n+np.log(1-np.exp(-dbeta*n)))
def mle_iid_bespoke(n):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = scipy.optimize.minimize(
fun=lambda params, n: -log_like_iid_bespoke(params, n),
x0=np.array([1.00, 0.5]),
args=(n,),
method='Powell'
)
if res.success:
return res.x
else:
raise RuntimeError('Convergence failed with message', res.message)
```
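A minimal sketch of fitting the gamma MLE to synthetic draws; the `hw9.mle` import path, the seed and the parameter values are assumptions for illustration, and the `(beta, alpha)` ordering follows the `params` unpacking above:
```python
import numpy as np
from hw9.mle import mle_iid_gamma  # assumed package layout
rg = np.random.default_rng(3252)
# Synthetic measurements with alpha=2, beta=0.005 (numpy uses shape=alpha, scale=1/beta)
n = rg.gamma(2.0, 1 / 0.005, size=150)
beta_mle, alpha_mle = mle_iid_gamma(n)
print(beta_mle, alpha_mle)  # should land near 0.005 and 2 if the fit converges
```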
#### File: hw9/hw9/reps.py
```python
import numpy as np
from tqdm import tqdm
#Set up Numpy random generator
rg = np.random.default_rng()
def draw_parametric_bs_reps_mle(
mle_fun, gen_fun, data, args=(), size=1, progress_bar=False
):
"""Draw parametric bootstrap replicates of maximum likelihood estimator.
Parameters
----------
mle_fun : function
Function with call signature mle_fun(data, *args) that computes
a MLE for the parameters
gen_fun : function
Function to randomly draw a new data set out of the model
distribution parametrized by the MLE. Must have call
signature `gen_fun(*params, size)`.
    data : one-dimensional Numpy array
Array of measurements
args : tuple, default ()
Arguments to be passed to `mle_fun()`.
size : int, default 1
Number of bootstrap replicates to draw.
progress_bar : bool, default False
Whether or not to display progress bar.
Returns
-------
output : numpy array
Bootstrap replicates of MLEs.
"""
params = mle_fun(data, *args)
if progress_bar:
iterator = tqdm(range(size))
else:
iterator = range(size)
return np.array(
[mle_fun(gen_fun(*params, size=len(data), *args)) for _ in iterator]
)
#Generates samples from the model distribution.
def sp_gamma(beta, alpha, size):
return rg.gamma(alpha, 1/beta, size=size)
```
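A sketch of parametric bootstrapping with the helpers above, reusing `mle_iid_gamma` from the sibling module; the import paths and the synthetic data are assumptions for illustration:
```python
import numpy as np
from hw9.mle import mle_iid_gamma  # assumed package layout
from hw9.reps import draw_parametric_bs_reps_mle, sp_gamma
# Synthetic stand-in data; replace with the real measurements.
data = np.random.default_rng(3252).gamma(2.0, 200.0, size=150)
bs_reps = draw_parametric_bs_reps_mle(
    mle_iid_gamma, sp_gamma, data, size=200, progress_bar=True
)
# 95% confidence intervals for (beta, alpha), column-wise over the replicates.
print(np.percentile(bs_reps, [2.5, 97.5], axis=0))
```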
|
{
"source": "jen-zecena/matrix-to-latex",
"score": 3
}
|
#### File: data/data-prep/convertall.py
```python
from __future__ import print_function
import inkml2img, glob, os
import pickle
import os.path
import csv
#.inkml2img('2013ALL_inkml_data/200923-1556-49.inkml','./2013ALL_inkml_data_image/200923-1556-49.png')
#inkml2img.inkml2img('200923-1556-49.inkml','linewidth.jpg')
"""takes a txt file with the names of all the inkml files you want to
convert.
input: filename (+ path if not in same directory) of a text file with the
inkml file names in it; the converted jpgs are written to the hard-coded
'jpgs/' directory.
output: technically none, but creates jpg files inside the 'jpgs/' folder.
"""
def convert2jpgs(filename):
inkmls = open(filename, "r")
for row in inkmls.readlines():
jpgname = 'jpgs/' + os.path.splitext(row)[0] + '.jpg'
print(jpgname)
inkml2img.inkml2img('TestEM2014/' + row.rstrip('\n'), jpgname)
filename = 'MatricesTrain/listeMatTrain.txt'
filename1 = 'individualsymbols/individualsymbols1.txt'
filename2 = 'individualsymbols/individualsymbols2.txt'
filename3 = 'TestEM2014/listTestInkml.txt'
convert2jpgs(filename3)
print("made it half way!!!!!!!!!!")
#convert2jpgs(filename2)
```
|
{
"source": "jenzenho/pyMARS",
"score": 2
}
|
#### File: pyMARS/pymars/pfa.py
```python
import networkx as nx
import numpy as np
import h5py
from collections import Counter
import time as tm
from drg import graph_search
import os, sys, argparse
import cantera as ct
import soln2ck
import soln2cti
import math
from create_trimmed_model import trim
from numpy import genfromtxt
from readin_initial_conditions import readin_conditions
import helper
def trim_pfa(total_edge_data, solution_object, threshold_value, keeper_list, done, target_species, model_file):
"""
This function determines what species should be excluded from the reduced model based on their DICs compared to the threshold value and a simple graph search.
Parameters
----------
total_edge_data: information containing the DICs for the graph edge weights
solution_object: The solution being reduced
threshold_value: User specified threshold value
    keeper_list: Species that should always be kept
    done: Determines whether or not the reduction is complete
target_species: The target species for the search in the array
model_file: File holding the model being reduced
Returns
-------
Returns an array of species that should be excluded from the original model at this threshold level
"""
start_time = tm.time()
# Initalize solution and components
solution = solution_object
species_objects = solution.species()
reaction_objects = solution.reactions()
# Use the networkx library to create a weighted graph of all of the species and their dependencies on each other.
graph = nx.DiGraph()
safe = [] # A list of species that are to be retained for this threshold value
# Calculate edge weights based on list received from get_rate_data
# Initial condition
for ic in total_edge_data.keys(): # For each initial condition
for species in species_objects: # Make graph
graph.add_node(species.name)
# Timestep
for tstep in total_edge_data[ic].keys(): # Set edge values for the graph
number = total_edge_data[ic][tstep]
# Each species
for edge in number:
try:
edge_name = edge.split('_', 1)
species_a_name = edge_name[0]
species_b_name = edge_name[1]
# PFA weight between two species
weight = number[edge]
if graph.has_edge(species_a_name, species_b_name):
old_weight = graph[species_a_name][species_b_name]['weight']
if weight > old_weight and weight > threshold_value: # Only include the weight if it is greater than the threshold value.
graph.add_weighted_edges_from(
[(species_a_name, species_b_name, weight)])
#elif weight > 1:
# print("Error. Edge weights should not be greater than one.")
# exit()
elif weight > threshold_value:
graph.add_weighted_edges_from(
[(species_a_name, species_b_name, weight)])
#elif weight > 1:
# print("Error. Edge weights should not be greater than one.")
# exit()
except IndexError:
print(edge)
continue
dic = graph_search(graph, target_species) # Search graph for max values to each species based on targets
for sp in dic: # Add to max dictionary if it is new or greater than the value already there.
if sp not in safe:
safe.append(sp)
graph.clear() # Reset graph
core_species = []
species_objects = solution_object.species()
    # Take all species that are over the threshold value and add them to essential species.
essential_species = []
for sp in species_objects:
if sp.name in safe:
if sp not in essential_species:
essential_species.append(sp)
done[0] = False
# Add all species in essential species to core species
for sp in essential_species:
if sp not in core_species:
core_species.append(sp.name)
# Add all of the must keep species to core species
retained_species = keeper_list # Specified by the user. A list of species that also need to be kept.
for sp in retained_species:
if sp not in core_species:
core_species.append(sp)
exclusion_list = []
# Exclude everything not in core species.
for species in solution_object.species():
# If its not one of our species we must keep, add it to the list of species to be trimmed.
if species.name not in core_species:
exclusion_list.append(species.name)
return exclusion_list
def run_pfa(solution_object, conditions_file, error_limit, target_species, retained_species, model_file, final_error):
""""
This is the MAIN top level function for running PFA
Parameters
----------
solution_object: a Cantera object of the solution to be reduced
conditions_file: The file holding the initial conditions to simulate
error_limit: The highest allowed error percentage
target_species: The target species for reduction
retained_species: A list of species to be retained even if they should be cut by the algorithm
model_file: The path to the file where the solution object was generated from
final_error: To hold the error level of the simulation
Returns
-------
    Writes reduced Cantera file and returns reduced Cantera solution object
"""
if len(target_species) == 0: # If the target species are not specified, puke and die.
print("Please specify a target species.")
exit()
    done = [] # Singleton to hold whether or not any more species can be cut from the simulation.
done.append(False)
threshold = .1 # Starting threshold value
threshold_i = .1
n = 1
error = [10.0] # Singleton to hold the error value of the previously ran simulation.
# Check to make sure that conditions exist
if conditions_file:
conditions_array = readin_conditions(str(conditions_file))
elif not conditions_file:
print("Conditions file not found")
exit()
# Turn conditions array into unran simulation objects for the original solution
sim_array = helper.setup_simulations(conditions_array,solution_object)
ignition_delay_detailed = helper.simulate(sim_array) #Run simulations and process results
rate_edge_data = get_rates_pfa(sim_array, solution_object) #Get edge weight calculation data.
print("Testing for starting threshold value")
# Trim the solution at that threshold and find the error.
pfa_loop_control(
solution_object, target_species, retained_species, model_file, error, threshold, done, rate_edge_data, ignition_delay_detailed, conditions_array)
while error[0] != 0 and threshold_i > .001: # While the error for trimming with that threshold value is greater than allowed.
threshold = threshold / 10 # Reduce the starting threshold value and try again.
threshold_i = threshold_i / 10
n = n + 1
pfa_loop_control(
solution_object, target_species, retained_species, model_file, error, threshold, done, rate_edge_data, ignition_delay_detailed, conditions_array)
if error[0] <= .02:
error[0] = 0
print("Starting with a threshold value of " + str(threshold))
sol_new = solution_object
final_error[0] = 0 # An integer representing the error introduced in the final simulation.
done[0] = False
while not done[0] and error[0] < error_limit: # Run the simulation until nothing else can be cut.
# Trim at this threshold value and calculate error.
sol_new = pfa_loop_control(
solution_object, target_species, retained_species, model_file, error, threshold, done, rate_edge_data, ignition_delay_detailed, conditions_array)
        if error_limit >= error[0]: # If the error at this threshold is still within the allowed limit, remember it as the largest acceptable threshold so far.
            max_t = threshold
#if (final_error == error[0]): #If error wasn't increased, increase the threshold at a higher rate.
# threshold = threshold + (threshold_i * 4)
final_error[0] = error[0]
#if (threshold >= .01):
# threshold_i = .01
threshold = threshold + threshold_i
threshold = round(threshold, n)
print("\nGreatest result: ")
sol_new = pfa_loop_control(
solution_object, target_species, retained_species, model_file, error, max_t, done, rate_edge_data, ignition_delay_detailed, conditions_array)
return sol_new
def pfa_loop_control(solution_object, target_species, retained_species, model_file, stored_error, threshold, done, rate_edge_data, ignition_delay_detailed, conditions_array):
"""
    This function handles the reduction, simulation, and comparison for a single threshold value
Parameters
----------
    solution_object: The solution object being reduced
target_species: The target species for reduction
retained_species: A list of species to be retained even if they should be cut by the algorithm
model_file: The path to the file where the solution object was generated from
stored_error: Error from the previous reduction attempt
threshold: current threshold value
done: are we done reducing yet? Boolean
rate_edge_data: the DICs for reduction
ignition_delay_detailed: ignition delay of detailed model
conditions_array: array holding information about initial conditions
Returns
-------
Returns the reduced solution object for this threshold and updates error value
"""
# Run detailed mechanism and retain initial conditions
species_retained = []
printout = ''
print('Threshold Species in Mech Error')
# Run DRG and create new reduced solution
exclusion_list = trim_pfa(
rate_edge_data, solution_object, threshold, retained_species, done,target_species,model_file) # Find out what to cut from the model
new_solution_objects = trim(solution_object, exclusion_list, model_file) # Cut the exclusion list from the model.
species_retained.append(len(new_solution_objects[1].species()))
# Simulated reduced solution
new_sim = helper.setup_simulations(conditions_array,new_solution_objects[1]) # Create simulation objects for reduced model for all conditions
ignition_delay_reduced = helper.simulate(new_sim) # Run simulations and process results
    if (ignition_delay_detailed.all() == 0): # Ensure that ignition occurred
print("Original model did not ignite. Check initial conditions.")
exit()
# Calculate error
error = (abs(ignition_delay_reduced-ignition_delay_detailed)/ignition_delay_detailed)*100 # Calculate error
printout += str(threshold) + ' ' + str(len(new_solution_objects[1].species())) + ' '+ str(round(np.max(error), 2)) +'%' + '\n'
print(printout)
stored_error[0] = round(np.max(error), 2)
# Return new model
new_solution_objects = new_solution_objects[1]
return new_solution_objects
def get_rates_pfa(sim_array, solution_object):
"""
This function calculates values to be used in the calculation of Direct Interaction Coefficients
Parameters
----------
sim_array: Array of simulated simulation objects
solution_object: Cantera object of the solution being reduced
Returns
-------
    total_edge_data: a dictionary with keys of initial conditions and values of dictionaries that hold information for calculating DICs at each timestep.
*the subdictionaries have the timestep as their keys and their values hold an array of DICs
"""
old_solution = solution_object
# Iterate through all initial conditions
total_edge_data = {}
for ic in sim_array:
ic_edge_data = {}
for tstep in ic.sample_points: # Iterate through all timesteps
temp = tstep[0] # Set up variables
pressure = tstep[1]
mass_fractions = np.array(tstep[2])
# Set up solution at current timestep
new_solution = old_solution
new_solution.TPY = temp, pressure, mass_fractions
new_reaction_production_rates = new_solution.net_rates_of_progress
new_species_prod_rates=new_solution.net_production_rates
DIC = {}
single = get_PA(new_solution,new_reaction_production_rates) # Get PA and CA
PA = single[0]
CA = single[1]
double = get_PAB(new_solution,new_reaction_production_rates) # Get PAB and CAB
PAB = double[0]
CAB = double[1]
r1 = get_rAB_1(new_solution,PA,CA,PAB,CAB)
rAB_p1 = r1[0]
rAB_c1 = r1[1]
r2 = get_rAB_2(new_solution,rAB_p1,rAB_c1)
rAB_p2 = r2[0]
rAB_c2 = r2[1]
s_names = new_solution.species_names
for species_a in s_names:
for species_b in s_names:
if (species_a != species_b):
full_name = species_a + "_" + species_b
add = rAB_p1[full_name] + rAB_c1[full_name] + rAB_p2[full_name] + rAB_c2[full_name]
DIC[full_name] = add
ic_edge_data[temp] = DIC
total_edge_data[ic] = ic_edge_data
return total_edge_data
def get_PA(new_solution, new_reaction_production_rates):
"""
Gets the PA (and CA) values of all species in a given solution.
Parameters
----------
new_solution: The object representing the cantera model
new_reaction_production_rates: the production rates associated with the model
Returns
-------
PA and CA dictionaries
"""
PA = {} # Dictionary that will hold the PA values for each species.
CA = {} # Dictionary that will hold the CA values for each species.
    # Initialize all species
s_names = new_solution.species_names
for species in s_names:
PA[species] = 0
CA[species] = 0
for i, reac in enumerate(new_solution.reactions()): # For all reactions
reac_prod_rate = float(new_reaction_production_rates[i]) # Set up values
if reac_prod_rate != 0:
if reac_prod_rate > 0: # For forward reactions
# Add all products to PA
for species in reac.products:
add = float(reac_prod_rate * reac.products[species])
PA[species] += abs(add)
# Add all reactants to CA
for species in reac.reactants:
add = float(reac_prod_rate * reac.reactants[species])
CA[species] += abs(add)
            if reac_prod_rate < 0: # For backward reactions
# Add all products to CA
for species in reac.products:
add = float(reac_prod_rate * reac.products[species])
CA[species] += abs(add)
# Add all reactants to PA
for species in reac.reactants:
add = float(reac_prod_rate * reac.reactants[species])
PA[species] += abs(add)
return PA,CA
def get_PAB(new_solution, new_reaction_production_rates):
"""
Gets the PAB (and CAB) values of all species in a given solution.
Parameters
----------
new_solution: The object representing the cantera model
new_reaction_production_rates: the production rates associated with the model
Returns
-------
PAB and CAB dictionaries.
"""
PAB = {} # Set up dictionaries
CAB = {}
s_names = new_solution.species_names
for species_a in s_names: # For every pair of species A and B in the solution
for species_b in s_names:
if species_a != species_b:
full_name = species_a + "_" + species_b
PAB[full_name] = 0
CAB[full_name] = 0
for i, reac in enumerate(new_solution.reactions()): # For all reactions
reac_prod_rate = float(new_reaction_production_rates[i]) # Set up values
all_species = reac.products
all_species.update(reac.reactants)
        # If both species exist in the reaction, add the calculated value to the correct dictionary.
if reac_prod_rate != 0:
if species_a in all_species:
if species_b in all_species:
# For forward reactions
if reac_prod_rate > 0:
# Add products to PAB
if species_a in reac.products:
add = float(reac_prod_rate * reac.products[species_a])
PAB[full_name] += abs(add)
# Add reactants to CAB
if species_a in reac.reactants:
add = float(reac_prod_rate * reac.reactants[species_a])
CAB[full_name] += abs(add)
# For backward reactions
if reac_prod_rate < 0:
# Add products to CAB
if species_a in reac.products:
add = float(reac_prod_rate * reac.products[species_a])
CAB[full_name] += abs(add)
# Add reactants to PAB
if species_a in reac.reactants:
add = float(reac_prod_rate * reac.reactants[species_a])
PAB[full_name] += abs(add)
return PAB, CAB
def get_rAB_1(new_solution,PA,CA,PAB,CAB):
"""
Gets the rAB_p1 (and rAB_c1) values of all species in a given solution.
Parameters
----------
new_solution: The object representing the cantera model
PA: A dictionary containing the PA values for the reduction
CA: A dictionary containing the CA values for the reduction
PAB: A dictionary containing the PAB values for the reduction
CAB: A dictionary containing the CAB values for the reduction
Returns
-------
rAB_p1 and rAB_c1 dictionaries.
"""
rAB_p1 = {} # Set up dictionaries
rAB_c1 = {}
s_names = new_solution.species_names
for species_a in s_names: # For all pairs of species
for species_b in s_names:
if species_a != species_b:
full_name = species_a + "_" + species_b # Set up
rAB_p1[full_name] = 0
rAB_c1[full_name] = 0
top_p = PAB[full_name] # Get numerator
top_c = CAB[full_name]
                if (PA[species_a] > CA[species_a]): # Get denominator
bot = PA[species_a]
else:
bot = CA[species_a]
if (bot != 0): # Calculate
rAB_p1[full_name] = top_p/bot
rAB_c1[full_name] = top_c/bot
return rAB_p1, rAB_c1
def get_rAB_2(new_solution,rAB_p1,rAB_c1):
"""
Gets the rAB_p2 (and rAB_c2) values of all species in a given solution.
Parameters
----------
new_solution: The object representing the cantera model
rAB_p1: A dictionary containing the rAB_p1 values for the reduction
rAB_c1: A dictionary containing the rAB_c1 values for the reduction
Returns
-------
rAB_p2 and rAB_c2 dictionaries.
"""
rAB_p2 = {} # Set up dictionaries
rAB_c2 = {}
s_names = new_solution.species_names
for species_a in s_names: # For all pairs of species
for species_b in s_names:
if species_a != species_b:
full_name = species_a + "_" + species_b # Set up
rAB_p2[full_name] = 0
rAB_c2[full_name] = 0
# Look through all possible middle step species
for species_m in s_names:
if (species_m != species_a and species_m != species_b):
am_name = species_a + "_" + species_m
mb_name = species_m + "_" + species_b
# Get what to add for species_m
add_p = rAB_p1[am_name] * rAB_p1[mb_name]
add_c = rAB_c1[am_name] * rAB_c1[mb_name]
# Add that value
rAB_p2[full_name] += add_p
rAB_c2[full_name] += add_c
return rAB_p2,rAB_c2
```
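The trimming step can be pictured with a toy graph: DIC values are keyed as `"A_B"` strings, only edges above the threshold are added, and everything reachable from a target survives. The sketch below uses made-up DIC values and `networkx.descendants` in place of the repo's `drg.graph_search`, so it only illustrates the idea:
```python
import networkx as nx
threshold = 0.1
dic = {"CH4_O2": 0.8, "O2_OH": 0.3, "OH_N2": 0.05}  # made-up DIC values
graph = nx.DiGraph()
for edge, weight in dic.items():
    species_a, species_b = edge.split("_", 1)
    if weight > threshold:
        graph.add_edge(species_a, species_b, weight=weight)
# OH_N2 fell below the threshold, so N2 is unreachable and would be cut.
retained = {"CH4"} | nx.descendants(graph, "CH4")
print(retained)  # {'CH4', 'O2', 'OH'}
```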
|
{
"source": "jeobrien/cfn-python-lint",
"score": 2
}
|
#### File: cfnlint/decode/__init__.py
```python
import sys
import logging
import six
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
from yaml.parser import ParserError, ScannerError
from yaml import YAMLError
from cfnlint.decode import cfn_yaml, cfn_json
from cfnlint.rules import Match, ParseError
LOGGER = logging.getLogger(__name__)
def decode(filename, ignore_bad_template):
"""
Decode filename into an object
"""
template = None
matches = []
try:
template = cfn_yaml.load(filename)
except IOError as e:
if e.errno == 2:
LOGGER.error('Template file not found: %s', filename)
matches.append(create_match_file_error(
filename, 'Template file not found: %s' % filename))
elif e.errno == 21:
LOGGER.error('Template references a directory, not a file: %s',
filename)
matches.append(create_match_file_error(
filename,
'Template references a directory, not a file: %s' % filename))
elif e.errno == 13:
LOGGER.error('Permission denied when accessing template file: %s',
filename)
matches.append(create_match_file_error(
filename,
'Permission denied when accessing template file: %s' % filename))
if matches:
return(None, matches)
except UnicodeDecodeError as err:
LOGGER.error('Cannot read file contents: %s', filename)
matches.append(create_match_file_error(
filename, 'Cannot read file contents: %s' % filename))
except cfn_yaml.CfnParseError as err:
err.match.Filename = filename
matches = [err.match]
except ParserError as err:
matches = [create_match_yaml_parser_error(err, filename)]
except ScannerError as err:
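        # A ScannerError on tabs or unknown escape characters usually means the
        # template is JSON rather than YAML, so retry it with the JSON loader
        # before giving up.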
if err.problem in [
'found character \'\\t\' that cannot start any token',
'found unknown escape character']:
try:
template = cfn_json.load(filename)
except cfn_json.JSONDecodeError as json_err:
json_err.match.filename = filename
matches = [json_err.match]
except JSONDecodeError as json_err:
if hasattr(json_err, 'message'):
if json_err.message == 'No JSON object could be decoded': # pylint: disable=no-member
matches = [create_match_yaml_parser_error(err, filename)]
else:
matches = [create_match_json_parser_error(json_err, filename)]
if hasattr(json_err, 'msg'):
if json_err.msg == 'Expecting value': # pylint: disable=no-member
matches = [create_match_yaml_parser_error(err, filename)]
else:
matches = [create_match_json_parser_error(json_err, filename)]
except Exception as json_err: # pylint: disable=W0703
if ignore_bad_template:
LOGGER.info('Template %s is malformed: %s',
filename, err.problem)
LOGGER.info('Tried to parse %s as JSON but got error: %s',
filename, str(json_err))
else:
LOGGER.error(
'Template %s is malformed: %s', filename, err.problem)
LOGGER.error('Tried to parse %s as JSON but got error: %s',
filename, str(json_err))
return (None, [create_match_file_error(
filename,
'Tried to parse %s as JSON but got error: %s' % (
filename, str(json_err)))])
else:
matches = [create_match_yaml_parser_error(err, filename)]
except YAMLError as err:
matches = [create_match_file_error(filename, err)]
if not isinstance(template, dict) and not matches:
# Template isn't a dict which means nearly nothing will work
matches = [Match(1, 1, 1, 1, filename, ParseError(),
message='Template needs to be an object.')]
return (template, matches)
def create_match_yaml_parser_error(parser_error, filename):
"""Create a Match for a parser error"""
lineno = parser_error.problem_mark.line + 1
colno = parser_error.problem_mark.column + 1
msg = parser_error.problem
return Match(
lineno, colno, lineno, colno + 1, filename,
ParseError(), message=msg)
def create_match_file_error(filename, msg):
"""Create a Match for a parser error"""
return Match(
linenumber=1, columnnumber=1, linenumberend=1, columnnumberend=2,
filename=filename, rule=ParseError(), message=msg)
def create_match_json_parser_error(parser_error, filename):
"""Create a Match for a parser error"""
if sys.version_info[0] == 3:
lineno = parser_error.lineno
colno = parser_error.colno
msg = parser_error.msg
elif sys.version_info[0] == 2:
lineno = 1
colno = 1
msg = parser_error.message
return Match(
lineno, colno, lineno, colno + 1, filename, ParseError(), message=msg)
```
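A minimal usage sketch of `decode`; the template path is an example and the `Resources` lookup assumes a well-formed CloudFormation template:
```python
from cfnlint.decode import decode
template, matches = decode("template.yaml", ignore_bad_template=False)
if matches:
    for match in matches:
        print(match)  # parse problems come back as cfnlint Match objects
else:
    print(sorted(template.get("Resources", {})))
```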
|
{
"source": "jeoe12/selenium",
"score": 2
}
|
#### File: webdriver/webkitgtk/options.py
```python
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class Options(object):
KEY = 'webkitgtk:browserOptions'
def __init__(self):
self._binary_location = ''
self._arguments = []
self._overlay_scrollbars_enabled = True
@property
def binary_location(self):
"""
Returns the location of the browser binary otherwise an empty string
"""
return self._binary_location
@binary_location.setter
def binary_location(self, value):
"""
Allows you to set the browser binary to launch
:Args:
- value : path to the browser binary
"""
self._binary_location = value
@property
def arguments(self):
"""
Returns a list of arguments needed for the browser
"""
return self._arguments
def add_argument(self, argument):
"""
Adds an argument to the list
:Args:
- Sets the arguments
"""
if argument:
self._arguments.append(argument)
else:
raise ValueError("argument can not be null")
@property
def overlay_scrollbars_enabled(self):
"""
Returns whether overlay scrollbars should be enabled
"""
return self._overlay_scrollbars_enabled
@overlay_scrollbars_enabled.setter
def overlay_scrollbars_enabled(self, value):
"""
Allows you to enable or disable overlay scrollbars
:Args:
- value : True or False
"""
self._overlay_scrollbars_enabled = value
def to_capabilities(self):
"""
Creates a capabilities with all the options that have been set and
returns a dictionary with everything
"""
webkitgtk = DesiredCapabilities.WEBKITGTK.copy()
browser_options = {}
if self.binary_location:
browser_options["binary"] = self.binary_location
if self.arguments:
browser_options["args"] = self.arguments
browser_options["useOverlayScrollbars"] = self.overlay_scrollbars_enabled
webkitgtk[Options.KEY] = browser_options
return webkitgtk
```
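A short sketch of building WebKitGTK capabilities with this class; the binary path and the extra argument are placeholders, not values the driver requires:
```python
from selenium.webdriver.webkitgtk.options import Options
options = Options()
options.binary_location = "/usr/bin/MiniBrowser"  # example path only
options.add_argument("--automation")              # example argument only
options.overlay_scrollbars_enabled = False
caps = options.to_capabilities()
print(caps[Options.KEY])
# {'binary': '/usr/bin/MiniBrowser', 'args': ['--automation'], 'useOverlayScrollbars': False}
```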
|
{
"source": "Jeoffreybauvin/puppenc",
"score": 3
}
|
#### File: app/environments/routes.py
```python
from flask_restful import Resource
from flask import jsonify, request
from app.puppenc import api, db, app, auth, PuppencResource
from app.decorators import *
from app.environments.models import Environment
from app.environments.schema import EnvironmentSchema
class Environments(PuppencResource):
def __init__(self):
self.environment_schema = EnvironmentSchema()
self.environments_schema = EnvironmentSchema(many=True)
@auth.login_required
@get_item(Environment)
def get(self, id=None):
"""
@api {get} /environments Get all environments
@apiName get_environments
@apiGroup Environments
@apiVersion 1.0.0
@apiPermission user
@apiParam {String} [limit=10] (query parameter) Objects per page to display. Use limit=0 for disabling limit
@apiParam {String} [page=1] (query parameter) Current page
@apiParam {String} [filter] (query parameter) Filter on name parameter (use * for searching any strings. Ex: *maclass*)
@apiSuccess {Number} id The environment's id
@apiSuccess {String} name The environment's name
@apiSuccess {Array} nodes The environment's nodes (by id)
@apiSuccess {Datetime} insert_date The environment's inserted date
@apiSuccess {Datetime} update_date The environment's updated date
@apiSuccess {Datetime} delete_date The environment's deleted date
@apiExample {curl} Example usage :
curl -X GET -u user:pwd http://127.0.0.1:5000/api/v1/environments
@apiSuccessExample {json} Success-Response:
HTTP/1.0 200 OK
[
{
"delete_date": null,
"id": 1,
"insert_date": "2017-04-11T13:56:03+00:00",
"name": "stable",
"nodes": [
104,
2582,
2588
],
"update_date": null
},
{
"delete_date": null,
"id": 2,
"insert_date": "2017-04-11T13:56:04+00:00",
"name": "staging",
"nodes": [
8,
34,
42
],
"update_date": null
}
]
"""
"""
@api {get} /environments/:id Get a single environment
@apiName get_environment
@apiGroup Environments
@apiVersion 1.0.0
@apiPermission user
@apiParam {Number} id (uri parameter) The environment's id.
@apiSuccess {Number} id The environment's id.
@apiSuccess {String} name The environment's name.
@apiSuccess {Array} nodes The environment's nodes (by id)
@apiSuccess {Datetime} insert_date The environment's inserted date
@apiSuccess {Datetime} update_date The environment's updated date
@apiSuccess {Datetime} delete_date The environment's deleted date
@apiExample {curl} Example usage :
curl -X GET -u user:pwd http://127.0.0.1:5000/api/v1/environments/1
@apiSuccessExample {json} Success-Response:
HTTP/1.0 200 OK
{
"delete_date": null,
"id": 2,
"insert_date": "2017-04-11T13:56:03+00:00",
"name": "my_environment",
"nodes": [
1498,
2817,
2818
],
"update_date": null
}
"""
if not id:
return self.environments_schema.jsonify(g.obj_info)
else:
return self.environment_schema.jsonify(g.obj_info)
@auth.login_required
@body_is_valid
@is_unique_item(Environment)
@post_item(Environment)
def post(self):
"""
@api {post} /environments Add a new environment
@apiName add_environment
@apiGroup Environments
@apiVersion 1.0.0
@apiPermission user
@apiParam {String} name (json document) The environment's name.
@apiSuccess {Number} id The environment's id.
@apiExample {curl} Example usage :
curl -X POST -H "Content-Type: application/json" \
-d '{ "name": "my_new_environment" }' \
http://127.0.0.1:5000/api/v1/environments
@apiSuccessExample {json} Success-Response:
HTTP/1.0 200 OK
{
"227": {
"name": "my_new_environment"
}
}
"""
pass
@auth.login_required
@body_is_valid
@is_unique_item(Environment)
@get_item(Environment)
@edit_item(Environment)
def put(self, id=None):
"""
@api {put} /environments/:id Edit an existing environment
@apiName edit_environment
@apiGroup Environments
@apiVersion 1.0.0
@apiPermission user
@apiParam {String} name (uri parameter) The environment's id
@apiParam {String} name (json document) The new environment's name
@apiSuccess {Number} success True if success
@apiSuccess {Number} message A information message
@apiExample {curl} Example usage :
curl -X PUT -H "Content-Type: application/json" \
-d '{ "name": "my_new_environment" }' \
http://127.0.0.1:5000/api/v1/environments/:id
@apiSuccessExample {json} Success-Response:
HTTP/1.0 200 OK
{
"message": "successfully modified",
"success": true
}
"""
pass
@auth.login_required
@get_item(Environment)
@delete_item(Environment)
def delete(self, id):
"""
@api {delete} /environments/:id Delete a single environment
        @apiName rm_hostgroup
@apiGroup Environments
@apiVersion 1.0.0
@apiPermission user
@apiParam {Number} id (uri parameter) The environment's id.
@apiSuccess {Boolean} success Success (True if ok).
@apiSuccess {String} message A success or error message.
@apiExample {curl} Example usage :
curl -X DELETE http://127.0.0.1:5000/api/v1/environments/:id
@apiSuccessExample {json} Success-Response:
HTTP/1.0 200 OK
{
"message": "<Environment 'my_new_environment'> deleted",
"success": true
}
"""
pass
```
#### File: app/hostgroups/models.py
```python
from app.puppenc import db
from sqlalchemy.orm import scoped_session, sessionmaker, relationship, backref
from app.nodes.models import Node
class Hostgroup(db.Model):
__tablename__ = 'hostgroups'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
insert_date = db.Column(db.DateTime, default=db.func.current_timestamp())
update_date = db.Column(db.DateTime, default=None)
delete_date = db.Column(db.DateTime, default=None)
class_id = db.Column(db.Integer, db.ForeignKey('classes.id'))
nodes = db.relationship('Node', backref='hostgroup_info', lazy='dynamic')
def __init__(self, name, class_id):
self.name = name
self.class_id = class_id
def __repr__(self):
return '<Hostgroup %r>' % (self.name)
```
#### File: app/hostgroups/routes.py
```python
from flask_restful import Resource
from flask import jsonify, request
from app.puppenc import api, db, app, auth, PuppencResource
from app.decorators import *
from app.hostgroups.models import Hostgroup
from app.hostgroups.schema import HostgroupSchema
class Hostgroups(PuppencResource):
def __init__(self):
self.hostgroup_schema = HostgroupSchema()
self.hostgroups_schema = HostgroupSchema(many=True)
@auth.login_required
@get_item(Hostgroup)
def get(self, id=None):
"""
@api {get} /hostgroups Get all hostgroups
@apiName get_hostgroups
@apiGroup Hostgroups
@apiVersion 1.0.0
@apiPermission user
@apiParam {String} [limit=10] (query parameter) Objects per page to display. Use limit=0 for disabling limit
@apiParam {String} [page=1] (query parameter) Current page
@apiParam {String} [filter] (query parameter) Filter on name parameter (use * for searching any strings. Ex: *maclass*)
@apiSuccess {Number} id The hostgroup's id.
@apiSuccess {String} name The hostgroup's name.
@apiSuccess {Array} nodes The hostgroup's nodes (by id)
@apiSuccess {Datetime} insert_date The hostgroup's inserted date
@apiSuccess {Datetime} update_date The hostgroup's updated date
@apiSuccess {Datetime} delete_date The hostgroup's deleted date
@apiExample {curl} Example usage :
curl -X GET -u user:pwd http://127.0.0.1:5000/api/v1/hostgroups
@apiSuccessExample {json} Success-Response:
HTTP/1.0 200 OK
[
{
"class_id": 1,
"id": 1,
"insert_date": "2017-04-11T13:57:08+00:00",
"name": "webserver",
"nodes": [
8,
42,
2661
],
"update_date": null
},
{
"class_id": 2,
"id": 2,
"insert_date": "2017-04-11T13:56:40+00:00",
"name": "logs",
"nodes": [],
"update_date": null
}
]
"""
"""
@api {get} /hostgroups/<id> Get a single hostgroup
@apiVersion 1.0.0
@apiName get_hostgroup
@apiGroup Hostgroups
@apiPermission user
@apiParam {Number} id The hostgroup's id.
@apiSuccess {Number} id The hostgroup's id.
@apiSuccess {String} name The hostgroup's name.
@apiSuccess {Array} nodes The hostgroup's nodes (by id)
@apiSuccess {Datetime} insert_date The hostgroup's inserted date
@apiSuccess {Datetime} update_date The hostgroup's updated date
@apiSuccess {Datetime} delete_date The hostgroup's deleted date
@apiExample {curl} Example usage :
curl -X GET http://127.0.0.1:5000/api/v1/hostgroups/:id
@apiSuccessExample {json} Success-Response:
HTTP/1.0 200 OK
{
"class_id": 10,
"id": 15,
"insert_date": "2017-04-11T13:56:30+00:00",
"name": "my_hostgroup",
"nodes": [
2164,
2165,
2166,
2167
],
"update_date": "2017-05-09T17:08:57+00:00"
}
"""
if not id:
return self.hostgroups_schema.jsonify(g.obj_info)
else:
return self.hostgroup_schema.jsonify(g.obj_info)
@auth.login_required
@is_unique_item(Hostgroup)
@body_is_valid
# @post_item(Hostgroup)
def post(self, id=None):
"""
@api {post} /hostgroups Add a new hostgroup
@apiVersion 1.0.0
@apiName add_hostgroup
@apiPermission user
@apiGroup Hostgroups
@apiParam {String} name The hostgroup's name.
@apiParam {Number} class_id The related class id.
@apiSuccess {Number} id The hostgroup's id.
@apiExample {curl} Example usage :
curl -X POST -H "Content-Type: application/json" \
-d '{ "name": "my_new_hostgroup" }' \
http://127.0.0.1:5000/api/v1/hostgroups
@apiSuccessExample {json} Success-Response:
HTTP/1.0 200 OK
{
"227": {
"name": "my_new_hostgroup"
}
}
"""
content = request.get_json(silent=True)
if not 'class_id' in content:
class_id = None
else:
class_id = content['class_id']
obj = Hostgroup(g.obj_name, class_id=class_id)
db.session.add(obj)
db.session.commit()
app.logger.info(u"Create Hostgroup %s %s by %s" % (Hostgroup, g.obj_name, g.user))
return jsonify({obj.id: {
'name': obj.name,
}})
@auth.login_required
@edit_item(Hostgroup)
def put(self, id):
"""
@api {put} /hostgroups/:id Edit an hostgroup
@apiVersion 1.0.0
@apiName put_hostgroup
@apiGroup Hostgroups
@apiPermission user
@apiParam {Number} id The hostgroup's id.
@apiParam {String} name The hostgroup's name.
@apiParam {Number} class_id The hostgroup's class_id.
@apiSuccess {Boolean} success Success (True if ok).
@apiSuccess {String} message A success or error message.
@apiExample {curl} Example usage :
curl -X PUT -H "Content-Type: application/json" \
-d '{ "name": "my_new_hostgroup" }' \
http://127.0.0.1:5000/api/v1/hostgroups/1
@apiSuccessExample {json} Success-Response:
HTTP/1.0 200 OK
{
"message": "successfully modified",
"success": true
}
"""
pass
@auth.login_required
@get_item(Hostgroup)
@delete_item(Hostgroup)
def delete(self, id):
"""
@api {delete} /hostgroups/<id> Delete a single hostgroup
@apiVersion 1.0.0
@apiPermission user
@apiName rm_hostgroup
@apiGroup Hostgroups
@apiParam {Number} id The hostgroup's id.
@apiSuccess {Boolean} success Success (True if ok).
@apiSuccess {String} message A success or error message.
@apiExample {curl} Example usage :
curl -X DELETE http://127.0.0.1:5000/api/v1/hostgroups/:id
@apiSuccessExample {json} Success-Response:
HTTP/1.0 200 OK
{
"message": "<Hostgroup 'my_new_hostgroup'> deleted",
"success": true
}
"""
pass
```
#### File: app/users/models.py
```python
from app.puppenc import db, app, g
from sqlalchemy.orm import scoped_session, sessionmaker, relationship, backref
from passlib.apps import custom_app_context as pwd_context
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(32), index=True)
password_hash = db.Column(db.String(128))
def __init__(self, name):
self.name = name
def __repr__(self):
return '<User %r>' % (self.name)
def hash_password(self, password):
self.password_hash = pwd_context.encrypt(password)
def verify_password(self, password):
return pwd_context.verify(password, self.password_hash)
def generate_auth_token(self, expiration=app.config['AUTH_DURATION']):
app.logger.info("Generate a token for %s with duration of %s seconds", g.user, expiration)
s = Serializer(app.config['SECRET_KEY'], expires_in=expiration)
return s.dumps({'id': self.id})
def verify_auth_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
data = s.loads(token)
except SignatureExpired:
return None # valid token, but expired
except BadSignature:
return None # invalid token
user = User.query.get(data['id'])
# app.logger.info('Sending a token for user %s', user)
return user
```
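The token helpers above only make sense together with an authentication callback, and that callback is not part of this file. The sketch below shows the usual Flask-HTTPAuth wiring (token first, then username/password); it is an illustration of the intended flow, not code taken from this repository.
```python
# Hypothetical Flask-HTTPAuth wiring for the User model above; the project's real
# callback may differ, this only illustrates how verify_auth_token/verify_password
# are meant to be used together.
from app.puppenc import auth, g
from app.users.models import User

@auth.verify_password
def verify_password(username_or_token, password):
    # accept either a token or a username/password pair
    user = User.verify_auth_token(username_or_token)
    if not user:
        user = User.query.filter_by(name=username_or_token).first()
        if not user or not user.verify_password(password):
            return False
    g.user = user
    return True
```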
|
{
"source": "jeohalves/leetcode_solutions",
"score": 3
}
|
#### File: leetcode_solutions/python/13-roman_to_integer.py
```python
class Solution:
def romanToInt(self, s: str) -> int:
number = 0
sbl = dict()
sbl['I'] = 1
sbl['V'] = 5
sbl['X'] = 10
sbl['L'] = 50
sbl['C'] = 100
sbl['D'] = 500
sbl['M'] = 1000
s_list = list(s)
while s_list:
cur = s_list.pop(0)
if s_list and sbl[cur] < sbl[s_list[0]]:
next_value = sbl[s_list.pop(0)]
number += next_value - sbl[cur]
else:
number += sbl[cur]
return number
```
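The peek-ahead in the loop is what handles subtractive pairs (IV, IX, XL, ...): when the current symbol is smaller than the one that follows, both are consumed at once and their difference is added. A quick sanity check, assuming the Solution class above is in scope:
```python
# Sanity checks for the solution above (run in the same module/scope as Solution).
s = Solution()
assert s.romanToInt('III') == 3
assert s.romanToInt('LVIII') == 58
assert s.romanToInt('MCMXCIV') == 1994
```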
#### File: leetcode_solutions/python/20-valid_parentheses.py
```python
class Solution:
def isValid(self, s: str) -> bool:
stack = list()
open_chars = ['(', '{', '[']
combo = ['()', '{}', '[]']
s_list = list(s)
while(s_list):
cur_char = s_list.pop(0)
if not stack:
if cur_char not in open_chars:
return False
stack += [cur_char]
else:
if cur_char in open_chars:
stack += [cur_char]
else:
open_in_stack = stack.pop()
if f'{open_in_stack}{cur_char}' not in combo:
return False
if stack:
return False
else:
return True
```
#### File: leetcode_solutions/python/26_remove-duplicates-from-sorted-array.py
```python
class Solution:
def removeDuplicates(self, nums: List[int]) -> int:
temp = list()
temp.extend(nums)
nums.clear()
for x in temp:
if x not in nums:
nums += [x]
return len(nums)
```
#### File: leetcode_solutions/python/9-palindrome_number.py
```python
class Solution:
def isPalindrome(self, x: int) -> bool:
str_x = str(x)
for i in range(len(str_x)//2+1):
if str_x[i] != str_x[-i-1]:
return False
return True
```
|
{
"source": "jeojoe/simple-http-server",
"score": 3
}
|
#### File: jeojoe/simple-http-server/demo.py
```python
from httpserver.HTTPServer import HTTPServer
import signal
import sys
server = None
def signal_handler(signum, frame):
if signum == signal.SIGINT:
print('Closing server...')
server.server_close()
print('Server closed.')
sys.exit()
else:
print('Other signal received', signum)
if __name__ == "__main__":
server = HTTPServer(8080)
server.serve('sample_docroot', {
'400': '400.html',
'404': '404.html'
})
# Handle server close with ctrl+c in terminal
signal.signal(signal.SIGINT, signal_handler)
# Start the server...
server.serve_forever()
```
#### File: simple-http-server/httpserver/HTTPServer.py
```python
import os
import mimetypes
from datetime import datetime
from httpserver.TCPServer import TCPServer
from httpserver.HTTPConnectionHandler import HTTPConnectionHandler, HTTPResponse, BadRequestError, RecvTimeoutError
from httpserver.utils import debugprint
def get_last_modified_formatted_string(requested_path):
timestamp = os.path.getmtime(requested_path)
# HTTP-date format per RFC 7231, e.g. "Sun, 06 Nov 1994 08:49:37 GMT"
time_format = '%a, %d %b %Y %H:%M:%S GMT'
return datetime.utcfromtimestamp(timestamp).strftime(time_format)
class HTTPServer(TCPServer):
"""
Wrapper for TCPServer that implements HTTP protocol
"""
def __init__(self, port):
# We support only GET, so use daemon threads
TCPServer.__init__(self, port, self.handle_tcp_connection, use_daemon_threads=True)
self.serve_docroot = None
self.serve_config = {}
def handle_tcp_connection(self, connection, client_address):
http_connection = HTTPConnectionHandler(connection, client_address)
try:
# Continue to get request(s) over the socket
# Until:
# client closes the connection,
# client timeout,
# request header is not keep-alive,
# or bad request from client.
while True:
request = http_connection.get_request()
if not request:
break
# TODO: Allow choosing to handle request or serve file
self.__serve_file(request, http_connection)
if not request.is_connection_keep_alive():
break
except RecvTimeoutError:
if len(http_connection.unprocessed_data) > 0:
# There's an incomplete request on timeout
http_connection.send_response(HTTPResponse.client_error_400())
except BadRequestError as e:
debugprint('Bad Request Error', e)
http_connection.send_response(HTTPResponse.client_error_400())
finally:
http_connection.close()
def serve(self, docroot, serve_config={}):
"""
Serve files from `docroot`.
Can pass config as dict with keys mapped to html pages, i.e.:
{
'index': 'custom_index.html',
'400': '400.html',
'404': '404.html'
}
Note that paths must be relative to `docroot`. Also 'index.html' is the default mapping for 'index'.
"""
self.serve_docroot = docroot
self.serve_config = serve_config
def get_index_html_path(self):
"""
Get index.html (or any from config) path.
"""
return os.path.join('/', self.serve_config.get('index', 'index.html'))
def get_abspath_relative_to_docroot(self, path):
assert self.serve_docroot is not None, 'Must setup `serve_docroot` first.'
return os.path.abspath(os.path.join(self.serve_docroot, path))
def __serve_file(self, request, http_connection):
"""
Return HTTPResponse for serving file.
"""
requested_path = self.get_index_html_path() if request.path == '/' else request.path
if requested_path.startswith('/'):
requested_path = requested_path[1:]
abs_requested_path = self.get_abspath_relative_to_docroot(requested_path)
abs_docroot_path = os.path.abspath(self.serve_docroot)
del requested_path
# Won't serve out of docroot
if not abs_requested_path.startswith(abs_docroot_path):
if '400' in self.serve_config:
http_connection.send_response(HTTPResponse.client_error_400())
http_connection.send_body(self.get_abspath_relative_to_docroot(self.serve_config['400']))
else:
http_connection.send_response(HTTPResponse.client_error_400())
return
# File does not exist
if not os.path.exists(abs_requested_path):
if '404' in self.serve_config:
http_connection.send_response(HTTPResponse.not_found_404())
http_connection.send_body(self.get_abspath_relative_to_docroot(self.serve_config['404']))
else:
http_connection.send_response(HTTPResponse.not_found_404())
return
mimetype, _ = mimetypes.guess_type(abs_requested_path)
file_size = os.path.getsize(abs_requested_path)
last_modified_time = get_last_modified_formatted_string(abs_requested_path)
response_headers = {
'Content-Type': mimetype,
'Content-Length': file_size,
'Last-Modified': last_modified_time
}
# Send first-line and headers
http_connection.send_response(HTTPResponse(200, headers=response_headers))
# Send body
n_bytes_sent = http_connection.send_file(abs_requested_path)
assert n_bytes_sent == file_size, 'Incomplete file sent.'
```
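With demo.py above running, the behaviour of `__serve_file` (Content-Type, Content-Length and Last-Modified headers, plus the configured 404 page) can be exercised from any plain HTTP client. A small sketch using `requests`; the port and docroot layout are the ones from demo.py, and it assumes `sample_docroot` contains an `index.html`.
```python
# Quick manual check against the demo server above (python demo.py must be running).
import requests

r = requests.get('http://127.0.0.1:8080/')
print(r.status_code, r.headers.get('Content-Type'), r.headers.get('Last-Modified'))

# a missing file should be answered with the configured 404 page
r = requests.get('http://127.0.0.1:8080/does_not_exist.html')
print(r.status_code)  # expected: 404
```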
|
{
"source": "jeok/Cavern",
"score": 4
}
|
#### File: jeok/Cavern/classes.py
```python
import pyglet
import pytmx.util_pyglet
import game_utils
class Rect():
"""
Class for storing rectangular information
"""
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
self.topleft = (x, y + height)
self.topright = (x + width, y + height)
self.middle = (self.x + self.width / 2, self.y + self.height / 2)
self.right = self.x + width
def move(self, x_delta, y_delta):
self.x = self.x + x_delta
self.y = self.y + y_delta
self.topleft = (self.x, self.y + self.height)
self.topright = (self.x + self.width, self.y + self.height)
self.middle = (self.x + self.width / 2, self.y + self.height / 2)
self.right = self.x + self.width
def updateposition(self, new_x, new_y):
self.x = new_x
self.y = new_y
self.topleft = (self.x, self.y + self.height)
self.topright = (self.x + self.width, self.y + self.height)
self.middle = (self.x + self.width / 2, self.y + self.height / 2)
self.right = self.x + self.width
class Player():
""" Class for player
x = position on x plane
y = -,,- y plane
Different animation states:
idle
walk
run
jump
push
"""
def __init__(self, x, y):
self.speed_x = 0
self.speed_y = 0
self.x = x
self.y = y
self.size_x = 10
self.size_y = 10
self.anim_state = "idle"
# Create a rectangle for collision detection
self.collision_box = Rect(self.x, self.y, self.size_x, self.size_y)
def move(self, movement_direction, jump_pressed, run_pressed):
if movement_direction == "NONE":
self.speed_x = 0
if movement_direction == "LEFT":
self.speed_x = -1
elif movement_direction == "RIGHT":
self.speed_x = 1
if jump_pressed:
self.speed_y = 5
def update(self, collisionmap, GRAVITY):
"""
Update method which checks if player can move.
Collision detection works as such:
See if player's future position is legal according to tilemap's foreground layer (layer 1)
"""
# print(self.speed_y)
if self.speed_y > -5:
self.speed_y += GRAVITY
# If player's going left, boundaries need to acknowledge player's size_x
if self.speed_x > 0:
if not game_utils.check_collision(self, collisionmap, "RIGHT"):
self.x += self.speed_x
elif self.speed_x < 0:
if not game_utils.check_collision(self, collisionmap, " "):
self.x += self.speed_x
if self.speed_y > 0:
if not game_utils.check_collision(self, collisionmap, "UP"):
self.y += self.speed_y
if self.speed_y < 0:
if not game_utils.check_collision(self, collisionmap, "UP"):
self.y += self.speed_y
else:
self.speed_y = - 1 * (self.y % collisionmap.tileheight)
self.collision_box.updateposition(self.x, self.y)
class Camera(object):
""" Camera class, heavily inspired by:
https://stackoverflow.com/questions/14354171/add-scrolling-to-a-platformer-in-pygame
"""
def __init__(self, camera_func, width, height):
self.camera_func = camera_func
self.state = Rect(0, 0, width, height)
def apply(self, target):
return target.rect.move(self.state.topleft)
def update(self, target):
self.state = self.camera_func(self.state, target.rect)
```
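The Camera class expects a `camera_func` but none is defined in this file. A common choice, in the spirit of the Stack Overflow answer referenced in the docstring, is a function that re-centres the camera state on the target rectangle. The sketch below is illustrative only; the window constants and function name are assumptions, not part of this repository.
```python
# Illustrative camera_func for the Camera class above; WIN_WIDTH/WIN_HEIGHT are
# assumed window dimensions, not constants from this repository.
WIN_WIDTH, WIN_HEIGHT = 800, 600

def simple_camera(camera_state, target_rect):
    # offset the camera so the target ends up roughly centred in the window
    x = -target_rect.x + WIN_WIDTH // 2
    y = -target_rect.y + WIN_HEIGHT // 2
    return Rect(x, y, camera_state.width, camera_state.height)

# usage: camera = Camera(simple_camera, WIN_WIDTH, WIN_HEIGHT)
```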
|
{
"source": "jeokrohn/wxc_sdk",
"score": 4
}
|
#### File: wxc_sdk/examples/calendarific.py
```python
import os
from datetime import date
from typing import Literal, List, Union, Any
from pydantic import BaseModel, Field, parse_obj_as, validator
import requests
HolidayType = Literal['national', 'local', 'religious', 'observance']
class Country(BaseModel):
country_id: str = Field(alias='id')
name: str
AllOrAny = Union[Literal['All'], Any]
class ApiError(Exception):
pass
class Holiday(BaseModel):
name: str
description: str
country: Country
date: date
holiday_type: List[str] = Field(alias='type')
locations: AllOrAny # quick and dirty. Don't need more detail right now
states: AllOrAny # quick and dirty. Don't need more detail right now
@validator('date', pre=True)
def validate_date(cls, v):
data = v['datetime']
r = date(day=data['day'], month=data['month'], year=data['year'])
return r
class CalendarifiyApi:
def __init__(self, api_key: str = None):
self.api_key = api_key or os.getenv('CALENDARIFIC_KEY')
if not self.api_key:
raise ValueError('API key needs to be passed or present in CALENDARIFIC_KEY environment variable')
def holidays(self, *, country: str, year: int, day: int = None, month: int = None, location: str = None,
holiday_type: HolidayType = None) -> List[Holiday]:
"""
This provides a list of holidays based on the parameters passed to it.
https://calendarific.com/api-documentation
:param country: The country parameter must be in the iso-3166 format as specified in the document here. To
view a list of countries and regions we support, visit our list of supported countries.
:type country: str
:param year: The year you want to return the holidays. We currently support both historical and future years
until 2049. The year must be specified as a number eg, 2019
:type year: int
:param day: Limits the number of holidays to a particular day. Must be passed as the numeric value of the
day [1..31].
:type day: int
:param month: Limits the number of holidays to a particular month. Must be passed as the numeric value of the
month [1..12].
:type month: int
:param location: We support multiple counties, states and regions for all the countries we support. This
optional parameter allows you to limit the holidays to a particular state or region. The value of field
is iso-3166 format of the state. View a list of supported countries and states. An example is, for New York
state in the United States, it would be us-nyc
:type location: str
:param holiday_type: We support multiple types of holidays and observances. This parameter allows users to
return only a particular type of holiday or event. By default, the API returns all holidays. Below is the
list of holiday types supported by the API and this is how to reference them.
* national - Returns public, federal and bank holidays
* local - Returns local, regional and state holidays
* religious - Return religious holidays: buddhism, christian, hinduism, muslim, etc
* observance - Observance, Seasons, Times
:type holiday_type: HolidayType
:return: list of holidays
:rtype: List[Holiday]
"""
params = {k: v for i, (k, v) in enumerate(locals().items())
if i and v is not None and k != 'holiday_type'}
params['api_key'] = self.api_key
if holiday_type:
params['type'] = holiday_type
r = requests.get('https://calendarific.com/api/v2/holidays', params=params)
r.raise_for_status()
data = r.json()
code = data['meta']['code']
if code != 200:
raise ApiError(data['meta']['code'], data['meta']['error_type'], data['meta']['error_detail'])
result = parse_obj_as(List[Holiday], data['response']['holidays'])
return result
```
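A minimal usage sketch for the client above. The country, year and holiday type are illustrative values; the API key is picked up from the CALENDARIFIC_KEY environment variable as described in `__init__`.
```python
# Illustrative usage of the Calendarific client defined above; requires a valid
# API key in the CALENDARIFIC_KEY environment variable.
if __name__ == '__main__':
    api = CalendarifiyApi()
    for holiday in api.holidays(country='US', year=2022, month=7, holiday_type='national'):
        print(f'{holiday.date}: {holiday.name}')
```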
#### File: wxc_sdk/script/all_types.py
```python
import os.path
import sys
from importlib import import_module
from io import StringIO
from pathlib import Path
def module_name_from_path(path):
"""
Create a module name from a path
:param path: path to a Python source file inside the wxc_sdk package
:return: dotted module name derived from the path, e.g. "wxc_sdk.rest"
"""
p_split = str(path).split('/')
wxc_sdk_base = next(i for i in range(100)
if p_split[-i] == 'wxc_sdk')
p_split = p_split[-wxc_sdk_base:]
p_split[-1] = os.path.splitext(p_split[-1])[0]
if p_split[-1] == '__init__':
p_split = p_split[:-1]
mod_name = '.'.join(p_split)
return mod_name
def main():
# all Python sources
wxc_sdk = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'wxc_sdk'))
py_files = list(Path(wxc_sdk).rglob('*.py'))
py_files.sort()
# module names from paths
module_names = list(map(module_name_from_path, py_files))
# a set to collect all identifiers
combined_all = set()
# print to a string
source = StringIO()
# exclude some sources
to_skip = ['wxc_sdk',
'wxc_sdk.api_child',
'wxc_sdk.integration',
'wxc_sdk.rest',
'wxc_sdk.as_rest',
'wxc_sdk.as_api',
'wxc_sdk.all_types']
err = False
for module_name in module_names:
if module_name in to_skip:
continue
module = import_module(module_name)
module_all = module.__dict__.get('__all__')
if module_all is None:
continue
names_in_module = []
for name in module_all:
if name.endswith('Api'):
# Apis not needed
continue
if name in combined_all:
print(f'duplicate name {module_name}.{name}', file=sys.stderr)
err = True
else:
combined_all.add(name)
names_in_module.append(name)
# import statement for module
if names_in_module:
names_in_module.sort()
line = f'from {module_name} import '
max_line = 116
pending_line = ''
for name in names_in_module:
entry = f"{name}, "
if len(line) + len(entry) >= max_line:
if pending_line:
print(f'{pending_line.rstrip()}\\', file=source)
pending_line = line.rstrip()
# next line is indented by 4 spaces
line = ' ' * 4
line = f'{line}{entry}'
if pending_line:
if line.strip():
print(f'{pending_line.rstrip()}\\', file=source)
print(f'{line.rstrip(" ,")}', file=source)
else:
print(f'{pending_line.rstrip(" ,")}\\', file=source)
else:
print(f'{line.rstrip(" ,")}', file=source)
if err:
raise NameError('Duplicate names')
# create __all__
print(file=source)
line = '__all__ = ['
combined_all = sorted(combined_all)
max_line = 120
for name in combined_all:
entry = f"'{name}', "
if len(line) + len(entry) >= max_line:
print(line.rstrip(), file=source)
line = ' ' * 11
line = f'{line}{entry}'
print(f'{line.rstrip(" ,")}]', file=source)
with open(os.path.join(wxc_sdk, 'all_types.py'), mode='w') as f:
f.write(source.getvalue())
print(source.getvalue())
return
if __name__ == '__main__':
main()
```
#### File: wxc_sdk/tests/test_license.py
```python
from .base import TestCaseWithLog
class TestLicense(TestCaseWithLog):
def test_001_list(self):
"""
list licenses
"""
lic_list = list(self.api.licenses.list())
print(f'got {len(lic_list)} licenses')
def test_002_calling_users_by_license(self):
calling_license_ids = set(lic.license_id for lic in self.api.licenses.list()
if lic.webex_calling)
calling_users = [user for user in self.api.people.list()
if any(license_id in calling_license_ids for license_id in user.licenses)]
print(f'Found {len(calling_users)} calling users')
```
#### File: wxc_sdk/tests/test_locations.py
```python
from .base import TestCaseWithLog
class TestLocation(TestCaseWithLog):
def test_001_list_all(self):
"""
list all locations
"""
location_list = list(self.api.locations.list())
print(f'Got {len(location_list)} locations')
```
#### File: wxc_sdk/tests/test_person_appservices.py
```python
import random
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from dataclasses import dataclass
from unittest import skip
from wxc_sdk.all_types import Person, AppServicesSettings
from .base import TestCaseWithUsers
class TestRead(TestCaseWithUsers):
def test_001_read_all(self):
"""
Read app services settings of all users
"""
asa = self.api.person_settings.appservices
with ThreadPoolExecutor() as pool:
settings = list(pool.map(lambda user: asa.read(person_id=user.person_id),
self.users))
print(f'Got app services settings for {len(self.users)} users')
print('\n'.join(s.json() for s in settings))
@dataclass(init=False)
class TestUpdate(TestCaseWithUsers):
@contextmanager
def target_user(self):
"""
Get target user
"""
user = random.choice(self.users)
settings = self.api.person_settings.appservices.read(person_id=user.person_id)
try:
yield user
finally:
# restore old settings
self.api.person_settings.appservices.configure(person_id=user.person_id, settings=settings)
restored = self.api.person_settings.appservices.read(person_id=user.person_id)
self.assertEqual(settings, restored)
def test_001_toggle_ring_devices_for_click_to_dial_calls_enabled(self):
"""
Toggle ring_devices_for_click_to_dial_calls_enabled on random user
"""
with self.target_user() as user:
asa = self.api.person_settings.appservices
user: Person
before = asa.read(person_id=user.person_id)
settings = AppServicesSettings(
ring_devices_for_click_to_dial_calls_enabled=not before.ring_devices_for_click_to_dial_calls_enabled)
asa.configure(person_id=user.person_id, settings=settings)
after = asa.read(person_id=user.person_id)
self.assertEqual(settings.ring_devices_for_click_to_dial_calls_enabled,
after.ring_devices_for_click_to_dial_calls_enabled)
after.ring_devices_for_click_to_dial_calls_enabled = before.ring_devices_for_click_to_dial_calls_enabled
self.assertEqual(before, after)
@skip('available_line_count cannot be changed')
def test_002_available_line_count(self):
"""
change available_line_count
"""
with self.target_user() as user:
asa = self.api.person_settings.appservices
user: Person
before = asa.read(person_id=user.person_id)
settings = before.copy(deep=True)
settings.available_line_count = settings.available_line_count - 1
asa.configure(person_id=user.person_id, settings=settings)
after = asa.read(person_id=user.person_id)
self.assertEqual(settings.available_line_count, after.available_line_count)
after.available_line_count = before.available_line_count
self.assertEqual(before, after)
```
#### File: wxc_sdk/tests/test_person_call_privacy.py
```python
import random
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from dataclasses import dataclass
from wxc_sdk.all_types import Person, Privacy
import base64
from .base import TestCaseWithUsers
class TestRead(TestCaseWithUsers):
def test_001_read_all(self):
"""
Read privacy setting of all users
"""
ps = self.api.person_settings.privacy
with ThreadPoolExecutor() as pool:
list(pool.map(lambda user: ps.read(person_id=user.person_id),
self.users))
print(f'Got privacy settings for {len(self.users)} users')
@dataclass(init=False)
class TestUpdate(TestCaseWithUsers):
@contextmanager
def target_user(self):
"""
Get target user
"""
user = random.choice(self.users)
settings = self.api.person_settings.privacy.read(person_id=user.person_id)
try:
yield user
finally:
# restore old settings
# makes sure to clear list of monitored elements
settings.monitoring_agents = settings.monitoring_agents or []
self.api.person_settings.privacy.configure(person_id=user.person_id, settings=settings)
settings.monitoring_agents = settings.monitoring_agents or None
restored = self.api.person_settings.privacy.read(person_id=user.person_id)
self.assertEqual(settings, restored)
def test_001_toggle_aa_extension_dialing_enabled(self):
"""
Toggle aa_extension_dialing_enabled on random user
"""
with self.target_user() as user:
priv = self.api.person_settings.privacy
user: Person
before = priv.read(person_id=user.person_id)
settings = Privacy(aa_extension_dialing_enabled=not before.aa_extension_dialing_enabled)
priv.configure(person_id=user.person_id, settings=settings)
after = priv.read(person_id=user.person_id)
self.assertEqual(settings.aa_extension_dialing_enabled, after.aa_extension_dialing_enabled)
after.aa_extension_dialing_enabled = before.aa_extension_dialing_enabled
self.assertEqual(before, after)
def test_002_toggle_enable_phone_status_directory_privacy(self):
"""
Toggle enable_phone_status_directory_privacy on random user
"""
with self.target_user() as user:
priv = self.api.person_settings.privacy
user: Person
before = priv.read(person_id=user.person_id)
settings = Privacy(enable_phone_status_directory_privacy=not before.enable_phone_status_directory_privacy)
priv.configure(person_id=user.person_id, settings=settings)
after = priv.read(person_id=user.person_id)
self.assertEqual(settings.enable_phone_status_directory_privacy, after.enable_phone_status_directory_privacy)
after.enable_phone_status_directory_privacy = before.enable_phone_status_directory_privacy
self.assertEqual(before, after)
def test_003_add_user_by_id(self):
"""
Add some users by ID
"""
with self.target_user() as user:
# API shortcut
priv = self.api.person_settings.privacy
# get current settings
before = priv.read(person_id=user.person_id)
present_ids = [agent.agent_id for agent in before.monitoring_agents or []]
user_candidates = [user for user in self.users
if user.person_id not in present_ids]
to_add = random.sample(user_candidates, 3)
# ths is what we want to add
new_agents = [user.person_id
for user in to_add]
settings = Privacy(
monitoring_agents=(before.monitoring_agents or []) + new_agents)
# update
priv.configure(person_id=user.person_id, settings=settings)
# how does it look like after the update?
after = priv.read(person_id=user.person_id)
# all new user ids need to be present now
after_agent_ids = set(agent.agent_id for agent in after.monitoring_agents)
new_user_ids = set(user.person_id for user in to_add)
try:
self.assertEqual(new_user_ids, after_agent_ids & new_user_ids)
except AssertionError as e:
new_ids_missing = new_user_ids - after_agent_ids
for new_id in new_ids_missing:
print(f'New ID missing: {new_id}, {base64.b64decode(new_id + "==").decode()}')
unexpected_ids = after_agent_ids - set(agent.agent_id for agent in before.monitoring_agents or []) - \
new_user_ids
for unexpected_id in unexpected_ids:
print(f'Unexpected ID: {unexpected_id}, {base64.b64decode(unexpected_id + "==").decode()}')
raise
# other than that nothing should've changed
after.monitoring_agents = before.monitoring_agents
self.assertEqual(before, after)
def test_004_verify_agent_id_format(self):
"""
verify format of agent IDs
# TODO: defect, wrong agent id format; broadcloud ID instead of UUID, CALL-68642
"""
with self.target_user() as user:
# API shortcut
priv = self.api.person_settings.privacy
# get current settings
before = priv.read(person_id=user.person_id)
present_ids = [agent.agent_id for agent in before.monitoring_agents or []]
user_candidates = [user for user in self.users
if user.person_id not in present_ids]
to_add = random.sample(user_candidates, 3)
# ths is what we want to add
new_agents = [user.person_id
for user in to_add]
settings = Privacy(
monitoring_agents=(before.monitoring_agents or []) + new_agents)
# update
priv.configure(person_id=user.person_id, settings=settings)
# how does it look like after the update?
after = priv.read(person_id=user.person_id)
decoded_agent_ids = list(map(lambda agent: base64.b64decode(agent.agent_id + '==').decode(),
after.monitoring_agents))
for agent, decoded in zip(after.monitoring_agents, decoded_agent_ids):
print(f'id: {agent.agent_id} -> {decoded}')
# an "@" in the decoded agent IDs is an indicator that broadcloud IDs are returned instead of proper user IDs
self.assertTrue(not any('@' in d for d in decoded_agent_ids), "wrong format for agent IDs")
```
#### File: wxc_sdk/tests/test_person_call_waiting.py
```python
import asyncio
from concurrent.futures import ThreadPoolExecutor
from .base import TestCaseWithUsers
class TestRead(TestCaseWithUsers):
def test_001_read_all(self):
"""
read settings for all users
"""
cw = self.api.person_settings.call_waiting
with ThreadPoolExecutor() as pool:
details = list(pool.map(lambda user: cw.read(person_id=user.person_id),
self.users))
print(f'Got details for {len(details)} users.')
@TestCaseWithUsers.async_test
async def test_002_read_all_async(self):
"""
read settings for all users
"""
cw = self.async_api.person_settings.call_waiting
details = await asyncio.gather(*[cw.read(person_id=u.person_id) for u in self.users])
print(f'Got details for {len(details)} users.')
...
```
#### File: wxc_sdk/tests/test_person_exec_assistant.py
```python
import random
from concurrent.futures import ThreadPoolExecutor
from wxc_sdk.all_types import ExecAssistantType
from .base import TestCaseWithUsers
class TestRead(TestCaseWithUsers):
def test_001_read_all(self):
"""
Read exec assistant settings of all users
"""
ea = self.api.person_settings.exec_assistant
with ThreadPoolExecutor() as pool:
settings = list(pool.map(lambda user: ea.read(person_id=user.person_id),
self.users))
print(f'Got exec assistant settings for {len(self.users)} users')
max_len = max(len(user.display_name) for user in self.users)
print('\n'.join(f'{user.display_name:{max_len}}: {s.name}' for user, s in zip(self.users, settings)))
def test_002_update(self):
"""
update exec assistant settings for a user
"""
target = random.choice(self.users)
ea = self.api.person_settings.exec_assistant
setting = ea.read(person_id=target.person_id)
try:
# cycle through all possible values
for new_setting in ExecAssistantType:
if new_setting == setting:
continue
ea.configure(person_id=target.person_id, setting=new_setting)
after = ea.read(person_id=target.person_id)
self.assertEqual(new_setting, after)
finally:
# restore old settings
ea.configure(person_id=target.person_id, setting=setting)
restored = ea.read(person_id=target.person_id)
self.assertEqual(setting, restored)
```
#### File: wxc_sdk/tests/test_person_numbers.py
```python
from concurrent.futures import ThreadPoolExecutor
from .base import TestCaseWithUsers
class TestRead(TestCaseWithUsers):
def test_001_read_all(self):
"""
Read numbers all users
"""
nu = self.api.person_settings.numbers
with ThreadPoolExecutor() as pool:
_ = list(pool.map(lambda user: nu.read(person_id=user.person_id),
self.users))
print(f'Got numbers for {len(self.users)} users')
def test_002_direct_number_format(self):
"""
Read numbers all users, verify number format for direct number
# TODO: defect, direct number are not +E.164, CALL-69213
"""
nu = self.api.person_settings.numbers
with ThreadPoolExecutor() as pool:
numbers = list(pool.map(lambda user: nu.read(person_id=user.person_id),
self.users))
direct_number_issues = [(user, direct_numbers) for user, numbers in zip(self.users, numbers)
if (direct_numbers := [number.direct_number
for number in numbers.phone_numbers
if number.direct_number and not
number.direct_number.startswith('+')])]
print('\n'.join(f'{user.display_name}: {", ".join(numbers)}' for user, numbers in direct_number_issues))
self.assertTrue(not direct_number_issues, 'Some direct numbers are not +E.164')
```
#### File: wxc_sdk/tests/test_person_permisssions_in.py
```python
import random
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from wxc_sdk.all_types import *
from .base import TestCaseWithUsers
class TestRead(TestCaseWithUsers):
def test_001_read_all(self):
"""
Read incoming permissions settings of all users
"""
pi = self.api.person_settings.permissions_in
with ThreadPoolExecutor() as pool:
settings = list(pool.map(lambda user: pi.read(person_id=user.person_id),
self.users))
print(f'Got incoming permissions for {len(self.users)} users')
print('\n'.join(f'{user.display_name}: {s.json()}' for user, s in zip(self.users, settings)))
class TestUpdate(TestCaseWithUsers):
@contextmanager
def target_user(self):
"""
Get target user
"""
user = random.choice(self.users)
settings = self.api.person_settings.permissions_in.read(person_id=user.person_id)
try:
yield user
finally:
# restore old settings
# makes sure to clear list of monitored elements
self.api.person_settings.permissions_in.configure(person_id=user.person_id, settings=settings)
restored = self.api.person_settings.permissions_in.read(person_id=user.person_id)
self.assertEqual(settings, restored)
def test_001_toggle_enabled(self):
"""
toggle enabled
"""
with self.target_user() as user:
pi = self.api.person_settings.permissions_in
user: Person
before = pi.read(person_id=user.person_id)
settings: IncomingPermissions = before.copy(deep=True)
settings.use_custom_enabled = not settings.use_custom_enabled
pi.configure(person_id=user.person_id, settings=settings)
after = pi.read(person_id=user.person_id)
self.assertEqual(settings, after)
def test_002_external_transfer(self):
"""
try all external_transfer options
"""
with self.target_user() as user:
pi = self.api.person_settings.permissions_in
user: Person
before = pi.read(person_id=user.person_id)
settings: IncomingPermissions = before.copy(deep=True)
settings.use_custom_enabled = True
et = before.external_transfer
for v in ExternalTransfer:
if v == et:
continue
settings.external_transfer = v
pi.configure(person_id=user.person_id, settings=settings)
after = pi.read(person_id=user.person_id)
self.assertEqual(settings, after)
```
#### File: wxc_sdk/tests/test_person_settings.py
```python
import os.path
import random
import uuid
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from typing import Callable
from .base import TestCaseWithUsers, gather
from wxc_sdk.people import Person
from wxc_sdk.person_settings.barge import BargeSettings
from wxc_sdk.person_settings.call_intercept import InterceptSetting, InterceptTypeIncoming, Greeting
from wxc_sdk.person_settings.caller_id import CallerIdSelectedType
class TestRead(TestCaseWithUsers):
def execute_read_test(self, f: Callable):
with ThreadPoolExecutor() as pool:
result_map = pool.map(lambda user: f(person_id=user.person_id), self.users)
results = list(gather(result_map, return_exceptions=True))
return results
def test_001_read_barge(self):
"""
read_barge for all users
"""
results = self.execute_read_test(self.api.person_settings.barge.read)
for e in (r for r in results if isinstance(r, Exception)):
print(f'{e}')
self.assertFalse(any(isinstance(r, Exception) for r in results))
def test_002_read_forwarding(self):
"""
read_barge for all users
"""
results = self.execute_read_test(self.api.person_settings.forwarding.read)
for e in (r for r in results if isinstance(r, Exception)):
print(f'{e}')
self.assertFalse(any(isinstance(r, Exception) for r in results))
def test_003_read_call_intercept(self):
"""
read_barge for all users
"""
results = self.execute_read_test(self.api.person_settings.call_intercept.read)
for e in (r for r in results if isinstance(r, Exception)):
print(f'{e}')
self.assertFalse(any(isinstance(r, Exception) for r in results))
def test_004_read_call_recording(self):
"""
read call recording settings for all users
"""
results = self.execute_read_test(self.api.person_settings.call_recording.read)
for e in (r for r in results if isinstance(r, Exception)):
print(f'{e}')
self.assertFalse(any(isinstance(r, Exception) for r in results))
def test_005_read_caller_id(self):
"""
read caller id settings for all users
"""
results = self.execute_read_test(self.api.person_settings.caller_id.read)
for e in (r for r in results if isinstance(r, Exception)):
print(f'{e}')
self.assertFalse(any(isinstance(r, Exception) for r in results))
class TestConfigure(TestCaseWithUsers):
"""
Testing configure (update) endpoints
"""
@property
def wav_path(self) -> str:
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sample.wav')
def test_004_configure_barge(self):
"""
pick random user and try to update barge settings
"""
@contextmanager
def user_context():
"""
pick a random user, save barge setting and restore setting after end of test
"""
target_user = random.choice(self.users)
barge_settings = self.api.person_settings.barge.read(person_id=target_user.person_id)
print(f'target user: {target_user.display_name}: enabled: {barge_settings.enabled}, tone enabled: '
f'{barge_settings.tone_enabled}')
try:
yield target_user
finally:
# restore barge settings
print(f'restore enabled: {barge_settings.enabled}, tone enabled: {barge_settings.tone_enabled}')
self.api.person_settings.barge.configure(person_id=target_user.person_id,
barge_settings=barge_settings)
def update_and_check(barge_settings: BargeSettings):
"""
Update and verify barge settings
"""
print(f'Setting enabled: {barge_settings.enabled}, tone enabled: {barge_settings.tone_enabled}')
self.api.person_settings.barge.configure(person_id=user.person_id, barge_settings=barge_settings)
after = self.api.person_settings.barge.read(person_id=user.person_id)
self.assertEqual(barge_settings, after)
return
with user_context() as user:
# try all barge setting variations
user: Person
bs = BargeSettings(enabled=True, tone_enabled=True)
update_and_check(bs)
bs = BargeSettings(enabled=True, tone_enabled=False)
update_and_check(bs)
bs = BargeSettings(enabled=False, tone_enabled=True)
update_and_check(bs)
bs = BargeSettings(enabled=False, tone_enabled=False)
update_and_check(bs)
@contextmanager
def call_intercept_user_context(self):
"""
pick a random user, save call intercept setting and restore setting after end of test
"""
target_user = random.choice(self.users)
settings = self.api.person_settings.call_intercept.read(person_id=target_user.person_id)
print(f'target user: {target_user.display_name}: {settings} ')
try:
yield target_user
finally:
# restore settings
print(f'restore {settings}')
self.api.person_settings.call_intercept.configure(person_id=target_user.person_id, intercept=settings)
def call_intercept_update_and_check(self, user: Person, settings: InterceptSetting):
"""
Update and verify call intercept settings
"""
print(f'setting: {settings}')
self.api.person_settings.call_intercept.configure(person_id=user.person_id, intercept=settings)
after = self.api.person_settings.call_intercept.read(person_id=user.person_id)
self.assertEqual(settings, after)
return
def test_005_configure_call_intercept(self):
"""
try to update call intercept settings of a user
:return:
"""
with self.call_intercept_user_context() as user:
user: Person
intercept = self.api.person_settings.call_intercept.read(person_id=user.person_id)
intercept.enabled = not intercept.enabled
self.call_intercept_update_and_check(user=user, settings=intercept)
def test_006_upload_intercept_greeting(self):
"""
test to upload a custom greeting for call intercept
:return:
"""
with self.call_intercept_user_context() as user:
ps = self.api.person_settings
ps.call_intercept.greeting(person_id=user.person_id, content=self.wav_path)
intercept = ps.call_intercept.read(person_id=user.person_id)
self.assertEqual(os.path.basename(self.wav_path), intercept.incoming.announcements.file_name)
@TestCaseWithUsers.async_test
async def test_006a_async_upload_intercept_greeting(self):
"""
test to upload a custom greeting for call intercept
:return:
"""
with self.call_intercept_user_context() as user:
ps = self.async_api.person_settings
await ps.call_intercept.greeting(person_id=user.person_id, content=self.wav_path)
intercept = await ps.call_intercept.read(person_id=user.person_id)
self.assertEqual(os.path.basename(self.wav_path), intercept.incoming.announcements.file_name)
def test_007_upload_intercept_greeting_from_open_file(self):
"""
test to upload a custom greeting for call intercept from an open file
:return:
"""
with self.call_intercept_user_context() as user:
with open(self.wav_path, mode='rb') as wav_file:
upload_as = f'w{uuid.uuid4()}.wav'
ps = self.api.person_settings
ps.call_intercept.greeting(person_id=user.person_id, content=wav_file,
upload_as=upload_as)
intercept = ps.call_intercept.read(person_id=user.person_id)
self.assertEqual(upload_as, intercept.incoming.announcements.file_name)
def test_008_incoming_intercept_with_custom_greeting(self):
"""
set tup incoming intercept w/ custom greeting
"""
with self.call_intercept_user_context() as user:
ps = self.api.person_settings
intercept = ps.call_intercept.read(person_id=user.person_id)
intercept.incoming.intercept_type = InterceptTypeIncoming.intercept_all
intercept.incoming.announcements.greeting = Greeting.custom
# first upload custom greeting
with open(self.wav_path, mode='rb') as file:
upload_as = f'w{uuid.uuid4()}.wav'
ps.call_intercept.greeting(person_id=user.person_id, content=file,
upload_as=upload_as)
intermediate = ps.call_intercept.read(person_id=user.person_id)
# .. and then set the greeting to custom
ps.call_intercept.configure(person_id=user.person_id, intercept=intercept)
updated = ps.call_intercept.read(person_id=user.person_id)
# validation
self.assertEqual(upload_as, intermediate.incoming.announcements.file_name)
self.assertEqual(upload_as, updated.incoming.announcements.file_name)
self.assertEqual(Greeting.custom, updated.incoming.announcements.greeting)
class TestCallerIdConfigure(TestCaseWithUsers):
"""
Tests for
"""
@contextmanager
def user_context(self, *, users_with_tn: bool) -> Person:
candidates = [user for user in self.users
if not users_with_tn or user.tn]
if not candidates:
self.skipTest('No candidate user for test found')
target_user = random.choice(candidates)
print(f'Target user: {target_user.display_name}')
# get caller id settings
ps = self.api.person_settings
caller_id = ps.caller_id.read(person_id=target_user.person_id)
try:
yield random.choice(candidates)
finally:
# restore caller id settings
restore = caller_id.configure_params()
ps.caller_id.configure(person_id=target_user.person_id, **restore)
return
def test_set_direct_line(self):
"""
Try to set the caller ID to direct line
"""
with self.user_context(users_with_tn=True) as user:
ps = self.api.person_settings
ps.caller_id.configure(person_id=user.person_id, selected=CallerIdSelectedType.direct_line)
after = ps.caller_id.read(person_id=user.person_id)
self.assertEqual(CallerIdSelectedType.direct_line, after.selected)
def test_set_location_number(self):
"""
Try to set the caller ID to location number
"""
with self.user_context(users_with_tn=True) as user:
ps = self.api.person_settings
ps.caller_id.configure(person_id=user.person_id, selected=CallerIdSelectedType.location_number)
after = ps.caller_id.read(person_id=user.person_id)
self.assertEqual(CallerIdSelectedType.location_number, after.selected)
```
#### File: wxc_sdk/tests/test_person_vm_pin.py
```python
from concurrent.futures import ThreadPoolExecutor
from .base import TestCaseWithUsers
class TestRead(TestCaseWithUsers):
def test_001_reset_all(self):
"""
Read numbers all users
"""
ps = self.api.person_settings
with ThreadPoolExecutor() as pool:
list(pool.map(lambda user: ps.reset_vm_pin(person_id=user.person_id),
self.users))
print(f'reset VM PIN for {len(self.users)} users')
```
#### File: wxc_sdk/tests/test_telephony_autoattendant.py
```python
import json
# TODO: additional tests
import random
from concurrent.futures import ThreadPoolExecutor
from wxc_sdk.all_types import AutoAttendant, ScheduleType
from .base import TestCaseWithLog, TestWithLocations
class TestAutoAttendant(TestCaseWithLog):
"""
List all auto attendants
"""
def test_001_list(self):
"""
List all auto attendants
"""
aa_list = list(self.api.telephony.auto_attendant.list())
print(f'got {len(aa_list)} auto attendants')
print('\n'.join(f'{aa}' for aa in aa_list))
def test_002_details(self):
"""
Get details of all auto attendants
"""
aa_list = list(self.api.telephony.auto_attendant.list())
if not aa_list:
self.skipTest('No existing auto attendants')
ata = self.api.telephony.auto_attendant
with ThreadPoolExecutor() as pool:
aa = aa_list[0]
ata.details(location_id=aa.location_id, auto_attendant_id=aa.auto_attendant_id)
details = list(pool.map(
lambda aa: ata.details(location_id=aa.location_id, auto_attendant_id=aa.auto_attendant_id),
aa_list))
print(f'got details for {len(aa_list)} auto attendants')
print('\n'.join(f'{aa}' for aa in details))
class TestCreate(TestWithLocations):
def test_001_create(self):
"""
Create a simple AA in a random location
"""
target_location = random.choice(self.locations)
schedules = list(self.api.telephony.schedules.list(obj_id=target_location.location_id,
schedule_type=ScheduleType.business_hours))
if not schedules:
self.skipTest(f'No business hours schedule in location "{target_location.name}"')
# we prefer schedule "workday"
target_schedule = next((schedule for schedule in schedules
if schedule.name == 'workday'), None)
# .. but are ok with any other schedule if that doesn't exist
target_schedule = target_schedule or schedules[0]
# shortcut
ata = self.api.telephony.auto_attendant
# get an available name for the new auto attendant
existing_aa = list(ata.list(location_id=target_location.location_id))
names = set(aa.name for aa in existing_aa)
new_name = next(name for i in range(1000)
if (name := f'aa_{i:03}') not in names)
# for simplicity we just assume that auto attendants can use extension 9XXX
extension = str(9000 + int(new_name[-3:]))
print(f'creating AA "{new_name}" ({extension}) with schedule "{target_schedule.name}" '
f'in location "{target_location.name}"...')
aa_settings = AutoAttendant.create(name=new_name,
business_schedule=target_schedule.name,
extension=extension)
aa_id = ata.create(location_id=target_location.location_id,
settings=aa_settings)
details = ata.details(location_id=target_location.location_id, auto_attendant_id=aa_id)
print(json.dumps(json.loads(details.json()), indent=2))
print(f'Created AA: {aa_id}')
class TestDelete(TestCaseWithLog):
def test_001_delete(self):
"""
Delete a random "aa_*" auto attendant
:return:
"""
ata = self.api.telephony.auto_attendant
aa_list = list(ata.list(name='aa_'))
if not aa_list:
self.skipTest('No existing auto attendant "aa_*"')
target_aa = random.choice(aa_list)
print(f'Deleting aa "{target_aa.name}" in location "{target_aa.location_name}"')
ata.delete_auto_attendant(location_id=target_aa.location_id, auto_attendant_id=target_aa.auto_attendant_id)
class TestForwarding(TestCaseWithLog):
def test_001_get_all_forwarding_settings(self):
"""
get forwarding settings for all auto attendants
"""
ata = self.api.telephony.auto_attendant
aa_list = list(ata.list(name='aa_'))
with ThreadPoolExecutor() as pool:
forwarding_settings = list(pool.map(
lambda aa: ata.forwarding.settings(location_id=aa.location_id, feature_id=aa.auto_attendant_id),
aa_list))
print(f'Got forwarding settings for {len(forwarding_settings)} auto attendants.')
```
#### File: wxc_sdk/tests/test_telephony_location_vm.py
```python
import random
from concurrent.futures import ThreadPoolExecutor
from .base import TestWithLocations
from wxc_sdk.common import AuthCode
from wxc_sdk.telephony.location_vm import LocationVoiceMailSettings
class Test(TestWithLocations):
def test_001_read_all(self):
with ThreadPoolExecutor() as pool:
details = list(pool.map(
lambda location: self.api.telephony.location_voicemail.read(location_id=location.location_id),
self.locations))
print(f'Got voicemail settings for {len(details)} locations')
def test_002_update(self):
"""
Update VM settings for one locations
"""
target_location = random.choice(self.locations)
lvm = self.api.telephony.location_voicemail
before = lvm.read(location_id=target_location.location_id)
try:
new_settings = LocationVoiceMailSettings(
voicemail_transcription_enabled=not before.voicemail_transcription_enabled)
lvm.update(location_id=target_location.location_id, settings=new_settings)
after = lvm.read(location_id=target_location.location_id)
self.assertEqual(new_settings, after)
finally:
# restore old settings
lvm.update(location_id=target_location.location_id, settings=before)
after = lvm.read(location_id=target_location.location_id)
self.assertEqual(before, after)
```
#### File: wxc_sdk/tests/test_workspaces.py
```python
import random
from collections.abc import Generator
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from wxc_sdk.rest import RestError
from wxc_sdk.all_types import *
from .base import TestCaseWithLog
TEST_WORKSPACES_PREFIX = 'workspace test '
class TestList(TestCaseWithLog):
def test_001_list(self):
workspaces = list(self.api.workspaces.list())
print(f'got {len(workspaces)} workspaces')
print('\n'.join(w.json() for w in workspaces))
class TestDetails(TestCaseWithLog):
def test_001_all(self):
"""
details for all workspaces
"""
ws = self.api.workspaces
ws_list = ws.list()
with ThreadPoolExecutor() as pool:
details = list(pool.map(lambda w: ws.details(workspace_id=w.workspace_id),
ws_list))
print(f'got details for {len(details)} workspaces')
class TestOutgoingPermissionsAutoTransferNumbers(TestCaseWithLog):
def test_001_get_all(self):
"""
get outgoing permissions auto transfer numbers for all workspaces
"""
wsa = self.api.workspaces
tna = self.api.workspace_settings.permissions_out.transfer_numbers
targets = [ws for ws in wsa.list()
if ws.calling == CallingType.webex]
if not targets:
self.skipTest('Need some WxC enabled workspaces to run this test')
with ThreadPoolExecutor() as pool:
_ = list(pool.map(lambda ws: tna.read(person_id=ws.workspace_id),
targets))
print(f'outgoing permissions auto transfer numbers for {len(targets)} workspaces')
@contextmanager
def target_ws_context(self, use_custom_enabled: bool = True) -> Workspace:
"""
pick a random workspace and make sure that the outgoing permission settings are restored
:return:
"""
po = self.api.workspace_settings.permissions_out
targets = [ws for ws in self.api.workspaces.list()
if ws.calling == CallingType.webex]
if not targets:
self.skipTest('Need some WxC enabled workspaces to run this test')
random.shuffle(targets)
# if use_custom_enabled == False then we need a workspace where custom_enabled is not set. Else setting it to False
# will clear all existing custom settings and we want to avoid that side effect of the test
po_settings = None
target_ws = next((ws for ws in targets
if use_custom_enabled or
not (po_settings := po.read(person_id=ws.workspace_id)).use_custom_enabled),
None)
if target_ws is None:
self.skipTest('No WxC enabled workspace with use_custom_enabled==False')
if po_settings is None:
po_settings = po.read(person_id=target_ws.workspace_id)
try:
if use_custom_enabled:
# enable custom settings: else auto transfer numbers can't be set
po.configure(person_id=target_ws.workspace_id,
settings=OutgoingPermissions(use_custom_enabled=use_custom_enabled))
yield target_ws
finally:
# restore old settings
if use_custom_enabled:
po.configure(person_id=target_ws.workspace_id, settings=po_settings)
po_restored = po.read(person_id=target_ws.workspace_id)
self.assertEqual(po_settings, po_restored)
def test_002_update_wo_custom_enabled(self):
"""
updating auto transfer numbers requires use_custom_enabled to be set
:return:
"""
tna = self.api.workspace_settings.permissions_out.transfer_numbers
with self.target_ws_context(use_custom_enabled=False) as target_ws:
target_ws: Workspace
numbers = tna.read(person_id=target_ws.workspace_id)
try:
# change auto transfer number 1
update = numbers.copy(deep=True)
transfer = f'+4961007739{random.randint(0, 999):03}'
update.auto_transfer_number1 = transfer
tna.configure(person_id=target_ws.workspace_id, settings=update)
# verify update
updated = tna.read(person_id=target_ws.workspace_id)
# update should not work with use_custom_enabled == False
self.assertEqual(numbers, updated)
finally:
# restore old settings
tna.configure(person_id=target_ws.workspace_id, settings=numbers.configure_unset_numbers)
restored = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, restored)
# try
# with
def test_003_update_one_number(self):
"""
try to update auto transfer numbers for a workspace
"""
tna = self.api.workspace_settings.permissions_out.transfer_numbers
with self.target_ws_context() as target_ws:
target_ws: Workspace
numbers = tna.read(person_id=target_ws.workspace_id)
try:
# change auto transfer number 1
update = numbers.copy(deep=True)
transfer = f'+496100773{random.randint(0, 9999):03}'
update.auto_transfer_number1 = transfer
tna.configure(person_id=target_ws.workspace_id, settings=update)
# verify update
updated = tna.read(person_id=target_ws.workspace_id)
# number should be equal; ignore hyphens in number returned by API
self.assertEqual(transfer, updated.auto_transfer_number1.replace('-', ''))
# other than that the updated numbers should be identical to the numbers before
updated.auto_transfer_number1 = numbers.auto_transfer_number1
self.assertEqual(numbers, updated)
finally:
# restore old settings
tna.configure(person_id=target_ws.workspace_id, settings=numbers.configure_unset_numbers)
restored = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, restored)
# try
# with
def test_002_update_one_number_no_effect_on_other_numbers(self):
"""
try to update auto transfer numbers for a workspace. Verify that updating a single number doesn't affect the
other numbers
"""
tna = self.api.workspace_settings.permissions_out.transfer_numbers
with self.target_ws_context() as target_ws:
target_ws: Workspace
numbers = tna.read(person_id=target_ws.workspace_id)
try:
all_numbers_set = AutoTransferNumbers(auto_transfer_number1='+4961007738001',
auto_transfer_number2='+4961007738002',
auto_transfer_number3='+4961007738003')
tna.configure(person_id=target_ws.workspace_id, settings=all_numbers_set)
all_numbers_set = tna.read(person_id=target_ws.workspace_id)
# change auto transfer number 1
transfer = f'+496100773{random.randint(0, 9999):03}'
update = AutoTransferNumbers(auto_transfer_number1=transfer)
tna.configure(person_id=target_ws.workspace_id, settings=update)
# verify update
updated = tna.read(person_id=target_ws.workspace_id)
# number should be equal; ignore hyphens in number returned by API
self.assertEqual(transfer, updated.auto_transfer_number1.replace('-', ''))
# other than that the updated numbers should be identical to the numbers before
updated.auto_transfer_number1 = all_numbers_set.auto_transfer_number1
self.assertEqual(all_numbers_set, updated)
finally:
# restore old settings
tna.configure(person_id=target_ws.workspace_id, settings=numbers.configure_unset_numbers)
restored = tna.read(person_id=target_ws.workspace_id)
self.assertEqual(numbers, restored)
# try
# with
class TestCreateUpdate(TestCaseWithLog):
def new_names(self) -> Generator[str, None, None]:
ws_list = list(self.api.workspaces.list())
ws_names = set(w.display_name for w in ws_list)
new_gen = (name for i in range(1000)
if (name := f'{TEST_WORKSPACES_PREFIX}{i:03}') not in ws_names)
return new_gen
@contextmanager
def target(self, no_edge: bool = False):
ws = self.api.workspaces
ws_list = list(ws.list())
if no_edge:
ws_list = [ws for ws in ws_list
if ws.calling != CallingType.edge_for_devices]
        target_ws = random.choice(ws_list)
        target_ws = ws.details(workspace_id=target_ws.workspace_id)
        try:
            yield target_ws
        finally:
            ws.update(workspace_id=target_ws.workspace_id, settings=target_ws)
            restored = ws.details(workspace_id=target_ws.workspace_id)
            self.assertEqual(target_ws, restored)
def test_001_trivial(self):
"""
create workspace with minimal settings
"""
ws = self.api.workspaces
name = next(self.new_names())
settings = Workspace.create(display_name=name)
workspace = ws.create(settings=settings)
        print(f'new workspace: {workspace.json()}')
self.assertEqual(name, workspace.display_name)
def test_002_edge_for_devices(self):
"""
create workspace with edge_for_devices
"""
ws = self.api.workspaces
name = next(self.new_names())
settings = Workspace(display_name=name, calling=CallingType.edge_for_devices)
workspace = ws.create(settings=settings)
        print(f'new workspace: {workspace.json()}')
self.assertEqual(name, workspace.display_name)
def test_003_change_name_full(self):
"""
change name of a workspace, full settings
"""
ws = self.api.workspaces
with self.target(no_edge=True) as target_ws:
target_ws: Workspace
settings: Workspace = target_ws.copy(deep=True)
new_name = next(self.new_names())
settings.display_name = new_name
after = ws.update(workspace_id=target_ws.workspace_id,
settings=settings)
self.assertEqual(new_name, after.display_name)
def test_004_change_name_name_only(self):
"""
change name of a workspace, only name update
"""
ws = self.api.workspaces
with self.target(no_edge=True) as target_ws:
target_ws: Workspace
new_name = next(self.new_names())
settings = Workspace(display_name=new_name)
after = ws.update(workspace_id=target_ws.workspace_id,
settings=settings)
self.assertEqual(new_name, after.display_name)
class TestDelete(TestCaseWithLog):
def test_001_delete_one(self):
"""
delete a random workspace
"""
ws = self.api.workspaces
ws_list = list(ws.list(display_name=TEST_WORKSPACES_PREFIX))
if not ws_list:
self.skipTest('No test workspace to delete')
target = random.choice(ws_list)
ws.delete_workspace(workspace_id=target.workspace_id)
with self.assertRaises(RestError) as exc:
ws.details(workspace_id=target.workspace_id)
rest_error: RestError = exc.exception
self.assertEqual(404, rest_error.response.status_code)
```
#### File: wxc_sdk/wxc_sdk/api_child.py
```python
from dataclasses import dataclass
from .base import StrOrDict
from .rest import RestSession
__all__ = ['ApiChild']
@dataclass(init=False)
class ApiChild:
"""
Base class for child APIs of :class:`WebexSimpleApi`
"""
session: RestSession
def __init__(self, *, session: RestSession, base: str = None):
#: REST session
self.session = session
if base:
self.base = base
def __init_subclass__(cls, base: str):
"""
Subclass registration hook. Each APIChild has a specific endpoint prefix which we gather at subclass
        registration time.
:param base: APIChild specific URL path
"""
super().__init_subclass__()
# save endpoint prefix
cls.base = base
def ep(self, path: str = None):
"""
endpoint URL for given path
:param path: path after APIChild subclass specific endpoint URI prefix
:type path: str
:return: endpoint URL
:rtype: str
"""
path = path and f'/{path}' or ''
return self.session.ep(f'{self.base}{path}')
def get(self, *args, **kwargs) -> StrOrDict:
"""
GET request
:param args:
:param kwargs:
:return:
"""
return self.session.rest_get(*args, **kwargs)
def post(self, *args, **kwargs) -> StrOrDict:
"""
POST request
:param args:
:param kwargs:
:return:
"""
return self.session.rest_post(*args, **kwargs)
def put(self, *args, **kwargs) -> StrOrDict:
"""
PUT request
:param args:
:param kwargs:
:return:
"""
return self.session.rest_put(*args, **kwargs)
def delete(self, *args, **kwargs) -> None:
"""
DELETE request
:param args:
:param kwargs:
"""
self.session.rest_delete(*args, **kwargs)
def patch(self, *args, **kwargs) -> StrOrDict:
"""
PATCH request
:param args:
:param kwargs:
"""
return self.session.rest_patch(*args, **kwargs)
```
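For reference, the endpoint composition used by `ApiChild` can be illustrated with a small self-contained sketch. The `DummySession` and `WidgetsApi` names below are illustrative assumptions, not part of wxc_sdk; the sketch only mirrors how a subclass registers its `base` prefix and builds endpoint URLs via `ep()`.
```python
# Sketch only: the ApiChild pattern, reproduced without wxc_sdk imports.
from dataclasses import dataclass


@dataclass
class DummySession:
    """Stands in for RestSession; only provides ep()."""
    base_url: str = 'https://webexapis.com/v1'

    def ep(self, path: str) -> str:
        return f'{self.base_url}/{path}'


class WidgetsApi:
    """Mimics a child API declared as an ApiChild subclass with base='widgets'."""
    base = 'widgets'

    def __init__(self, *, session: DummySession):
        self.session = session

    def ep(self, path: str = None) -> str:
        # same composition rule as ApiChild.ep()
        path = path and f'/{path}' or ''
        return self.session.ep(f'{self.base}{path}')


if __name__ == '__main__':
    api = WidgetsApi(session=DummySession())
    print(api.ep())      # -> https://webexapis.com/v1/widgets
    print(api.ep('42'))  # -> https://webexapis.com/v1/widgets/42
```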
#### File: wxc_sdk/wxc_sdk/as_api.py
```python
import json
import logging
import os
from collections.abc import AsyncGenerator
from dataclasses import dataclass
from enum import Enum
from io import BufferedReader
from typing import Union, Dict, Optional, Literal, List
from aiohttp import MultipartWriter
from pydantic import parse_obj_as
from wxc_sdk.all_types import *
from wxc_sdk.as_rest import AsRestSession
from wxc_sdk.base import to_camel, StrOrDict
log = logging.getLogger(__name__)
__all__ = ['AsWebexSimpleApi']
class MultipartEncoder(MultipartWriter):
"""
Compatibility class for requests toolbelt MultipartEncoder
"""
def __init__(self, *, fields: dict):
super().__init__('form_data')
for field_name, (file_name, content, content_type) in fields.items():
# noinspection PyTypeChecker
part = self.append(content, {'Content-Type': content_type})
part.set_content_disposition('form-data', name=field_name, filename=file_name)
# there seems to be a problem with getting too many users with calling data at the same time
# this is the maximum number the SDK enforces
MAX_USERS_WITH_CALLING_DATA = 10
__all__ = ['AsAccessCodesApi', 'AsAnnouncementApi', 'AsApiChild', 'AsAppServicesApi', 'AsAuthCodesApi',
'AsAutoAttendantApi', 'AsBargeApi', 'AsCallInterceptApi', 'AsCallParkApi', 'AsCallPickupApi',
'AsCallQueueApi', 'AsCallRecordingApi', 'AsCallWaitingApi', 'AsCallerIdApi', 'AsCallingBehaviorApi',
'AsCallparkExtensionApi', 'AsCallsApi', 'AsDndApi', 'AsExecAssistantApi', 'AsForwardingApi', 'AsGroupsApi',
'AsHotelingApi', 'AsHuntGroupApi', 'AsIncomingPermissionsApi', 'AsLicensesApi', 'AsLocationInterceptApi',
'AsLocationMoHApi', 'AsLocationVoicemailSettingsApi', 'AsLocationsApi', 'AsMonitoringApi', 'AsNumbersApi',
'AsOrganisationVoicemailSettingsAPI', 'AsOutgoingPermissionsApi', 'AsPagingApi', 'AsPeopleApi',
'AsPersonForwardingApi', 'AsPersonSettingsApi', 'AsPersonSettingsApiChild', 'AsPrivacyApi',
'AsPrivateNetworkConnectApi', 'AsPushToTalkApi', 'AsReceptionistApi', 'AsRestSession', 'AsScheduleApi',
'AsTelephonyApi', 'AsTransferNumbersApi', 'AsVoicePortalApi', 'AsVoicemailApi', 'AsVoicemailGroupsApi',
'AsVoicemailRulesApi', 'AsWebexSimpleApi', 'AsWebhookApi', 'AsWorkspaceSettingsApi', 'AsWorkspacesApi']
@dataclass(init=False)
class AsApiChild:
"""
Base class for child APIs of :class:`WebexSimpleApi`
"""
session: AsRestSession
def __init__(self, *, session: AsRestSession, base: str = None):
#: REST session
self.session = session
if base:
self.base = base
def __init_subclass__(cls, base: str):
"""
Subclass registration hook. Each APIChild has a specific endpoint prefix which we gather at subclass
        registration time.
:param base: APIChild specific URL path
"""
super().__init_subclass__()
# save endpoint prefix
cls.base = base
def ep(self, path: str = None):
"""
endpoint URL for given path
:param path: path after APIChild subclass specific endpoint URI prefix
:type path: str
:return: endpoint URL
:rtype: str
"""
path = path and f'/{path}' or ''
return self.session.ep(f'{self.base}{path}')
async def get(self, *args, **kwargs) -> StrOrDict:
"""
GET request
:param args:
:param kwargs:
:return:
"""
return await self.session.rest_get(*args, **kwargs)
async def post(self, *args, **kwargs) -> StrOrDict:
"""
POST request
:param args:
:param kwargs:
:return:
"""
return await self.session.rest_post(*args, **kwargs)
async def put(self, *args, **kwargs) -> StrOrDict:
"""
PUT request
:param args:
:param kwargs:
:return:
"""
return await self.session.rest_put(*args, **kwargs)
async def delete(self, *args, **kwargs) -> None:
"""
DELETE request
:param args:
:param kwargs:
"""
await self.session.rest_delete(*args, **kwargs)
async def patch(self, *args, **kwargs) -> StrOrDict:
"""
PATCH request
:param args:
:param kwargs:
"""
return await self.session.rest_patch(*args, **kwargs)
class AsGroupsApi(AsApiChild, base='groups'):
def list_gen(self, *, include_members: bool = None, attributes: str = None, sort_by: str = None,
sort_order: str = None, list_filter: str = None, org_id: str = None,
**params) -> AsyncGenerator[Group, None, None]:
"""
List groups
:param include_members: Include members in list response
:type include_members: bool
:param attributes: comma separated list of attributes to return
:type attributes: str
:param sort_by: attribute to sort by
:type sort_by: str
:param sort_order: sort order, ascending or descending
:type sort_order: str
:param org_id: organisation ID
:type org_id: str
:param list_filter: filter expression. Example: displayName eq "test"
:type list_filter: str
:param params:
:return: generator of :class:`Group` objects
"""
params.update((to_camel(k), v) for i, (k, v) in enumerate(locals().items())
if i and k != 'params' and v is not None)
for k, v in params.items():
if isinstance(v, bool):
params[k] = 'true' if v else 'false'
if lf := params.pop('listFilter', None):
params['filter'] = lf
url = self.ep()
return self.session.follow_pagination(url=url, model=Group, item_key='groups', params=params)
async def list(self, *, include_members: bool = None, attributes: str = None, sort_by: str = None,
sort_order: str = None, list_filter: str = None, org_id: str = None,
**params) -> List[Group]:
"""
List groups
:param include_members: Include members in list response
:type include_members: bool
:param attributes: comma separated list of attributes to return
:type attributes: str
:param sort_by: attribute to sort by
:type sort_by: str
:param sort_order: sort order, ascending or descending
:type sort_order: str
:param org_id: organisation ID
:type org_id: str
:param list_filter: filter expression. Example: displayName eq "test"
:type list_filter: str
:param params:
        :return: list of :class:`Group` objects
"""
params.update((to_camel(k), v) for i, (k, v) in enumerate(locals().items())
if i and k != 'params' and v is not None)
for k, v in params.items():
if isinstance(v, bool):
params[k] = 'true' if v else 'false'
if lf := params.pop('listFilter', None):
params['filter'] = lf
url = self.ep()
return [o async for o in self.session.follow_pagination(url=url, model=Group, item_key='groups', params=params)]
async def create(self, *, settings: Group) -> Group:
"""
Create a new group using the provided settings. Only display_name is mandatory
:param settings: settings for new group
:type settings: Group
:return: new group
:rtype: :class:`Group`
"""
url = self.ep()
body = settings.json(exclude={'group_id': True,
'members': {'__all__': {'member_type': True,
'display_name': True,
'operation': True}},
'created': True,
'last_modified': True})
data = await self.post(url, data=body)
return Group.parse_obj(data)
async def details(self, group_id: str, include_members: bool = None) -> Group:
"""
Get group details
:param group_id: group id
:type group_id: str
:param include_members: return members in response
:type include_members: bool
:return: group details
:rtype: Group
"""
url = self.ep(group_id)
params = dict()
if include_members is not None:
params['includeMembers'] = 'true' if include_members else 'false'
data = await self.get(url, params=params)
return Group.parse_obj(data)
def members_gen(self, *, group_id: str, **params) -> AsyncGenerator[GroupMember, None, None]:
"""
Query members of a group
:param group_id: group id
:type group_id: str
:param params:
:return: generator of :class:`GroupMember` instances
"""
url = self.ep(f'{group_id}/Members')
return self.session.follow_pagination(url=url, model=GroupMember, params=params, item_key='members')
async def members(self, *, group_id: str, **params) -> List[GroupMember]:
"""
Query members of a group
:param group_id: group id
:type group_id: str
:param params:
        :return: list of :class:`GroupMember` instances
"""
url = self.ep(f'{group_id}/Members')
return [o async for o in self.session.follow_pagination(url=url, model=GroupMember, params=params, item_key='members')]
async def update(self, *, group_id: str, settings: Group = None, remove_all: bool = None) -> Group:
"""
update group information.
Options: change displayName, add new members, remove some or all members, replace all members
:param group_id:
:param settings:
:param remove_all:
:return:
"""
if not any((settings, remove_all)):
raise ValueError('settings or remove_all have to be present')
url = self.ep(group_id)
if settings:
body = settings.json(exclude={'group_id': True,
'members': {'__all__': {'member_type': True,
'display_name': True}},
'created': True,
'last_modified': True})
else:
body = 'purgeAllValues:{"attributes":["members"]}'
data = await self.patch(url, data=body)
return Group.parse_obj(data)
async def delete_group(self, group_id: str):
"""
Delete a group
:param group_id: group id
:type group_id: str
"""
url = self.ep(group_id)
await self.delete(url)
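# Usage sketch (illustrative only; assumes an authenticated AsWebexSimpleApi instance
# exposes this API as `api.groups` -- the attribute name is an assumption):
#
#     async for group in api.groups.list_gen(include_members=True):
#         print(group.display_name)
#     groups = await api.groups.list(list_filter='displayName eq "test"')
#
# list_gen() returns the follow_pagination generator directly, while list() drains
# that generator into a list.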
class AsLicensesApi(AsApiChild, base='licenses'):
"""
Licenses
An allowance for features and services that are provided to users on a Webex services subscription. Cisco and its
partners manage the amount of licenses provided to administrators and users. This license resource can be accessed
only by an admin.
"""
def list_gen(self, org_id: str = None) -> AsyncGenerator[License, None, None]:
"""
List all licenses for a given organization. If no org_id is specified, the default is the organization of
the authenticated user.
Response properties that are not applicable to the license will not be present in the response.
:param org_id: List licenses for this organization.
:type org_id: str
:return: yields :class:`License` instances
"""
params = org_id and {'orgId': org_id} or None
ep = self.ep()
# noinspection PyTypeChecker
return self.session.follow_pagination(url=ep, model=License, params=params)
async def list(self, org_id: str = None) -> List[License]:
"""
List all licenses for a given organization. If no org_id is specified, the default is the organization of
the authenticated user.
Response properties that are not applicable to the license will not be present in the response.
:param org_id: List licenses for this organization.
:type org_id: str
        :return: list of :class:`License` instances
"""
params = org_id and {'orgId': org_id} or None
ep = self.ep()
# noinspection PyTypeChecker
return [o async for o in self.session.follow_pagination(url=ep, model=License, params=params)]
async def details(self, license_id) -> License:
"""
Shows details for a license, by ID.
Response properties that are not applicable to the license will not be present in the response.
:param license_id: The unique identifier for the license.
:type license_id: str
:return: license details
:rtype: License
"""
ep = self.ep(license_id)
return License.parse_obj(await self.get(ep))
class AsLocationsApi(AsApiChild, base='locations'):
"""
Location API
Locations are used to organize Webex Calling (BroadCloud) features within physical locations. Webex Control Hub
may be used to define new locations.
Searching and viewing locations in your organization requires an administrator auth token with the
spark-admin:people_read and spark-admin:people_write or spark-admin:device_read AND spark-admin:device_write
scope combinations.
"""
def list_gen(self, name: str = None, location_id: str = None, org_id: str = None,
**params) -> AsyncGenerator[Location, None, None]:
"""
List locations for an organization.
:param name: List locations whose name contains this string (case-insensitive).
:type name: str
:param location_id: List locations by ID.
:type location_id: str
:param org_id: List locations in this organization. Only admin users of another organization
(such as partners) may use this parameter.
:type org_id: str
:return: generator of :class:`Location` instances
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and k != 'params' and v is not None)
if location_id is not None:
params.pop('locationId')
params['id'] = location_id
ep = self.ep()
# noinspection PyTypeChecker
return self.session.follow_pagination(url=ep, model=Location, params=params)
async def list(self, name: str = None, location_id: str = None, org_id: str = None,
**params) -> List[Location]:
"""
List locations for an organization.
:param name: List locations whose name contains this string (case-insensitive).
:type name: str
:param location_id: List locations by ID.
:type location_id: str
:param org_id: List locations in this organization. Only admin users of another organization
(such as partners) may use this parameter.
:type org_id: str
        :return: list of :class:`Location` instances
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and k != 'params' and v is not None)
if location_id is not None:
params.pop('locationId')
params['id'] = location_id
ep = self.ep()
# noinspection PyTypeChecker
return [o async for o in self.session.follow_pagination(url=ep, model=Location, params=params)]
async def by_name(self, name: str, org_id: str = None) -> Optional[Location]:
"""
Get a location by name
:param name: name of the location to search
:type name: str
:param org_id: search in list of locations in this organization. Only admin users of another organization
(such as partners) may use this parameter.
:type org_id: str
:return: locations
:rtype: Location
"""
return next((location for location in await self.list(name=name, org_id=org_id)
if location.name == name), None)
async def details(self, location_id) -> Location:
"""
Shows details for a location, by ID.
This API only works for Customer administrators and for Partner administrators to query their own organization.
Partner administrators looking to query customer organizations should use the List Locations endpoint to
retrieve information about locations.
:param location_id: A unique identifier for the location.
:type location_id: str
:return: location details
:rtype: Location
"""
ep = self.ep(location_id)
return Location.parse_obj(await self.get(ep))
class AsPeopleApi(AsApiChild, base='people'):
"""
People API
"""
def list_gen(self, email: str = None, display_name: str = None, id_list: list[str] = None, org_id: str = None,
calling_data: bool = None, location_id: str = None, **params) -> AsyncGenerator[Person, None, None]:
"""
List people in your organization. For most users, either the email or display_name parameter is required. Admin
users can omit these fields and list all users in their organization.
Response properties associated with a user's presence status, such as status or last_activity, will only be
displayed for people within your organization or an organization you manage. Presence information will not be
shown if the authenticated user has disabled status sharing.
Admin users can include Webex Calling (BroadCloud) user details in the response by specifying calling_data
parameter as True. Admin users can list all users in a location or with a specific phone number.
:param email: List people with this email address. For non-admin requests, either this or displayName are
required.
:type email: str
:param display_name: List people whose name starts with this string. For non-admin requests, either this or
email are required.
:type display_name: str
:param id_list: List people by ID. Accepts up to 85 person IDs. If this parameter is provided then presence
information (such as the last_activity or status properties) will not be included in the response.
:type id_list: list[str]
:param org_id: List people in this organization. Only admin users of another organization (such as partners)
may use this parameter.
:type org_id: str
:param calling_data: Include Webex Calling user details in the response. Default: False
:type calling_data: bool
:param location_id: List people present in this location.
:type location_id: str
:return: yield :class:`Person` instances
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and v is not None and k != 'params')
if calling_data:
params['callingData'] = 'true'
# apparently there is a performance problem with getting too many users w/ calling data at the same time
params['max'] = params.get('max', MAX_USERS_WITH_CALLING_DATA)
id_list = params.pop('idList', None)
if id_list:
params['id'] = ','.join(id_list)
ep = self.ep()
# noinspection PyTypeChecker
return self.session.follow_pagination(url=ep, model=Person, params=params)
async def list(self, email: str = None, display_name: str = None, id_list: list[str] = None, org_id: str = None,
calling_data: bool = None, location_id: str = None, **params) -> List[Person]:
"""
List people in your organization. For most users, either the email or display_name parameter is required. Admin
users can omit these fields and list all users in their organization.
Response properties associated with a user's presence status, such as status or last_activity, will only be
displayed for people within your organization or an organization you manage. Presence information will not be
shown if the authenticated user has disabled status sharing.
Admin users can include Webex Calling (BroadCloud) user details in the response by specifying calling_data
parameter as True. Admin users can list all users in a location or with a specific phone number.
:param email: List people with this email address. For non-admin requests, either this or displayName are
required.
:type email: str
:param display_name: List people whose name starts with this string. For non-admin requests, either this or
email are required.
:type display_name: str
:param id_list: List people by ID. Accepts up to 85 person IDs. If this parameter is provided then presence
information (such as the last_activity or status properties) will not be included in the response.
:type id_list: list[str]
:param org_id: List people in this organization. Only admin users of another organization (such as partners)
may use this parameter.
:type org_id: str
:param calling_data: Include Webex Calling user details in the response. Default: False
:type calling_data: bool
:param location_id: List people present in this location.
:type location_id: str
        :return: list of :class:`Person` instances
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and v is not None and k != 'params')
if calling_data:
params['callingData'] = 'true'
# apparently there is a performance problem with getting too many users w/ calling data at the same time
params['max'] = params.get('max', MAX_USERS_WITH_CALLING_DATA)
id_list = params.pop('idList', None)
if id_list:
params['id'] = ','.join(id_list)
ep = self.ep()
# noinspection PyTypeChecker
return [o async for o in self.session.follow_pagination(url=ep, model=Person, params=params)]
async def create(self, settings: Person, calling_data: bool = False) -> Person:
"""
Create a Person
Create a new user account for a given organization. Only an admin can create a new user account.
At least one of the following body parameters is required to create a new user: displayName, firstName,
lastName.
Currently, users may have only one email address associated with their account. The emails parameter is an
array, which accepts multiple values to allow for future expansion, but currently only one email address will
be used for the new user.
Admin users can include Webex calling (BroadCloud) user details in the response by specifying callingData
parameter as true.
When doing attendee management, to make the new user an attendee for a site: append #attendee to the siteUrl
parameter (eg: mysite.webex.com#attendee).
:param settings: settings for new user
:type settings: Person
:param calling_data: Include Webex Calling user details in the response.
:type calling_data: bool
:return: new user
:rtype: Person
"""
params = calling_data and {'callingData': 'true'} or None
url = self.ep()
data = settings.json(exclude={'person_id': True,
'created': True,
'last_modified': True,
'timezone': True,
'last_activity': True,
'sip_addresses': True,
'status': True,
'invite_pending': True,
'login_enabled': True,
'person_type': True})
return Person.parse_obj(await self.post(url, data=data, params=params))
async def details(self, person_id: str, calling_data: bool = False) -> Person:
"""
Shows details for a person, by ID.
Response properties associated with a user's presence status, such as status or last_activity, will only be
displayed for people within your organization or an organization you manage. Presence information will not be
shown if the authenticated user has disabled status sharing.
Admin users can include Webex Calling (BroadCloud) user details in the response by specifying calling_data
parameter as True.
:param person_id: A unique identifier for the person.
:type person_id: str
:param calling_data: Include Webex Calling user details in the response. Default: false
:type calling_data: bool
:return: person details
:rtype: Person
"""
ep = self.ep(path=person_id)
params = calling_data and {'callingData': 'true'} or None
return Person.parse_obj(await self.get(ep, params=params))
async def delete_person(self, person_id: str):
"""
Remove a person from the system. Only an admin can remove a person.
:param person_id: A unique identifier for the person.
:return:
"""
ep = self.ep(path=person_id)
await self.delete(ep)
async def update(self, person: Person, calling_data: bool = False, show_all_types: bool = False) -> Person:
"""
Update details for a person, by ID.
Only an admin can update a person details.
Include all details for the person. This action expects all user details to be present in the request. A
common approach is to first GET the person's details, make changes, then PUT both the changed and unchanged
values.
Admin users can include Webex Calling (BroadCloud) user details in the response by specifying calling_data
parameter as True.
Note: The location_id can only be set when adding a calling license to a user. It cannot be changed if a user
is already an existing calling user.
        When doing attendee management, to update a user "from host to attendee" for a site append #attendee to the
respective site_url and remove the meeting host license for this site from the license array. To update a
        person "from attendee to host" for a site, add the meeting license for this site in the meeting array and
remove that site from the site_url parameter.
Removing the attendee privilege for a user on a meeting site is done by removing that sitename#attendee from
the siteUrls array. The show_all_types parameter must be set to True.
:param person: The person to update
:type person: Person
:param calling_data: Include Webex Calling user details in the response. Default: False
:type calling_data: bool
        :param show_all_types: Include additional user types in the response; must be set to True when removing
            the attendee privilege for a site (see above). Default: False
:type show_all_types: bool
:return: Person details
:rtype: Person
"""
params = calling_data and {'callingData': 'true'} or None
if not all(v is not None
for v in (person.display_name, person.first_name, person.last_name)):
raise ValueError('display_name, first_name, and last_name are required')
# some attributes should not be included in update
data = person.json(exclude={'created': True,
'last_modified': True,
'timezone': True,
'last_activity': True,
'sip_addresses': True,
'status': True,
'invite_pending': True,
'login_enabled': True,
'person_type': True})
ep = self.ep(path=person.person_id)
return Person.parse_obj(await self.put(url=ep, data=data, params=params))
async def me(self, calling_data: bool = False) -> Person:
"""
Show the profile for the authenticated user. This is the same as GET /people/{personId} using the Person ID
associated with your Auth token.
Admin users can include Webex Calling (BroadCloud) user details in the response by specifying callingData
parameter as true.
:param calling_data: True -> return calling data
:type calling_data: bool
:rtype: Person
:return: profile of authenticated user
"""
ep = self.ep('me')
params = calling_data and {'callingData': 'true'} or None
data = await self.get(ep, params=params)
result = Person.parse_obj(data)
return result
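# Usage sketch (illustrative only; assumes the people API is exposed as `api.people`):
#
#     me = await api.people.me(calling_data=True)
#     users = await api.people.list(display_name='John', calling_data=True)
#
# When calling_data is requested, list()/list_gen() cap the page size at
# MAX_USERS_WITH_CALLING_DATA to work around the performance issue noted above.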
class AsPersonSettingsApiChild(AsApiChild, base=''):
"""
Base class for all classes implementing person settings APIs
"""
feature = None
def __init__(self, *, session: AsRestSession, base: str = None,
workspaces: bool = False, locations: bool = False):
self.feature_prefix = '/features/'
if workspaces:
self.selector = 'workspaces'
elif locations:
self.selector = 'telephony/config/locations'
self.feature_prefix = '/'
else:
self.selector = 'people'
super().__init__(session=session, base=base)
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(base='')
if cls.feature is None:
raise TypeError('feature has to be defined')
def f_ep(self, *, person_id: str, path: str = None) -> str:
"""
person specific feature endpoint like v1/people/{uid}/features/....
:param person_id: Unique identifier for the person.
:type person_id: str
:param path: path in the endpoint after the feature base URL
:type path: str
:return: full endpoint
:rtype: str
"""
path = path and f'/{path}' or ''
return self.session.ep(f'{self.selector}/{person_id}{self.feature_prefix}{self.feature}{path}')
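# Example of the URL composition done by f_ep(): for AsBargeApi (feature = 'bargeIn')
# constructed with the default 'people' selector, f_ep(person_id='123') resolves to
#     <session base>/people/123/features/bargeIn
# while an instance constructed with locations=True uses the
# 'telephony/config/locations' selector and a plain '/' feature prefix instead.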
class AsAppServicesApi(AsPersonSettingsApiChild):
"""
API for person's app services settings
"""
feature = 'applications'
async def read(self, *, person_id: str, org_id: str = None) -> AppServicesSettings:
"""
Retrieve a Person's Application Services Settings
Application services let you determine the ringing behavior for calls made to people in certain scenarios.
You can also specify which devices can download the Webex Calling app.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
        :return: application services settings
        :rtype: :class:`AppServicesSettings`
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = await self.get(ep, params=params)
return AppServicesSettings.parse_obj(data)
async def configure(self, *, person_id: str, settings: AppServicesSettings, org_id: str = None):
"""
Modify a Person's Application Services Settings
Application services let you determine the ringing behavior for calls made to users in certain scenarios. You
can also specify which devices users can download the Webex Calling app on.
This API requires a full or user administrator auth token with the spark-admin:people_write scope.
:param person_id: Unique identifier for the person.
:type person_id: str
:param settings: settings for update
:type settings: :class:`AppServicesSettings`
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = settings.json(exclude={'available_line_count': True})
await self.put(ep, params=params, data=data)
class AsBargeApi(AsPersonSettingsApiChild):
"""
API for person's barge settings
"""
feature = 'bargeIn'
async def read(self, *, person_id: str, org_id: str = None) -> BargeSettings:
"""
Retrieve a Person's Barge In Settings
The Barge In feature enables you to use a Feature Access Code (FAC) to answer a call that was directed to
another subscriber, or barge-in on the call if it was already answered. Barge In can be used across locations.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read
or a user auth token with spark:people_read scope can be used by a person to read their own settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: barge settings for specific user
:rtype: BargeSettings
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
return BargeSettings.parse_obj(await self.get(ep, params=params))
async def configure(self, *, person_id: str, barge_settings: BargeSettings, org_id: str = None):
"""
Configure a Person's Barge In Settings
The Barge In feature enables you to use a Feature Access Code (FAC) to answer a call that was directed to
another subscriber, or barge-in on the call if it was already answered. Barge In can be used across locations.
This API requires a full or user administrator auth token with the spark-admin:people_write scope or a user
auth token with spark:people_write scope can be used by a person to update their own settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param barge_settings: new setting to be applied
:type barge_settings: BargeSettings
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
await self.put(ep, params=params, data=barge_settings.json())
class AsCallInterceptApi(AsPersonSettingsApiChild):
"""
API for person's call intercept settings
"""
feature = 'intercept'
async def read(self, *, person_id: str, org_id: str = None) -> InterceptSetting:
"""
Read Call Intercept Settings for a Person
Retrieves Person's Call Intercept Settings
        The intercept feature gracefully takes a person's phone out of service, while providing callers with
informative announcements and alternative routing options. Depending on the service configuration, none,
some, or all incoming calls to the specified person are intercepted. Also depending on the service
configuration, outgoing calls are intercepted or rerouted to another location.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: user's call intercept settings
:rtype: InterceptSetting
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
return InterceptSetting.parse_obj(await self.get(ep, params=params))
async def configure(self, *, person_id: str, intercept: InterceptSetting, org_id: str = None):
"""
Configure Call Intercept Settings for a Person
Configures a Person's Call Intercept Settings
        The intercept feature gracefully takes a person's phone out of service, while providing callers with
informative announcements and alternative routing options. Depending on the service configuration, none, some,
or all incoming calls to the specified person are intercepted. Also depending on the service configuration,
outgoing calls are intercepted or rerouted to another location.
This API requires a full or user administrator auth token with the spark-admin:people_write scope.
:param person_id: Unique identifier for the person.
:type person_id: str
:param intercept: new intercept settings
:type intercept: InterceptSetting
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = json.loads(intercept.json())
try:
# remove attribute not present in update
data['incoming']['announcements'].pop('fileName', None)
except KeyError:
pass
await self.put(ep, params=params, json=data)
async def greeting(self, person_id: str, content: Union[BufferedReader, str],
upload_as: str = None, org_id: str = None):
"""
Configure Call Intercept Greeting for a Person
Configure a Person's Call Intercept Greeting by uploading a Waveform Audio File Format, .wav, encoded audio
file.
Your request will need to be a multipart/form-data request rather than JSON, using the audio/wav Content-Type.
This API requires a full or user administrator auth token with the spark-admin:people_write scope or a user
auth token with spark:people_write scope can be used by a person to update their settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param content: the file to be uploaded, can be a path to a file or a buffered reader (opened file); if a
reader referring to an open file is passed then make sure to open the file as binary b/c otherwise the
content length might be calculated wrong
:type content: Union[BufferedReader, str]
:param upload_as: filename for the content. Only required if content is a reader; has to be a .wav file name.
:type upload_as: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
if isinstance(content, str):
upload_as = os.path.basename(content)
content = open(content, mode='rb')
must_close = True
else:
must_close = False
# an existing reader
if not upload_as:
raise ValueError('upload_as is required')
encoder = MultipartEncoder(fields={'file': (upload_as, content, 'audio/wav')})
ep = self.f_ep(person_id=person_id, path='actions/announcementUpload/invoke')
params = org_id and {'orgId': org_id} or None
try:
await self.post(ep, data=encoder, headers={'Content-Type': encoder.content_type},
params=params)
finally:
if must_close:
content.close()
return
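# Usage sketch (illustrative only; the attribute path api.person_settings.call_intercept
# is an assumption):
#
#     await api.person_settings.call_intercept.greeting(
#         person_id=person.person_id, content='greeting.wav')
#
# Passing a path lets the method open and close the file itself; passing an already
# open reader requires upload_as and the file should be opened in binary mode.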
class AsCallRecordingApi(AsPersonSettingsApiChild):
"""
API for person's call recording settings
"""
feature = 'callRecording'
async def read(self, *, person_id: str, org_id: str = None) -> CallRecordingSetting:
"""
Read Call Recording Settings for a Person
Retrieve a Person's Call Recording Settings
The Call Recording feature provides a hosted mechanism to record the calls placed and received on the Carrier
platform for replay and archival. This feature is helpful for quality assurance, security, training, and more.
This API requires a full or user administrator auth token with the spark-admin:people_write scope.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
return CallRecordingSetting.parse_obj(await self.get(ep, params=params))
async def configure(self, *, person_id: str, recording: CallRecordingSetting, org_id: str = None):
"""
Configure Call Recording Settings for a Person
Configure a Person's Call Recording Settings
The Call Recording feature provides a hosted mechanism to record the calls placed and received on the Carrier
platform for replay and archival. This feature is helpful for quality assurance, security, training, and more.
This API requires a full or user administrator auth token with the spark-admin:people_write scope.
:param person_id: Unique identifier for the person.
:type person_id: str
:param recording: the new recording settings
:type recording: CallRecordingSetting
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = json.loads(recording.json())
for key in ['serviceProvider', 'externalGroup', 'externalIdentifier']:
# remove attribute not present in update
data.pop(key, None)
await self.put(ep, params=params, json=data)
class AsCallWaitingApi(AsPersonSettingsApiChild):
"""
API for person's call waiting settings
"""
feature = 'callWaiting'
async def read(self, *, person_id: str, org_id: str = None) -> bool:
"""
Read Call Waiting Settings for a Person
Retrieve a Person's Call Waiting Settings
With this feature, a person can place an active call on hold and answer an incoming call. When enabled,
while you are on an active call, a tone alerts you of an incoming call and you can choose to answer or
ignore the call.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: call waiting setting
:rtype: bool
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = await self.get(ep, params=params)
return data['enabled']
async def configure(self, *, person_id: str, enabled: bool, org_id: str = None):
"""
Configure Call Waiting Settings for a Person
Configure a Person's Call Waiting Settings
With this feature, a person can place an active call on hold and answer an incoming call. When enabled,
while you are on an active call, a tone alerts you of an incoming call and you can choose to answer or ignore
the call.
This API requires a full or user administrator auth token with the spark-admin:people_write scope.
:param person_id: Unique identifier for the person.
:type person_id: str
:param enabled: true if the Call Waiting feature is enabled.
:type enabled: bool
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
        # body is already a serialized JSON string; pass it as data to avoid double JSON encoding
        data = json.dumps({'enabled': enabled})
        await self.put(ep, params=params, data=data)
class AsCallerIdApi(AsPersonSettingsApiChild):
"""
API for person's caller id settings
"""
feature = 'callerId'
async def read(self, *, person_id: str, org_id: str = None) -> CallerId:
"""
Retrieve a Person's Caller ID Settings
        Caller ID settings control how a person's information is displayed when making outgoing calls.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read
or a user auth token with spark:people_read scope can be used by a person to read their settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
return CallerId.parse_obj(await self.get(ep, params=params))
async def configure(self, *, person_id: str, org_id: str = None,
selected: CallerIdSelectedType = None,
custom_number: str = None,
first_name: str = None,
last_name: str = None,
external_caller_id_name_policy: ExternalCallerIdNamePolicy = None,
custom_external_caller_id_name: str = None):
"""
Configure a Person's Caller ID Settings
        Caller ID settings control how a person's information is displayed when making outgoing calls.
This API requires a full or user administrator auth token with the spark-admin:people_write scope or a user
auth token with spark:people_write scope can be used by a person to update their own settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:param selected: Which type of outgoing Caller ID will be used.
:type selected: CallerIdSelectedType
        :param custom_number: This value must be an assigned number from the person's location.
        :type custom_number: str
        :param first_name: Person's Caller ID first name. Characters of %, +, `, " and Unicode characters are not
            allowed.
        :type first_name: str
        :param last_name: Person's Caller ID last name. Characters of %, +, `, " and Unicode characters are not
            allowed.
:type last_name: str
:param external_caller_id_name_policy: Designates which type of External Caller Id Name policy is used.
Default is DIRECT_LINE.
:type external_caller_id_name_policy: ExternalCallerIdNamePolicy
:param custom_external_caller_id_name: Custom External Caller Name, which will be shown if External Caller Id
Name is OTHER.
:type custom_external_caller_id_name: str
"""
data = {to_camel(k): v for i, (k, v) in enumerate(locals().items())
if i > 2 and v is not None}
params = org_id and {'orgId': org_id} or None
ep = self.f_ep(person_id=person_id)
await self.put(ep, params=params, json=data)
class AsCallingBehaviorApi(AsPersonSettingsApiChild):
"""
API for person's calling behavior settings
"""
feature = 'callingBehavior'
async def read(self, *, person_id: str, org_id: str = None) -> CallingBehavior:
"""
Read Person's Calling Behavior
Retrieves the calling behavior and UC Manager Profile settings for the person which includes overall calling
behavior and calling UC Manager Profile ID.
Webex Calling Behavior controls which Webex telephony application is to be used.
An organization has an organization-wide default Calling Behavior that may be overridden for individual persons.
In addition, UC Manager Profiles are applicable if your organization uses Jabber in Team Messaging mode or
Calling in Webex Teams (Unified CM).
The UC Manager Profile also has an organization-wide default and may be overridden for individual persons.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: calling behavior setting
:rtype: CallingBehavior
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = await self.get(ep, params=params)
return CallingBehavior.parse_obj(data)
async def configure(self, *, person_id: str, settings: CallingBehavior,
org_id: str = None):
"""
Configure a Person's Calling Behavior
Modifies the calling behavior settings for the person which includes overall calling behavior and UC Manager
Profile ID.
Webex Calling Behavior controls which Webex telephony application is to be used.
An organization has an organization-wide default Calling Behavior that may be overridden for individual persons.
In addition, UC Manager Profiles are applicable if your organization uses Jabber in Team Messaging mode or
Calling in Webex Teams (Unified CM).
The UC Manager Profile also has an organization-wide default and may be overridden for individual persons.
This API requires a full or user administrator auth token with the spark-admin:people_write scope.
:param person_id: Unique identifier for the person.
:type person_id: str
:param settings: new settings
:type settings: CallingBehavior
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = settings.json(exclude_none=False, exclude={'effective_behavior_type'}, exclude_unset=True)
await self.put(ep, params=params, data=data)
class AsDndApi(AsPersonSettingsApiChild):
"""
API for person's DND settings
"""
feature = 'doNotDisturb'
async def read(self, *, person_id: str, org_id: str = None) -> DND:
"""
Read Do Not Disturb Settings for a Person
Retrieve a Person's Do Not Disturb Settings
When enabled, this feature will give all incoming calls the busy treatment. Optionally, you can enable a Ring
Reminder to play a brief tone on your desktop phone when you receive incoming calls.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read
or a user auth token with spark:people_read scope can be used by a person to read their settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners) may
use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return:
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
return DND.parse_obj(await self.get(ep, params=params))
async def configure(self, *, person_id: str, dnd_settings: DND, org_id: str = None):
"""
Configure Do Not Disturb Settings for a Person
Configure a Person's Do Not Disturb Settings
When enabled, this feature will give all incoming calls the busy treatment. Optionally, you can enable a Ring
Reminder to play a brief tone on your desktop phone when you receive incoming calls.
This API requires a full or user administrator auth token with the spark-admin:people_write scope or a user
auth token with spark:people_write scope can be used by a person to update their settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param dnd_settings: new setting to be applied
:type dnd_settings: DND
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
await self.put(ep, params=params, data=dnd_settings.json())
class AsExecAssistantApi(AsPersonSettingsApiChild):
"""
API for person's exec assistant settings
"""
feature = 'executiveAssistant'
async def read(self, *, person_id: str, org_id: str = None) -> ExecAssistantType:
"""
Retrieve Executive Assistant Settings for a Person
Retrieve the executive assistant settings for the specified personId.
People with the executive service enabled, can select from a pool of assistants who have been assigned the
executive assistant service and who can answer or place calls on their behalf. Executive assistants can set
the call forward destination and join or leave an executive's pool.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: exec assistant setting
:rtype: :class:`ExecAssistantType`
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = await self.get(ep, params=params)
h: _Helper = _Helper.parse_obj(data)
return h.exec_type
async def configure(self, *, person_id: str, setting: ExecAssistantType, org_id: str = None):
"""
Modify Executive Assistant Settings for a Person
Modify the executive assistant settings for the specified personId.
People with the executive service enabled, can select from a pool of assistants who have been assigned the
executive assistant service and who can answer or place calls on their behalf. Executive assistants can set
the call forward destination and join or leave an executive's pool.
This API requires a full or user administrator auth token with the spark-admin:people_write scope.
:param person_id: Unique identifier for the person.
:type person_id: str
:param setting: New exex assistant settings
:type setting: :class:`ExecAssistantType`
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
ep = self.f_ep(person_id=person_id)
h = _Helper(exec_type=setting)
params = org_id and {'orgId': org_id} or None
data = h.json()
await self.put(ep, params=params, data=data)
class AsHotelingApi(AsPersonSettingsApiChild):
"""
API for person's hoteling settings
"""
feature = 'hoteling'
async def read(self, *, person_id: str, org_id: str = None) -> bool:
"""
Read Hoteling Settings for a Person
Retrieve a person's hoteling settings.
As an administrator, you can enable hoteling for people so that their phone profile (phone number, features,
and calling plan) is temporarily loaded onto a shared (host) phone.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: hoteling setting
:rtype: bool
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = await self.get(ep, params=params)
return data['enabled']
async def configure(self, *, person_id: str, enabled: bool, org_id: str = None):
"""
Configure Hoteling Settings for a Person
Configure a person's hoteling settings.
As an administrator, you can enable hoteling for people so that their phone profile (phone number, features,
and calling plan) is temporarily loaded onto a shared (host) phone.
This API requires a full or user administrator auth token with the spark-admin:people_write scope.
:param person_id: Unique identifier for the person.
:type person_id: str
:param enabled: When true, allow this person to connect to a Hoteling host device.
:type enabled: bool
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
        # body is already a serialized JSON string; pass it as data to avoid double JSON encoding
        data = json.dumps({'enabled': enabled})
        await self.put(ep, params=params, data=data)
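# Usage sketch (illustrative only; the attribute path api.person_settings.hoteling
# is an assumption):
#
#     await api.person_settings.hoteling.configure(person_id=person.person_id,
#                                                  enabled=True)
#     assert await api.person_settings.hoteling.read(person_id=person.person_id)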
class AsIncomingPermissionsApi(AsPersonSettingsApiChild):
"""
API for person's incoming permissions settings
"""
feature = 'incomingPermission'
async def read(self, *, person_id: str, org_id: str = None) -> IncomingPermissions:
"""
Read Incoming Permission Settings for a Person
Retrieve a Person's Incoming Permission Settings
You can change the incoming calling permissions for a person if you want them to be different from your
organization's default.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: incoming permission settings for specific user
:rtype: :class:`IncomingPermissions`
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
return IncomingPermissions.parse_obj(await self.get(ep, params=params))
async def configure(self, *, person_id: str, settings: IncomingPermissions, org_id: str = None):
"""
        Configure a Person's Incoming Permission Settings
        You can change the incoming calling permissions for a person if you want them to be different from your
        organization's default.
This API requires a full or user administrator auth token with the spark-admin:people_write scope or a user
auth token with spark:people_write scope can be used by a person to update their own settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param settings: new setting to be applied
:type settings: :class:`IncomingPermissions`
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
await self.put(ep, params=params, data=settings.json())
class AsMonitoringApi(AsPersonSettingsApiChild):
"""
API for person's call monitoring settings
"""
feature = 'monitoring'
async def read(self, *, person_id: str, org_id: str = None) -> Monitoring:
"""
Retrieve a Person's Monitoring Settings
Retrieves the monitoring settings of the person, which shows specified people, places or, call park
extensions under monitoring. Monitors the line status which indicates if a person or place is on a call and
if a call has been parked on that extension.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: monitoring settings
:rtype: :class:`Monitoring`
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = await self.get(ep, params=params)
return Monitoring.parse_obj(data)
async def configure(self, *, person_id: str, settings: Monitoring, org_id: str = None):
"""
Configure Call Monitoring Settings for a Person
Configure a person's call monitoring settings, specifying the people, places, or call park extensions to be
monitored. Monitoring shows the line status of the monitored elements and whether a call has been parked on
that extension.
This API requires a full or user administrator auth token with the spark-admin:people_write scope.
:param person_id: Unique identifier for the person.
:type person_id: str
:param settings: settings for update
:type settings: :class:`Monitoring`
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = {}
if settings.call_park_notification_enabled is not None:
data['enableCallParkNotification'] = settings.call_park_notification_enabled
if settings.monitored_elements is not None:
id_list = []
for me in settings.monitored_elements:
if isinstance(me, str):
id_list.append(me)
else:
id_list.append(me.member and me.member.member_id or me.cpe and me.cpe.cpe_id)
data['monitoredElements'] = id_list
await self.put(ep, params=params, json=data)
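# Usage sketch (hypothetical api object; assumes api.person_settings.monitoring exposes this API).
# configure() flattens monitored_elements to plain IDs, so the Monitoring object returned by read()
# can be modified and written back directly:
#
#     settings = await api.person_settings.monitoring.read(person_id=person_id)
#     settings.call_park_notification_enabled = True
#     await api.person_settings.monitoring.configure(person_id=person_id, settings=settings)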
class AsNumbersApi(AsPersonSettingsApiChild):
"""
API for person's numbers
"""
feature = 'numbers'
# TODO: documentation defect:
# https://developer.webex.com/docs/api/v1/webex-calling-person-settings-with-additional-settings/get-a-list-of-phone-numbers-for-a-person
# says the URL is /v1/people/{personId}/numbers
# while it actually is /v1/people/{personId}/features/numbers
async def read(self, *, person_id: str, org_id: str = None) -> PersonNumbers:
"""
Get a List of Phone Numbers for a Person
Retrieve a person's phone numbers, including primary and alternate numbers.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read
or a user auth token with spark:people_read scope can be used by a person to read their settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners) may
use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: person's phone numbers
:rtype: PersonNumbers
"""
# ep = self.ep(path=f'{person_id}/numbers')
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
return PersonNumbers.parse_obj(await self.get(ep, params=params))
class AsAuthCodesApi(AsPersonSettingsApiChild):
"""
API for person's outgoing permission authorization codes
"""
feature = 'outgoingPermission/authorizationCodes'
async def read(self, person_id: str, org_id: str = None) -> list[AuthCode]:
"""
Retrieve Authorization codes for a Workspace.
Authorization codes are used to bypass permissions.
This API requires a full or read-only administrator auth token with a scope of spark-admin:workspaces_read or
a user auth token with spark:workspaces_read scope can be used to read workspace settings.
:param person_id: Unique identifier for the workspace.
:type person_id: str
:param org_id: Workspace is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: list of authorization codes
:rtype: list of :class:`AuthCode`
"""
url = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = await self.get(url, params=params)
return parse_obj_as(list[AuthCode], data['authorizationCodes'])
async def delete_codes(self, person_id: str, access_codes: list[Union[str, AuthCode]], org_id: str = None):
"""
Modify Authorization codes for a workspace.
Authorization codes are used to bypass permissions.
This API requires a full or user administrator auth token with the spark-admin:workspaces_write scope or a
user auth token with spark:workspaces_write scope can be used to update workspace settings.
:param person_id: Unique identifier for the workspace.
:type person_id: str
:param access_codes: authorization codes to remove
:type access_codes: list[str or :class:`AuthCode`]
:param org_id: Workspace is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
url = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
body = {'deleteCodes': [ac.code if isinstance(ac, AuthCode) else ac
for ac in access_codes]}
await self.put(url, params=params, json=body)
async def create(self, person_id: str, code: str, description: str, org_id: str = None):
"""
Modify Authorization codes for a workspace.
Authorization codes are used to bypass permissions.
This API requires a full or user administrator auth token with the spark-admin:workspaces_write scope or a
user auth token with spark:workspaces_write scope can be used to update workspace settings.
:param person_id: Unique identifier for the workspace.
:type person_id: str
:param code: Indicates an authorization code.
:type code: str
:param description: Indicates the description of the authorization code.
:type description: str
:param org_id: Workspace is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
url = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
body = {'code': code,
'description': description}
await self.post(url, params=params, json=body)
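# Usage sketch (hypothetical workspace_id and attribute path; the auth codes API is only instantiated
# for workspaces in AsOutgoingPermissionsApi below):
#
#     auth_codes = api.workspace_settings.permissions_out.auth_codes   # attribute path assumed
#     await auth_codes.create(workspace_id, code='4711', description='authorization code 4711')
#     codes = await auth_codes.read(workspace_id)
#     await auth_codes.delete_codes(workspace_id, access_codes=['4711'])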
@dataclass(init=False)
class AsTransferNumbersApi(AsPersonSettingsApiChild):
"""
API for outgoing permission auto transfer numbers
"""
feature = 'outgoingPermission/autoTransferNumbers'
async def read(self, person_id: str, org_id: str = None) -> AutoTransferNumbers:
"""
Retrieve Transfer Numbers Settings for a Workspace.
When calling a specific call type, this workspace will be automatically transferred to another number. The
person assigned the Auto Transfer Number can then approve the call and send it through or reject the call
type. You can add up to 3 numbers.
This API requires a full or read-only administrator auth token with a scope of spark-admin:workspaces_read or
a user auth token with spark:workspaces_read scope can be used to read workspace settings.
:param person_id: Unique identifier for the workspace.
:type person_id: str
:param org_id: Workspace is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: auto transfer numbers
:rtype: :class:`AutoTransferNumbers`
"""
url = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = await self.get(url, params=params)
return AutoTransferNumbers.parse_obj(data)
async def configure(self, person_id: str, settings: AutoTransferNumbers, org_id: str = None):
"""
Modify Transfer Numbers Settings for a Place.
When calling a specific call type, this workspace will be automatically transferred to another number.
The person assigned the Auto Transfer Number can then approve the call and send it through or reject the
call type. You can add up to 3 numbers.
This API requires a full or user administrator auth token with the spark-admin:workspaces_write scope or a
user auth token with spark:workspaces_write scope can be used to update workspace settings.
:param person_id: Unique identifier for the workspace.
:type person_id: str
:param settings: new auto transfer numbers
:type settings: AutoTransferNumbers
:param org_id: Workspace is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
url = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
body = settings.json()
await self.put(url, params=params, data=body)
class AsOutgoingPermissionsApi(AsPersonSettingsApiChild):
"""
API for person's outgoing permissions settings
also used for workspace and location outgoing permissions
"""
#: Only available for workspaces and locations
transfer_numbers: AsTransferNumbersApi
#: Only available for workspaces
auth_codes: AsAuthCodesApi
feature = 'outgoingPermission'
def __init__(self, *, session: AsRestSession, base: str = None,
workspaces: bool = False, locations: bool = False):
super().__init__(session=session, base=base, workspaces=workspaces, locations=locations)
if workspaces:
# auto transfer numbers API seems to only exist for workspaces
self.transfer_numbers = AsTransferNumbersApi(session=session,
base=base, workspaces=True)
self.auth_codes = AsAuthCodesApi(session=session, base=base, workspaces=True)
elif locations:
self.transfer_numbers = AsTransferNumbersApi(session=session,
base=base, locations=True)
self.auth_codes = None
else:
self.transfer_numbers = None
self.auth_codes = None
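# Note: transfer_numbers is only instantiated for the workspace and location variants of this API and
# auth_codes only for the workspace variant; for person settings both attributes stay None, so callers
# should check for None before using them.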
async def read(self, *, person_id: str, org_id: str = None) -> OutgoingPermissions:
"""
Retrieve a Person's Outgoing Calling Permissions Settings
You can change the outgoing calling permissions for a person if you want them to be different from your
organization's default.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: outgoing permission settings for specific user
:rtype: :class:`OutgoingPermissions`
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
return OutgoingPermissions.parse_obj(await self.get(ep, params=params))
async def configure(self, *, person_id: str, settings: OutgoingPermissions, org_id: str = None):
"""
Configure a Person's Outgoing Calling Permissions Settings
Turn on outgoing call settings for this person to override the calling settings from the location that are
used by default.
This API requires a full or user administrator auth token with the spark-admin:people_write scope or a user
auth token with spark:people_write scope can be used by a person to update their own settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param settings: new setting to be applied
:type settings: :class:`OutgoingPermissions`
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
await self.put(ep, params=params, data=settings.json())
class AsPersonForwardingApi(AsPersonSettingsApiChild):
"""
API for person's call forwarding settings
"""
feature = 'callForwarding'
async def read(self, *, person_id: str, org_id: str = None) -> PersonForwardingSetting:
"""
Retrieve a Person's Call Forwarding Settings
Three types of call forwarding are supported:
* Always - forwards all incoming calls to the destination you choose.
* When busy - forwards all incoming calls to the destination you chose while the phone is in use or the person
is busy.
* When no answer - forwarding only occurs when you are away or not answering your phone.
In addition, the Business Continuity feature will send calls to a destination of your choice if your phone is
not connected to the network for any reason, such as power outage, failed Internet connection, or wiring problem.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read
or a user auth token with spark:people_read scope can be used by a person to read their own settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: user's forwarding settings
:rtype: PersonForwardingSetting
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
return PersonForwardingSetting.parse_obj(await self.get(ep, params=params))
async def configure(self, *, person_id: str, forwarding: PersonForwardingSetting, org_id: str = None):
"""
Configure a Person's Call Forwarding Settings
Three types of call forwarding are supported:
* Always - forwards all incoming calls to the destination you choose.
* When busy - forwards all incoming calls to the destination you chose while the phone is in use or the person
is busy.
* When no answer - forwarding only occurs when you are away or not answering your phone.
In addition, the Business Continuity feature will send calls to a destination of your choice if your phone is
not connected to the network for any reason, such as power outage, failed Internet connection, or wiring problem.
This API requires a full or user administrator auth token with the spark-admin:people_write scope or a user
auth token with spark:people_write scope can be used by a person to update their settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param forwarding: new forwarding settings
:type forwarding: PersonForwardingSetting
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
# system_max_number_of_ring cannot be used in update
data = forwarding.json(
exclude={'call_forwarding':
{'no_answer':
{'system_max_number_of_rings': True}}})
await self.put(ep, params=params, data=data)
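# Usage sketch (hypothetical api object; assumes api.person_settings.forwarding exposes this API).
# A read-modify-write cycle works because configure() drops the read-only
# system_max_number_of_rings attribute before sending the update:
#
#     fwd = await api.person_settings.forwarding.read(person_id=person_id)
#     await api.person_settings.forwarding.configure(person_id=person_id, forwarding=fwd)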
class AsPrivacyApi(AsPersonSettingsApiChild):
"""
API for person's call monitoring settings
"""
feature = 'privacy'
async def read(self, *, person_id: str, org_id: str = None) -> Privacy:
"""
Get a person's Privacy Settings
Get a person's privacy settings for the specified person id.
The privacy feature enables the person's line to be monitored by others and determine if they can be reached
by Auto Attendant services.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: privacy settings
:rtype: :class:`Privacy`
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = await self.get(ep, params=params)
return Privacy.parse_obj(data)
async def configure(self, *, person_id: str, settings: Privacy, org_id: str = None):
"""
Configure a person's Privacy Settings
Configure a person's privacy settings for the specified person id.
The privacy feature enables the person's line to be monitored by others and determine if they can be reached
by Auto Attendant services.
This API requires a full or user administrator auth token with the spark-admin:people_write scope.
:param person_id: Unique identifier for the person.
:type person_id: str
:param settings: settings for update
:type settings: :class:`Privacy`
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = json.loads(settings.json())
if settings.monitoring_agents is not None:
id_list = []
for ma in settings.monitoring_agents:
if isinstance(ma, str):
id_list.append(ma)
else:
id_list.append(ma.agent_id)
data['monitoringAgents'] = id_list
await self.put(ep, params=params, json=data)
class AsPushToTalkApi(AsPersonSettingsApiChild):
"""
API for person's PTT settings
"""
feature = 'pushToTalk'
async def read(self, *, person_id: str, org_id: str = None) -> PushToTalkSettings:
"""
Read Push-to-Talk Settings for a Person
Retrieve a Person's Push-to-Talk Settings
Push-to-Talk allows the use of desk phones as either a one-way or two-way intercom that connects people in
different parts of your organization.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: PTT settings for specific user
:rtype: PushToTalkSettings
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
return PushToTalkSettings.parse_obj(await self.get(ep, params=params))
async def configure(self, *, person_id: str, settings: PushToTalkSettings, org_id: str = None):
"""
Configure Push-to-Talk Settings for a Person
Configure a Person's Push-to-Talk Settings
Push-to-Talk allows the use of desk phones as either a one-way or two-way intercom that connects people in
different parts of your organization.
This API requires a full or user administrator auth token with the spark-admin:people_write scope.
:param person_id: Unique identifier for the person.
:type person_id: str
:param settings: new setting to be applied. For members only the ID needs to be set
:type settings: PushToTalkSettings
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
if settings.members:
# for an update member is just a list of IDs
body_settings = settings.copy(deep=True)
members = [m.member_id if isinstance(m, MonitoredMember) else m
for m in settings.members]
body_settings.members = members
else:
body_settings = settings
body = body_settings.json(exclude_none=False,
exclude_unset=True)
await self.put(ep, params=params, data=body)
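# Note: for updates the members list is reduced to plain member IDs on a deep copy of the settings
# (see body_settings above), so MonitoredMember objects returned by read() can be passed back unchanged.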
class AsReceptionistApi(AsPersonSettingsApiChild):
"""
API for person's receptionist client settings
"""
feature = 'reception'
async def read(self, *, person_id: str, org_id: str = None) -> ReceptionistSettings:
"""
Read Receptionist Client Settings for a Person
Retrieve a Person's Receptionist Client Settings
To help support the needs of your front-office personnel, you can set up people or workspaces as telephone
attendants so that they can screen all incoming calls to certain numbers within your organization.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: receptionist client settings
:rtype: :class:`ReceptionistSettings`
"""
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = await self.get(ep, params=params)
return ReceptionistSettings.parse_obj(data)
async def configure(self, *, person_id: str, settings: ReceptionistSettings, org_id: str = None):
"""
Configure Receptionist Client Settings for a Person
Configure a person's receptionist client settings.
To help support the needs of your front-office personnel, you can set up people or workspaces as telephone
attendants so that they can screen all incoming calls to certain numbers within your organization.
This API requires a full or user administrator auth token with the spark-admin:people_write scope.
:param person_id: Unique identifier for the person.
:type person_id: str
:param settings: New receptionist client settings
:type settings: :class:`ReceptionistSettings`
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
if settings.enabled is None:
raise ValueError('enabled is a mandatory parameter for updates')
if settings.monitored_members and not settings.enabled:
raise ValueError('when setting monitored members, enabled has to be True')
ep = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
data = json.loads(settings.json())
if settings.monitored_members is not None:
id_list = []
for me in settings.monitored_members:
if isinstance(me, str):
id_list.append(me)
else:
id_list.append(me.member_id)
data['monitoredMembers'] = id_list
await self.put(ep, params=params, json=data)
class AsScheduleApi(AsApiChild, base='telephony/config/locations'):
"""
Schedules API
"""
def __init__(self, *, session: AsRestSession, base: ScheduleApiBase):
super().__init__(session=session, base=base.value)
if base == ScheduleApiBase.people:
self.after_id = '/features/schedules'
elif base == ScheduleApiBase.locations:
self.after_id = '/schedules'
else:
raise ValueError('unexpected value for base')
def _endpoint(self, *, obj_id: str, schedule_type: ScheduleTypeOrStr = None, schedule_id: str = None,
event_id: str = None):
"""
location specific feature endpoint like v1/telephony/config/locations/{obj_id}/schedules/.... or
v1/people/{obj_id}/features/schedules/....
:meta private:
:param obj_id: Unique identifier for the location or user
:type obj_id: str
:param schedule_type: type of schedule
:type schedule_type: ScheduleType
:param schedule_id: schedule id
:type schedule_id: str
:return: full endpoint
:rtype: str
"""
ep = self.ep(path=f'{obj_id}{self.after_id}')
if schedule_type is not None:
schedule_type = ScheduleType.type_or_str(schedule_type)
ep = f'{ep}/{schedule_type.value}/{schedule_id}'
if event_id is not None:
event_id = event_id and f'/{event_id}' or ''
ep = f'{ep}/events{event_id}'
return ep
def list_gen(self, *, obj_id: str, org_id: str = None, schedule_type: ScheduleType = None,
name: str = None, **params) -> AsyncGenerator[Schedule, None, None]:
"""
List of Schedules for a Person or location
List schedules for a person or location in an organization.
Schedules are used to support calling features and can be defined at the location or person level.
businessHours schedules allow you to apply specific call settings at different times of the day or week
by defining one or more events. holidays schedules define exceptions to normal business hours by defining one
or more events.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read.
:param obj_id: Return the list of schedules for this location or user
:type obj_id: str
:param org_id: List schedules for this organization.
:type org_id: str
:param schedule_type: Type of the schedule.
businessHours - Business hours schedule type.
holidays - Holidays schedule type.
:param name: Only return schedules with the matching name.
:return: yields schedules
"""
url = self._endpoint(obj_id=obj_id)
if schedule_type is not None:
params['type'] = schedule_type.value
if name is not None:
params['name'] = name
if org_id is not None:
params['orgId'] = org_id
# noinspection PyTypeChecker
return self.session.follow_pagination(url=url, model=Schedule, params=params or None)
async def list(self, *, obj_id: str, org_id: str = None, schedule_type: ScheduleType = None,
name: str = None, **params) -> List[Schedule]:
"""
List of Schedules for a Person or location
List schedules for a person or location in an organization.
Schedules are used to support calling features and can be defined at the location or person level.
businessHours schedules allow you to apply specific call settings at different times of the day or week
by defining one or more events. holidays schedules define exceptions to normal business hours by defining one
or more events.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read.
:param obj_id: Return the list of schedules for this location or user
:type obj_id: str
:param org_id: List schedules for this organization.
:type org_id: str
:param schedule_type: Type of the schedule.
businessHours - Business hours schedule type.
holidays - Holidays schedule type.
:param name: Only return schedules with the matching name.
:return: list of schedules
"""
url = self._endpoint(obj_id=obj_id)
if schedule_type is not None:
params['type'] = schedule_type.value
if name is not None:
params['name'] = name
if org_id is not None:
params['orgId'] = org_id
# noinspection PyTypeChecker
return [o async for o in self.session.follow_pagination(url=url, model=Schedule, params=params or None)]
async def details(self, *, obj_id: str, schedule_type: ScheduleTypeOrStr, schedule_id: str,
org_id: str = None) -> Schedule:
"""
Get Details for a Schedule
Retrieve Schedule details.
A time schedule establishes a set of times during the day or holidays in the year in which a feature, for
example auto attendants, can perform a specific action.
Retrieving schedule details requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
:param obj_id: Retrieve schedule details in this location or user
:type obj_id: str
:param schedule_type: Type of the schedule.
businessHours - Business hours schedule type.
holidays - Holidays schedule type.
:type schedule_type: ScheduleTypeOrStr
:param schedule_id: Retrieve the schedule with the matching ID.
:type schedule_id: str
:param org_id: Retrieve schedule details from this organization.
:type org_id: str
:return:
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(obj_id=obj_id, schedule_type=schedule_type, schedule_id=schedule_id)
data = await self.get(url, params=params)
result = Schedule.parse_obj(data)
return result
async def create(self, *, obj_id: str, schedule: Schedule, org_id: str = None) -> str:
"""
Create a Schedule
Create new Schedule for the given location.
A time schedule establishes a set of times during the day or holidays in the year in which a feature, for
example auto attendants, can perform a specific action.
Creating a schedule requires a full administrator auth token with a scope of spark-admin:telephony_config_write.
:param obj_id: Create the schedule for this location or user
:type obj_id: str
:param schedule: Schedule to be created
:type schedule: Schedule
:param org_id: Create the schedule for this organization.
:type org_id: str
:return: ID of the newly created schedule.
:rtype: str
"""
schedule_data = schedule.create_update()
params = org_id and {'orgId': org_id} or None
url = self._endpoint(obj_id=obj_id)
data = await self.post(url, data=schedule_data, params=params)
result = data['id']
return result
async def update(self, *, obj_id: str, schedule: Schedule, schedule_type: ScheduleTypeOrStr = None,
schedule_id: str = None, org_id: str = None) -> str:
"""
Update a Schedule
Update the designated Schedule.
A time schedule establishes a set of times during the day or holidays in the year in which a feature, for
example auto attendants, can perform a specific action.
Updating a schedule requires a full administrator auth token with a scope of spark-admin:telephony_config_write.
NOTE: The Schedule ID will change upon modification of the Schedule name
:param obj_id: Location or user for which this schedule exists
:type obj_id: str
:param schedule: data for the update
:type schedule: Schedule
:param schedule_type: Type of the schedule. Default: schedule_type from schedule
businessHours - Business hours schedule type.
holidays - Holidays schedule type.
:type schedule_type: ScheduleTypeOrStr
:param schedule_id: Update schedule with the matching ID. Default: schedule_id from schedule
:type schedule_id: str
:param org_id: Update schedule from this organization.
:type org_id: str
:return: schedule id
"""
schedule_type = schedule_type or schedule.schedule_type
schedule_id = schedule_id or schedule.schedule_id
schedule_data = schedule.create_update(update=True)
params = org_id and {'orgId': org_id} or None
url = self._endpoint(obj_id=obj_id, schedule_type=schedule_type, schedule_id=schedule_id)
data = await self.put(url, data=schedule_data, params=params)
return data['id']
async def delete_schedule(self, *, obj_id: str, schedule_type: ScheduleTypeOrStr, schedule_id: str,
org_id: str = None):
"""
Delete a Schedule
Delete the designated Schedule.
A time schedule establishes a set of times during the day or holidays in the year in which a feature, for
example auto attendants, can perform a specific action.
Deleting a schedule requires a full administrator auth token with a scope of spark-admin:telephony_config_write.
:param obj_id: Location or user from which to delete a schedule.
:type obj_id: str
:param schedule_type: Type of the schedule.
businessHours - Business hours schedule type.
holidays - Holidays schedule type.
:type schedule_type: ScheduleTypeOrStr
:param schedule_id: Delete the schedule with the matching ID.
:type schedule_id: str
:param org_id: Delete the schedule from this organization.
:type org_id: str
:return:
"""
url = self._endpoint(obj_id=obj_id, schedule_type=schedule_type, schedule_id=schedule_id)
params = org_id and {'orgId': org_id} or None
await self.delete(url, params=params)
async def event_details(self, *, obj_id: str, schedule_type: ScheduleTypeOrStr, schedule_id: str,
event_id: str, org_id: str = None) -> Event:
"""
Get Details for a Schedule Event
Retrieve Schedule Event details.
A time schedule establishes a set of times during the day or holidays in the year in which a feature, for
example auto attendants, can perform a specific action.
Retrieving schedule event details requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
:param obj_id: Retrieve schedule event details for this location or user
:type obj_id: str
:param schedule_type: Type of the schedule.
businessHours - Business hours schedule type.
holidays - Holidays schedule type.
:type schedule_type: ScheduleTypeOrStr
:param schedule_id: Retrieve schedule event details for schedule with the matching ID.
:type schedule_id: str
:param event_id: Retrieve the schedule event with the matching schedule event ID.
:type event_id: str
:param org_id: Retrieve schedule event details from this organization.
:type org_id: str
:return:
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(obj_id=obj_id, schedule_type=schedule_type, schedule_id=schedule_id,
event_id=event_id)
data = await self.get(url, params=params)
result = Event.parse_obj(data)
return result
async def event_create(self, *, obj_id: str, schedule_type: ScheduleTypeOrStr, schedule_id: str,
event: Event, org_id: str = None) -> str:
"""
Create a Schedule Event
Create new Event for the given location or user Schedule.
A time schedule establishes a set of times during the day or holidays in the year in which a feature, for
example auto attendants, can perform a specific action.
Creating a schedule event requires a full administrator auth token with a scope of
spark-admin:telephony_config_write.
:param obj_id: Create the schedule for this location.
:type obj_id: str
:param schedule_type: Type of the schedule.
businessHours - Business hours schedule type.
holidays - Holidays schedule type.
:type schedule_type: ScheduleTypeOrStr
:param schedule_id: Create event for a given schedule ID.
:type schedule_id: str
:param event: event data
:type event: Event
:param org_id: Retrieve schedule event details from this organization.
:type org_id: str
:return: event id
:rtype: str
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(obj_id=obj_id, schedule_type=schedule_type, schedule_id=schedule_id,
event_id='')
data = event.json(exclude={'event_id'})
data = await self.post(url, data=data, params=params)
return data['id']
async def event_update(self, *, obj_id: str, schedule_type: ScheduleTypeOrStr, schedule_id: str,
event: Event, event_id: str = None, org_id: str = None) -> str:
"""
Update a Schedule Event
Update the designated Schedule Event.
A time schedule establishes a set of times during the day or holidays in the year in which a feature, for
example auto attendants, can perform a specific action.
Updating a schedule event requires a full administrator auth token with a scope of
spark-admin:telephony_config_write.
NOTE: The Schedule Event ID will change upon modification of the Schedule event name.
:param obj_id: Location or user for which this schedule event exists.
:type obj_id: str
:param schedule_type: Type of the schedule.
businessHours - Business hours schedule type.
holidays - Holidays schedule type.
:type schedule_type: ScheduleTypeOrStr
:param schedule_id: Update schedule event with the matching schedule ID.
:type schedule_id: str
:param event: update settings
:type event: Event
:param event_id: Update the schedule event with the matching schedule event ID. Default: event id from event
:type event_id: str
:param org_id: Update schedule from this organization.
:type org_id: str
:return: event id; changed if name changed
"""
event_id = event_id or event.event_id
params = org_id and {'orgId': org_id} or None
url = self._endpoint(obj_id=obj_id, schedule_type=schedule_type, schedule_id=schedule_id,
event_id=event_id)
event_data = event.json(exclude={'event_id'})
data = await self.put(url, data=event_data, params=params)
return data['id']
async def event_delete(self, *, obj_id: str, schedule_type: ScheduleTypeOrStr, schedule_id: str,
event_id: str, org_id: str = None):
"""
Delete a Schedule Event
Delete the designated Schedule Event.
A time schedule establishes a set of times during the day or holidays in the year in which a feature, for
example auto attendants, can perform a specific action.
Deleting a schedule event requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param obj_id: Location or user from which to delete a schedule.
:type obj_id: str
:param schedule_type: Type of the schedule.
businessHours - Business hours schedule type.
holidays - Holidays schedule type.
:type schedule_type: ScheduleTypeOrStr
:param schedule_id: Delete schedule event with the matching schedule ID.
:type schedule_id: str
:param event_id: Delete the schedule event with the matching schedule event ID. Default: event id from event
:type event_id: str
:param org_id: Delete schedule from this organization.
:type org_id: str
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(obj_id=obj_id, schedule_type=schedule_type, schedule_id=schedule_id,
event_id=event_id)
await self.delete(url, params=params)
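# Usage sketch (hypothetical api object; assumes person schedules are exposed as
# api.person_settings.schedules):
#
#     schedules = await api.person_settings.schedules.list(obj_id=person_id)
#     for schedule in schedules:
#         details = await api.person_settings.schedules.details(
#             obj_id=person_id, schedule_type=schedule.schedule_type, schedule_id=schedule.schedule_id)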
class AsVoicemailApi(AsPersonSettingsApiChild):
"""
API for person's call voicemail settings
"""
feature = 'voicemail'
async def read(self, *, person_id: str, org_id: str = None) -> VoicemailSettings:
"""
Read Voicemail Settings for a Person
Retrieve a Person's Voicemail Settings
The voicemail feature transfers callers to voicemail based on your settings. You can then retrieve voice
messages via Voicemail. Voicemail audio is sent in Waveform Audio File Format, .wav, format.
Optionally, notifications can be sent to a mobile phone via text or email. These notifications will not include
the voicemail files.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read
or a user auth token with spark:people_read scope can be used by a person to read their settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: user's voicemail settings
:rtype: VoicemailSettings
"""
url = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
return VoicemailSettings.parse_obj(await self.get(url, params=params))
async def configure(self, *, person_id: str, settings: VoicemailSettings, org_id: str = None):
"""
Configure Voicemail Settings for a Person
Configure a person's Voicemail Settings
The voicemail feature transfers callers to voicemail based on your settings. You can then retrieve voice
messages via Voicemail. Voicemail audio is sent in Waveform Audio File Format, .wav, format.
Optionally, notifications can be sent to a mobile phone via text or email. These notifications will not
include the voicemail files.
This API requires a full or user administrator auth token with the spark-admin:people_write scope or a user
auth token with spark:people_write scope can be used by a person to update their settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param settings: new voicemail settings
:type settings: VoicemailSettings
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
# some settings can't be part of an update
data = settings.json(exclude={'send_busy_calls': {'greeting_uploaded': True},
'send_unanswered_calls': {'system_max_number_of_rings': True,
'greeting_uploaded': True},
'voice_message_forwarding_enabled': True
})
url = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
await self.put(url, data=data, params=params)
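# Note: configure() excludes attributes that are read-only on update (uploaded greeting flags,
# system_max_number_of_rings, voice_message_forwarding_enabled), so a VoicemailSettings object
# obtained from read() can be passed back unchanged.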
async def _configure_greeting(self, *, person_id: str, content: Union[BufferedReader, str],
upload_as: str = None, org_id: str = None,
greeting_key: str):
"""
handled greeting configuration
:param person_id: Unique identifier for the person.
:type person_id: str
:param content: the file to be uploaded, can be a path to a file or a buffered reader (opened file); if a
reader referring to an open file is passed then make sure to open the file as binary b/c otherwise the
content length might be calculated wrong
:type content: Union[BufferedReader, str]
:param upload_as: filename for the content. Only required if content is a reader; has to be a .wav file name.
:type upload_as: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:param greeting_key: 'uploadBusyGreeting' or 'uploadNoAnswerGreeting'
"""
if isinstance(content, str):
upload_as = os.path.basename(content)
content = open(content, mode='rb')
must_close = True
else:
must_close = False
# an existing reader
if not upload_as:
raise ValueError('upload_as is required')
encoder = MultipartEncoder(fields={'file': (upload_as, content, 'audio/wav')})
ep = self.f_ep(person_id=person_id, path=f'actions/{greeting_key}/invoke')
params = org_id and {'orgId': org_id} or None
try:
await self.post(ep, data=encoder, headers={'Content-Type': encoder.content_type},
params=params)
finally:
if must_close:
content.close()
async def configure_busy_greeting(self, *, person_id: str, content: Union[BufferedReader, str],
upload_as: str = None, org_id: str = None):
"""
Configure Busy Voicemail Greeting for a Person
Configure a Person's Busy Voicemail Greeting by uploading a Waveform Audio File Format, .wav, encoded audio
file.
Your request will need to be a multipart/form-data request rather than JSON, using the audio/wav Content-Type.
This API requires a full or user administrator auth token with the spark-admin:people_write scope or a user
auth token with spark:people_write scope can be used by a person to update their settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param content: the file to be uploaded, can be a path to a file or a buffered reader (opened file); if a
reader referring to an open file is passed then make sure to open the file as binary b/c otherwise the
content length might be calculated wrong
:type content: Union[BufferedReader, str]
:param upload_as: filename for the content. Only required if content is a reader; has to be a .wav file name.
:type upload_as: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
await self._configure_greeting(person_id=person_id, content=content, upload_as=upload_as, org_id=org_id,
greeting_key='uploadBusyGreeting')
async def configure_no_answer_greeting(self, person_id: str, content: Union[BufferedReader, str],
upload_as: str = None, org_id: str = None):
"""
Configure No Answer Voicemail Greeting for a Person
Configure a Person's No Answer Voicemail Greeting by uploading a Waveform Audio File Format, .wav, encoded
audio file.
Your request will need to be a multipart/form-data request rather than JSON, using the audio/wav Content-Type.
This API requires a full or user administrator auth token with the spark-admin:people_write scope or a user
auth token with spark:people_write scope can be used by a person to update their settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param content: the file to be uploaded, can be a path to a file or a buffered reader (opened file); if a
reader referring to an open file is passed then make sure to open the file as binary b/c otherwise the
content length might be calculated wrong
:type content: Union[BufferedReader, str]
:param upload_as: filename for the content. Only required if content is a reader; has to be a .wav file name.
:type upload_as: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
await self._configure_greeting(person_id=person_id, content=content, upload_as=upload_as, org_id=org_id,
greeting_key='uploadNoAnswerGreeting')
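# Usage sketch (hypothetical file path and api object; assumes api.person_settings.voicemail exposes
# this API). Passing a str makes the helper open the file in binary mode and derive the upload file
# name itself; when passing an already opened BufferedReader, upload_as is required:
#
#     await api.person_settings.voicemail.configure_busy_greeting(
#         person_id=person_id, content='greetings/busy.wav')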
@dataclass(init=False)
class AsPersonSettingsApi(AsApiChild, base='people'):
"""
API for all user level settings
"""
appservices: AsAppServicesApi
barge: AsBargeApi
dnd: AsDndApi
call_intercept: AsCallInterceptApi
call_recording: AsCallRecordingApi
call_waiting: AsCallWaitingApi
caller_id: AsCallerIdApi
calling_behavior: AsCallingBehaviorApi
exec_assistant: AsExecAssistantApi
forwarding: AsPersonForwardingApi
hoteling: AsHotelingApi
monitoring: AsMonitoringApi
numbers: AsNumbersApi
permissions_in: AsIncomingPermissionsApi
permissions_out: AsOutgoingPermissionsApi
privacy: AsPrivacyApi
push_to_talk: AsPushToTalkApi
receptionist: AsReceptionistApi
schedules: AsScheduleApi
voicemail: AsVoicemailApi
def __init__(self, session: AsRestSession):
super().__init__(session=session)
self.appservices = AsAppServicesApi(session=session)
self.barge = AsBargeApi(session=session)
self.dnd = AsDndApi(session=session)
self.call_intercept = AsCallInterceptApi(session=session)
self.call_recording = AsCallRecordingApi(session=session)
self.call_waiting = AsCallWaitingApi(session=session)
self.calling_behavior = AsCallingBehaviorApi(session=session)
self.caller_id = AsCallerIdApi(session=session)
self.exec_assistant = AsExecAssistantApi(session=session)
self.forwarding = AsPersonForwardingApi(session=session)
self.hoteling = AsHotelingApi(session=session)
self.monitoring = AsMonitoringApi(session=session)
self.numbers = AsNumbersApi(session=session)
self.permissions_in = AsIncomingPermissionsApi(session=session)
self.permissions_out = AsOutgoingPermissionsApi(session=session)
self.privacy = AsPrivacyApi(session=session)
self.push_to_talk = AsPushToTalkApi(session=session)
self.receptionist = AsReceptionistApi(session=session)
self.schedules = AsScheduleApi(session=session, base=ScheduleApiBase.people)
self.voicemail = AsVoicemailApi(session=session)
async def reset_vm_pin(self, person_id: str, org_id: str = None):
"""
Reset Voicemail PIN
Reset a voicemail PIN for a person.
The voicemail feature transfers callers to voicemail based on your settings. You can then retrieve voice
messages via Voicemail. A voicemail PIN is used to retrieve your voicemail messages.
This API requires a full or user administrator auth token with the spark-admin:people_write scope.
:param person_id: Unique identifier for the person.
:param org_id: Person is in this organization. Only admin users of another organization (such as partners) may
use this parameter as the default is the same organization as the token used to access API.
"""
params = org_id and {'orgId': org_id} or None
url = self.ep(f'{person_id}/features/voicemail/actions/resetPin/invoke')
await self.post(url, params=params)
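# Usage sketch (hypothetical; assumes an existing AsRestSession named session and that AsDndApi.read
# follows the same (person_id, org_id) signature as the other sub-APIs):
#
#     person_settings = AsPersonSettingsApi(session=session)
#     dnd = await person_settings.dnd.read(person_id=person_id)
#     await person_settings.reset_vm_pin(person_id)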
class AsAccessCodesApi(AsApiChild, base='telephony/config/locations'):
"""
Access codes API
"""
def _endpoint(self, *, location_id: str, path: str = None) -> str:
"""
location specific feature endpoint like
/v1/telephony/config/locations/{locationId}/outgoingPermission/accessCodes}
:meta private:
:param location_id: Unique identifier for the location.
:type location_id: str
:param path: additional path
:type: path: str
:return: full endpoint
:rtype: str
"""
path = path and f'/{path}' or ''
ep = self.session.ep(f'telephony/config/locations/{location_id}/outgoingPermission/accessCodes{path}')
return ep
async def read(self, *, location_id: str, org_id: str = None) -> list[AuthCode]:
"""
Get Location Access Code
Retrieve access codes details for a customer location.
Use Access Codes to bypass the set permissions for all persons/workspaces at this location.
Retrieving access codes details requires a full, user or read-only administrator auth token with a scope of
spark-admin:telephony_config_read.
:param location_id: Retrieve access codes details for this location.
:type location_id: str
:param org_id: Retrieve access codes details for a customer location in this organization
:type org_id: str
:return: list of :class:`wxc_sdk.common.AuthCode`
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id)
data = await self.get(url, params=params)
return parse_obj_as(list[AuthCode], data['accessCodes'])
async def create(self, *, location_id: str, access_codes: list[AuthCode], org_id: str = None):
"""
Create Access Codes for a Location
Add new access codes for the given location.
Use Access Codes to bypass the set permissions for all persons/workspaces at this location.
:param location_id: Add new access code for this location.
:type location_id: str
:param access_codes: Access code details
:type access_codes: list of :class:`wxc_sdk.common.AuthCode`
:param org_id: Add new access code for this organization.
:type org_id: str
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id)
body = {'accessCodes': [json.loads(ac.json()) for ac in access_codes]}
await self.post(url, json=body, params=params)
async def delete_codes(self, *, location_id: str, access_codes: list[Union[str, AuthCode]],
org_id: str = None):
"""
Delete Access Code Location
Deletes the access code details for a particular location for a customer.
Use Access Codes to bypass the set permissions for all persons/workspaces at this location.
Modifying the access code location details requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param location_id: Deletes the access code details for this location.
:type location_id: str
:param access_codes: access codes to delete
:type access_codes: list of :class:`wxc_sdk.common.AuthCode` or str
:param org_id: Delete access codes from this organization.
:type org_id: str
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id)
body = {'deleteCodes': [ac.code if isinstance(ac, AuthCode) else ac
for ac in access_codes]}
await self.put(url, json=body, params=params)
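# Usage sketch (hypothetical location_id; assumes AuthCode accepts code and description keyword
# arguments as suggested by the create()/delete_codes() bodies above):
#
#     await access_codes_api.create(location_id=location_id,
#                                   access_codes=[AuthCode(code='1234', description='lobby phone')])
#     codes = await access_codes_api.read(location_id=location_id)
#     await access_codes_api.delete_codes(location_id=location_id, access_codes=['1234'])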
class AsForwardingApi:
"""
API for forwarding settings on call queues, hunt groups, and auto attendants
"""
def __init__(self, session: AsRestSession, feature_selector: FeatureSelector):
self._session = session
self._feature = feature_selector
def _endpoint(self, location_id: str, feature_id: str, path: str = None):
"""
:meta private:
:param location_id:
:param feature_id:
:param path:
:return:
"""
path = path and f'/{path}' or ''
ep = self._session.ep(path=f'telephony/config/locations/{location_id}/{self._feature.value}/'
f'{feature_id}/callForwarding{path}')
return ep
async def settings(self, location_id: str, feature_id: str, org_id: str = None) -> CallForwarding:
"""
Retrieve Call Forwarding settings for the designated feature including the list of call
forwarding rules.
:param location_id: Location in which this feature exists.
:type location_id: str
:param feature_id: Retrieve the call forwarding settings for this entity
:type feature_id: str
:param org_id: Retrieve call forwarding settings from this organization.
:type org_id: str
:return: call forwarding settings
:rtype: class:`CallForwarding`
"""
params = org_id and {'orgId': org_id} or {}
url = self._endpoint(location_id=location_id, feature_id=feature_id)
data = await self._session.rest_get(url=url, params=params)
result = CallForwarding.parse_obj(data['callForwarding'])
return result
async def update(self, location_id: str, feature_id: str,
forwarding: CallForwarding, org_id: str = None):
"""
Update Call Forwarding Settings for a feature
Update Call Forwarding settings for the designated feature.
Updating call forwarding settings for a feature requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param location_id: Location in which this feature exists.
:type location_id: str
:param feature_id: Update call forwarding settings for this feature.
:type feature_id: str
:param forwarding: Forwarding settings
:type forwarding: :class:`CallForwarding`
:param org_id: Update feature forwarding settings from this organization.
:type org_id: str
"""
params = org_id and {'orgId': org_id} or {}
url = self._endpoint(location_id=location_id, feature_id=feature_id)
body = forwarding.dict()
# update only has 'id' and 'enabled' in rules
# determine names of ForwardingRule fields to remove
to_pop = [field
for field in ForwardingRule.__fields__
if field not in {'id', 'enabled'}]
for rule in body['rules']:
rule: Dict
for field in to_pop:
rule.pop(field, None)
body = {'callForwarding': body}
await self._session.rest_put(url=url, json=body, params=params)
async def create_call_forwarding_rule(self, location_id: str, feature_id: str,
forwarding_rule: ForwardingRuleDetails, org_id: str = None) -> str:
"""
Create a Selective Call Forwarding Rule for a feature
A selective call forwarding rule for a feature allows calls to be forwarded or not forwarded
to the designated number, based on the defined criteria.
Note that the list of existing call forward rules is available in the feature's call
forwarding settings.
:param location_id: Location in which the call queue exists.
:type location_id: str
:param feature_id: Create the rule for this feature
:type feature_id: str
:param forwarding_rule: details of rule to be created
:type forwarding_rule: :class:`ForwardingRuleDetails`
:param org_id: Create the feature forwarding rule for this organization.
:type org_id: str
:return: forwarding rule id
:rtype: str
"""
url = self._endpoint(location_id=location_id, feature_id=feature_id, path='selectiveRules')
body = forwarding_rule.dict()
params = org_id and {'orgId': org_id} or None
data = await self._session.rest_post(url=url, json=body, params=params)
return data['id']
async def call_forwarding_rule(self, location_id: str, feature_id: str, rule_id: str,
org_id: str = None) -> ForwardingRuleDetails:
"""
Retrieve a Selective Call Forwarding Rule's settings for the designated Call Queue.
A selective call forwarding rule for feature allows calls to be forwarded or not forwarded
to the designated number, based on the defined criteria.
Note that the list of existing call forward rules is available in the feature's call
forwarding settings.
:param location_id: Location in which the feature exists.
:type location_id: str
:param feature_id: Retrieve setting for a rule for this feature.
:type feature_id: str
:param rule_id: feature rule you are retrieving settings for.
:type rule_id: str
:param org_id: Retrieve feature forwarding settings from this organization.
:type org_id: str
:return: call forwarding rule details
:rtype: :class:`ForwardingRuleDetails`
"""
url = self._endpoint(location_id=location_id, feature_id=feature_id, path=f'selectiveRules/{rule_id}')
params = org_id and {'orgId': org_id} or None
data = await self._session.rest_get(url=url, params=params)
result = ForwardingRuleDetails.parse_obj(data)
return result
async def update_call_forwarding_rule(self, location_id: str, feature_id: str, rule_id: str,
forwarding_rule: ForwardingRuleDetails, org_id: str = None) -> str:
"""
Update a Selective Call Forwarding Rule's settings for the designated feature.
A selective call forwarding rule for feature allows calls to be forwarded or not forwarded
to the designated number, based on the defined criteria.
Note that the list of existing call forward rules is available in the feature's call
forwarding settings.
NOTE: The Call Forwarding Rule ID will change upon modification of the Call Forwarding Rule name.
:param location_id: Location in which the feature exists.
:type location_id: str
:param feature_id: Update settings for a rule for this feature.
:type feature_id: str
:param rule_id: feature you are updating settings for.
:type rule_id: str
:param forwarding_rule: forwarding rule details for update
:type forwarding_rule: :class:`ForwardingRuleDetails`
:param org_id: Update feature rule settings for this organization.
:type org_id: str
:return: new call forwarding rule id
:rtype: str
"""
url = self._endpoint(location_id=location_id, feature_id=feature_id, path=f'selectiveRules/{rule_id}')
params = org_id and {'orgId': org_id} or None
body = forwarding_rule.dict()
data = await self._session.rest_put(url=url, params=params, json=body)
return data['id']
async def delete_call_forwarding_rule(self, location_id: str, feature_id: str, rule_id: str, org_id: str = None):
"""
Delete a Selective Call Forwarding Rule for the designated feature.
A selective call forwarding rule for a feature allows calls to be forwarded or not forwarded
to the designated number, based on the defined criteria.
Note that the list of existing call forward rules is available in the feature's call
forwarding settings.
:param location_id: Location in which the feature exists.
:type location_id: str
:param feature_id: Delete the rule for this feature.
:type feature_id: str
:param rule_id: forwarding rule to be deleted.
:type rule_id: str
:param org_id: Delete the feature forwarding rule from this organization.
:type org_id: str
"""
url = self._endpoint(location_id=location_id, feature_id=feature_id, path=f'selectiveRules/{rule_id}')
params = org_id and {'orgId': org_id} or None
await self._session.delete(url=url, params=params)
class AsAutoAttendantApi(AsApiChild, base='telephony/config/autoAttendants'):
"""
Auto attendant API
"""
forwarding: AsForwardingApi
def __init__(self, session: AsRestSession):
super().__init__(session=session)
self.forwarding = AsForwardingApi(session=session, feature_selector=FeatureSelector.auto_attendants)
def _endpoint(self, *, location_id: str = None, auto_attendant_id: str = None) -> str:
"""
auto attendant specific feature endpoint like /v1/telephony/config/locations/{locationId}/autoAttendants/{
auto_attendant_id}
:meta private:
:param location_id: Unique identifier for the location.
:type location_id: str
:param auto_attendant_id: auto attendant id
:type auto_attendant_id: str
:return: full endpoint
:rtype: str
"""
if location_id is None:
return self.session.ep('telephony/config/autoAttendants')
else:
ep = self.session.ep(f'telephony/config/locations/{location_id}/autoAttendants')
if auto_attendant_id:
ep = f'{ep}/{auto_attendant_id}'
return ep
def list_gen(self, *, org_id: str = None, location_id: str = None, name: str = None,
phone_number: str = None, **params) -> AsyncGenerator[AutoAttendant, None, None]:
"""
Read the List of Auto Attendants
List all Auto Attendants for the organization.
Auto attendants play customized prompts and provide callers with menu options for routing their calls through
your system.
Retrieving this list requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
:param org_id: List auto attendants for this organization.
:type org_id: str
:param location_id: Return the list of auto attendants for this location.
:type location_id: str
:param name: Only return auto attendants with the matching name.
:type name: str
:param phone_number: Only return auto attendants with the matching phone number.
:type phone_number: str
:return: yields :class:`AutoAttendant` objects
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and v is not None and k != 'params')
url = self._endpoint()
# noinspection PyTypeChecker
return self.session.follow_pagination(url=url, model=AutoAttendant, params=params, item_key='autoAttendants')
async def list(self, *, org_id: str = None, location_id: str = None, name: str = None,
phone_number: str = None, **params) -> List[AutoAttendant]:
"""
Read the List of Auto Attendants
List all Auto Attendants for the organization.
Auto attendants play customized prompts and provide callers with menu options for routing their calls through
your system.
Retrieving this list requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
:param org_id: List auto attendants for this organization.
:type org_id: str
:param location_id: Return the list of auto attendants for this location.
:type location_id: str
:param name: Only return auto attendants with the matching name.
:type name: str
:param phone_number: Only return auto attendants with the matching phone number.
:type phone_number: str
:return: list of :class:`AutoAttendant` objects
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and v is not None and k != 'params')
url = self._endpoint()
# noinspection PyTypeChecker
return [o async for o in self.session.follow_pagination(url=url, model=AutoAttendant, params=params, item_key='autoAttendants')]
async def by_name(self, *, name: str, location_id: str = None, org_id: str = None) -> Optional[AutoAttendant]:
"""
Get auto attendant info by name
:param name: name of the auto attendant to look for
:param location_id: only consider auto attendants in this location
:param org_id: only consider auto attendants in this organization
:return: auto attendant info or None if no auto attendant with the given name exists
"""
return next((hg for hg in await self.list(name=name, location_id=location_id, org_id=org_id)
if hg.name == name), None)
async def details(self, *, location_id: str, auto_attendant_id: str, org_id: str = None) -> AutoAttendant:
"""
Get Details for an Auto Attendant
Retrieve an Auto Attendant details.
Auto attendants play customized prompts and provide callers with menu options for routing their calls through
your system.
Retrieving an auto attendant details requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
:param location_id: Retrieve an auto attendant details in this location.
:type location_id: str
:param auto_attendant_id: Retrieve the auto attendant with the matching ID.
:type auto_attendant_id: str
:param org_id: Retrieve auto attendant details from this organization.
:type org_id: str
:return: auto attendant details
:rtype: :class:`AutoAttendant`
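Example (a minimal sketch; the ``api.telephony.auto_attendant`` attribute path and the ``id``
attribute on the list result are assumptions for illustration):

.. code-block::

    aa = await api.telephony.auto_attendant.by_name(name='main menu', location_id=...)
    details = await api.telephony.auto_attendant.details(location_id=...,
                                                         auto_attendant_id=aa.id)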
"""
url = self._endpoint(location_id=location_id, auto_attendant_id=auto_attendant_id)
params = org_id and {'orgId': org_id} or None
return AutoAttendant.parse_obj(await self.get(url, params=params))
async def create(self, *, location_id: str, settings: AutoAttendant, org_id: str = None) -> str:
"""
Create an Auto Attendant
Create new Auto Attendant for the given location.
Auto attendants play customized prompts and provide callers with menu options for routing their calls through
your system.
Creating an auto attendant requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param location_id: Create the auto attendant for this location.
:type location_id: str
:param settings: auto attendant settings for new auto attendant
:type settings: :class:`AutoAttendant`
:param org_id: Create the auto attendant for this organization.
:type org_id: str
:return: ID of the newly created auto attendant.
:rtype: str
"""
data = settings.create_or_update()
url = self._endpoint(location_id=location_id)
params = org_id and {'orgId': org_id} or None
data = await self.post(url, data=data, params=params)
return data['id']
async def update(self, *, location_id: str, auto_attendant_id: str, settings: AutoAttendant, org_id: str = None):
"""
Update an Auto Attendant
Update the designated Auto Attendant.
Auto attendants play customized prompts and provide callers with menu options for routing their calls through
your system.
Updating an auto attendant requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param location_id: Location in which this auto attendant exists.
:type location_id: str
:param auto_attendant_id: Update an auto attendant with the matching ID.
:type auto_attendant_id: str
:param settings: auto attendant settings for the update
:type settings: :class:`AutoAttendant`
:param org_id: Create the auto attendant for this organization.
:type org_id: str
"""
data = settings.create_or_update()
url = self._endpoint(location_id=location_id, auto_attendant_id=auto_attendant_id)
params = org_id and {'orgId': org_id} or None
await self.put(url, data=data, params=params)
async def delete_auto_attendant(self, *, location_id: str, auto_attendant_id: str, org_id: str = None):
"""
Delete an Auto Attendant
Delete the designated Auto Attendant.
Auto attendants play customized prompts and provide callers with menu options for routing their calls through
your system.
Deleting an auto attendant requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param location_id: Location from which to delete an auto attendant.
:type location_id: str
:param auto_attendant_id: Delete the auto attendant with the matching ID.
:type auto_attendant_id: str
:param org_id: Delete the auto attendant from this organization.
:type org_id: str
"""
url = self._endpoint(location_id=location_id, auto_attendant_id=auto_attendant_id)
params = org_id and {'orgId': org_id} or None
await self.delete(url, params=params)
class AsCallParkApi(AsApiChild, base='telephony/config/callParks'):
"""
Call Park API
"""
def _endpoint(self, *, location_id: str, callpark_id: str = None, path: str = None) -> str:
"""
call park specific feature endpoint like /v1/telephony/config/locations/{locationId}/callParks/{callpark_id}
:meta private:
:param location_id: Unique identifier for the location.
:type location_id: str
:param callpark_id: call park id
:type callpark_id: str
:param path: addtl. path
:type path: str
:return: full endpoint
:rtype: str
"""
call_park_id = callpark_id and f'/{callpark_id}' or ''
path = path and f'/{path}' or ''
ep = self.session.ep(f'telephony/config/locations/{location_id}/callParks{call_park_id}{path}')
return ep
def list_gen(self, location_id: str, order: Literal['ASC', 'DSC'] = None, name: str = None,
org_id: str = None, **params) -> AsyncGenerator[CallPark, None, None]:
"""
Read the List of Call Parks
List all Call Parks for the organization.
Call Park allows call recipients to place a call on hold so that it can be retrieved from another device.
Retrieving this list requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
NOTE: The Call Park ID will change upon modification of the Call Park name.
:param location_id: Return the list of call parks for this location.
:type location_id: str
:param order: Sort the list of call parks by name, either ASC or DSC. Default is ASC.
:type order: str
:param name: Return the list of call parks that contains the given name. The maximum length is 80.
:type name: str
:param org_id: List call parks for this organization.
:param params: dict of additional parameters passed directly to endpoint
:type params: dict
:type org_id: str
:return: yields :class:`CallPark` objects
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i > 1 and v is not None and k != 'params')
url = self._endpoint(location_id=location_id)
# noinspection PyTypeChecker
return self.session.follow_pagination(url=url, model=CallPark, params=params, item_key='callParks')
async def list(self, location_id: str, order: Literal['ASC', 'DSC'] = None, name: str = None,
org_id: str = None, **params) -> List[CallPark]:
"""
Read the List of Call Parks
List all Call Parks for the organization.
Call Park allows call recipients to place a call on hold so that it can be retrieved from another device.
Retrieving this list requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
NOTE: The Call Park ID will change upon modification of the Call Park name.
:param location_id: Return the list of call parks for this location.
:type location_id: str
:param order: Sort the list of call parks by name, either ASC or DSC. Default is ASC.
:type order: str
:param name: Return the list of call parks that contains the given name. The maximum length is 80.
:type name: str
:param org_id: List call parks for this organization.
:param params: dict of additional parameters passed directly to endpoint
:type params: dict
:type org_id: str
:return: list of :class:`CallPark` objects
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i > 1 and v is not None and k != 'params')
url = self._endpoint(location_id=location_id)
# noinspection PyTypeChecker
return [o async for o in self.session.follow_pagination(url=url, model=CallPark, params=params, item_key='callParks')]
async def create(self, location_id: str, settings: CallPark, org_id: str = None) -> str:
"""
Create a Call Park
Create new Call Parks for the given location.
Call Park allows call recipients to place a call on hold so that it can be retrieved from another device.
Creating a call park requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
NOTE: The Call Park ID will change upon modification of the Call Park name.
:param location_id: Create the call park for this location.
:type location_id: str
:param settings: settings for new call park
:type settings: :class:`CallPark`
:param org_id: Create the call park for this organization.
:return: ID of the newly created call park.
:rtype: str
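Example (a minimal sketch; the ``api.telephony.callpark`` attribute path is an assumption for
illustration, and the call park settings are assumed to be prepared elsewhere):

.. code-block::

    settings = CallPark(...)
    new_id = await api.telephony.callpark.create(location_id=...,
                                                 settings=settings)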
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id)
body = settings.create_or_update()
data = await self.post(url, data=body, params=params)
return data['id']
async def delete_callpark(self, location_id: str, callpark_id: str, org_id: str = None):
"""
Delete a Call Park
Delete the designated Call Park.
Call Park allows call recipients to place a call on hold so that it can be retrieved from another device.
Deleting a call park requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
NOTE: The Call Park ID will change upon modification of the Call Park name.
:param location_id: Location from which to delete a call park.
:type location_id: str
:param callpark_id: Delete the call park with the matching ID.
:type callpark_id: str
:param org_id: Delete the call park from this organization.
:type org_id: str
"""
url = self._endpoint(location_id=location_id, callpark_id=callpark_id)
params = org_id and {'orgId': org_id} or None
await self.delete(url, params=params)
async def details(self, location_id: str, callpark_id: str, org_id: str = None) -> CallPark:
"""
Get Details for a Call Park
Retrieve Call Park details.
Call Park allows call recipients to place a call on hold so that it can be retrieved from another device.
Retrieving call park details requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
NOTE: The Call Park ID will change upon modification of the Call Park name.
:param location_id: Retrieve settings for a call park in this location.
:type location_id: str
:param callpark_id: Retrieve settings for a call park with the matching ID.
:type callpark_id: str
:param org_id: Retrieve call park settings from this organization.
:type org_id: str
:return: call park info
:rtype: :class:`CallPark`
"""
url = self._endpoint(location_id=location_id, callpark_id=callpark_id)
params = org_id and {'orgId': org_id} or None
return CallPark.parse_obj(await self.get(url, params=params))
async def update(self, location_id: str, callpark_id: str, settings: CallPark, org_id: str = None) -> str:
"""
Update a Call Park
Update the designated Call Park.
Call Park allows call recipients to place a call on hold so that it can be retrieved from another device.
Updating a call park requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
NOTE: The Call Park ID will change upon modification of the Call Park name.
:param location_id: Location in which this call park exists.
:type location_id: str
:param callpark_id: Update settings for a call park with the matching ID.
:type callpark_id: str
:param settings: updates
:type settings: :class:`CallPark`
:param org_id: Update call park settings from this organization.
:type org_id: str
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id, callpark_id=callpark_id)
body = settings.create_or_update()
data = await self.put(url, data=body, params=params)
return data['id']
def available_agents_gen(self, location_id: str, call_park_name: str = None, name: str = None, phone_number: str = None,
order: str = None, org_id: str = None) -> AsyncGenerator[PersonPlaceAgent, None, None]:
"""
Get available agents from Call Parks
Retrieve available agents from call parks for a given location.
Call Park allows call recipients to place a call on hold so that it can be retrieved from another device.
Retrieving available agents from call parks requires a full or read-only administrator auth token with
a scope of spark-admin:telephony_config_read.
:param location_id: Return the available agents for this location.
:type location_id: str
:param call_park_name: Only return available agents from call parks with the matching name.
:type call_park_name: str
:param name: Only return available agents with the matching name.
:type name: str
:param phone_number: Only return available agents with the matching primary number.
:type phone_number: str
:param order: Order the available agents according to the designated fields. Up to three vertical bar (|)
separated sort order fields may be specified. Available sort fields: fname, lname, number and extension.
The maximum supported sort order value is 3.
:type order: str
:param org_id: Return the available agents for this organization.
:type org_id: str
:return: yields :class:`PersonPlaceAgent` objects
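Example (a minimal sketch; the ``api.telephony.callpark`` attribute path and the
``display_name`` attribute are assumptions for illustration):

.. code-block::

    async for agent in api.telephony.callpark.available_agents_gen(location_id=...):
        print(agent.display_name)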
"""
params = {to_camel(k): v for i, (k, v) in enumerate(locals().items())
if i > 1 and v is not None}
url = self._endpoint(location_id=location_id, path='availableUsers')
# noinspection PyTypeChecker
return self.session.follow_pagination(url=url, model=PersonPlaceAgent, params=params, item_key='agents')
async def available_agents(self, location_id: str, call_park_name: str = None, name: str = None, phone_number: str = None,
order: str = None, org_id: str = None) -> List[PersonPlaceAgent]:
"""
Get available agents from Call Parks
Retrieve available agents from call parks for a given location.
Call Park allows call recipients to place a call on hold so that it can be retrieved from another device.
Retrieving available agents from call parks requires a full or read-only administrator auth token with
a scope of spark-admin:telephony_config_read.
:param location_id: Return the available agents for this location.
:type location_id: str
:param call_park_name: Only return available agents from call parks with the matching name.
:type call_park_name: str
:param name: Only return available agents with the matching name.
:type name: str
:param phone_number: Only return available agents with the matching primary number.
:type phone_number: str
:param order: Order the available agents according to the designated fields. Up to three vertical bar (|)
separated sort order fields may be specified. Available sort fields: fname, lname, number and extension.
The maximum supported sort order value is 3.
:type order: str
:param org_id: Return the available agents for this organization.
:type org_id: str
:return: list of :class:`PersonPlaceAgent` objects
"""
params = {to_camel(k): v for i, (k, v) in enumerate(locals().items())
if i > 1 and v is not None}
url = self._endpoint(location_id=location_id, path='availableUsers')
# noinspection PyTypeChecker
return [o async for o in self.session.follow_pagination(url=url, model=PersonPlaceAgent, params=params, item_key='agents')]
def available_recalls_gen(self, location_id: str, name: str = None, order: str = None,
org_id: str = None) -> AsyncGenerator[AvailableRecallHuntGroup, None, None]:
"""
Get available recall hunt groups from Call Parks
Retrieve available recall hunt groups from call parks for a given location.
Call Park allows call recipients to place a call on hold so that it can be retrieved from another device.
Retrieving available recall hunt groups from call parks requires a full or read-only administrator auth
token with a scope of spark-admin:telephony_config_read.
:param location_id: Return the available recall hunt groups for this location.
:type location_id: str
:param name: Only return available recall hunt groups with the matching name.
:type name: str
:param order: Order the available recall hunt groups according to the designated fields. Available sort
fields: lname.
:type order: str
:param org_id: Return the available recall hunt groups for this organization.
:type org_id: str
:return: yields :class:`AvailableRecallHuntGroup` objects
"""
params = {to_camel(k): v for i, (k, v) in enumerate(locals().items())
if i > 1 and v is not None}
url = self._endpoint(location_id=location_id, path='availableRecallHuntGroups')
# noinspection PyTypeChecker
return self.session.follow_pagination(url=url, model=AvailableRecallHuntGroup,
params=params, item_key='huntGroups')
async def available_recalls(self, location_id: str, name: str = None, order: str = None,
org_id: str = None) -> List[AvailableRecallHuntGroup]:
"""
Get available recall hunt groups from Call Parks
Retrieve available recall hunt groups from call parks for a given location.
Call Park allows call recipients to place a call on hold so that it can be retrieved from another device.
Retrieving available recall hunt groups from call parks requires a full or read-only administrator auth
token with a scope of spark-admin:telephony_config_read.
:param location_id: Return the available recall hunt groups for this location.
:type location_id: str
:param name: Only return available recall hunt groups with the matching name.
:type name: str
:param order: Order the available recall hunt groups according to the designated fields. Available sort
fields: lname.
:type order: str
:param org_id: Return the available recall hunt groups for this organization.
:type org_id: str
:return: list of :class:`AvailableRecallHuntGroup` objects
"""
params = {to_camel(k): v for i, (k, v) in enumerate(locals().items())
if i > 1 and v is not None}
url = self._endpoint(location_id=location_id, path='availableRecallHuntGroups')
# noinspection PyTypeChecker
return [o async for o in self.session.follow_pagination(url=url, model=AvailableRecallHuntGroup,
params=params, item_key='huntGroups')]
async def call_park_settings(self, location_id: str, org_id: str = None) -> LocationCallParkSettings:
"""
Get Call Park Settings
Retrieve Call Park Settings from call parks for a given location.
Call Park allows call recipients to place a call on hold so that it can be retrieved from another device.
Retrieving settings from call parks requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
:param location_id: Return the call park settings for this location.
:type location_id: str
:param org_id: Return the call park settings for this organization.
:type org_id: str
:return: call park settings for the location
:rtype: :class:`LocationCallParkSettings`
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id, path='settings')
return LocationCallParkSettings.parse_obj(await self.get(url, params=params))
async def update_call_park_settings(self, location_id: str, settings: LocationCallParkSettings, org_id: str = None):
"""
Update Call Park settings
Update Call Park settings for the designated location.
Call Park allows call recipients to place a call on hold so that it can be retrieved from another device.
Updating call park settings requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param location_id: Location for which call park settings will be updated.
:type location_id: str
:param settings: update settings
:type settings: :class:`LocationCallParkSettings`
:param org_id: Update call park settings from this organization.
:type org_id: str
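Example (a minimal read-modify-write sketch; the ``api.telephony.callpark`` attribute path is
an assumption for illustration):

.. code-block::

    settings = await api.telephony.callpark.call_park_settings(location_id=...)
    # ... modify attributes of `settings` as needed ...
    await api.telephony.callpark.update_call_park_settings(location_id=...,
                                                           settings=settings)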
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id, path='settings')
body = settings.update()
await self.put(url, params=params, data=body)
class AsCallPickupApi(AsApiChild, base='telephony/config/callPickups'):
"""
Call Pickup API
"""
def _endpoint(self, *, location_id: str, pickup_id: str = None, path: str = None) -> str:
"""
call park specific feature endpoint like /v1/telephony/config/locations/{locationId}/callPickups/{pickup_id}
:meta private:
:param location_id: Unique identifier for the location.
:type location_id: str
:param pickup_id: call pickup id
:type pickup_id: str
:param path: addtl. path
:type path: str
:return: full endpoint
:rtype: str
"""
pickup_id = pickup_id and f'/{pickup_id}' or ''
path = path and f'/{path}' or ''
ep = self.session.ep(f'telephony/config/locations/{location_id}/callPickups{pickup_id}{path}')
return ep
def list_gen(self, location_id: str, order: Literal['ASC', 'DSC'] = None, name: str = None,
org_id: str = None, **params) -> AsyncGenerator[CallPickup, None, None]:
"""
Read the List of Call Pickups
List all Call Pickups for the organization.
Call Pickup enables a user(agent) to answer any ringing line within their pickup group.
Retrieving this list requires a full, user, or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
NOTE: The Call Pickup ID will change upon modification of the Call Pickup name.
:param location_id: Return the list of call pickups for this location.
:type location_id: str
:param order: Sort the list of call pickups by name, either ASC or DSC. Default is ASC.
:type order: str
:param name: Return the list of call pickups that contains the given name. The maximum length is 80.
:type name: str
:param org_id: List call pickups for this organization.
:type org_id: str
:return: yields :class:`CallPickup` objects
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i > 1 and v is not None and k != 'params')
url = self._endpoint(location_id=location_id)
# noinspection PyTypeChecker
return self.session.follow_pagination(url=url, model=CallPickup, params=params, item_key='callPickups')
async def list(self, location_id: str, order: Literal['ASC', 'DSC'] = None, name: str = None,
org_id: str = None, **params) -> List[CallPickup]:
"""
Read the List of Call Pickups
List all Call Pickups for the organization.
Call Pickup enables a user(agent) to answer any ringing line within their pickup group.
Retrieving this list requires a full, user, or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
NOTE: The Call Pickup ID will change upon modification of the Call Pickup name.
:param location_id: Return the list of call pickups for this location.
:type location_id: str
:param order: Sort the list of call pickups by name, either ASC or DSC. Default is ASC.
:type order: str
:param name: Return the list of call pickups that contains the given name. The maximum length is 80.
:type name: str
:param org_id: List call pickups for this organization.
:type org_id: str
:return: list of :class:`CallPickup` objects
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i > 1 and v is not None and k != 'params')
url = self._endpoint(location_id=location_id)
# noinspection PyTypeChecker
return [o async for o in self.session.follow_pagination(url=url, model=CallPickup, params=params, item_key='callPickups')]
async def create(self, location_id: str, settings: CallPickup, org_id: str = None) -> str:
"""
Create a Call Pickup
Create new Call Pickups for the given location.
Call Pickup enables a user(agent) to answer any ringing line within their pickup group.
Creating a call pickup requires a full or user administrator auth token with a scope
of spark-admin:telephony_config_write.
NOTE: The Call Pickup ID will change upon modification of the Call Pickup name.
:param location_id: Create the call pickup for this location.
:type location_id: str
:param settings: settings for new call pickup
:type settings: :class:`CallPickup`
:param org_id: Create the call pickup for this organization.
:return: ID of the newly created call pickup.
:rtype: str
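Example (a minimal sketch; the ``api.telephony.pickup`` attribute path is an assumption for
illustration, and the call pickup settings are assumed to be prepared elsewhere):

.. code-block::

    pickup_id = await api.telephony.pickup.create(location_id=...,
                                                  settings=CallPickup(...))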
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id)
body = settings.create_or_update()
data = await self.post(url, data=body, params=params)
return data['id']
async def delete_pickup(self, location_id: str, pickup_id: str, org_id: str = None):
"""
Delete a Call Pickup
Delete the designated Call Pickup.
Call Pickup enables a user(agent) to answer any ringing line within their pickup group.
Deleting a call pickup requires a full or user administrator auth token with a scope
of spark-admin:telephony_config_write.
NOTE: The Call Pickup ID will change upon modification of the Call Pickup name.
:param location_id: Location from which to delete a call pickup.
:type location_id: str
:param pickup_id: Delete the call pickup with the matching ID.
:type pickup_id: str
:param org_id: Delete the call pickup from this organization.
:type org_id: str
"""
url = self._endpoint(location_id=location_id, pickup_id=pickup_id)
params = org_id and {'orgId': org_id} or None
await self.delete(url, params=params)
async def details(self, location_id: str, pickup_id: str, org_id: str = None) -> CallPickup:
"""
Get Details for a Call Pickup
Retrieve Call Pickup details.
Call Pickup enables a user(agent) to answer any ringing line within their pickup group.
Retrieving call pickup details requires a full, user, or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
NOTE: The Call Pickup ID will change upon modification of the Call Pickup name.
:param location_id: Retrieve settings for a call pickup in this location.
:type location_id: str
:param pickup_id: Retrieve settings for a call pickup with the matching ID.
:type pickup_id: str
:param org_id: Retrieve call pickup settings from this organization.
:type org_id: str
:return: call pickup info
:rtype: :class:`CallPickup`
"""
url = self._endpoint(location_id=location_id, pickup_id=pickup_id)
params = org_id and {'orgId': org_id} or None
return CallPickup.parse_obj(await self.get(url, params=params))
async def update(self, location_id: str, pickup_id: str, settings: CallPickup, org_id: str = None) -> str:
"""
Update a Call Pickup
Update the designated Call Pickup.
Call Pickup enables a user(agent) to answer any ringing line within their pickup group.
Updating a call pickup requires a full or user administrator auth token with a scope
of spark-admin:telephony_config_write.
NOTE: The Call Pickup ID will change upon modification of the Call Pickup name.
:param location_id: Location in which this call pickup exists.
:type location_id: str
:param pickup_id: Update settings for a call pickup with the matching ID.
:type pickup_id: str
:param settings: updates
:type settings: :class:`CallPickup`
:param org_id: Update call pickup settings from this organization.
:type org_id: str
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id, pickup_id=pickup_id)
body = settings.create_or_update()
data = await self.put(url, data=body, params=params)
return data['id']
def available_agents_gen(self, location_id: str, call_pickup_name: str = None, name: str = None,
phone_number: str = None, order: str = None,
org_id: str = None) -> AsyncGenerator[PersonPlaceAgent, None, None]:
"""
Get available agents from Call Pickups
Retrieve available agents from call pickups for a given location.
Call Pickup enables a user(agent) to answer any ringing line within their pickup group.
Retrieving available agents from call pickups requires a full, user, or read-only administrator auth token
with a scope of spark-admin:telephony_config_read.
:param location_id: Return the available agents for this location.
:type location_id: str
:param call_pickup_name: Only return available agents from call pickups with the matching name.
:type call_pickup_name: str
:param name: Only return available agents with the matching name.
:type name: str
:param phone_number: Only return available agents with the matching primary number.
:type phone_number: str
:param order: Order the available agents according to the designated fields. Up to three vertical bar (|)
separated sort order fields may be specified. Available sort fields: fname, lname, number and extension.
The maximum supported sort order value is 3.
:type order: str
:param org_id: Return the available agents for this organization.
:type org_id: str
:return: yields :class:`PersonPlaceAgent` objects
"""
params = {to_camel(k): v for i, (k, v) in enumerate(locals().items())
if i > 1 and v is not None}
url = self._endpoint(location_id=location_id, path='availableUsers')
# noinspection PyTypeChecker
return self.session.follow_pagination(url=url, model=PersonPlaceAgent, params=params, item_key='agents')
async def available_agents(self, location_id: str, call_pickup_name: str = None, name: str = None,
phone_number: str = None, order: str = None,
org_id: str = None) -> List[PersonPlaceAgent]:
"""
Get available agents from Call Pickups
Retrieve available agents from call pickups for a given location.
Call Pickup enables a user(agent) to answer any ringing line within their pickup group.
Retrieving available agents from call pickups requires a full, user, or read-only administrator auth token
with a scope of spark-admin:telephony_config_read.
:param location_id: Return the available agents for this location.
:type location_id: str
:param call_pickup_name: Only return available agents from call pickups with the matching name.
:type call_pickup_name: str
:param name: Only return available agents with the matching name.
:type name: str
:param phone_number: Only return available agents with the matching primary number.
:type phone_number: str
:param order: Order the available agents according to the designated fields. Up to three vertical bar (|)
separated sort order fields may be specified. Available sort fields: fname, lname, number and extension.
The maximum supported sort order value is 3.
:type order: str
:param org_id: Return the available agents for this organization.
:type org_id: str
:return: list of :class:`PersonPlaceAgent` objects
"""
params = {to_camel(k): v for i, (k, v) in enumerate(locals().items())
if i > 1 and v is not None}
url = self._endpoint(location_id=location_id, path='availableUsers')
# noinspection PyTypeChecker
return [o async for o in self.session.follow_pagination(url=url, model=PersonPlaceAgent, params=params, item_key='agents')]
class AsAnnouncementApi:
"""
API for call queue Announcements
"""
def __init__(self, *, session: AsRestSession):
self._session = session
def _endpoint(self, location_id: str, queue_id: str, path: str = None):
"""
:meta private:
:param location_id:
:param queue_id:
:param path:
:return:
"""
path = path and f'/{path}' or ''
ep = self._session.ep(path=f'telephony/config/locations/{location_id}/queues/{queue_id}/announcements{path}')
return ep
def list_gen(self, *, location_id: str, queue_id: str, org_id: str = None) -> AsyncGenerator[Announcement, None, None]:
"""
List announcement files for a call queue
:param location_id: location of the call queue
:param queue_id: list announcement files for this call queue
:param org_id: list announcement files for a queue in this organization
:return: yields :class:`Announcement` objects
"""
url = self._endpoint(location_id=location_id, queue_id=queue_id)
params = org_id and {'orgId': org_id} or dict()
# noinspection PyTypeChecker
return self._session.follow_pagination(url=url, model=Announcement, params=params)
async def list(self, *, location_id: str, queue_id: str, org_id: str = None) -> List[Announcement]:
"""
List announcement files for a call queue
:param location_id: location of the call queue
:param queue_id: list announcement files for this call queue
:param org_id: list announcement files for a queue in this organization
:return: list of :class:`Announcement` objects
"""
url = self._endpoint(location_id=location_id, queue_id=queue_id)
params = org_id and {'orgId': org_id} or dict()
# noinspection PyTypeChecker
return [o async for o in self._session.follow_pagination(url=url, model=Announcement, params=params)]
async def delete_announcement(self, *, location_id: str, queue_id: str, file_name: str, org_id: str = None):
"""
Delete an announcement file of a call queue
:param location_id: location of the call queue
:type location_id: str
:param queue_id: call queue to delete an announcement file from
:type queue_id: str
:param file_name: name of the announcement file to delete
:type file_name: str
:param org_id: delete an announcement file of a queue in this organization
"""
url = self._endpoint(location_id=location_id, queue_id=queue_id, path=file_name)
params = org_id and {'orgId': org_id} or None
await self._session.delete(url=url, params=params)
class AsCallQueueApi:
"""
Call Queue API
"""
forwarding: AsForwardingApi
announcement: AsAnnouncementApi
def __init__(self, session: AsRestSession):
self._session = session
self.forwarding = AsForwardingApi(session=session, feature_selector=FeatureSelector.queues)
self.announcement = AsAnnouncementApi(session=session)
def _endpoint(self, *, location_id: str = None, queue_id: str = None):
"""
Helper to get URL for API endpoints
:meta private:
:param location_id:
:param queue_id:
:return:
"""
if location_id is None:
return self._session.ep('telephony/config/queues')
else:
ep = self._session.ep(f'telephony/config/locations/{location_id}/queues')
if queue_id:
ep = f'{ep}/{queue_id}'
return ep
@staticmethod
def update_or_create(*, queue: CallQueue) -> str:
"""
Get JSON for update or create
:param queue:
:return:
:meta private:
"""
return queue.json(
exclude={'id': True,
'location_name': True,
'location_id': True,
'toll_free_number': True,
'language': True,
'agents':
{'__all__':
{'first_name': True,
'last_name': True,
'user_type': True,
'extension': True,
'phone_number': True}},
'alternate_number_settings':
{'alternate_numbers':
{'__all__':
{'toll_free_number': True}}},
'queue_settings':
{'overflow':
{'is_transfer_number_set': True}}})
def list_gen(self, *, location_id: str = None, name: str = None,
org_id: str = None, **params) -> AsyncGenerator[CallQueue, None, None]:
"""
Read the List of Call Queues
List all Call Queues for the organization.
Call queues temporarily hold calls in the cloud when all agents, which can be users or agents, assigned to
receive calls from the queue are unavailable. Queued calls are routed to an available agent when not on an
active call. Each call queue is assigned a Lead Number, which is a telephone number outside callers can dial
to reach users assigned to the call queue. Call queues are also assigned an internal extension, which can be
dialed internally to reach users assigned to the call queue.
Retrieving this list requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
:param location_id: Only return call queues with matching location ID.
:type location_id: str
:param name: Only return call queues with the matching name.
:type name: str
:param org_id: List call queues for this organization
:type org_id: str
:param params: dict of additional parameters passed directly to endpoint
:type params: dict
:return: yields :class:`CallQueue` objects
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and v is not None and k != 'params')
url = self._endpoint()
# noinspection PyTypeChecker
return self._session.follow_pagination(url=url, model=CallQueue, params=params)
async def list(self, *, location_id: str = None, name: str = None,
org_id: str = None, **params) -> List[CallQueue]:
"""
Read the List of Call Queues
List all Call Queues for the organization.
Call queues temporarily hold calls in the cloud when all agents, which can be users or agents, assigned to
receive calls from the queue are unavailable. Queued calls are routed to an available agent when not on an
active call. Each call queue is assigned a Lead Number, which is a telephone number outside callers can dial
to reach users assigned to the call queue. Call queues are also assigned an internal extension, which can be
dialed internally to reach users assigned to the call queue.
Retrieving this list requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
:param location_id: Only return call queues with matching location ID.
:type location_id: str
:param name: Only return call queues with the matching name.
:type name: str
:param org_id: List call queues for this organization
:type org_id: str
:param params: dict of additional parameters passed directly to endpoint
:type params: dict
:return: list of :class:`CallQueue` objects
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and v is not None and k != 'params')
url = self._endpoint()
# noinspection PyTypeChecker
return [o async for o in self._session.follow_pagination(url=url, model=CallQueue, params=params)]
async def by_name(self, *, name: str, location_id: str = None, org_id: str = None) -> Optional[CallQueue]:
"""
Get queue info by name
:param name: name of the call queue to look for
:param location_id: only consider queues in this location
:param org_id: only consider queues in this organization
:return: call queue info or None if no queue with the given name exists
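Example (a minimal sketch; the ``api.telephony.callqueue`` attribute path is an assumption
for illustration):

.. code-block::

    queue = await api.telephony.callqueue.by_name(name='Support', location_id=...)
    if queue is None:
        ...  # no call queue with that name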
"""
return next((cq for cq in await self.list(location_id=location_id, org_id=org_id, name=name)
if cq.name == name), None)
async def create(self, *, location_id: str, settings: CallQueue, org_id: str = None) -> str:
"""
Create a Call Queue
Create new Call Queues for the given location.
Call queues temporarily hold calls in the cloud when all agents, which can be users or agents, assigned to
receive calls from the queue are unavailable. Queued calls are routed to an available agent when not on an
active call. Each call queue is assigned a Lead Number, which is a telephone number outside callers can dial
to reach users assigned to the call queue. Call queues are also assigned an internal extension, which can be
dialed internally to reach users assigned to the call queue.
Creating a call queue requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param location_id: Create the call queue for this location.
:type location_id: str
:param settings: parameters for queue creation.
:type settings: :class:`CallQueue`
:param org_id: Create the call queue for this organization.
:type org_id: str
:return: queue id
:rtype: str
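Example (a minimal sketch; the ``api.telephony.callqueue`` attribute path is an assumption
for illustration, and the queue settings are assumed to be prepared elsewhere):

.. code-block::

    settings = CallQueue(...)
    queue_id = await api.telephony.callqueue.create(location_id=...,
                                                    settings=settings)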
"""
params = org_id and {'orgId': org_id} or {}
cq_data = settings.create_or_update()
url = self._endpoint(location_id=location_id)
data = await self._session.rest_post(url, data=cq_data, params=params)
return data['id']
async def delete_queue(self, *, location_id: str, queue_id: str, org_id: str = None):
"""
Delete a Call Queue
Delete the designated Call Queue.
Call queues temporarily hold calls in the cloud when all agents, which can be users or agents, assigned to
receive calls from the queue are unavailable. Queued calls are routed to an available agent when not on an
active call. Each call queue is assigned a Lead Number, which is a telephone number outside callers can dial
to reach users assigned to the call queue. Call queues are also assigned an internal extension, which can be
dialed internally to reach users assigned to the call queue.
Deleting a call queue requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param location_id: Location from which to delete a call queue.
:type location_id: str
:param queue_id: Delete the call queue with the matching ID.
:type queue_id: str
:param org_id: Delete the call queue from this organization.
:type org_id: str
"""
url = self._endpoint(location_id=location_id, queue_id=queue_id)
params = org_id and {'orgId': org_id} or None
await self._session.rest_delete(url=url, params=params)
async def details(self, *, location_id: str, queue_id: str, org_id: str = None) -> CallQueue:
"""
Get Details for a Call Queue
Retrieve Call Queue details.
Call queues temporarily hold calls in the cloud when all agents, which can be users or agents, assigned to
receive calls from the queue are unavailable. Queued calls are routed to an available agent when not on an
active call. Each call queue is assigned a Lead Number, which is a telephone number outside callers can dial
to reach users assigned to the call queue. Call queues are also assigned an internal extension, which can be
dialed internally to reach users assigned to the call queue.
Retrieving call queue details requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
:param location_id: Retrieve settings for a call queue in this location
:type location_id: str
:param queue_id: Retrieve settings for the call queue with this identifier.
:type queue_id: str
:param org_id: Retrieve call queue settings from this organization.
:type org_id: str
:return: call queue details
:rtype: :class:`CallQueue`
"""
url = self._endpoint(location_id=location_id, queue_id=queue_id)
params = {'orgId': org_id} if org_id is not None else {}
data = await self._session.rest_get(url, params=params)
result = CallQueue.parse_obj(data)
# noinspection PyTypeChecker
return result
async def update(self, *, location_id: str, queue_id: str, update: CallQueue, org_id: str = None):
"""
Update a Call Queue
Update the designated Call Queue.
Call queues temporarily hold calls in the cloud when all agents, which can be users or agents, assigned to
receive calls from the queue are unavailable. Queued calls are routed to an available agent when not on an
active call. Each call queue is assigned a Lead Number, which is a telephone number outside callers can dial
to reach users assigned to the call queue. Call queues are also assigned an internal extension, which can be
dialed internally to reach users assigned to the call queue.
Updating a call queue requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
Examples:
.. code-block::
api = WebexSimpleApi()
# shortcut
cq = api.telephony.callqueue
# disable a call queue
update = CallQueue(enabled=False)
cq.update(location_id=...,
queue_id=...,
update=update)
# set the call routing policy to SIMULTANEOUS
update = CallQueue(call_policies=CallPolicies(policy=Policy.simultaneous))
cq.update(location_id=...,
queue_id=...,
update=update)
# don't bounce calls after the set number of rings.
update = CallQueue(
call_policies=CallPolicies(
call_bounce=CallBounce(
enabled=False)))
cq.update(location_id=...,
queue_id=...,
update=update)
Alternatively you can also read call queue details, update them in place and then call update().
.. code-block::
details = cq.details(location_id=...,
queue_id=...)
details.call_policies.call_bounce.agent_unavailable_enabled=False
details.call_policies.call_bounce.on_hold_enabled=False
cq.update(location_id=...,
queue_id=...,
update=details)
:param location_id: Location in which this call queue exists.
:type location_id: str
:param queue_id: Update setting for the call queue with the matching ID.
:type queue_id: str
:param update: updates
:type update: :class:`CallQueue`
:param org_id: Update call queue settings from this organization.
"""
params = org_id and {'orgId': org_id} or None
cq_data = update.create_or_update()
url = self._endpoint(location_id=location_id, queue_id=queue_id)
await self._session.rest_put(url=url, data=cq_data, params=params)
class AsCallparkExtensionApi(AsApiChild, base='telephony/config/callParkExtensions'):
"""
Call Park Extension API
"""
def _endpoint(self, *, location_id: str = None, cpe_id: str = None) -> str:
"""
call park extension specific feature endpoint like
/v1/telephony/config/locations/{locationId}/callParkExtensions/{cpe_id}
:meta private:
:param location_id: Unique identifier for the location.
:type location_id: str
:param cpe_id: call park extension id
:type cpe_id: str
:return: full endpoint
:rtype: str
"""
if location_id is None:
return self.session.ep('telephony/config/callParkExtensions')
else:
ep = self.session.ep(f'telephony/config/locations/{location_id}/callParkExtensions')
if cpe_id:
ep = f'{ep}/{cpe_id}'
return ep
def list_gen(self, org_id: str = None, extension: str = None, name: str = None, location_id: str = None,
location_name: str = None,
order: str = None, **params) -> AsyncGenerator[CallParkExtension, None, None]:
"""
Read the List of Call Park Extensions
List all Call Park Extensions for the organization.
The Call Park service, enabled for all users by default, allows a user to park a call against an available
user's extension or to a Call Park Extension. Call Park Extensions are extensions defined within the Call
Park service for holding parked calls.
Retrieving this list requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
:param org_id: List call park extensions for this organization.
:type org_id: str
:param extension: Only return call park extensions with the matching extension.
:type extension: str
:param name: Only return call park extensions with the matching name.
:type name: str
:param location_id: Only return call park extensions with matching location ID.
:type location_id: str
:param location_name: Only return call park extensions with matching location name.
:type location_name: str
:param order: Order the available agents according to the designated fields. Available sort fields: groupName,
callParkExtension, callParkExtensionName, callParkExtensionExternalId.
:type order: str
:param params: additional parameters
:return: yields :class:`wxc_sdk.common.CallParkExtension` instances
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and v is not None and k != 'params')
url = self._endpoint()
# noinspection PyTypeChecker
return self.session.follow_pagination(url=url, model=CallParkExtension, params=params)
async def list(self, org_id: str = None, extension: str = None, name: str = None, location_id: str = None,
location_name: str = None,
order: str = None, **params) -> List[CallParkExtension]:
"""
Read the List of Call Park Extensions
List all Call Park Extensions for the organization.
The Call Park service, enabled for all users by default, allows a user to park a call against an available
user's extension or to a Call Park Extension. Call Park Extensions are extensions defined within the Call
Park service for holding parked calls.
Retrieving this list requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
:param org_id: List call park extensions for this organization.
:type org_id: str
:param extension: Only return call park extensions with the matching extension.
:type extension: str
:param name: Only return call park extensions with the matching name.
:type name: str
:param location_id: Only return call park extensions with matching location ID.
:type location_id: str
:param location_name: Only return call park extensions with matching location name.
:type location_name: str
:param order: Order the available agents according to the designated fields. Available sort fields: groupName,
callParkExtension, callParkExtensionName, callParkExtensionExternalId.
:type order: str
:param params: additional parameters
:return: list of :class:`wxc_sdk.common.CallParkExtension` instances
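Example (a minimal sketch; the ``api.telephony.callpark_extension`` attribute path is an
assumption for illustration):

.. code-block::

    cpe_list = await api.telephony.callpark_extension.list(location_id=...)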
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and v is not None and k != 'params')
url = self._endpoint()
# noinspection PyTypeChecker
return [o async for o in self.session.follow_pagination(url=url, model=CallParkExtension, params=params)]
async def details(self, location_id: str, cpe_id: str, org_id: str = None) -> CallParkExtension:
"""
Get Details for a Call Park Extension
Retrieve Call Park Extension details.
The Call Park service, enabled for all users by default, allows a user to park a call against an available
user's extension or to a Call Park Extension. Call Park Extensions are extensions defined within the Call
Park service for holding parked calls.
Retrieving call park extension details requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
:param location_id: Retrieve details for a call park extension in this location.
:type location_id: str
:param cpe_id: Retrieve details for a call park extension with the matching ID.
:type cpe_id: str
:param org_id: Retrieve call park extension details from this organization
:type org_id: str
:return: call park extension details
:rtype: :class:`wxc_sdk.common.CallParkExtension` instance (only name and extension are set)
"""
url = self._endpoint(location_id=location_id, cpe_id=cpe_id)
params = org_id and {'orgId': org_id} or {}
data = await self.get(url, params=params)
return CallParkExtension.parse_obj(data)
class AsCallsApi(AsApiChild, base='telephony/calls'):
async def dial(self, destination: str) -> DialResponse:
"""
Initiate an outbound call to a specified destination. This is also commonly referred to as Click to Call or
Click to Dial. Alerts on all the devices belonging to the user. When the user answers on one of these alerting
devices, an outbound call is placed from that device to the destination.
:param destination: The destination to be dialed. The destination can be digits or a URI. Some examples for
destination include: 1234, 2223334444, +12223334444, \*73, tel:+12223334444, <EMAIL>,
sip:<EMAIL>
:type destination: str
:return: Call id and call session id
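Example (a minimal sketch; the ``api.telephony.calls`` attribute path and the ``call_id``
attribute on the response are assumptions for illustration):

.. code-block::

    connection = await api.telephony.calls.dial(destination='2223334444')
    # ... later, terminate the call that was set up ...
    await api.telephony.calls.hangup(call_id=connection.call_id)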
"""
ep = self.ep('dial')
data = await self.post(ep, json={'destination': destination})
return DialResponse.parse_obj(data)
async def answer(self, call_id: str):
"""
Answer an incoming call on the user's primary device.
:param call_id: The call identifier of the call to be answered.
:type call_id: str
"""
ep = self.ep('answer')
await self.post(ep, json={'callId': call_id})
async def reject(self, call_id: str, action: RejectAction = None):
"""
Reject an unanswered incoming call.
:param call_id: The call identifier of the call to be rejected.
:type call_id: str
:param action: The rejection action to apply to the call. The busy action is applied if no specific action is
provided.
"""
data = {to_camel(p): v for i, (p, v) in enumerate(locals().items())
if i and v is not None}
ep = self.ep('reject')
await self.post(ep, json=data)
async def hangup(self, call_id: str):
"""
Hangup a call. If used on an unanswered incoming call, the call is rejected and sent to busy.
:param call_id: The call identifier of the call to hangup.
:type call_id: str
"""
ep = self.ep('hangup')
await self.post(ep, json={'callId': call_id})
async def hold(self, call_id: str):
"""
Hold a connected call.
:param call_id: The call identifier of the call to hold.
:type call_id: str
"""
data = {to_camel(p): v for i, (p, v) in enumerate(locals().items())
if i and v is not None}
ep = self.ep('hold')
await self.post(ep, json=data)
async def resume(self, call_id: str):
"""
Resume a held call.
:param call_id: The call identifier of the call to resume.
:type call_id: str
"""
data = {to_camel(p): v for i, (p, v) in enumerate(locals().items())
if i and v is not None}
ep = self.ep('resume')
await self.post(ep, json=data)
async def divert(self, call_id: str, destination: str = None, to_voicemail: bool = None):
"""
Divert a call to a destination or a user's voicemail. This is also commonly referred to as Blind Transfer
:param call_id: The call identifier of the call to divert.
:type call_id: str
:param destination: The destination to divert the call to. If toVoicemail is false, destination is required.
The destination can be digits or a URI. Some examples for destination include: 1234, 2223334444,
+12223334444, \*73, tel:+12223334444, <EMAIL>, sip:<EMAIL>
:type destination: str
:param to_voicemail: If set to true, the call is diverted to voicemail. If no destination is specified, the
call is diverted to the user's own voicemail. If a destination is specified, the call is diverted to the
specified user's voicemail.
:type to_voicemail: bool
"""
data = {to_camel(p): v for i, (p, v) in enumerate(locals().items())
if i and v is not None}
ep = self.ep('divert')
await self.post(ep, json=data)
async def transfer(self, call_id1: str = None, call_id2: str = None, destination: str = None):
"""
Transfer two calls together. Unanswered incoming calls cannot be transferred but can be diverted using the
divert API. If the user has only two calls and wants to transfer them together, the callId1 and callId2
parameters are optional and when not provided the calls are automatically selected and transferred. If the
user has more than two calls and wants to transfer two of them together, the callId1 and callId2 parameters
are mandatory to specify which calls are being transferred. These are also commonly referred to as Attended
Transfer, Consultative Transfer, or Supervised Transfer and will return a 204 response. If the user wants to
transfer one call to a new destination but only when the destination responds, the callId1 and destination
parameters are mandatory to specify the call being transferred and the destination. This is referred to as a
Mute Transfer and is similar to the divert API with the difference of waiting for the destination to respond
prior to transferring the call. If the destination does not respond, the call is not transferred. This will
return a 201 response.
:param call_id1: The call identifier of the first call to transfer. This parameter is mandatory if either
call_id2 or destination is provided.
:type call_id1: str
:param call_id2: The call identifier of the second call to transfer. This parameter is mandatory if
    call_id1 is provided and destination is not provided.
:type call_id2: str
:param destination: The destination to be transferred to. The destination can be digits or a URI. Some
examples for destination include: 1234, 2223334444,
+12223334444, \*73, tel:+12223334444, <EMAIL>, sip:<EMAIL>.
This parameter is mandatory if call_id1 is provided and call_id2 is not provided.
:type destination: str
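Example (a minimal sketch of an attended transfer of two known calls; the
``api.telephony.calls`` attribute path is an assumption for illustration):

.. code-block::

    await api.telephony.calls.transfer(call_id1=...,
                                       call_id2=...)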
"""
data = {to_camel(p): v for i, (p, v) in enumerate(locals().items())
if i and v is not None}
ep = self.ep('transfer')
await self.post(ep, json=data)
async def park(self, call_id: str, destination: str = None, is_group_park: bool = None) -> ParkedAgainst:
"""
Park a connected call. The number field in the response can be used as the destination for the retrieve
command to retrieve the parked call.
:param call_id: The call identifier of the call to park.
:type call_id: str
:param destination: Identifies where the call is to be parked. If not provided, the call is parked against the
parking user.
The destination can be digits or a URI. Some examples for destination include: 1234, 2223334444,
+12223334444, \*73, tel:+12223334444, <EMAIL>, sip:<EMAIL>
:type destination: str
:param is_group_park: If set to true, the call is parked against an automatically selected member of the
user's call park group and the destination parameter is ignored.
:type is_group_park: bool
:return: The details of where the call has been parked.
:rtype: :class:`ParkedAgainst`
"""
data = {to_camel(p): v for i, (p, v) in enumerate(locals().items())
if i and v is not None}
ep = self.ep('park')
data = await self.post(ep, json=data)
return ParkedAgainst.parse_obj(data)
async def retrieve(self, destination: str) -> CallInfo:
"""
:param destination: Identifies where the call is parked. The number field from the park command response can
be used as the destination for the retrieve command. If not provided, the call parked against the
retrieving user is retrieved. The destination can be digits or a URI. Some examples for destination
include: 1234, 2223334444, +12223334444, \*73, tel:+12223334444, <EMAIL>,
sip:<EMAIL>
:return: call id and call session id of retreived call
:rtype: :class:`CallInfo`
"""
data = {to_camel(p): v for i, (p, v) in enumerate(locals().items())
if i and v is not None}
ep = self.ep('retrieve')
data = await self.post(ep, json=data)
return CallInfo.parse_obj(data)
async def start_recording(self, call_id: str):
"""
Start recording a call. Use of this API is only valid when the user's call recording mode is set to "On Demand".
:param call_id: The call identifier of the call to start recording.
:type call_id: str
"""
data = {to_camel(p): v for i, (p, v) in enumerate(locals().items())
if i and v is not None}
ep = self.ep('startRecording')
await self.post(ep, json=data)
async def stop_recording(self, call_id: str):
"""
Stop recording a call. Use of this API is only valid when a call is being recorded and the user's call
recording mode is set to "On Demand".
:param call_id: The call identifier of the call to stop recording.
:type call_id: str
"""
data = {to_camel(p): v for i, (p, v) in enumerate(locals().items())
if i and v is not None}
ep = self.ep('stopRecording')
await self.post(ep, json=data)
async def pause_recording(self, call_id: str):
"""
Pause recording on a call. Use of this API is only valid when a call is being recorded and the user's call
recording mode is set to "On Demand" or "Always with Pause/Resume".
:param call_id: The call identifier of the call to pause recording.
:type call_id: str
"""
data = {to_camel(p): v for i, (p, v) in enumerate(locals().items())
if i and v is not None}
ep = self.ep('pauseRecording')
await self.post(ep, json=data)
async def resume_recording(self, call_id: str):
"""
Resume recording a call. Use of this API is only valid when a call's recording is paused and the user's call
recording mode is set to "On Demand" or "Always with Pause/Resume".
:param call_id: The call identifier of the call to resume recording.
:type call_id: str
"""
data = {to_camel(p): v for i, (p, v) in enumerate(locals().items())
if i and v is not None}
ep = self.ep('resumeRecording')
await self.post(ep, json=data)
async def transmit_dtmf(self, call_id: str, dtmf: str):
"""
Transmit DTMF digits to a call.
        :param call_id: The call identifier of the call on which to transmit DTMF digits.
        :type call_id: str
        :param dtmf: The DTMF digits to transmit. Each digit must be part of the following set: [0, 1, 2, 3, 4, 5, 6,
            7, 8, 9, \*, #, A, B, C, D]. A comma "," may be included to indicate a pause between digits. For the value
            "1,234", the DTMF 1 digit is initially sent. After a pause, the DTMF 2, 3, and 4 digits are sent
            successively.
        :type dtmf: str
"""
data = {to_camel(p): v for i, (p, v) in enumerate(locals().items())
if i and v is not None}
ep = self.ep('transmitDtmf')
await self.post(ep, json=data)
async def push(self, call_id: str):
"""
Pushes a call from the assistant to the executive the call is associated with. Use of this API is only valid
        when the assistant's call is associated with an executive.
:param call_id: The call identifier of the call to push.
:type call_id: str
"""
data = {to_camel(p): v for i, (p, v) in enumerate(locals().items())
if i and v is not None}
ep = self.ep('push')
await self.post(ep, json=data)
async def pickup(self, target: str) -> CallInfo:
"""
        Picks up an incoming call to another user. A new call is initiated to perform the pickup in a similar manner
        to the dial command. When target is not present, the API picks up a call from the user's call pickup group.
        When target is present, the API picks up an incoming call from the specified target user.
        :param target: Identifies the user to pick up an incoming call from. If not provided, an incoming call to the
            user's call pickup group is picked up. The target can be digits or a URI. Some examples for target
            include: 1234, 2223334444, +12223334444, tel:+12223334444, <EMAIL>, sip:<EMAIL>
:type target: str
:return: call info of picked up call
:rtype: :class:`CallInfo`
"""
data = {to_camel(p): v for i, (p, v) in enumerate(locals().items())
if i and v is not None}
ep = self.ep('pickup')
data = await self.post(ep, json=data)
return CallInfo.parse_obj(data)
async def barge_in(self, target: str):
"""
        Barge-in on another user's answered call. A new call is initiated to perform the barge-in in a similar manner
to the dial command.
:param target: Identifies the user to barge-in on. The target can be digits or a URI. Some examples for target
include: 1234, 2223334444, +12223334444, tel:+12223334444, <EMAIL>, sip:<EMAIL>
:type target: str
        :return: call info of the barged-in call
:rtype: :class:`CallInfo`
"""
data = {to_camel(p): v for i, (p, v) in enumerate(locals().items())
if i and v is not None}
ep = self.ep('bargeIn')
data = await self.post(ep, json=data)
return CallInfo.parse_obj(data)
def list_calls_gen(self) -> AsyncGenerator[TelephonyCall, None, None]:
"""
Get the list of details for all active calls associated with the user.
        :return: yields :class:`TelephonyCall` objects
"""
ep = self.ep()
# noinspection PyTypeChecker
return self.session.follow_pagination(url=ep, model=TelephonyCall)
async def list_calls(self) -> List[TelephonyCall]:
"""
Get the list of details for all active calls associated with the user.
        :return: list of :class:`TelephonyCall`
"""
ep = self.ep()
# noinspection PyTypeChecker
return [o async for o in self.session.follow_pagination(url=ep, model=TelephonyCall)]
async def call_details(self, call_id: str) -> TelephonyCall:
"""
Get the details of the specified active call for the user.
:param call_id: The call identifier of the call.
:type call_id: str
:return: call details
:rtype: :class:`TelephonyCall`
"""
ep = self.ep(call_id)
data = await self.get(ep)
return TelephonyCall.parse_obj(data)
def call_history_gen(self, history_type: Union[str, HistoryType] = None) -> AsyncGenerator[CallHistoryRecord, None, None]:
"""
List Call History
Get the list of call history records for the user. A maximum of 20 call history records per type (placed,
missed, received) are returned.
:param history_type: The type of call history records to retrieve. If not specified, then all call history
records are retrieved.
Possible values: placed, missed, received
:type history_type: HistoryType or str
:return: yields :class:`CallHistoryRecord` objects
"""
history_type = history_type and HistoryType.history_type_or_str(history_type)
params = history_type and {'type': history_type.value} or None
url = self.ep('history')
return self.session.follow_pagination(url=url, model=CallHistoryRecord, params=params)
async def call_history(self, history_type: Union[str, HistoryType] = None) -> List[CallHistoryRecord]:
"""
List Call History
Get the list of call history records for the user. A maximum of 20 call history records per type (placed,
missed, received) are returned.
:param history_type: The type of call history records to retrieve. If not specified, then all call history
records are retrieved.
Possible values: placed, missed, received
:type history_type: HistoryType or str
        :return: list of :class:`CallHistoryRecord` objects
"""
history_type = history_type and HistoryType.history_type_or_str(history_type)
params = history_type and {'type': history_type.value} or None
url = self.ep('history')
return [o async for o in self.session.follow_pagination(url=url, model=CallHistoryRecord, params=params)]
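# Usage sketch (illustrative only, not part of the SDK): a minimal call control flow with AsCallsApi.
# Assumes `calls` is an already initialized AsCallsApi instance; the call id used below is a placeholder
# that would normally come from list_calls() or a call event.
async def _example_call_control(calls: AsCallsApi):
    # active calls and call history (placed/missed/received)
    active_calls = await calls.list_calls()
    placed = await calls.call_history(history_type='placed')
    call_id = '<call id obtained elsewhere>'  # placeholder
    # park the call; per the park docstring the `number` field of the response is the retrieve destination
    parked = await calls.park(call_id=call_id)
    retrieved = await calls.retrieve(destination=parked.number)
    # on-demand recording control and DTMF transmission on the same call
    await calls.start_recording(call_id=call_id)
    await calls.pause_recording(call_id=call_id)
    await calls.resume_recording(call_id=call_id)
    await calls.stop_recording(call_id=call_id)
    await calls.transmit_dtmf(call_id=call_id, dtmf='1,234')
    return active_calls, placed, retrieved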
class AsHuntGroupApi(AsApiChild, base='telephony/config/huntGroups'):
"""
Hunt Group API
"""
forwarding: AsForwardingApi
def __init__(self, session: AsRestSession):
super().__init__(session=session)
self.forwarding = AsForwardingApi(session=session, feature_selector=FeatureSelector.huntgroups)
def _endpoint(self, *, location_id: str = None, huntgroup_id: str = None) -> str:
"""
hunt group specific feature endpoint like /v1/telephony/config/locations/{locationId}/huntGroups/{huntGroupId}
:meta private:
:param location_id: Unique identifier for the location.
:type location_id: str
        :param huntgroup_id: hunt group id
:type huntgroup_id: str
:return: full endpoint
:rtype: str
"""
if location_id is None:
return self.session.ep('telephony/config/huntGroups')
else:
ep = self.session.ep(f'telephony/config/locations/{location_id}/huntGroups')
if huntgroup_id:
ep = f'{ep}/{huntgroup_id}'
return ep
def list_gen(self, org_id: str = None, location_id: str = None, name: str = None,
phone_number: str = None, **params) -> AsyncGenerator[HuntGroup, None, None]:
"""
Read the List of Hunt Groups
List all calling Hunt Groups for the organization.
Hunt groups can route incoming calls to a group of people or workspaces. You can even configure a pattern to
route to a whole group.
Retrieving this list requires a full or read-only administrator auth token with a scope of
spark-admin:telephony_config_read.
:param org_id: List hunt groups for this organization.
:param location_id: Only return hunt groups with matching location ID.
:param name: Only return hunt groups with the matching name.
:param phone_number: Only return hunt groups with the matching primary phone number or extension.
:return: yields :class:`HuntGroup` instances
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and v is not None and k != 'params')
url = self._endpoint()
# noinspection PyTypeChecker
return self.session.follow_pagination(url=url, model=HuntGroup, params=params)
async def list(self, org_id: str = None, location_id: str = None, name: str = None,
phone_number: str = None, **params) -> List[HuntGroup]:
"""
Read the List of Hunt Groups
List all calling Hunt Groups for the organization.
Hunt groups can route incoming calls to a group of people or workspaces. You can even configure a pattern to
route to a whole group.
Retrieving this list requires a full or read-only administrator auth token with a scope of
spark-admin:telephony_config_read.
:param org_id: List hunt groups for this organization.
:param location_id: Only return hunt groups with matching location ID.
:param name: Only return hunt groups with the matching name.
:param phone_number: Only return hunt groups with the matching primary phone number or extension.
        :return: list of :class:`HuntGroup` instances
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and v is not None and k != 'params')
url = self._endpoint()
# noinspection PyTypeChecker
return [o async for o in self.session.follow_pagination(url=url, model=HuntGroup, params=params)]
async def by_name(self, name: str, location_id: str = None, org_id: str = None) -> Optional[HuntGroup]:
"""
Get hunt group info by name
        :param name: name of the hunt group to look for
        :param location_id: only consider hunt groups in this location
        :param org_id: only consider hunt groups in this organization
        :return: the matching :class:`HuntGroup` or None if no hunt group with that name exists
"""
return next((hg for hg in await self.list(name=name, location_id=location_id, org_id=org_id)
if hg.name == name), None)
async def create(self, location_id: str, settings: HuntGroup, org_id: str = None) -> str:
"""
Create a Hunt Group
Create new Hunt Groups for the given location.
Hunt groups can route incoming calls to a group of people or workspaces. You can even configure a pattern to
route to a whole group.
Creating a hunt group requires a full administrator auth token with a scope of
spark-admin:telephony_config_write.
:param location_id: Create the hunt group for the given location.
:type location_id: str
:param settings: hunt group details
:type settings: :class:`HuntGroup`
:param org_id: Create the hunt group for this organization.
:type org_id: str
:return: ID of the newly created hunt group.
:rtype: str
"""
params = org_id and {'orgId': org_id} or {}
settings.call_policies = settings.call_policies or HGCallPolicies().default()
data = settings.create_or_update()
url = self._endpoint(location_id=location_id)
data = await self.post(url, data=data, params=params)
return data['id']
async def delete_huntgroup(self, location_id: str, huntgroup_id: str, org_id: str = None):
"""
Delete a Hunt Group
Delete the designated Hunt Group.
Hunt groups can route incoming calls to a group of people or workspaces. You can even configure a pattern to
route to a whole group.
Deleting a hunt group requires a full administrator auth token with a scope of
spark-admin:telephony_config_write.
:param location_id: Location from which to delete a hunt group.
:type location_id: str
:param huntgroup_id: Delete the hunt group with the matching ID.
:type huntgroup_id: str
        :param org_id: Delete the hunt group from this organization.
:type org_id: str
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id, huntgroup_id=huntgroup_id)
await self.delete(url, params=params)
async def details(self, location_id: str, huntgroup_id: str, org_id: str = None) -> HuntGroup:
"""
Get Details for a Hunt Group
Retrieve Hunt Group details.
Hunt groups can route incoming calls to a group of people or workspaces. You can even configure a pattern to
route to a whole group.
Retrieving hunt group details requires a full or read-only administrator auth token with a scope of
spark-admin:telephony_config_read.
:param location_id: Retrieve settings for a hunt group in this location.
:type location_id: str
:param huntgroup_id: Retrieve settings for the hunt group with this identifier.
:type huntgroup_id: str
:param org_id: Retrieve hunt group settings from this organization.
:type org_id: str
:return: hunt group details
"""
url = self._endpoint(location_id=location_id, huntgroup_id=huntgroup_id)
params = org_id and {'orgId': org_id} or {}
data = await self.get(url, params=params)
result = HuntGroup.parse_obj(data)
return result
async def update(self, location_id: str, huntgroup_id: str, update: HuntGroup,
org_id: str = None):
"""
Update a Hunt Group
Update the designated Hunt Group.
Hunt groups can route incoming calls to a group of people or workspaces. You can even configure a pattern to
route to a whole group.
Updating a hunt group requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param location_id: Update the hunt group for this location.
:type location_id: str
:param huntgroup_id: Update setting for the hunt group with the matching ID.
:type huntgroup_id: str
:param update: hunt group settings
:type update: :class:`HuntGroup`
:param org_id: Update hunt group settings from this organization.
"""
params = org_id and {'orgId': org_id} or None
data = update.create_or_update()
url = self._endpoint(location_id=location_id, huntgroup_id=huntgroup_id)
await self.put(url, data=data, params=params)
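# Usage sketch (illustrative only, not part of the SDK): list hunt groups and look one up by name with
# AsHuntGroupApi. Assumes `hg_api` is an already initialized AsHuntGroupApi instance and 'Support' is a
# placeholder hunt group name.
async def _example_hunt_group_lookup(hg_api: AsHuntGroupApi, location_id: str):
    groups = await hg_api.list(location_id=location_id)
    # by_name() returns None if there is no hunt group with an exactly matching name
    support = await hg_api.by_name(name='Support', location_id=location_id)
    return groups, support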
class AsLocationInterceptApi(AsApiChild, base='telephony/config/locations'):
"""
API for location's call intercept settings
"""
def _endpoint(self, *, location_id: str, path: str = None) -> str:
"""
location specific
telephony/config/locations/{locationId}/intercept
:meta private:
:param location_id: Unique identifier for the location.
:type location_id: str
:param path: additional path
:type: path: str
:return: full endpoint
:rtype: str
"""
path = path and f'/{path}' or ''
ep = self.session.ep(f'telephony/config/locations/{location_id}/intercept{path}')
return ep
async def read(self, *, location_id: str, org_id: str = None) -> InterceptSetting:
"""
Get Location Intercept
Retrieve intercept location details for a customer location.
Intercept incoming or outgoing calls for persons in your organization. If this is enabled, calls are either
routed to a designated number the person chooses, or to the person's voicemail.
Retrieving intercept location details requires a full, user or read-only administrator auth token with a
scope of spark-admin:telephony_config_read.
:param location_id: Retrieve intercept details for this location.
:type location_id: str
:param org_id: Retrieve intercept location details for a customer location.
:type org_id: str
        :return: location's call intercept settings
:rtype: :class:`wxc_sdk.person_settings.call_intercept.InterceptSetting`
"""
ep = self._endpoint(location_id=location_id)
params = org_id and {'orgId': org_id} or None
return InterceptSetting.parse_obj(await self.get(ep, params=params))
async def configure(self, *, location_id: str, settings: InterceptSetting, org_id: str = None):
"""
Put Location Intercept
Modifies the intercept location details for a customer location.
Intercept incoming or outgoing calls for users in your organization. If this is enabled, calls are either
routed to a designated number the user chooses, or to the user's voicemail.
Modifying the intercept location details requires a full, user administrator auth token with a scope
of spark-admin:telephony_config_write.
        :param location_id: Unique identifier for the location.
:type location_id: str
:param settings: new intercept settings
:type settings: InterceptSetting
        :param org_id: Location is in this organization. Only admin users of another organization (such as partners)
            may use this parameter as the default is the same organization as the token used to access the API.
:type org_id: str
"""
ep = self._endpoint(location_id=location_id)
params = org_id and {'orgId': org_id} or None
data = settings.json()
await self.put(ep, params=params, data=data)
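# Usage sketch (illustrative only, not part of the SDK): read a location's call intercept settings and
# write them back with AsLocationInterceptApi. Assumes `intercept_api` is an already initialized instance;
# mutation of individual InterceptSetting attributes is intentionally omitted here.
async def _example_location_intercept(intercept_api: AsLocationInterceptApi, location_id: str):
    settings = await intercept_api.read(location_id=location_id)
    # ... adjust `settings` as needed before applying ...
    await intercept_api.configure(location_id=location_id, settings=settings)
    return settings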
class AsLocationMoHApi(AsApiChild, base='telephony/config/locations'):
"""
    Location music on hold settings API
"""
def _endpoint(self, *, location_id: str, path: str = None) -> str:
"""
location specific feature endpoint like
/v1/telephony/config/locations/{locationId}/musicOnHold
:meta private:
:param location_id: Unique identifier for the location.
:type location_id: str
:param path: additional path
:type: path: str
:return: full endpoint
:rtype: str
"""
path = path and f'/{path}' or ''
ep = self.session.ep(f'telephony/config/locations/{location_id}/musicOnHold{path}')
return ep
async def read(self, *, location_id: str, org_id: str = None) -> LocationMoHSetting:
"""
Get Music On Hold
Retrieve the location's music on hold settings.
Location's music on hold settings allows you to play music when a call is placed on hold or parked.
Retrieving location's music on hold settings requires a full, user or read-only administrator auth token with
a scope of spark-admin:telephony_config_read.
        :param location_id: Retrieve music on hold settings for this location.
        :type location_id: str
        :param org_id: Retrieve music on hold settings for a customer location in this organization
:type org_id: str
:return: MoH settings
:rtype: :class:`LocationMoHSetting`
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id)
data = await self.get(url, params=params)
return LocationMoHSetting.parse_obj(data)
    async def update(self, *, location_id: str, settings: LocationMoHSetting, org_id: str = None):
        """
        Update Music On Hold
        Update the location's music on hold settings.
        Location's music on hold settings allows you to play music when a call is placed on hold or parked.
        Updating the location's music on hold settings requires a full administrator auth token with
        a scope of spark-admin:telephony_config_write.
        :param location_id: Update music on hold settings for this location.
        :type location_id: str
        :param settings: new settings
        :type settings: :class:`LocationMoHSetting`
        :param org_id: Update music on hold settings for a customer location in this organization
        :type org_id: str
"""
params = org_id and {'orgId': org_id} or None
data = settings.json()
url = self._endpoint(location_id=location_id)
await self.put(url, params=params, data=data)
    async def create(self, *, location_id: str, access_codes: list[AuthCode], org_id: str = None):
"""
:param location_id: Add new access code for this location.
:type location_id: str
:param access_codes: Access code details
:type access_codes: list of :class:`wxc_sdk.common.AuthCode`
:param org_id: Add new access code for this organization.
:type org_id: str
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id)
body = {'accessCodes': [json.loads(ac.json()) for ac in access_codes]}
await self.post(url, json=body, params=params)
    async def delete_codes(self, *, location_id: str, access_codes: list[Union[str, AuthCode]],
                           org_id: str = None):
"""
Delete Access Code Location
Deletes the access code details for a particular location for a customer.
Use Access Codes to bypass the set permissions for all persons/workspaces at this location.
Modifying the access code location details requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param location_id: Deletes the access code details for this location.
:type location_id: str
:param access_codes: access codes to delete
:type access_codes: list of :class:`wxc_sdk.common.AuthCode` or str
:param org_id: Delete access codes from this organization.
:type org_id: str
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id)
body = {'deleteCodes': [ac.code if isinstance(ac, AuthCode) else ac
for ac in access_codes]}
await self.put(url, json=body, params=params)
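# Usage sketch (illustrative only, not part of the SDK): read and re-apply a location's music on hold
# settings with AsLocationMoHApi. Assumes `moh_api` is an already initialized instance; changes to the
# LocationMoHSetting object are omitted here.
async def _example_location_moh(moh_api: AsLocationMoHApi, location_id: str):
    moh = await moh_api.read(location_id=location_id)
    # ... adjust `moh` before writing it back ...
    await moh_api.update(location_id=location_id, settings=moh)
    return moh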
class AsLocationVoicemailSettingsApi(AsApiChild, base='telephony/config/locations'):
"""
location voicemail settings API, for now only enable/disable Vm transcription
"""
def _endpoint(self, *, location_id: str, path: str = None) -> str:
"""
location specific
telephony/config/locations/{locationId}/voicemail
:meta private:
:param location_id: Unique identifier for the location.
:type location_id: str
:param path: additional path
:type: path: str
:return: full endpoint
:rtype: str
"""
path = path and f'/{path}' or ''
ep = self.session.ep(f'telephony/config/locations/{location_id}/voicemail{path}')
return ep
async def read(self, *, location_id: str, org_id: str = None) -> LocationVoiceMailSettings:
"""
Get Location Voicemail
Retrieve voicemail settings for a specific location.
Location's voicemail settings allows you to enable voicemail transcription for a specific location.
Retrieving location's voicemail settings requires a full, user or read-only administrator auth token with
a scope of spark-admin:telephony_config_read.
        :param location_id: Retrieve voicemail settings for this location.
        :type location_id: str
        :param org_id: Retrieve voicemail settings for a customer location in this organization
:type org_id: str
:return: location voicemail settings
:rtype: :class:`LocationVoiceMailSettings`
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id)
data = await self.get(url, params=params)
return LocationVoiceMailSettings.parse_obj(data)
async def update(self, *, location_id: str, settings: LocationVoiceMailSettings, org_id: str = None):
"""
        Update Location Voicemail
        Update voicemail settings for a specific location.
        Location's voicemail settings allows you to enable voicemail transcription for a specific location.
        Updating a location's voicemail settings requires a full administrator auth token with
        a scope of spark-admin:telephony_config_write.
        :param location_id: Update voicemail settings for this location.
        :type location_id: str
        :param settings: new settings
        :type settings: :class:`LocationVoiceMailSettings`
        :param org_id: Update voicemail settings for a customer location in this organization
:type org_id: str
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id)
body = settings.json()
await self.put(url, params=params, data=body)
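# Usage sketch (illustrative only, not part of the SDK): read and re-apply a location's voicemail settings
# (voicemail transcription) with AsLocationVoicemailSettingsApi. Assumes `lvm_api` is an already initialized
# instance; toggling attributes on LocationVoiceMailSettings is omitted because the attribute names are not
# shown here.
async def _example_location_voicemail(lvm_api: AsLocationVoicemailSettingsApi, location_id: str):
    settings = await lvm_api.read(location_id=location_id)
    await lvm_api.update(location_id=location_id, settings=settings)
    return settings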
class AsOrganisationVoicemailSettingsAPI(AsApiChild, base='telephony/config/voicemail/settings'):
"""
API for Organisation voicemail settings
"""
async def read(self, *, org_id: str = None) -> OrganisationVoicemailSettings:
"""
Get Voicemail Settings
Retrieve the organization's voicemail settings.
Organizational voicemail settings determines what voicemail features a person can configure and automatic
message expiration.
Retrieving organization's voicemail settings requires a full, user or read-only administrator auth token with
a scope of spark-admin:telephony_config_read.
:param org_id: Retrieve voicemail settings for this organization.
:type org_id: str
:return: VM settings
:rtype: OrganisationVoicemailSettings
"""
params = org_id and {'orgId': org_id} or None
url = self.ep()
return OrganisationVoicemailSettings.parse_obj(await self.get(url, params=params))
async def update(self, *, settings: OrganisationVoicemailSettings, org_id: str = None):
"""
Update the organization's voicemail settings.
Organizational voicemail settings determines what voicemail features a person can configure and automatic
message expiration.
Updating organization's voicemail settings requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param settings: new settings
:type settings: OrganisationVoicemailSettings
:param org_id: Update voicemail settings for this organization.
:type org_id: str
"""
params = org_id and {'orgId': org_id} or None
url = self.ep()
data = settings.json()
await self.put(url, data=data, params=params)
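# Usage sketch (illustrative only, not part of the SDK): read the organisation voicemail settings and write
# them back with AsOrganisationVoicemailSettingsAPI. Assumes `org_vm_api` is an already initialized instance.
async def _example_org_voicemail(org_vm_api: AsOrganisationVoicemailSettingsAPI):
    settings = await org_vm_api.read()
    # ... adjust settings (e.g. message expiry) before updating ...
    await org_vm_api.update(settings=settings)
    return settings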
class AsPagingApi(AsApiChild, base='telephony/config'):
def _endpoint(self, *, location_id: str = None, paging_id: str = None) -> str:
"""
endpoint for paging group operation
:meta private:
:param location_id:
:type location_id: str
:param paging_id:
:type paging_id: str
"""
if location_id is None:
return super().ep('paging')
paging_id = paging_id and f'/{paging_id}' or ''
return super().ep(f'locations/{location_id}/paging{paging_id}')
def list_gen(self, *, location_id: str = None, name: str = None, phone_number: str = None,
org_id: str = None, **params) -> AsyncGenerator[Paging, None, None]:
"""
Read the List of Paging Groups
List all Paging Groups for the organization.
Group Paging allows a person to place a one-way call or group page to up to 75 people and/or workspaces by
dialing a number or extension assigned to a specific paging group. The Group Paging service makes a
simultaneous call to all the assigned targets.
Retrieving this list requires a full or read-only administrator auth token with a scope of
spark-admin:telephony_config_read.
:param location_id: Return only paging groups with matching location ID. Default is all locations
:type location_id: str
:param name: Return only paging groups with the matching name.
:type name: str
:param phone_number: Return only paging groups with matching primary phone number or extension.
:type phone_number: str
:param org_id: List paging groups for this organization.
:type org_id: str
        :return: yields :class:`Paging` objects
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and v is not None and k != 'params')
url = self._endpoint()
# noinspection PyTypeChecker
return self.session.follow_pagination(url=url, model=Paging, params=params, item_key='locationPaging')
async def list(self, *, location_id: str = None, name: str = None, phone_number: str = None,
org_id: str = None, **params) -> List[Paging]:
"""
Read the List of Paging Groups
List all Paging Groups for the organization.
Group Paging allows a person to place a one-way call or group page to up to 75 people and/or workspaces by
dialing a number or extension assigned to a specific paging group. The Group Paging service makes a
simultaneous call to all the assigned targets.
Retrieving this list requires a full or read-only administrator auth token with a scope of
spark-admin:telephony_config_read.
:param location_id: Return only paging groups with matching location ID. Default is all locations
:type location_id: str
:param name: Return only paging groups with the matching name.
:type name: str
:param phone_number: Return only paging groups with matching primary phone number or extension.
:type phone_number: str
:param org_id: List paging groups for this organization.
:type org_id: str
        :return: list of :class:`Paging` objects
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and v is not None and k != 'params')
url = self._endpoint()
# noinspection PyTypeChecker
return [o async for o in self.session.follow_pagination(url=url, model=Paging, params=params, item_key='locationPaging')]
async def create(self, *, location_id: str, settings: Paging, org_id: str = None) -> str:
"""
Create a new Paging Group
Create a new Paging Group for the given location.
Group Paging allows a person to place a one-way call or group page to up to 75 people and/or workspaces by
dialing a number or extension assigned to a specific paging group. The Group Paging service makes a
simultaneous call to all the assigned targets.
Creating a paging group requires a full administrator auth token with a scope of
spark-admin:telephony_config_write.
:param location_id: Create the paging group for this location.
:type location_id: str
:param settings: new paging group
:type settings: Paging
:param org_id: Create the paging group for this organization.
:type org_id: str
:return: ID of the newly created paging group.
:rtype: str
"""
params = org_id and {'orgId': org_id} or None
if settings.originators and settings.originator_caller_id_enabled is None:
raise TypeError('originator_caller_id_enabled required if originators are provided')
url = self._endpoint(location_id=location_id)
data = settings.create_or_update()
data = await self.post(url, data=data, params=params)
return data['id']
async def delete_paging(self, *, location_id: str, paging_id: str, org_id: str = None):
"""
Delete a Paging Group
Delete the designated Paging Group.
Group Paging allows a person to place a one-way call or group page to up to 75 people and/or workspaces by
dialing a number or extension assigned to a specific paging group. The Group Paging service makes a
simultaneous call to all the assigned targets.
Deleting a paging group requires a full administrator auth token with a scope of
spark-admin:telephony_config_write.
:param location_id: Location from which to delete a paging group.
:type location_id: str
:param paging_id: Delete the paging group with the matching ID.
:param org_id: Delete the paging group from this organization.
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id, paging_id=paging_id)
await self.delete(url, params=params)
async def details(self, *, location_id: str, paging_id: str, org_id: str = None) -> Paging:
"""
Get Details for a Paging Group
Retrieve Paging Group details.
Group Paging allows a person to place a one-way call or group page to up to 75 people and/or workspaces by
dialing a number or extension assigned to a specific paging group. The Group Paging service makes a
simultaneous call to all the assigned targets.
Retrieving paging group details requires a full or read-only administrator auth token with a scope of
spark-admin:telephony_config_read.
:param location_id: Retrieve settings for a paging group in this location.
:param paging_id: Retrieve settings for the paging group with this identifier.
:param org_id: Retrieve paging group settings from this organization.
:return: :class:`Paging` object
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id, paging_id=paging_id)
return Paging.parse_obj(await self.get(url, params=params))
async def update(self, *, location_id: str, update: Paging, paging_id: str, org_id: str = None):
"""
Update the designated Paging Group.
Group Paging allows a person to place a one-way call or group page to up to 75 people and/or workspaces by
dialing a number or extension assigned to a specific paging group. The Group Paging service makes a
simultaneous call to all the assigned targets.
Updating a paging group requires a full administrator auth token with a scope of
spark-admin:telephony_config_write.
:param location_id: Update settings for a paging group in this location.
:type location_id: str
:param update: update parameters
:type update: Paging
:param paging_id: Update settings for the paging group with this identifier.
:type paging_id: str
:param org_id: Update paging group settings from this organization.
:type org_id: str
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id, paging_id=paging_id)
data = update.create_or_update()
await self.put(url, data=data, params=params)
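# Usage sketch (illustrative only, not part of the SDK): list paging groups in a location and read the
# details of one of them with AsPagingApi. Assumes `paging_api` is an already initialized instance and that
# the caller already knows a paging group id.
async def _example_paging_groups(paging_api: AsPagingApi, location_id: str, paging_id: str):
    groups = await paging_api.list(location_id=location_id)
    details = await paging_api.details(location_id=location_id, paging_id=paging_id)
    return groups, details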
class AsPrivateNetworkConnectApi(AsApiChild, base='telephony/config/locations'):
"""
API for location private network connect API settings
"""
async def read(self, *, location_id: str, org_id: str = None) -> NetworkConnectionType:
"""
Get Private Network Connect
Retrieve the location's network connection type.
Network Connection Type determines if the location's network connection is public or private.
Retrieving location's network connection type requires a full, user or read-only administrator auth token with
a scope of spark-admin:telephony_config_read.
:param location_id: Retrieve network connection type for this location.
:type location_id: str
:param org_id: Retrieve network connection type for this organization.
:type org_id: str
:return: location PNC settings
:rtype: NetworkConnectionType
"""
params = org_id and {'orgId': org_id} or None
url = self.session.ep(f'telephony/config/locations/{location_id}/privateNetworkConnect')
data = await self.get(url, params=params)
return parse_obj_as(NetworkConnectionType, data['networkConnectionType'])
async def update(self, *, location_id: str, connection_type: NetworkConnectionType, org_id: str = None):
"""
        Update Private Network Connect
        Update the location's network connection type.
        Network Connection Type determines if the location's network connection is public or private.
        Updating the location's network connection type requires a full administrator auth token with
        a scope of spark-admin:telephony_config_write.
:param location_id: Update network connection type for this location.
:type location_id: str
:param connection_type: Network Connection Type for the location.
:type connection_type: NetworkConnectionType
:param org_id: Update network connection type for this organization.
:type org_id: str
"""
params = org_id and {'orgId': org_id} or None
url = self.session.ep(f'telephony/config/locations/{location_id}/privateNetworkConnect')
body = {'networkConnectionType': connection_type.value}
await self.put(url, json=body, params=params)
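# Usage sketch (illustrative only, not part of the SDK): read a location's network connection type and
# re-apply it with AsPrivateNetworkConnectApi. Assumes `pnc_api` is an already initialized instance; in
# practice a different NetworkConnectionType member would be passed to update().
async def _example_private_network_connect(pnc_api: AsPrivateNetworkConnectApi, location_id: str):
    connection_type = await pnc_api.read(location_id=location_id)
    await pnc_api.update(location_id=location_id, connection_type=connection_type)
    return connection_type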
class AsVoicePortalApi(AsApiChild, base='telephony/config/locations'):
"""
location voice portal API
"""
def _endpoint(self, *, location_id: str, path: str = None) -> str:
"""
location specific
telephony/config/locations/{locationId}/voicePortal
:meta private:
:param location_id: Unique identifier for the location.
:type location_id: str
:param path: additional path
:type: path: str
:return: full endpoint
:rtype: str
"""
path = path and f'/{path}' or ''
ep = self.session.ep(f'telephony/config/locations/{location_id}/voicePortal{path}')
return ep
async def read(self, *, location_id: str, org_id: str = None) -> VoicePortalSettings:
"""
        Get VoicePortal
        Retrieve Voice portal information for the location.
        Voice portals provide an interactive voice response (IVR) system so administrators can manage auto attendant
        announcements.
        :param location_id: Location to which the voice portal belongs.
:type location_id: str
:param org_id: Organization to which the voice portal belongs.
:type org_id: str
:return: location voice portal settings
:rtype: VoicePortalSettings
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id)
return VoicePortalSettings.parse_obj(await self.get(url, params=params))
async def update(self, *, location_id: str, settings: VoicePortalSettings, passcode: str = None, org_id: str = None):
"""
Update VoicePortal
Update Voice portal information for the location.
Voice portals provide an interactive voice response (IVR) system so administrators can manage auto attendant
announcements.
Updating voice portal information for organization and/or rules requires a full administrator auth token with
a scope of spark-admin:telephony_config_write.
:param location_id: Location to which the voice portal belongs.
:type location_id: str
:param settings: new settings
:type settings: VoicePortalSettings
:param passcode: new passcode
:type passcode: str
:param org_id: Organization to which the voice portal belongs.
:type org_id: str
"""
data = json.loads(settings.json(exclude={'portal_id': True,
'language': True}))
if passcode is not None:
data['passcode'] = {'newPasscode': passcode,
'confirmPasscode': passcode}
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id)
await self.put(url, params=params, json=data)
async def passcode_rules(self, *, location_id: str, org_id: str = None) -> PasscodeRules:
"""
Get VoicePortal Passcode Rule
Retrieve the voice portal passcode rule for a location.
Voice portals provide an interactive voice response (IVR) system so administrators can manage auto attendant
announcements
Retrieving the voice portal passcode rule requires a full read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
:param location_id: Retrieve voice portal passcode rules for this location.
:type location_id: str
:param org_id: Retrieve voice portal passcode rules for this organization.
:type org_id: str
:return: passcode rules
:rtype: PasscodeRules
"""
params = org_id and {'orgId': org_id} or None
url = self._endpoint(location_id=location_id, path='passcodeRules')
return PasscodeRules.parse_obj(await self.get(url, params=params))
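# Usage sketch (illustrative only, not part of the SDK): read a location's voice portal settings and
# passcode rules, then update the portal passcode with AsVoicePortalApi. Assumes `vp_api` is an already
# initialized instance; the passcode is a placeholder that has to satisfy the returned rules.
async def _example_voice_portal(vp_api: AsVoicePortalApi, location_id: str):
    settings = await vp_api.read(location_id=location_id)
    rules = await vp_api.passcode_rules(location_id=location_id)
    await vp_api.update(location_id=location_id, settings=settings, passcode='859274')
    return settings, rules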
class AsVoicemailGroupsApi(AsApiChild, base='telephony/config/voicemailGroups'):
"""
    API for voicemail groups
"""
    def list(self, *, location_id: str = None, name: str = None, phone_number: str = None, org_id: str = None):
        """
        List the voicemail group information for the organization.
        :return: yields :class:`VoicemailGroup` objects
        """
        params = {to_camel(p): v for p, v in locals().items() if p != 'self' and v is not None}
url = self.ep()
return self.session.follow_pagination(url=url, model=VoicemailGroup, params=params, item_key='voicemailGroups')
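# Usage sketch (illustrative only, not part of the SDK): collect all voicemail groups of the organization.
# Assumes `vmg_api` is an already initialized AsVoicemailGroupsApi instance; list() returns an async
# pagination generator, so the results are gathered with an async comprehension.
async def _example_voicemail_groups(vmg_api: AsVoicemailGroupsApi):
    return [group async for group in vmg_api.list()]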
class AsVoicemailRulesApi(AsApiChild, base='telephony/config/voicemail/rules'):
"""
API for voicemail rules settings
"""
async def read(self, *, org_id: str = None) -> VoiceMailRules:
"""
Get Voicemail Rules
Retrieve the organization's voicemail rules.
Organizational voicemail rules specify the default passcode requirements.
Retrieving the organization's voicemail rules requires a full, user or read-only administrator auth token with
a scope of spark-admin:telephony_config_read.
:param org_id: Retrieve voicemail settings for this organization.
:type org_id: str
        :return: voicemail rules
        :rtype: :class:`VoiceMailRules`
"""
params = org_id and {'orgId': org_id} or None
url = self.ep()
return VoiceMailRules.parse_obj(await self.get(url, params=params))
async def update(self, *, settings: VoiceMailRules, org_id: str = None):
"""
Update Voicemail Rules
Update the organization's default voicemail passcode and/or rules.
Organizational voicemail rules specify the default passcode requirements.
If you choose to set default passcode for new people added to your organization, communicate to your people
what that passcode is, and that it must be reset before they can access their voicemail. If this feature is
not turned on, each new person must initially set their own passcode.
Updating organization's voicemail passcode and/or rules requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param settings: new settings
:type settings: VoiceMailRules
:param org_id: Update voicemail rules for this organization.
:type org_id: str
"""
params = org_id and {'orgId': org_id} or None
url = self.ep()
data = settings.json(exclude={'default_voicemail_pin_rules': True})
await self.put(url, params=params, data=data)
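# Usage sketch (illustrative only, not part of the SDK): read the organization's voicemail rules and write
# them back with AsVoicemailRulesApi. Assumes `vm_rules_api` is an already initialized instance.
async def _example_voicemail_rules(vm_rules_api: AsVoicemailRulesApi):
    rules = await vm_rules_api.read()
    await vm_rules_api.update(settings=rules)
    return rules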
class AsTelephonyApi(AsApiChild, base='telephony'):
"""
The telephony settings (features) API.
"""
#: access or authentication codes
access_codes: AsAccessCodesApi
auto_attendant: AsAutoAttendantApi
calls: AsCallsApi
callpark: AsCallParkApi
callpark_extension: AsCallparkExtensionApi
callqueue: AsCallQueueApi
huntgroup: AsHuntGroupApi
location_intercept: AsLocationInterceptApi
location_moh: AsLocationMoHApi
#: Location VM settings (only enable/disable transcription for now)
location_voicemail: AsLocationVoicemailSettingsApi
#: organisation voicemail settings
organisation_voicemail: AsOrganisationVoicemailSettingsAPI
paging: AsPagingApi
permissions_out: AsOutgoingPermissionsApi
pickup: AsCallPickupApi
pnc: AsPrivateNetworkConnectApi
schedules: AsScheduleApi
voicemail_groups: AsVoicemailGroupsApi
voicemail_rules: AsVoicemailRulesApi
voiceportal: AsVoicePortalApi
def __init__(self, session: AsRestSession):
super().__init__(session=session)
self.access_codes = AsAccessCodesApi(session=session)
self.auto_attendant = AsAutoAttendantApi(session=session)
self.calls = AsCallsApi(session=session)
self.callpark = AsCallParkApi(session=session)
self.callpark_extension = AsCallparkExtensionApi(session=session)
self.callqueue = AsCallQueueApi(session=session)
self.huntgroup = AsHuntGroupApi(session=session)
self.location_intercept = AsLocationInterceptApi(session=session)
self.location_moh = AsLocationMoHApi(session=session)
self.location_voicemail = AsLocationVoicemailSettingsApi(session=session)
self.organisation_voicemail = AsOrganisationVoicemailSettingsAPI(session=session)
self.paging = AsPagingApi(session=session)
self.permissions_out = AsOutgoingPermissionsApi(session=session, locations=True)
self.pickup = AsCallPickupApi(session=session)
self.pnc = AsPrivateNetworkConnectApi(session=session)
self.schedules = AsScheduleApi(session=session, base=ScheduleApiBase.locations)
self.voicemail_groups = AsVoicemailGroupsApi(session=session)
self.voicemail_rules = AsVoicemailRulesApi(session=session)
self.voiceportal = AsVoicePortalApi(session=session)
def phone_numbers_gen(self, *, location_id: str = None, phone_number: str = None, available: bool = None,
order: str = None,
owner_name: str = None, owner_id: str = None, owner_type: OwnerType = None,
extension: str = None, number_type: NumberType = None,
phone_number_type: NumberListPhoneNumberType = None,
state: NumberState = None, toll_free_numbers: bool = None,
org_id: str = None, **params) -> AsyncGenerator[NumberListPhoneNumber, None, None]:
"""
Get Phone Numbers for an Organization with given criteria.
List all the phone numbers for the given organization along with the status and owner (if any).
PSTN phone numbers are associated with a specific location and can be active/inactive and assigned/unassigned.
The owner is the person, workspace, or feature to which the number is assigned.
Retrieving this list requires a full or read-only administrator auth token with a scope of
spark-admin:telephony_config_read.
:param location_id: Return the list of phone numbers for this location within the given organization.
:type location_id: str
:param phone_number: Search for this phone number.
:type phone_number: str
:param available: Search among the available phone numbers. This parameter cannot be used along with owner_type
parameter when set to true.
:type available: bool
        :param order: Sort the list of phone numbers based on the following: lastName, dn, extension. Default sort
            will be based on number and extension in ascending order
        :type order: str
        :param owner_name: Return the list of phone numbers that are owned by the given owner name. Maximum length
            is 255.
:type owner_name: str
:param owner_id: Returns only the matched number/extension entries assigned to the feature with specified
uuid/broadsoftId.
:type owner_id: str
:param owner_type: Returns the list of phone numbers that are of given owner_type.
:type owner_type: OwnerType
:param extension: Returns the list of PSTN phone numbers with given extension.
:type extension: str
:param number_type: Returns the filtered list of PSTN phone numbers that contains given type of numbers.
This parameter cannot be used along with available or state.
:type number_type: NumberType
:param phone_number_type: Returns the filtered list of PSTN phone numbers that are of given phoneNumberType.
:type phone_number_type: NumberListPhoneNumberType
:param state: Returns the list of PSTN phone numbers with matching state.
:type state: NumberState
:param toll_free_numbers: Returns the list of toll free phone numbers.
:type toll_free_numbers: bool
:param org_id: List numbers for this organization.
:type org_id: str
:return: yields :class:`NumberListPhoneNumber` instances
"""
params.update((to_camel(p), v) for i, (p, v) in enumerate(locals().items())
if i and v is not None and p != 'params')
for param, value in params.items():
if isinstance(value, bool):
value = 'true' if value else 'false'
params[param] = value
elif isinstance(value, Enum):
value = value.value
params[param] = value
url = self.ep(path='config/numbers')
return self.session.follow_pagination(url=url, model=NumberListPhoneNumber, params=params,
item_key='phoneNumbers')
async def phone_numbers(self, *, location_id: str = None, phone_number: str = None, available: bool = None,
order: str = None,
owner_name: str = None, owner_id: str = None, owner_type: OwnerType = None,
extension: str = None, number_type: NumberType = None,
phone_number_type: NumberListPhoneNumberType = None,
state: NumberState = None, toll_free_numbers: bool = None,
org_id: str = None, **params) -> List[NumberListPhoneNumber]:
"""
Get Phone Numbers for an Organization with given criteria.
List all the phone numbers for the given organization along with the status and owner (if any).
PSTN phone numbers are associated with a specific location and can be active/inactive and assigned/unassigned.
The owner is the person, workspace, or feature to which the number is assigned.
Retrieving this list requires a full or read-only administrator auth token with a scope of
spark-admin:telephony_config_read.
:param location_id: Return the list of phone numbers for this location within the given organization.
:type location_id: str
:param phone_number: Search for this phone number.
:type phone_number: str
:param available: Search among the available phone numbers. This parameter cannot be used along with owner_type
parameter when set to true.
:type available: bool
        :param order: Sort the list of phone numbers based on the following: lastName, dn, extension. Default sort
            will be based on number and extension in ascending order
        :type order: str
        :param owner_name: Return the list of phone numbers that are owned by the given owner name. Maximum length
            is 255.
:type owner_name: str
:param owner_id: Returns only the matched number/extension entries assigned to the feature with specified
uuid/broadsoftId.
:type owner_id: str
:param owner_type: Returns the list of phone numbers that are of given owner_type.
:type owner_type: OwnerType
:param extension: Returns the list of PSTN phone numbers with given extension.
:type extension: str
:param number_type: Returns the filtered list of PSTN phone numbers that contains given type of numbers.
This parameter cannot be used along with available or state.
:type number_type: NumberType
:param phone_number_type: Returns the filtered list of PSTN phone numbers that are of given phoneNumberType.
:type phone_number_type: NumberListPhoneNumberType
:param state: Returns the list of PSTN phone numbers with matching state.
:type state: NumberState
:param toll_free_numbers: Returns the list of toll free phone numbers.
:type toll_free_numbers: bool
:param org_id: List numbers for this organization.
:type org_id: str
        :return: list of :class:`NumberListPhoneNumber` instances
"""
params.update((to_camel(p), v) for i, (p, v) in enumerate(locals().items())
if i and v is not None and p != 'params')
for param, value in params.items():
if isinstance(value, bool):
value = 'true' if value else 'false'
params[param] = value
elif isinstance(value, Enum):
value = value.value
params[param] = value
url = self.ep(path='config/numbers')
return [o async for o in self.session.follow_pagination(url=url, model=NumberListPhoneNumber, params=params,
item_key='phoneNumbers')]
async def phone_number_details(self, *, org_id: str = None) -> NumberDetails:
"""
get summary (counts) of phone numbers
        :param org_id: details for numbers in this organization.
:type org_id: str
:return: phone number details
:rtype: :class:`NumberDetails`
"""
params = {to_camel(p): v for i, (p, v) in enumerate(locals().items())
if i and v is not None}
params['details'] = 'true'
params['max'] = 1
url = self.ep(path='config/numbers')
data = await self.get(url, params=params)
return NumberDetails.parse_obj(data['count'])
async def validate_extensions(self, *, extensions: list[str]) -> ValidateExtensionsResponse:
"""
Validate the List of Extensions
Validate the List of Extensions. Retrieving this list requires a full or read-only administrator auth token
with a scope of spark-admin:telephony_config_read.
        :param extensions: Array of extensions to be validated.
        :type extensions: list[str]
        :return: validation result
        :rtype: :class:`ValidateExtensionsResponse`
"""
url = self.ep(path='config/actions/validateExtensions/invoke')
data = await self.post(url, json={'extensions': extensions})
return ValidateExtensionsResponse.parse_obj(data)
async def ucm_profiles(self, *, org_id: str = None) -> list[UCMProfile]:
"""
Read the List of UC Manager Profiles
List all calling UC Manager Profiles for the organization.
UC Manager Profiles are applicable if your organization uses Jabber in Team Messaging mode or Calling in
Webex Teams (Unified CM).
The UC Manager Profile has an organization-wide default and may be overridden for individual persons, although
currently only setting at a user level is supported by Webex APIs.
Retrieving this list requires a full or read-only administrator auth token with a scope
of spark-admin:people_read as this API is designed to be used in conjunction with calling behavior at the
user level.
:param org_id: List manager profiles in this organization.
:type org_id: str
:return: list of :class:`UCMProfile`
"""
params = org_id and {'orgId': org_id} or None
url = self.ep(path='config/callingProfiles')
data = await self.get(url, params=params)
return parse_obj_as(list[UCMProfile], data['callingProfiles'])
async def change_announcement_language(self, *, location_id: str, language_code: str, agent_enabled: bool = None,
service_enabled: bool = None, org_id: str = None):
"""
Change Announcement Language
Change announcement language for the given location.
Change announcement language for current people/workspaces and/or existing feature configurations. This does
not change the default announcement language which is applied to new users/workspaces and new feature
configurations.
Changing announcement language for the given location requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param location_id: Change announcement language for this location.
:type location_id: str
:param language_code: Language code.
:type language_code: str
:param agent_enabled: Set to true to change announcement language for existing people and workspaces.
:type agent_enabled: bool
:param service_enabled: Set to true to change announcement language for existing feature configurations.
:type service_enabled: bool
:param org_id: Change announcement language for this organization.
:type org_id: str
"""
params = org_id and {'orgId': org_id} or None
body = {'announcementLanguageCode': language_code}
if agent_enabled is not None:
body['agentEnabled'] = agent_enabled
if service_enabled is not None:
body['serviceEnabled'] = service_enabled
url = self.session.ep(f'telephony/config/locations/{location_id}/actions/modifyAnnouncementLanguage/invoke')
await self.put(url, json=body, params=params)
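# Usage sketch (illustrative only, not part of the SDK): a few organization level queries with
# AsTelephonyApi. Assumes `telephony` is an already initialized AsTelephonyApi instance; the extensions
# passed to validate_extensions() are placeholders.
async def _example_telephony_queries(telephony: AsTelephonyApi):
    number_counts = await telephony.phone_number_details()
    numbers = await telephony.phone_numbers()
    validation = await telephony.validate_extensions(extensions=['1001', '1002'])
    profiles = await telephony.ucm_profiles()
    return number_counts, numbers, validation, profiles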
class AsWebhookApi(AsApiChild, base='webhooks'):
"""
API for webhook management
"""
def list_gen(self) -> AsyncGenerator[WebHook, None, None]:
"""
List all of your webhooks.
:return: yields webhooks
"""
ep = self.ep()
# noinspection PyTypeChecker
return self.session.follow_pagination(url=ep, model=WebHook)
async def list(self) -> List[WebHook]:
"""
List all of your webhooks.
        :return: list of webhooks
"""
ep = self.ep()
# noinspection PyTypeChecker
return [o async for o in self.session.follow_pagination(url=ep, model=WebHook)]
async def create(self, *, name: str, target_url: str, resource: WebHookResource, event: WebHookEvent, filter: str = None,
secret: str = None,
owned_by: str = None) -> WebHook:
"""
Creates a webhook.
:param name: A user-friendly name for the webhook.
:param target_url: The URL that receives POST requests for each event.
:param resource: The resource type for the webhook. Creating a webhook requires 'read' scope on the resource
the webhook is for.
:param event: The event type for the webhook.
:param filter: The filter that defines the webhook scope.
:param secret: The secret used to generate payload signature.
:param owned_by: Specified when creating an org/admin level webhook. Supported for meetings, recordings and
meetingParticipants resources for now.
:return: the new webhook
"""
params = {to_camel(param): value for i, (param, value) in enumerate(locals().items())
if i and value is not None}
body = json.loads(WebHookCreate(**params).json())
ep = self.ep()
data = await self.post(ep, json=body)
result = WebHook.parse_obj(data)
return result
async def details(self, *, webhook_id: str) -> WebHook:
"""
Get Webhook Details
Shows details for a webhook, by ID.
:param webhook_id: The unique identifier for the webhook.
:type webhook_id: str
:return: Webhook details
"""
url = self.ep(webhook_id)
return WebHook.parse_obj(await self.get(url))
async def update(self, *, webhook_id: str, update: WebHook) -> WebHook:
"""
Updates a webhook, by ID. You cannot use this call to deactivate a webhook, only to activate a webhook that
was auto deactivated. The fields that can be updated are name, targetURL, secret and status. All other fields,
if supplied, are ignored.
:param webhook_id: The unique identifier for the webhook.
:type webhook_id: str
:param update: The webhook update
:type update: WebHook
:return: updated :class:`WebHook` object
"""
url = self.ep(webhook_id)
webhook_data = update.json(include={'name', 'target_url', 'secret', 'owned_by', 'status'})
return WebHook.parse_obj(await self.put(url, data=webhook_data))
async def webhook_delete(self, *, webhook_id: str):
"""
Deletes a webhook, by ID.
:param webhook_id: The unique identifier for the webhook.
:type webhook_id: str
:return: None
"""
ep = self.ep(f'{webhook_id}')
await self.delete(ep)
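# Usage sketch (illustrative only, not part of the SDK): list webhooks and fetch the details of one of them
# with AsWebhookApi. Assumes `webhook_api` is an already initialized instance and that the caller already
# knows a webhook id.
async def _example_webhooks(webhook_api: AsWebhookApi, webhook_id: str):
    hooks = await webhook_api.list()
    details = await webhook_api.details(webhook_id=webhook_id)
    return hooks, details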
@dataclass(init=False)
class AsWorkspaceSettingsApi(AsApiChild, base='workspaces'):
"""
API for all workspace settings.
Most of the workspace settings are equivalent to corresponding user settings. For these settings the attributes of
this class are instances of the respective user settings APIs. When calling endpoints of these APIs workspace IDs
need to be passed to the ``person_id`` parameter of the called function.
"""
call_intercept: AsCallInterceptApi
call_waiting: AsCallWaitingApi
caller_id: AsCallerIdApi
forwarding: AsPersonForwardingApi
monitoring: AsMonitoringApi
numbers: AsNumbersApi
permissions_in: AsIncomingPermissionsApi
permissions_out: AsOutgoingPermissionsApi
def __init__(self, session: AsRestSession):
super().__init__(session=session)
self.call_intercept = AsCallInterceptApi(session=session, workspaces=True)
self.call_waiting = AsCallWaitingApi(session=session, workspaces=True)
self.caller_id = AsCallerIdApi(session=session, workspaces=True)
self.forwarding = AsPersonForwardingApi(session=session, workspaces=True)
self.monitoring = AsMonitoringApi(session=session, workspaces=True)
self.numbers = AsNumbersApi(session=session, workspaces=True)
self.permissions_in = AsIncomingPermissionsApi(session=session, workspaces=True)
self.permissions_out = AsOutgoingPermissionsApi(session=session, workspaces=True)
class AsWorkspacesApi(AsApiChild, base='workspaces'):
"""
Workspaces API
Workspaces represent where people work, such as conference rooms, meeting spaces, lobbies, and lunch rooms. Devices
may be associated with workspaces.
Viewing the list of workspaces in an organization requires an administrator auth token with
the spark-admin:workspaces_read scope. Adding, updating, or deleting workspaces in an organization requires an
administrator auth token with the spark-admin:workspaces_write scope.
The Workspaces API can also be used by partner administrators acting as administrators of a different organization
than their own. In those cases an orgId value must be supplied, as indicated in the reference documentation for
the relevant endpoints.
"""
def list_gen(self, *, workspace_location_id: str = None, floor_id: str = None, display_name: str = None,
capacity: int = None,
workspace_type: WorkSpaceType = None, calling: CallingType = None, calendar: CalendarType = None,
org_id: str = None, **params) -> AsyncGenerator[Workspace, None, None]:
"""
List Workspaces
List workspaces. Use query parameters to filter the response. The orgId parameter can only be used by admin
users of another organization (such as partners). The workspaceLocationId, floorId, capacity and type fields
will only be present for workspaces that have a value set for them. The special values notSet (for filtering
on category) and -1 (for filtering on capacity) can be used to filter for workspaces without a type and/or
capacity.
:param workspace_location_id: Location associated with the workspace
:type workspace_location_id: str
:param floor_id: Floor associated with the workspace.
:type floor_id: str
:param display_name: List workspaces by display name.
:type display_name: str
:param capacity: List workspaces with the given capacity. Must be -1 or higher. A value of -1 lists workspaces
with no capacity set.
:type capacity: int
:param workspace_type: List workspaces by type.
:type workspace_type: :class:`WorkSpaceType`
:param calling: List workspaces by calling type.
:type calling: :class:`CallingType`
:param calendar: List workspaces by calendar type.
:type calendar: :class:`CalendarType`
:param org_id: List workspaces in this organization. Only admin users of another organization
(such as partners) may use this parameter.
:type org_id: str
:return: generator of :class:`Workspace` instances
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and k != 'params' and v is not None)
if workspace_type is not None:
params.pop('workspaceType')
params['type'] = workspace_type
ep = self.ep()
# noinspection PyTypeChecker
return self.session.follow_pagination(url=ep, model=Workspace, params=params)
async def list(self, *, workspace_location_id: str = None, floor_id: str = None, display_name: str = None,
capacity: int = None,
workspace_type: WorkSpaceType = None, calling: CallingType = None, calendar: CalendarType = None,
org_id: str = None, **params) -> List[Workspace]:
"""
List Workspaces
List workspaces. Use query parameters to filter the response. The orgId parameter can only be used by admin
users of another organization (such as partners). The workspaceLocationId, floorId, capacity and type fields
will only be present for workspaces that have a value set for them. The special values notSet (for filtering
on category) and -1 (for filtering on capacity) can be used to filter for workspaces without a type and/or
capacity.
:param workspace_location_id: Location associated with the workspace
:type workspace_location_id: str
:param floor_id: Floor associated with the workspace.
:type floor_id: str
:param display_name: List workspaces by display name.
:type display_name: str
:param capacity: List workspaces with the given capacity. Must be -1 or higher. A value of -1 lists workspaces
with no capacity set.
:type capacity: int
:param workspace_type: List workspaces by type.
:type workspace_type: :class:`WorkSpaceType`
:param calling: List workspaces by calling type.
:type calling: :class:`CallingType`
:param calendar: List workspaces by calendar type.
:type calendar: :class:`CalendarType`
:param org_id: List workspaces in this organization. Only admin users of another organization
(such as partners) may use this parameter.
:type org_id: str
:return: generator of :class:`Workspace` instances
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and k != 'params' and v is not None)
if workspace_type is not None:
params.pop('workspaceType')
params['type'] = workspace_type
ep = self.ep()
# noinspection PyTypeChecker
return [o async for o in self.session.follow_pagination(url=ep, model=Workspace, params=params)]
async def create(self, *, settings: Workspace, org_id: str = None):
"""
Create a Workspace
Create a workspace. The workspaceLocationId, floorId, capacity, type and notes parameters are optional, and
omitting them will result in the creation of a workspace without these values set, or set to their default.
A workspaceLocationId must be provided when the floorId is set. Calendar and calling can also be set for a
new workspace. Omitting them will default to free calling and no calendaring. The orgId parameter can only be
used by admin users of another organization (such as partners).
:param settings: settings for new Workspace
:type settings: :class:`Workspace`
:param org_id: OrgId associated with the workspace. Only admin users of another organization
(such as partners) may use this parameter.
:type org_id: str
:return: new workspace
:rtype: :class:`Workspace`
"""
if org_id:
settings.org_id = org_id
data = settings.update_or_create()
url = self.ep()
data = await self.post(url, data=data)
return Workspace.parse_obj(data)
async def details(self, workspace_id) -> Workspace:
"""
Get Workspace Details
Shows details for a workspace, by ID. The workspaceLocationId, floorId, capacity, type and notes fields will
only be present if they have been set for the workspace.
:param workspace_id: A unique identifier for the workspace.
:type workspace_id: str
:return: workspace details
:rtype: :class:`Workspace`
"""
url = self.ep(workspace_id)
return Workspace.parse_obj(await self.get(url))
async def update(self, *, workspace_id, settings: Workspace) -> Workspace:
"""
Update a Workspace
Updates details for a workspace, by ID. Specify the workspace ID in the workspaceId parameter in the URI.
Include all details for the workspace that are present in a GET request for the workspace details. Not
including the optional capacity, type or notes fields will result in the fields no longer being defined
for the workspace. A workspaceLocationId must be provided when the floorId is set. The workspaceLocationId,
floorId, calendar and calling fields do not change when omitted from the update request. Updating the
calling parameter is not supported.
:param workspace_id: A unique identifier for the workspace.
:type workspace_id: str
:param settings: new workspace settings
:type settings: :class:`Workspace`
:return: updated workspace
:rtype: :class:`Workspace`
"""
url = self.ep(workspace_id)
j_data = settings.update_or_create(for_update=True)
data = await self.put(url, data=j_data)
return Workspace.parse_obj(data)
async def delete_workspace(self, workspace_id):
"""
Delete a Workspace
Deletes a workspace, by ID. Will also delete all devices associated with the workspace. Any deleted devices
will need to be reactivated.
:param workspace_id: A unique identifier for the workspace.
:type workspace_id: str
"""
url = self.ep(workspace_id)
await self.delete(url)
@dataclass(init=False)
class AsWebexSimpleApi:
"""
The main API object
"""
#: groups API :class:`AsGroupsApi`
groups: AsGroupsApi
#: Licenses API :class:`AsLicensesApi`
licenses: AsLicensesApi
#: Location API :class:`AsLocationsApi`
locations: AsLocationsApi
#: Person settings API :class:`AsPersonSettingsApi`
person_settings: AsPersonSettingsApi
#: People API :class:`AsPeopleApi`
people: AsPeopleApi
#: Telephony (features) API :class:`AsTelephonyApi`
telephony: AsTelephonyApi
#: Webhooks API :class:`AsWebhookApi`
webhook: AsWebhookApi
#: Workspaces API :class:`AsWorkspacesApi`
workspaces: AsWorkspacesApi
#: Workspace setting API :class:`AsWorkspaceSettingsApi`
workspace_settings: AsWorkspaceSettingsApi
#: :class:`AsRestSession` used for all API requests
session: AsRestSession
def __init__(self, *, tokens: Union[str, Tokens] = None, concurrent_requests: int = 10):
"""
:param tokens: token to be used by the API. Can be a :class:`tokens.Tokens` instance, a string or None. If
None then an access token is expected in the WEBEX_ACCESS_TOKEN environment variable.
:param concurrent_requests: number of concurrent requests when using multi-threading
:type concurrent_requests: int
"""
if isinstance(tokens, str):
tokens = Tokens(access_token=tokens)
elif tokens is None:
tokens = os.getenv('WEBEX_ACCESS_TOKEN')
if tokens is None:
raise ValueError('if no access token is passed, then a valid access token has to be present in '
'WEBEX_ACCESS_TOKEN environment variable')
tokens = Tokens(access_token=tokens)
session = AsRestSession(tokens=tokens, concurrent_requests=concurrent_requests)
self.groups = AsGroupsApi(session=session)
self.licenses = AsLicensesApi(session=session)
self.locations = AsLocationsApi(session=session)
self.person_settings = AsPersonSettingsApi(session=session)
self.people = AsPeopleApi(session=session)
self.telephony = AsTelephonyApi(session=session)
self.webhook = AsWebhookApi(session=session)
self.workspaces = AsWorkspacesApi(session=session)
self.workspace_settings = AsWorkspaceSettingsApi(session=session)
self.session = session
@property
def access_token(self) -> str:
"""
access token used for all requests
:return: access token
:rtype: str
"""
return self.session.access_token
async def close(self):
await self.session.close()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
```
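For orientation, a minimal usage sketch of the async client above; the module path `wxc_sdk.as_api` and the token value are assumptions, while `workspaces.list()` follows the `AsWorkspacesApi.list()` signature shown earlier.
```python
import asyncio

from wxc_sdk.as_api import AsWebexSimpleApi  # module path is an assumption


async def main():
    # the token can also be provided via the WEBEX_ACCESS_TOKEN environment variable
    async with AsWebexSimpleApi(tokens='<access token>') as api:
        workspaces = await api.workspaces.list()
        print(f'{len(workspaces)} workspaces')


asyncio.run(main())
```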
#### File: wxc_sdk/wxc_sdk/__init__.py
```python
import logging
import os
from typing import Union
from .groups import GroupsApi
from .licenses import LicensesApi
from .locations import LocationsApi
from .people import PeopleApi
from .person_settings import PersonSettingsApi
from .rest import RestSession
from .telephony import TelephonyApi
from .tokens import Tokens
from .webhook import WebhookApi
from .workspaces import WorkspacesApi
from .workspace_settings import WorkspaceSettingsApi
from dataclasses import dataclass
__all__ = ['WebexSimpleApi']
__version__ = '1.4.1'
log = logging.getLogger(__name__)
# TODO: devices
@dataclass(init=False)
class WebexSimpleApi:
"""
The main API object
"""
#: groups API :class:`groups.GroupsApi`
groups: GroupsApi
#: Licenses API :class:`licenses.LicensesApi`
licenses: LicensesApi
#: Location API :class:`locations.LocationsApi`
locations: LocationsApi
#: Person settings API :class:`person_settings.PersonSettingsApi`
person_settings: PersonSettingsApi
#: People API :class:`people.PeopleApi`
people: PeopleApi
#: Telephony (features) API :class:`telephony.TelephonyApi`
telephony: TelephonyApi
#: Webhooks API :class:`webhook.WebhookApi`
webhook: WebhookApi
#: Workspaces API :class:`workspaces.WorkspacesApi`
workspaces: WorkspacesApi
#: Workspace setting API :class:`workspace_settings.WorkspaceSettingsApi`
workspace_settings: WorkspaceSettingsApi
#: :class:`rest.RestSession` used for all API requests
session: RestSession
def __init__(self, *, tokens: Union[str, Tokens] = None, concurrent_requests: int = 10):
"""
:param tokens: token to be used by the API. Can be a :class:`tokens.Tokens` instance, a string or None. If
None then an access token is expected in the WEBEX_ACCESS_TOKEN environment variable.
:param concurrent_requests: number of concurrent requests when using multi-threading
:type concurrent_requests: int
"""
if isinstance(tokens, str):
tokens = Tokens(access_token=tokens)
elif tokens is None:
tokens = os.getenv('WEBEX_ACCESS_TOKEN')
if tokens is None:
raise ValueError('if no access token is passed, then a valid access token has to be present in '
'WEBEX_ACCESS_TOKEN environment variable')
tokens = Tokens(access_token=tokens)
session = RestSession(tokens=tokens, concurrent_requests=concurrent_requests)
self.groups = GroupsApi(session=session)
self.licenses = LicensesApi(session=session)
self.locations = LocationsApi(session=session)
self.person_settings = PersonSettingsApi(session=session)
self.people = PeopleApi(session=session)
self.telephony = TelephonyApi(session=session)
self.webhook = WebhookApi(session=session)
self.workspaces = WorkspacesApi(session=session)
self.workspace_settings = WorkspaceSettingsApi(session=session)
self.session = session
@property
def access_token(self) -> str:
"""
access token used for all requests
:return: access token
:rtype: str
"""
return self.session.access_token
def close(self):
self.session.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
```
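A corresponding sketch for the synchronous client above; when no token argument is given, the constructor falls back to the WEBEX_ACCESS_TOKEN environment variable, and the context manager closes the underlying session on exit.
```python
import os

from wxc_sdk import WebexSimpleApi

os.environ.setdefault('WEBEX_ACCESS_TOKEN', '<access token>')  # placeholder token

with WebexSimpleApi() as api:
    print(api.access_token)
```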
#### File: wxc_sdk/person_settings/voicemail.py
```python
import os
from enum import Enum
from io import BufferedReader
from typing import Optional, Union
from requests_toolbelt.multipart.encoder import MultipartEncoder
from .common import PersonSettingsApiChild
from ..base import ApiModel
from ..common import Greeting
__all__ = ['VoicemailApi', 'VoicemailEnabled', 'VoicemailEnabledWithGreeting', 'UnansweredCalls',
'StorageType', 'VoicemailMessageStorage', 'VoicemailCopyOfMessage', 'VoicemailFax',
'VoicemailTransferToNumber', 'VoicemailNotifications', 'VoiceMailFax', 'VoicemailSettings']
class VoicemailEnabled(ApiModel):
enabled: bool
class VoicemailEnabledWithGreeting(VoicemailEnabled):
"""
Voicemail enablement setting with greeting details
"""
#: DEFAULT indicates the default greeting will be played. CUSTOM indicates a custom .wav file will be played.
greeting: Optional[Greeting]
#: Indicates a custom greeting has been uploaded.
greeting_uploaded: Optional[bool]
class UnansweredCalls(VoicemailEnabledWithGreeting):
"""
    Voicemail enablement settings for unanswered calls
"""
#: Number of rings before unanswered call will be sent to voicemail.
number_of_rings: Optional[int]
#: System-wide maximum number of rings allowed for number_of_rings setting.
system_max_number_of_rings: Optional[int]
class StorageType(str, Enum):
"""
Designates which type of voicemail message storage is used.
"""
#: For message access via phone or the Calling User Portal.
internal = 'INTERNAL'
#: For sending all messages to the person's email.
external = 'EXTERNAL'
class VoicemailMessageStorage(ApiModel):
"""
Settings for message storage
"""
#: When true desktop phone will indicate there are new voicemails.
mwi_enabled: Optional[bool]
#: Designates which type of voicemail message storage is used.
storage_type: Optional[StorageType]
#: External email address to which the new voicemail audio will be sent. A value for this field must be provided
# in the request if a storageType of EXTERNAL is given in the request.
external_email: Optional[str]
class VoicemailCopyOfMessage(VoicemailEnabled):
"""
Settings for sending a copy of new voicemail message audio via email.
"""
#: Email address to which the new voicemail audio will be sent.
email_id: Optional[str]
class VoicemailFax(VoicemailEnabled):
phone_number: Optional[str]
extension: Optional[str]
class VoicemailTransferToNumber(VoicemailEnabled):
"""
Settings for voicemail caller to transfer to a different number by pressing zero (0).
"""
#: Number voicemail caller will be transferred to when they press zero (0).
destination: Optional[str]
class VoicemailNotifications(VoicemailEnabled):
"""
Settings for notifications when there are any new voicemails.
"""
#: Email address to which the notification will be sent. For text messages, use an email to text message gateway
#: like <EMAIL>.
destination: Optional[str]
class VoiceMailFax(VoicemailEnabled):
"""
Fax message settings
"""
#: Designates optional extension for fax.
extension: Optional[str]
#: Designates phone number for fax. A value for this field must be provided in the request if faxMessage enabled
#: field is given as true in the request.
phone_number: Optional[str]
class VoicemailSettings(ApiModel):
"""
User's voicemail settings
"""
#: Voicemail is enabled or disabled.
enabled: Optional[bool]
#: Settings for sending all calls to voicemail.
send_all_calls: Optional[VoicemailEnabled]
#: Settings for sending calls to voicemail when the line is busy.
send_busy_calls: Optional[VoicemailEnabledWithGreeting]
#: Settings for sending calls to voicemail when call is unanswered
send_unanswered_calls: Optional[UnansweredCalls]
#: Settings for notifications when there are any new voicemails.
notifications: Optional[VoicemailNotifications]
#: Settings for voicemail caller to transfer to a different number by pressing zero (0).
transfer_to_number: Optional[VoicemailTransferToNumber]
#: Settings for sending a copy of new voicemail message audio via email.
email_copy_of_message: Optional[VoicemailCopyOfMessage]
#: Settings for message storage
message_storage: Optional[VoicemailMessageStorage]
#: Fax message settings
fax_message: Optional[VoiceMailFax]
voice_message_forwarding_enabled: Optional[bool] # TODO: raise documentation defect
@staticmethod
def default() -> 'VoicemailSettings':
"""
Default voicemail settings
        :return: default settings
:rtype: :class:`VoicemailSettings`
"""
return VoicemailSettings(enabled=True,
send_all_calls=VoicemailEnabled(enabled=False),
send_busy_calls=VoicemailEnabledWithGreeting(enabled=False, greeting=Greeting.default),
send_unanswered_calls=UnansweredCalls(enabled=True,
greeting=Greeting.default,
number_of_rings=3),
notifications=VoicemailNotifications(enabled=False),
transfer_to_number=VoicemailTransferToNumber(enabled=False),
email_copy_of_message=VoicemailCopyOfMessage(enabled=False),
message_storage=VoicemailMessageStorage(mwi_enabled=True,
storage_type=StorageType.internal),
fax_message=VoiceMailFax(enabled=False),
voice_message_forwarding_enabled=False)
class XForwardingSetting:
pass
class VoicemailApi(PersonSettingsApiChild):
"""
API for person's call voicemail settings
"""
feature = 'voicemail'
def read(self, *, person_id: str, org_id: str = None) -> VoicemailSettings:
"""
Read Voicemail Settings for a Person
Retrieve a Person's Voicemail Settings
The voicemail feature transfers callers to voicemail based on your settings. You can then retrieve voice
messages via Voicemail. Voicemail audio is sent in Waveform Audio File Format, .wav, format.
Optionally, notifications can be sent to a mobile phone via text or email. These notifications will not include
the voicemail files.
This API requires a full, user, or read-only administrator auth token with a scope of spark-admin:people_read
or a user auth token with spark:people_read scope can be used by a person to read their settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:return: user's voicemail settings
:rtype: VoicemailSettings
"""
url = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
return VoicemailSettings.parse_obj(self.get(url, params=params))
def configure(self, *, person_id: str, settings: VoicemailSettings, org_id: str = None):
"""
Configure Voicemail Settings for a Person
Configure a person's Voicemail Settings
The voicemail feature transfers callers to voicemail based on your settings. You can then retrieve voice
messages via Voicemail. Voicemail audio is sent in Waveform Audio File Format, .wav, format.
Optionally, notifications can be sent to a mobile phone via text or email. These notifications will not
include the voicemail files.
This API requires a full or user administrator auth token with the spark-admin:people_write scope or a user
auth token with spark:people_write scope can be used by a person to update their settings.
:return:
"""
# some settings can't be part of an update
data = settings.json(exclude={'send_busy_calls': {'greeting_uploaded': True},
'send_unanswered_calls': {'system_max_number_of_rings': True,
'greeting_uploaded': True},
'voice_message_forwarding_enabled': True
})
url = self.f_ep(person_id=person_id)
params = org_id and {'orgId': org_id} or None
self.put(url, data=data, params=params)
def _configure_greeting(self, *, person_id: str, content: Union[BufferedReader, str],
upload_as: str = None, org_id: str = None,
greeting_key: str):
"""
handled greeting configuration
:param person_id: Unique identifier for the person.
:type person_id: str
:param content: the file to be uploaded, can be a path to a file or a buffered reader (opened file); if a
reader referring to an open file is passed then make sure to open the file as binary b/c otherwise the
content length might be calculated wrong
:type content: Union[BufferedReader, str]
:param upload_as: filename for the content. Only required if content is a reader; has to be a .wav file name.
:type upload_as: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
:param greeting_key: 'uploadBusyGreeting' or 'uploadNoAnswerGreeting'
"""
if isinstance(content, str):
upload_as = os.path.basename(content)
content = open(content, mode='rb')
must_close = True
else:
must_close = False
# an existing reader
if not upload_as:
raise ValueError('upload_as is required')
encoder = MultipartEncoder(fields={'file': (upload_as, content, 'audio/wav')})
ep = self.f_ep(person_id=person_id, path=f'actions/{greeting_key}/invoke')
params = org_id and {'orgId': org_id} or None
try:
self.post(ep, data=encoder, headers={'Content-Type': encoder.content_type},
params=params)
finally:
if must_close:
content.close()
def configure_busy_greeting(self, *, person_id: str, content: Union[BufferedReader, str],
upload_as: str = None, org_id: str = None):
"""
Configure Busy Voicemail Greeting for a Person
Configure a Person's Busy Voicemail Greeting by uploading a Waveform Audio File Format, .wav, encoded audio
file.
Your request will need to be a multipart/form-data request rather than JSON, using the audio/wav Content-Type.
This API requires a full or user administrator auth token with the spark-admin:people_write scope or a user
auth token with spark:people_write scope can be used by a person to update their settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param content: the file to be uploaded, can be a path to a file or a buffered reader (opened file); if a
reader referring to an open file is passed then make sure to open the file as binary b/c otherwise the
content length might be calculated wrong
:type content: Union[BufferedReader, str]
:param upload_as: filename for the content. Only required if content is a reader; has to be a .wav file name.
:type upload_as: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
self._configure_greeting(person_id=person_id, content=content, upload_as=upload_as, org_id=org_id,
greeting_key='uploadBusyGreeting')
def configure_no_answer_greeting(self, person_id: str, content: Union[BufferedReader, str],
upload_as: str = None, org_id: str = None):
"""
Configure No Answer Voicemail Greeting for a Person
Configure a Person's No Answer Voicemail Greeting by uploading a Waveform Audio File Format, .wav, encoded
audio file.
Your request will need to be a multipart/form-data request rather than JSON, using the audio/wav Content-Type.
This API requires a full or user administrator auth token with the spark-admin:people_write scope or a user
auth token with spark:people_write scope can be used by a person to update their settings.
:param person_id: Unique identifier for the person.
:type person_id: str
:param content: the file to be uploaded, can be a path to a file or a buffered reader (opened file); if a
reader referring to an open file is passed then make sure to open the file as binary b/c otherwise the
content length might be calculated wrong
:type content: Union[BufferedReader, str]
:param upload_as: filename for the content. Only required if content is a reader; has to be a .wav file name.
:type upload_as: str
:param org_id: Person is in this organization. Only admin users of another organization (such as partners)
may use this parameter as the default is the same organization as the token used to access API.
:type org_id: str
"""
self._configure_greeting(person_id=person_id, content=content, upload_as=upload_as, org_id=org_id,
greeting_key='uploadNoAnswerGreeting')
```
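A hedged sketch of using the voicemail API above; the `api.person_settings.voicemail` attribute name, the person ID and the e-mail address are assumptions.
```python
from wxc_sdk import WebexSimpleApi
from wxc_sdk.person_settings.voicemail import VoicemailNotifications

api = WebexSimpleApi(tokens='<access token>')
vm = api.person_settings.voicemail      # attribute name is an assumption
person_id = '<person id>'               # placeholder

# read, tweak and write back the settings
settings = vm.read(person_id=person_id)
settings.notifications = VoicemailNotifications(enabled=True, destination='user@example.com')
vm.configure(person_id=person_id, settings=settings)

# a busy greeting can be uploaded directly from a .wav file path
vm.configure_busy_greeting(person_id=person_id, content='busy_greeting.wav')
```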
#### File: telephony/callqueue/announcement.py
```python
from collections.abc import Generator
from pydantic import Field
from ...base import ApiModel
from ...rest import RestSession
__all__ = ['AnnouncementApi', 'Announcement']
class Announcement(ApiModel):
"""
Announcement file information
"""
name: str = Field(alias='fileName')
size: int = Field(alias='fileSize')
class AnnouncementApi:
"""
API for call queue Announcements
"""
def __init__(self, *, session: RestSession):
self._session = session
def _endpoint(self, location_id: str, queue_id: str, path: str = None):
"""
:meta private:
:param location_id:
:param queue_id:
:param path:
:return:
"""
path = path and f'/{path}' or ''
ep = self._session.ep(path=f'telephony/config/locations/{location_id}/queues/{queue_id}/announcements{path}')
return ep
def list(self, *, location_id: str, queue_id: str, org_id: str = None) -> Generator[Announcement]:
"""
:param location_id:
:param queue_id:
:param org_id:
:return:
"""
url = self._endpoint(location_id=location_id, queue_id=queue_id)
params = org_id and {'orgId': org_id} or dict()
# noinspection PyTypeChecker
return self._session.follow_pagination(url=url, model=Announcement, params=params)
def delete_announcement(self, *, location_id: str, queue_id: str, file_name: str, org_id: str = None):
"""
:param location_id:
:type location_id: str
:param queue_id:
:type queue_id: str
:param file_name:
:type file_name: str
:param org_id:
"""
url = self._endpoint(location_id=location_id, queue_id=queue_id, path=file_name)
params = org_id and {'orgId': org_id} or None
self._session.delete(url=url, params=params)
```
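A short sketch of listing call queue announcements with the API above; the `api.telephony.callqueue.announcement` attribute path follows the `CallQueueApi` dataclass in the next file and is an assumption, as are the IDs.
```python
from wxc_sdk import WebexSimpleApi

api = WebexSimpleApi(tokens='<access token>')
announcements = api.telephony.callqueue.announcement  # attribute path is an assumption

for ann in announcements.list(location_id='<location id>', queue_id='<queue id>'):
    print(ann.name, ann.size)
```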
#### File: telephony/callqueue/__init__.py
```python
from collections.abc import Generator
from dataclasses import dataclass
from enum import Enum
from typing import Optional
from pydantic import Field
from .announcement import AnnouncementApi
from ..forwarding import ForwardingApi, FeatureSelector
from ..hg_and_cq import HGandCQ, Policy, Agent
from ...base import to_camel, ApiModel
from ...common import RingPattern, Greeting
from ...rest import RestSession
__all__ = ['CallBounce', 'DistinctiveRing', 'CallQueueCallPolicies', 'OverflowAction', 'OverflowSetting', 'WaitMode',
'WaitMessageSetting', 'AudioSource', 'WelcomeMessageSetting', 'ComfortMessageSetting', 'MohMessageSetting',
'QueueSettings', 'CallQueue', 'CallQueueApi']
class CallBounce(ApiModel):
"""
Settings for when the call into the call queue is not answered.
"""
#: If enabled, bounce calls after the set number of rings.
enabled: Optional[bool] = Field(alias='callBounceEnabled')
#: Number of rings after which to bounce call, if call bounce is enabled.
max_rings: Optional[int] = Field(alias='callBounceMaxRings')
#: Bounce if agent becomes unavailable.
agent_unavailable_enabled: Optional[bool]
#: Alert agent if call on hold more than alert_agent_max_seconds.
alert_agent_enabled: Optional[bool]
#: Number of second after which to alert agent if alertAgentEnabled.
alert_agent_max_seconds: Optional[int]
#: Bounce if call on hold more than on_hold_max_seconds
on_hold_enabled: Optional[bool] = Field(alias='callBounceOnHoldEnabled')
#: Number of second after which to bounce if on_hold_enabled.
on_hold_max_seconds: Optional[int] = Field(alias='callBounceOnHoldMaxSeconds')
@staticmethod
def default() -> 'CallBounce':
return CallBounce(enabled=True,
max_rings=8,
agent_unavailable_enabled=False,
alert_agent_enabled=False,
alert_agent_max_seconds=30,
on_hold_enabled=False,
on_hold_max_seconds=60)
class DistinctiveRing(ApiModel):
"""
Whether or not the call queue has the distinctive ring option enabled.
"""
#: Whether or not the distinctive ring is enabled.
enabled: bool
#: Ring pattern for when this callqueue is called. Only available when distinctiveRing is enabled for the call
#: queue.
ring_pattern: Optional[RingPattern]
@staticmethod
def default() -> 'DistinctiveRing':
"""
Default DistinctiveRing
"""
return DistinctiveRing(enabled=True,
ring_pattern=RingPattern.normal)
class CallQueueCallPolicies(ApiModel):
"""
Policy controlling how calls are routed to agents.
"""
#: Call routing policy to use to dispatch calls to agents.
policy: Optional[Policy]
#: Settings for when the call into the call queue is not answered.
call_bounce: Optional[CallBounce]
#: Whether or not the call queue has the distinctive ring option enabled.
distinctive_ring: Optional[DistinctiveRing]
@staticmethod
def default() -> 'CallQueueCallPolicies':
"""
Default CallPolicies
"""
return CallQueueCallPolicies(policy=Policy.circular,
call_bounce=CallBounce.default(),
distinctive_ring=DistinctiveRing.default())
@staticmethod
def simple() -> 'CallQueueCallPolicies':
return CallQueueCallPolicies(policy=Policy.circular,
call_bounce=CallBounce.default())
class OverflowAction(str, Enum):
"""
How to handle new calls when the queue is full.
"""
#: The caller hears a fast-busy tone.
perform_busy_treatment = 'PERFORM_BUSY_TREATMENT'
#: Enter the number where you want to transfer overflow calls.
transfer_to_phone_number = 'TRANSFER_TO_PHONE_NUMBER'
#: The caller hears ringing until they disconnect.
play_ringing_until_caller_hangs_up = 'PLAY_RINGING_UNTIL_CALLER_HANGS_UP'
class OverflowSetting(ApiModel):
"""
Settings for incoming calls exceed queueSize.
"""
#: How to handle new calls when the queue is full.
action: Optional[OverflowAction]
#: When true, forward all calls to a voicemail service of an internal number. This option is ignored when an
#: external transfer_number is entered.
send_to_voicemail: Optional[bool]
#: Destination number for overflow calls when action is set to TRANSFER_TO_PHONE_NUMBER.
transfer_number: Optional[str]
#: True: transfer number is set
is_transfer_number_set: Optional[bool]
#: After calls wait for the configured number of seconds and no agent is available, the overflow treatment
#: is triggered.
overflow_after_wait_enabled: Optional[bool]
#: Number of seconds to wait before the overflow treatment is triggered when no agent is available.
overflow_after_wait_time: Optional[int]
#: Indicate overflow audio to be played, otherwise callers will hear the hold music until the call is answered
#: by a user.
play_overflow_greeting_enabled: Optional[bool]
#: How to handle new calls when the queue is full.
greeting: Optional[Greeting]
#: Array of announcement file name strings to be played as overflow greetings. These files must be from the list
#: of announcements files associated with this call queue.
audio_files: Optional[list[str]]
@staticmethod
def default() -> 'OverflowSetting':
return OverflowSetting(action=OverflowAction.perform_busy_treatment,
send_to_voicemail=False,
is_transfer_number_set=False,
overflow_after_wait_enabled=False,
overflow_after_wait_time=30,
play_overflow_greeting_enabled=False,
greeting=Greeting.default,
audio_files=list())
class WaitMode(str, Enum):
time = 'TIME'
position = 'POSITION'
class WaitMessageSetting(ApiModel):
enabled: Optional[bool]
wait_mode: Optional[WaitMode]
handling_time: Optional[int]
queue_position: Optional[int]
high_volume_message_enabled: Optional[bool]
default_handling_time: Optional[int]
@staticmethod
def default():
return WaitMessageSetting(enabled=False,
wait_mode=WaitMode.position,
handling_time=100,
queue_position=100,
high_volume_message_enabled=False,
default_handling_time=5)
class AudioSource(ApiModel):
enabled: bool = Field(default=True)
greeting: Greeting = Field(default=Greeting.default)
audio_files: list[str] = Field(default_factory=list)
class WelcomeMessageSetting(AudioSource):
always_enabled: bool = Field(default=False)
class ComfortMessageSetting(AudioSource):
time_between_messages: int = Field(default=10)
@staticmethod
def default() -> 'ComfortMessageSetting':
return ComfortMessageSetting(enabled=False)
class MohMessageSetting(ApiModel):
normal_source: AudioSource
alternate_source: AudioSource
@staticmethod
def default() -> 'MohMessageSetting':
return MohMessageSetting(normal_source=AudioSource(enabled=True),
alternate_source=AudioSource(enabled=False))
class QueueSettings(ApiModel):
"""
Overall call queue settings.
"""
#: maximum number of calls for this call queue. Once this number is reached, the overflow settings are triggered
# (max 50).
queue_size: int
#: Play ringing tone to callers when their call is set to an available agent.
call_offer_tone_enabled: Optional[bool]
#: Reset caller statistics upon queue entry.
reset_call_statistics_enabled: Optional[bool]
#: Settings for incoming calls exceed queue_size.
overflow: Optional[OverflowSetting]
#:
wait_message: Optional[WaitMessageSetting]
welcome_message: Optional[WelcomeMessageSetting]
comfort_message: Optional[ComfortMessageSetting]
moh_message: Optional[MohMessageSetting]
@staticmethod
def default(*, queue_size: int) -> 'QueueSettings':
"""
Simple queue settings
:param queue_size: queue size
:type queue_size: int
"""
return QueueSettings(queue_size=queue_size,
overflow=OverflowSetting.default())
class CallQueue(HGandCQ):
"""
Call queue details
"""
#: Policy controlling how calls are routed to agents.
call_policies: Optional[CallQueueCallPolicies]
# TODO: file documentation defect. This is missing at
# https://developer.webex.com/docs/api/v1/webex-calling-organization-settings/get-details-for-a-call-queue
#: Overall call queue settings.
queue_settings: Optional[QueueSettings]
# TODO: file documentation defect. This is missing at
# https://developer.webex.com/docs/api/v1/webex-calling-organization-settings/get-details-for-a-call-queue
allow_call_waiting_for_agents_enabled: Optional[bool]
@staticmethod
def exclude_update_or_create() -> dict:
"""
Exclude dict for update or create calls
:return: dict
:meta private:
"""
base_exclude = HGandCQ.exclude_update_or_create()
base_exclude.update({'queue_settings':
{'overflow':
{'is_transfer_number_set': True}}})
return base_exclude
@staticmethod
def create(*, name: str,
agents: list[Agent],
queue_size: int = None,
enabled: bool = None,
language_code: str = None,
first_name: str = None,
last_name: str = None,
time_zone: str = None,
phone_number: str = None,
extension: str = None,
call_policies: CallQueueCallPolicies = None,
queue_settings: QueueSettings = None,
allow_call_waiting_for_agents_enabled: bool = None) -> 'CallQueue':
"""
Get an instance which can be uses for a create() call. Allows simplified creation of default queue settings
based on queue_size
:param name:
:param agents:
:param queue_size:
:param enabled:
:param language_code:
:param first_name:
:param last_name:
:param time_zone:
:param phone_number:
:param extension:
:param call_policies:
:param queue_settings:
:param allow_call_waiting_for_agents_enabled:
:return:
"""
if not (queue_size or queue_settings):
raise ValueError('One of queue_size and queue_settings has to be given')
if queue_size and queue_settings:
raise ValueError('Only one of queue_size and queue_settings can be given')
if not (phone_number or extension):
raise ValueError('One of phone_number and extension has to be given')
if queue_size:
queue_settings = QueueSettings(queue_size=queue_size)
params = {k: v for k, v in locals().items()
if v is not None and k != 'queue_size'}
return CallQueue(**params)
@dataclass(init=False)
class CallQueueApi:
"""
    Call Queue API
"""
forwarding: ForwardingApi
announcement: AnnouncementApi
def __init__(self, session: RestSession):
self._session = session
self.forwarding = ForwardingApi(session=session, feature_selector=FeatureSelector.queues)
self.announcement = AnnouncementApi(session=session)
def _endpoint(self, *, location_id: str = None, queue_id: str = None):
"""
Helper to get URL for API endpoints
:meta private:
:param location_id:
:param queue_id:
:return:
"""
if location_id is None:
return self._session.ep('telephony/config/queues')
else:
ep = self._session.ep(f'telephony/config/locations/{location_id}/queues')
if queue_id:
ep = f'{ep}/{queue_id}'
return ep
@staticmethod
def update_or_create(*, queue: CallQueue) -> str:
"""
Get JSON for update or create
:param queue:
:return:
:meta private:
"""
return queue.json(
exclude={'id': True,
'location_name': True,
'location_id': True,
'toll_free_number': True,
'language': True,
'agents':
{'__all__':
{'first_name': True,
'last_name': True,
'user_type': True,
'extension': True,
'phone_number': True}},
'alternate_number_settings':
{'alternate_numbers':
{'__all__':
{'toll_free_number': True}}},
'queue_settings':
{'overflow':
{'is_transfer_number_set': True}}})
def list(self, *, location_id: str = None, name: str = None,
org_id: str = None, **params) -> Generator[CallQueue, None, None]:
"""
Read the List of Call Queues
List all Call Queues for the organization.
Call queues temporarily hold calls in the cloud when all agents, which can be users or agents, assigned to
receive calls from the queue are unavailable. Queued calls are routed to an available agent when not on an
active call. Each call queue is assigned a Lead Number, which is a telephone number outside callers can dial
to reach users assigned to the call queue. Call queues are also assigned an internal extension, which can be
dialed internally to reach users assigned to the call queue.
Retrieving this list requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
:param location_id: Only return call queues with matching location ID.
:type location_id: str
:param name: Only return call queues with the matching name.
:type name: str
:param org_id: List call queues for this organization
:type org_id: str
:param params: dict of additional parameters passed directly to endpoint
:type params: dict
:return: yields :class:`CallQueue` objects
"""
params.update((to_camel(k), v)
for i, (k, v) in enumerate(locals().items())
if i and v is not None and k != 'params')
url = self._endpoint()
# noinspection PyTypeChecker
return self._session.follow_pagination(url=url, model=CallQueue, params=params)
def by_name(self, *, name: str, location_id: str = None, org_id: str = None) -> Optional[CallQueue]:
"""
Get queue info by name
:param location_id:
:param name:
:param org_id:
:return:
"""
return next((cq for cq in self.list(location_id=location_id, org_id=org_id, name=name)
if cq.name == name), None)
def create(self, *, location_id: str, settings: CallQueue, org_id: str = None) -> str:
"""
Create a Call Queue
Create new Call Queues for the given location.
Call queues temporarily hold calls in the cloud when all agents, which can be users or agents, assigned to
receive calls from the queue are unavailable. Queued calls are routed to an available agent when not on an
active call. Each call queue is assigned a Lead Number, which is a telephone number outside callers can dial
to reach users assigned to the call queue. Call queues are also assigned an internal extension, which can be
dialed internally to reach users assigned to the call queue.
Creating a call queue requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param location_id: Create the call queue for this location.
:type location_id: str
:param settings: parameters for queue creation.
:type settings: :class:`CallQueue`
:param org_id: Create the call queue for this organization.
:type org_id: str
:return: queue id
:rtype: str
"""
params = org_id and {'orgId': org_id} or {}
cq_data = settings.create_or_update()
url = self._endpoint(location_id=location_id)
data = self._session.rest_post(url, data=cq_data, params=params)
return data['id']
def delete_queue(self, *, location_id: str, queue_id: str, org_id: str = None):
"""
Delete a Call Queue
Delete the designated Call Queue.
Call queues temporarily hold calls in the cloud when all agents, which can be users or agents, assigned to
receive calls from the queue are unavailable. Queued calls are routed to an available agent when not on an
active call. Each call queue is assigned a Lead Number, which is a telephone number outside callers can dial
to reach users assigned to the call queue. Call queues are also assigned an internal extension, which can be
dialed internally to reach users assigned to the call queue.
Deleting a call queue requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
:param location_id: Location from which to delete a call queue.
:type location_id: str
:param queue_id: Delete the call queue with the matching ID.
:type queue_id: str
:param org_id: Delete the call queue from this organization.
:type org_id: str
"""
url = self._endpoint(location_id=location_id, queue_id=queue_id)
params = org_id and {'orgId': org_id} or None
self._session.rest_delete(url=url, params=params)
def details(self, *, location_id: str, queue_id: str, org_id: str = None) -> CallQueue:
"""
Get Details for a Call Queue
Retrieve Call Queue details.
Call queues temporarily hold calls in the cloud when all agents, which can be users or agents, assigned to
receive calls from the queue are unavailable. Queued calls are routed to an available agent when not on an
active call. Each call queue is assigned a Lead Number, which is a telephone number outside callers can dial
        to reach users assigned to the call queue. Call queues are also assigned an internal extension, which can be
dialed internally to reach users assigned to the call queue.
Retrieving call queue details requires a full or read-only administrator auth token with a scope
of spark-admin:telephony_config_read.
:param location_id: Retrieve settings for a call queue in this location
:type location_id: str
:param queue_id: Retrieve settings for the call queue with this identifier.
:type queue_id: str
:param org_id: Retrieve call queue settings from this organization.
:type org_id: str
:return: call queue details
:rtype: :class:`CallQueue`
"""
url = self._endpoint(location_id=location_id, queue_id=queue_id)
params = {'orgId': org_id} if org_id is not None else {}
data = self._session.rest_get(url, params=params)
result = CallQueue.parse_obj(data)
# noinspection PyTypeChecker
return result
def update(self, *, location_id: str, queue_id: str, update: CallQueue, org_id: str = None):
"""
Update a Call Queue
Update the designated Call Queue.
Call queues temporarily hold calls in the cloud when all agents, which can be users or agents, assigned to
receive calls from the queue are unavailable. Queued calls are routed to an available agent when not on an
active call. Each call queue is assigned a Lead Number, which is a telephone number outside callers can dial
to reach users assigned to the call queue. Call queues are also assigned an internal extension, which can be
dialed internally to reach users assigned to the call queue.
Updating a call queue requires a full administrator auth token with a scope
of spark-admin:telephony_config_write.
Examples:
.. code-block::
api = WebexSimpleApi()
# shortcut
cq = api.telephony.callqueue
# disable a call queue
update = CallQueue(enabled=False)
cq.update(location_id=...,
queue_id=...,
update=update)
# set the call routing policy to SIMULTANEOUS
            update = CallQueue(call_policies=CallQueueCallPolicies(policy=Policy.simultaneous))
cq.update(location_id=...,
queue_id=...,
update=update)
# don't bounce calls after the set number of rings.
update = CallQueue(
                call_policies=CallQueueCallPolicies(
call_bounce=CallBounce(
enabled=False)))
cq.update(location_id=...,
queue_id=...,
update=update)
Alternatively you can also read call queue details, update them in place and then call update().
.. code-block::
details = cq.details(location_id=...,
queue_id=...)
details.call_policies.call_bounce.agent_unavailable_enabled=False
details.call_policies.call_bounce.on_hold_enabled=False
cq.update(location_id=...,
queue_id=...,
update=details)
:param location_id: Location in which this call queue exists.
:type location_id: str
:param queue_id: Update setting for the call queue with the matching ID.
:type queue_id: str
:param update: updates
:type update: :class:`CallQueue`
:param org_id: Update call queue settings from this organization.
"""
params = org_id and {'orgId': org_id} or None
cq_data = update.create_or_update()
url = self._endpoint(location_id=location_id, queue_id=queue_id)
self._session.rest_put(url=url, data=cq_data, params=params)
```
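Complementing the update() docstring above, a hedged sketch of looking a queue up by name and disabling it; the location ID is a placeholder, and the `id`/`location_id` fields are inferred from the exclude dict in `update_or_create()`.
```python
from wxc_sdk import WebexSimpleApi
from wxc_sdk.telephony.callqueue import CallQueue

api = WebexSimpleApi(tokens='<access token>')
cq = api.telephony.callqueue            # attribute path as used in the update() docstring

queue = cq.by_name(name='Support', location_id='<location id>')
if queue is not None:
    # disable the queue, leaving all other settings untouched
    cq.update(location_id=queue.location_id, queue_id=queue.id, update=CallQueue(enabled=False))
```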
#### File: wxc_sdk/telephony/voicemail_groups.py
```python
from typing import Optional
from pydantic import Field
from ..api_child import ApiChild
__all__ = ['VoicemailGroup', 'VoicemailGroupsApi']
from ..base import to_camel, ApiModel
class VoicemailGroup(ApiModel):
#: Voicemail Group Id.
group_id: str = Field(alias='id')
#: Voicemail Group Name.
name: str
#: Location Name.
location_name: str
#: location id
location_id: str
#: Extension of the voicemail group.
extension: Optional[str]
#: Phone number of the voicemail group.
phone_number: Optional[str]
#: If enabled, incoming calls are sent to voicemail.
enabled: bool
#: Flag to indicate if the number is toll free.
toll_free_number: Optional[bool]
class VoicemailGroupsApi(ApiChild, base='telephony/config/voicemailGroups'):
"""
    API for voicemail groups
"""
def list(self, *, location_id: str = None, name: str = None, phone_number: str = None, org_id: str = None):
params = {to_camel(p): v for p, v in locals().items() if p != 'self' and v is not None}
url = self.ep()
return self.session.follow_pagination(url=url, model=VoicemailGroup, params=params, item_key='voicemailGroups')
```
|
{
"source": "jeoliva/dash-analyzer",
"score": 2
}
|
#### File: dash-analyzer/mpd/mpd.py
```python
from collections import namedtuple
import os
import posixpath
import errno
import math
import re
import xml.etree.ElementTree as ET
from mediapresentationdescription import MediaPresentationDescription
try:
import urlparse as url_parser
except ImportError:
import urllib.parse as url_parser
class Mpd(object):
DURATION_REGEX_STR = "^(-)?P(([0-9]*)Y)?(([0-9]*)M)?(([0-9]*)D)?(T(([0-9]*)H)?(([0-9]*)M)?(([0-9.]*)S)?)?$"
DURATION_REGEX = re.compile(DURATION_REGEX_STR)
def __init__(self, content=None, base_path=None, base_uri=None):
if content is not None:
self.manifest = self.parse(content)
else:
self.manifest = {}
self._base_uri = base_uri
self.base_path = base_path
def parse(self, content):
root = ET.fromstring(content)
manifest = MediaPresentationDescription()
self.parseRoot(root, manifest)
return manifest
def parseRoot(self, mpdRoot, manifest):
for name, value in mpdRoot.attrib.items():
if name == "type":
manifest.dynamic = (value == "dynamic")
elif name == "mediaPresentationDuration":
manifest.duration = self.parseDuration(value)
elif name == "availabilityStartTime":
manifest.availabilityStartTime = value
elif name == "maxSegmentDuration":
manifest.maxSegmentDuration = self.parseDuration(value)
elif name == "minBufferTime":
manifest.minBufferTime = self.parseDuration(value)
elif name == "profiles":
manifest.profiles = value
def parseDuration(self, str):
result = self.DURATION_REGEX.match(str)
duration = 0
if result != None:
negated = not(result.group(1) == None or len(result.group(1)) == 0)
years = result.group(3)
if years != None:
duration += float(years) * 31556908
months = result.group(5)
if months != None:
duration += float(months) * 2629739
days = result.group(7)
if days != None:
duration += float(days) * 86400
hours = result.group(10)
if hours != None:
duration += float(hours) * 3600
minutes = result.group(12)
if minutes != None:
duration += float(minutes) * 60
seconds = result.group(14)
if seconds != None:
duration += float(seconds)
            if negated:
                return -1 * int(duration * 1000)  # int(): Python 3 has no long type
            else:
                return int(duration * 1000)
        else:
            return int(float(str) * 3600 * 1000)
```
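The ISO-8601 duration parser above can be checked in isolation (with the `long()` calls replaced by `int()` for Python 3); returned values are in milliseconds.
```python
mpd = Mpd()                            # no content -> empty manifest
print(mpd.parseDuration("PT1H30M"))    # 5400000
print(mpd.parseDuration("PT30.5S"))    # 30500
print(mpd.parseDuration("1.5"))        # fallback path: plain number of hours -> 5400000
```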
#### File: dash-analyzer/mpd/rangeduri.py
```python
class RangedUri(object):
def __init__(self):
self.start = 0
self.length = -1
self.baseUri = ""
self.referenceUri = ""
```
#### File: dash-analyzer/mpd/representation.py
```python
class Representation(object):
def __init__(self):
self.revisionId = 0
self.format = ""
self.presentationTimeOffsetUs = 0
self.initializationUri = None
```
|
{
"source": "jeoncw1030/study",
"score": 4
}
|
#### File: jeoncw1030/study/calculator_3.py
```python
from pythonds.basic.stack import Stack
def calculator(str):
expression = [] #list
    numbers = Stack()  # operand stack (push/pop are used below)
operator = Stack() #stack
ranking = {'+':1,'-':1,'*':2,'/':2 }
middle_num = 0
right_num = 0
left_num = 0
    # Sort the tokens:
    # - digit: append to the output (postfix) list
    # - operator with precedence lower than or equal to the stack top: pop the stacked operators to the list, then push this one
    # - operator with higher precedence: push it onto the stack
for s in str:
if s.isdigit():
expression.append(s)
elif s in ranking.keys():
            if operator.isEmpty():
                operator.push(s)
            elif ranking[s] <= ranking[operator.peek()]:
                while not operator.isEmpty():
expression.append(operator.pop())
operator.push(s)
else:
operator.push(s)
    # Move the remaining operators to the output list
    while not operator.isEmpty():
expression.append(operator.pop())
    # Evaluate the postfix expression
for e in expression:
if e.isdigit():
numbers.push(e)
else:
right_num = int(numbers.pop())
left_num = int(numbers.pop())
if e == '+':
middle_num = left_num+right_num
elif e == '-':
middle_num = left_num-right_num
elif e == '*':
middle_num = left_num*right_num
elif e == '/':
middle_num = left_num/right_num
numbers.push(middle_num)
return numbers.pop()
if __name__ == '__main__':
    s = '3+5*2'  # input("Enter the arithmetic expression : ")
result = calculator(s)
print('Calculation result : {} = {}'.format(s, result))
''' Previous evaluation code
while operator:
o = operator.pop()
right_num = int(numbers.pop())
left_num = int(numbers.pop())
if o == '+':
middle_num = left_num+right_num
elif o == '-':
middle_num = left_num-right_num
elif o == '*':
middle_num = left_num*right_num
elif o == '/':
middle_num = left_num/right_num
#if middle_num < 0:
# middle_num *= -1
numbers.append(middle_num)
return middle_num
'''
''' example = [
'1+1',
' 2 - 5',
'5 * 2',
'8/ 2',
'3*4-2',
'4-1/3',
'1-1-1'
]
for e in example:
result = calculator(e)
print('calculation result : {} = {}'.format(e, result))
'''
```
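For reference, the conversion above is the shunting-yard algorithm followed by a postfix evaluation; with the small fixes applied (Stack-based operands, isEmpty() checks) and pythonds installed, a quick check looks like this. Only single-digit operands are supported.
```python
for expr in ['3+5*2', '8/2', '1-1-1']:
    print(expr, '=', calculator(expr))
# expected output:
# 3+5*2 = 13
# 8/2 = 4.0
# 1-1-1 = -1
```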
|
{
"source": "jeong242/gre-voca-tester",
"score": 4
}
|
#### File: jeong242/gre-voca-tester/stat_util.py
```python
import json
import json
# more_wrong increases the probability of asking a word.
"""
Typical use pattern:
stat = Stat()
# Do some stuffs...
if right_answer:
stat.right_answer(word)
else:
stat.wrong_answer(word)
...
stat.save()
"""
class Stat:
def __init__(self, load_fname="stat.json"):
with open(load_fname, "r") as f:
self.stat_dict = json.load(f)
self._update()
def _update(self, fname="voca.json"):
with open(fname, "r") as f:
voca_dict = json.load(f)
n_voca = len(voca_dict)
n_stat = len(self.stat_dict)
# Check if update is required.
# The difference is only determined by the lengths.
voca_keys = list(voca_dict.keys())
while n_stat < n_voca:
new_key = voca_keys[n_stat]
self.stat_dict[new_key] = [0,0]
n_stat += 1
def get_prob(self, word):
n_right, n_wrong = self.stat_dict[word]
prob = (n_wrong + more_wrong) / (n_right + n_wrong + 2)
return prob
def right_answer(self, word):
self.stat_dict[word][0] += 1
def wrong_answer(self, word):
self.stat_dict[word][1] += 1
def save(self, fname = "stat.json"):
stat = self.stat_dict
with open(fname, "w") as f:
json.dump(stat, f)
# Call once.
# Create a dictionary, where dict = {word: (# correct answer, # wrong answer)}.
def init_stat(src_fname="voca.json", dst_fname="stat.json"):
# Open voca json file.
with open(src_fname, "r") as f:
stat = json.load(f)
# Set all the values to 0.
for w in stat:
stat[w] = (0,0)
# Store stat as json.
with open(dst_fname, "w") as f:
json.dump(stat, f)
```
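A short sketch of the intended flow, assuming a `voca.json` file exists next to the script; the word used is a placeholder.
```python
init_stat()                      # one-time: create stat.json with zeroed counters

stat = Stat()
word = 'abate'                   # placeholder, assumed to be a key in voca.json
print(stat.get_prob(word))       # probability of asking this word
stat.wrong_answer(word)          # a miss makes the word more likely to come up again
stat.save()
```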
|
{
"source": "Jeongah-Shin/integer-seq2seq-tensorflow",
"score": 2
}
|
#### File: Jeongah-Shin/integer-seq2seq-tensorflow/common.py
```python
import os
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
def create_folder(location):
if not os.path.exists(location):
os.makedirs(location)
def get_session_config():
config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False
)
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 1.0
return config
def get_checkpoint(checkpoint_dir, log=True):
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if log:
tf.logging.info("loading tensorflow checkpoints...")
tf.logging.info('[+] Get checkpoint {}'.format(ckpt))
if ckpt and ckpt.model_checkpoint_path:
last_step = int(ckpt.model_checkpoint_path.split("-")[-1])
ckpt_path = ckpt.model_checkpoint_path
if log:
tf.logging.info("[+] RESTORE SAVED VARIBALES : restored {}".format(ckpt_path))
tf.logging.info("[+] RESTORE SAVED VARIBALES : restart from step {}".format(last_step))
else:
raise RuntimeError('checkpoint file was not found')
return ckpt_path, last_step
def get_batch_nums(batch_size, gpus):
q, r = divmod(batch_size, gpus)
return [q + 1] * r + [q] * (gpus - r)
def get_init_pretrained():
saver_reader = tf.train.Saver(
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
)
init_fn = lambda sess, ckpt_path: saver_reader.restore(sess, ckpt_path)
return init_fn
# borrowed from https://github.com/lengstrom/fast-style-transfer/blob/master/src/utils.py
def get_files(img_dir):
imgs, masks, xmls = list_files(img_dir)
return list(map(lambda x: os.path.join(img_dir,x), imgs)),\
list(map(lambda x: os.path.join(img_dir,x), masks)),\
list(map(lambda x: os.path.join(img_dir,x), xmls))
def list_files(in_path):
img_files = []
mask_files = []
gt_files = []
for (dirpath, dirnames, filenames) in os.walk(in_path):
for file in filenames:
filename, ext = os.path.splitext(file)
ext = str.lower(ext)
if ext == '.jpg' or ext == '.jpeg' or ext == '.gif' or ext == '.png' or ext == '.pgm':
img_files.append(os.path.join(dirpath, file))
elif ext == '.bmp':
mask_files.append(os.path.join(dirpath, file))
elif ext == '.xml' or ext == '.gt' or ext == '.txt':
gt_files.append(os.path.join(dirpath, file))
elif ext == '.zip':
continue
else:
print ('There is no file : %s'%(file))
# img_files.sort()
# mask_files.sort()
# gt_files.sort()
return img_files, mask_files, gt_files
```
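The `get_batch_nums()` helper above splits a batch as evenly as possible across GPUs; a quick illustration:
```python
print(get_batch_nums(10, 3))   # [4, 3, 3]
print(get_batch_nums(32, 4))   # [8, 8, 8, 8]
```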
#### File: Jeongah-Shin/integer-seq2seq-tensorflow/process_data.py
```python
import json
import numpy as np
def process(is_train=True):
with open('./data/config.json') as data_config:
data = json.load(data_config)
if is_train :
with open(data['train_s'], 'r', encoding='utf-8') as source:
source_lines = source.read().splitlines()
with open(data['train_t'], 'r', encoding='utf-8') as target:
target_lines = target.read().splitlines()
else:
with open(data['test_s'], 'r', encoding='utf-8') as source:
source_lines = source.read().splitlines()
with open(data['test_t'], 'r', encoding='utf-8') as target:
target_lines = target.read().splitlines()
num_train_samples = len(source_lines)
# Vectorize the data.
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
for idx in range(num_train_samples):
input_text = source_lines[idx]
input_texts.append(input_text)
# We use "s" as the "start sequence" character
# for the targets, and "e" as "end sequence" character.
target_text = 's' + target_lines[idx] + 'e'
target_texts.append(target_text)
for char in input_text:
if char not in input_characters:
input_characters.add(char)
for char in target_text:
if char not in target_characters:
target_characters.add(char)
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
max_encoder_seq_length = max([len(txt) for txt in input_texts])
max_decoder_seq_length = max([len(txt) for txt in target_texts])
print('Number of samples:', len(input_texts))
print('Number of unique input tokens:', num_encoder_tokens)
print('Number of unique output tokens:', num_decoder_tokens)
print('Max sequence length for inputs:', max_encoder_seq_length)
print('Max sequence length for outputs:', max_decoder_seq_length)
input_token_index = dict(
[(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict(
[(char, i) for i, char in enumerate(target_characters)])
encoder_input_data = np.zeros(
(len(input_texts), max_encoder_seq_length, num_encoder_tokens),
dtype='float32')
decoder_input_data = np.zeros(
(len(input_texts), max_decoder_seq_length, num_decoder_tokens),
dtype='float32')
decoder_target_data = np.zeros(
(len(input_texts), max_decoder_seq_length, num_decoder_tokens),
dtype='float32')
for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
for t, char in enumerate(input_text):
encoder_input_data[i, t, input_token_index[char]] = 1.
encoder_input_data[i, t + 1:, input_token_index[' ']] = 1.
for t, char in enumerate(target_text):
# decoder_target_data is ahead of decoder_input_data by one timestep
decoder_input_data[i, t, target_token_index[char]] = 1.
if t > 0:
# decoder_target_data will be ahead by one timestep
# and will not include the start character.
decoder_target_data[i, t - 1, target_token_index[char]] = 1.
decoder_input_data[i, t + 1:, target_token_index[' ']] = 1.
decoder_target_data[i, t:, target_token_index[' ']] = 1.
return encoder_input_data, decoder_input_data, decoder_target_data
```
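A sketch of consuming the one-hot tensors returned by process(); the files referenced in ./data/config.json are assumed to be present.
```python
encoder_input, decoder_input, decoder_target = process(is_train=True)

# shapes: (num_samples, max_seq_length, num_tokens); decoder_target is decoder_input
# shifted left by one timestep, ready for teacher forcing in a seq2seq model
print(encoder_input.shape, decoder_input.shape, decoder_target.shape)
```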
|
{
"source": "jeongbal/Gamble",
"score": 2
}
|
#### File: jeongbal/Gamble/bot.py
```python
from discord.activity import Game
from discord.enums import Status
from discord.ext import commands
from pathlib import Path
import os
from utils.database.mongo import Mongo
bot = commands.Bot(command_prefix=";", help_command=None)
bot.mongo = Mongo(os.getenv("MONGO_DB_URL"))
token = os.environ["DISCORD_TOKEN"]
cwd = Path(__file__).parents[0]
cwd = str(cwd)
bot.cwd = cwd
@bot.event
async def on_ready():
print("Bot is ready")
await bot.change_presence(status=Status.online, activity=Game(";help"))
if __name__ == "__main__":
for file in os.listdir(os.path.join(cwd, "cogs")):
if file.endswith(".py") and not file.startswith("_"):
bot.load_extension(f"cogs.{file[:-3]}")
bot.load_extension("jishaku")
bot.run(token)
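# Note (added): running the bot assumes the MONGO_DB_URL and DISCORD_TOKEN
# environment variables are set beforehand, e.g. (hypothetical values):
#   export MONGO_DB_URL="mongodb://localhost:27017"
#   export DISCORD_TOKEN="<your bot token>"
#   python bot.py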
```
#### File: Gamble/cogs/general.py
```python
import discord
from discord.ext import commands
from discord.ext.commands.bot import Bot
from discord.ext.commands.context import Context
from discord.embeds import Embed
from discord.message import Message
class General(commands.Cog):
def __init__(self, bot):
self.bot: Bot = bot
@commands.command(name="์ด๋")
async def _invite(self, ctx: Context):
"""
์ด ๋ด์ ์ด๋ ๋งํฌ๋ฅผ ๋ณด์ฌ์ค๋๋ค.
์ฌ์ฉ ์์: ``;์ด๋``
"""
url = discord.utils.oauth_url(self.bot.user.id)
await ctx.send(url)
@commands.command(name="help", aliases=["๋์๋ง", "๋์", "ใ
๋ใ
"])
async def _help(self, ctx: Context):
msg: Message = await ctx.send(embed=Embed(title="ใฑใท"))
        embed = Embed(title="๋ช๋ น์ด ๋ชฉ๋ก")
command_list = [
command
for command in self.bot.commands
if command.name not in ["jishaku", "set_money", "help"]
]
for command in command_list:
embed.add_field(name=command.name, value=command.help)
await msg.edit(embed=embed)
def setup(bot):
bot.add_cog(General(bot))
```
#### File: Gamble/cogs/money.py
```python
from discord.embeds import Embed
from discord.ext import commands
from discord.ext.commands.bot import Bot
from discord.ext.commands.context import Context
from discord.message import Message
from utils.database.mongo import Mongo
from utils.money import MoneyExt
class Money(commands.Cog):
def __init__(self, bot: Bot, mongo: Mongo):
self.bot = bot
self.money = MoneyExt(mongo)
@commands.command(name="๋", aliases=["์ง๊ฐ", "ใท", "ใ
ใฑ", "wr", "ehs", "e"])
async def _money(self, ctx: Context):
"""
ํ์ฌ ์๊ณ ๋ฅผ ๋ณด์ฌ์ค๋๋ค. ์ง๊ฐ์ด ์์ ๊ฒฝ์ฐ ์๋์ผ๋ก ์์ฑํฉ๋๋ค.
์ฌ์ฉ ์์: ``;๋``
"""
msg: Message = await ctx.send(embed=Embed(title="๋ถ๋ฌ์ค๋ ์ค"))
embed = await self.money.money(ctx.author.id)
await msg.edit(embed=embed)
@commands.command(name="์ถ์", aliases=["ใ
ใ
", "ct", "cnftjr", "cใ
"])
@commands.cooldown(1, 600, commands.BucketType.user)
async def _attend(self, ctx: Context):
"""
10,000์ ~ 100,000์ ์ ๋๋ค์ผ๋ก ์ง๊ธํฉ๋๋ค. 10๋ถ๋ง๋ค ์ฌ์ฉํ ์ ์์ต๋๋ค.
์ฌ์ฉ ์์: ``;์ถ์``
"""
msg: Message = await ctx.send(embed=Embed(title="ใฑใท"))
embed = await self.money.attend(ctx)
await msg.edit(embed=embed)
@commands.command(name="๋ญํน", aliases=["ในใ
", "fz", "fใ
"])
async def _ranking(self, ctx: Context):
"""
์ ์ ๋ญํน์ ์์ง๊ธ ์์ผ๋ก Top10 ๊น์ง ํ์ํฉ๋๋ค.
์ฌ์ฉ ์์: ``;๋ญํน``
"""
msg: Message = await ctx.send(embed=Embed(title="๋ถ๋ฌ์ค๋ ์ค"))
embed = await self.money.ranking(ctx)
await msg.edit(embed=embed)
def setup(bot):
bot.add_cog(Money(bot, bot.mongo))
```
#### File: Gamble/utils/money.py
```python
from utils.database.mongo import Mongo
from discord import Embed
from discord.ext.commands.context import Context
from random import randint
class MoneyExt:
def __init__(self, mongo: Mongo) -> None:
self.mongo = mongo
async def money(self, user_id: int) -> Embed:
if user_data := await self.mongo.get_user_data(user_id):
return Embed(
title="์๊ณ ",
description=f"**{format(user_data['money'], ',')}**์",
color=0x30807C,
)
await self.mongo.initialize_user(user_id)
return Embed(title="์ง๊ฐ ์์", description="์ง๊ฐ์ ์์ฑํฉ๋๋ค.", color=0x80307C)
async def attend(self, ctx: Context) -> Embed:
if user_data := await self.mongo.get_user_data(ctx.author.id):
current = user_data["money"]
amount = randint(10000, 100000) // 1000 * 1000
await self.mongo.set_user_money(ctx.author.id, current + amount)
return Embed(title=f"์ถ์ํ์ฌ {amount}์์ ๋ฐ์์ต๋๋ค.")
ctx.command.reset_cooldown(ctx)
return Embed(title="์ง๊ฐ์ด ์์ต๋๋ค.", description="`;๋` ๋ช
๋ น์ด๋ก ์ง๊ฐ์ ์์ฑํ์ธ์.")
async def ranking(self, ctx: Context) -> Embed:
ranking = await self.mongo.get_all_users_data(10)
embed = Embed(title="Top 10")
for user in ranking:
embed.add_field(
name=f"{await ctx.bot.fetch_user(user['user_id'])}",
value=f"{format(user['money'], ',')}์",
inline=False,
)
return embed
```
|
{
"source": "jeongchans/smrf",
"score": 3
}
|
#### File: jeongchans/smrf/util.py
```python
import os.path
import sys
import time
def open_file(filename):
return open(filename)
def write_file(buf, filename):
    fp = sys.stdout if filename == "stdout" else open(filename, 'w')
    fp.write(buf)
    if fp is not sys.stdout:
        fp.close()
def message(msg):
    print('[%s] %s' % (time.asctime(), msg))
```
|
{
"source": "JeongChanwoo/Deep-POC-2019",
"score": 3
}
|
#### File: image/binary_image_classification/binary_image_classification.py
```python
import numpy as np
import pandas as pd
import os
import argparse
from keras.models import Model
from keras.layers import Input,Conv2D, MaxPooling2D, Flatten, Dense
from keras_preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard, LearningRateScheduler, ModelCheckpoint
from sklearn.metrics import confusion_matrix, classification_report
# input_shape = 64
filters = 32
kernel_size = 3
strides = (3,3)
iterations = 782
# resize_shape = (64,64)
def createFolder(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def arguments():
parser = argparse.ArgumentParser(description = "binary image classification")
parser.add_argument('--train_path' ,
required = True,
help = 'train data directory path')
parser.add_argument('--resize_x',
required = False,
type = int,
default = 32,
help = 'resizing x')
parser.add_argument('--epoch',
required = False,
type= int,
default = 50,
help = 'epoch size')
parser.add_argument('--sample_count',
required = False,
type = int,
default = 5000,
help = 'tarining sampe count')
args = parser.parse_args()
return args
def working_model(input_dim, filters , kernel_size , strides):
inputs = Input(shape = (input_dim, input_dim, 3))
cnn = Conv2D(filters = filters, kernel_size=kernel_size, strides=strides, activation = 'relu')(inputs)
cnn = MaxPooling2D(pool_size=(2,2))(cnn)
cnn = Flatten()(cnn)
cnn = Dense(128, activation='relu')(cnn)
output = Dense(1, activation='sigmoid')(cnn)
classifier = Model(inputs,output)
return classifier
def image_generator(train_path, resize_shape):
train_datagen = ImageDataGenerator(rescale=1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True,
validation_split = 0.2)
# test_datagen = ImageDataGenerator(rescale= 1./255)
training_set = train_datagen.flow_from_directory(train_path,
target_size = resize_shape,
batch_size = 32,
class_mode = 'binary',
subset = 'training')
    validation_set = train_datagen.flow_from_directory(train_path,
                                                       target_size = resize_shape,
                                                       batch_size = 1,
                                                       class_mode = 'binary',
                                                       shuffle = False,  # keep a fixed order so predictions align with validation_set.classes in score()
                                                       subset = 'validation')
# test_set = test_datagen.flow_from_directory(test_path,
# target_size = resize_shape,
# batch_size = 32,
# class_mode = 'binary')
return training_set, validation_set
def train(train_set,validation_set ,model, epoch, sample_size):
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# tb_hist = TensorBoard(log_dir='./graph/image_classification/binary',
# histogram_freq=0, write_graph=True, write_images=True)
tb_cb = TensorBoard(log_dir = './graph/', histogram_freq=0,
write_graph=True, write_images=True)
ckpt = ModelCheckpoint('./weight/ckpt.h5',
save_best_only = True, mode = 'auto', period = 10)
cbks = [tb_cb, ckpt]
model.fit_generator(train_set,
# steps_per_epoch = 15,
# epochs = 5,
samples_per_epoch = sample_size,
epochs = epoch,
# steps_per_epoch = iterations,
steps_per_epoch = train_set.samples// 32,
validation_data = validation_set,
# validation_steps=1,
validation_steps=validation_set.samples,
# nb_val_samples = 2000,
verbose = 2,
callbacks = cbks,
workers = os.cpu_count(),
use_multiprocessing = True)
return model
def score(validation_set, model):
predict_res = model.predict_generator(validation_set,
verbose = 2,
workers = os.cpu_count(),
use_multiprocessing = True,
steps = validation_set.samples)
print(validation_set.class_indices)
def label(x):
if x>0.5:
return 1
else:
return 0
label_encode = np.vectorize(label)
res = label_encode(predict_res)
# print(np.unique(res))
# print(res.shape)
# print(confusion_matrix(validation_set.classes,res))
print(classification_report(validation_set.classes, res))
if __name__ == '__main__':
args = arguments()
createFolder('./graph')
createFolder('./weight')
model = working_model(args.resize_x, filters, kernel_size, strides)
print(model.summary())
print('Data argumentation for binary classification')
resize_shape = (args.resize_x, args.resize_x)
train_datagen, validation_datagen = image_generator(args.train_path, resize_shape)
model = train(train_datagen ,
validation_datagen,
model,
args.epoch,
args.sample_count)
model.save('./weight/model_weight.h5')
# predict_res = model.predict_generator()
score(validation_datagen, model)
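    # Example invocation (added; hypothetical paths). flow_from_directory expects
    # --train_path to contain one sub-directory per class:
    #   python binary_image_classification.py --train_path ./data/train --resize_x 64 --epoch 20 --sample_count 5000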
```
#### File: Deep-POC-2019/src/metric.py
```python
from keras import backend as K
from keras.losses import binary_crossentropy
import numpy as np
from tqdm import tqdm
def dice_coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred = K.cast(y_pred, 'float32')
y_pred_f = K.cast(K.greater(K.flatten(y_pred), 0.5), 'float32')
intersection = y_true_f * y_pred_f
score = 2. * K.sum(intersection) / (K.sum(y_true_f) + K.sum(y_pred_f))
return score
def dice_loss(y_true, y_pred):
smooth = 1.
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = y_true_f * y_pred_f
score = (2. * K.sum(intersection) + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
return 1. - score
def bce_dice_loss(y_true, y_pred):
return binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
def bce_logdice_loss(y_true, y_pred):
return binary_crossentropy(y_true, y_pred) - K.log(1. - dice_loss(y_true, y_pred))
def dice_channel_label(probability, truth):
batch_size = truth.shape[0]
channel_num = truth.shape[-1]
mean_dice_channel = 0.
# channel_1 = 0.
# channel_2 = 0.
# channel_3 = 0.
# channel_4 = 0.
for i in range(batch_size):
# for j in range(channel_num):
channel_dice = dice_single_channel(probability[i, :,:], truth[i, :, :])
mean_dice_channel += channel_dice/(batch_size)
# mean_dice_channel += channel_dice/(batch_size)
# mean_channels[j] += channel_dice/batch_size
# print("Channel_1 : {}, Channel_2 : {}, Channel_3 : {},Channel_4 : {},".format(
# round(mean_channels[0],5 ),
# round(mean_channels[1],5 ),
# round(mean_channels[2],5 ),
# round(mean_channels[3],5 )))
return mean_dice_channel
def dice_channel_torch(probability, truth):
batch_size = truth.shape[0]
channel_num = truth.shape[-1]
mean_dice_channel = 0.
mean_channels = [0.]* channel_num
for i in tqdm(range(batch_size)):
for j in range(channel_num):
channel_dice = dice_single_channel(probability[i, :,:,j], truth[i, :, :, j])
mean_dice_channel += channel_dice/(batch_size * channel_num)
mean_channels[j] += channel_dice/batch_size
# print(channel_num)
score_text = ' : {}, '.join(['channnel_{}'.format(k + 1) for k in range(channel_num)]) + ' : {}'
# print(score_text)
score = np.round(np.array(mean_channels), 5)
total_score = np.round(np.append(mean_dice_channel, np.array(mean_channels)),5)
score_text = score_text.format(*score)
print("Mean_dice_channel : {} ".format(total_score[0]))
print(score_text)
return total_score
def dice_single_channel(probability, truth, eps = 1E-9):
p = probability.astype(np.float32)
t = truth.astype(np.float32)
dice = (2.0 * (p * t).sum() + eps)/ (p.sum() + t.sum() + eps)
return dice
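# Minimal sanity check (added; not part of the original module, assumes keras is
# installed for the imports above). It builds random binary masks with the
# (batch, height, width, channels) layout dice_channel_torch expects and prints
# the per-channel dice scores.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    prob = (rng.rand(4, 32, 32, 2) > 0.5).astype(np.float32)
    truth = (rng.rand(4, 32, 32, 2) > 0.5).astype(np.float32)
    dice_channel_torch(prob, truth)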
```
#### File: Deep-POC-2019/src/model.py
```python
import keras
from keras.models import Model, load_model
from keras.layers import Concatenate,Dense,Input,Dropout,BatchNormalization,Activation,Add,Lambda, InputLayer, UpSampling2D,ZeroPadding2D
from keras.layers.core import Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import TensorBoard,EarlyStopping, ModelCheckpoint, ReduceLROnPlateau,CSVLogger
from keras import backend as K
from keras.losses import binary_crossentropy
from keras.optimizers import Adam
# from keras_radam import RAdam
import efficientnet.keras as efn
from keras.layers import ReLU,LeakyReLU
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import cv2
# from data import df_gen
from metric import bce_dice_loss,bce_logdice_loss,dice_coef,dice_loss
# def UpSampling2DBilinear(stride, **kwargs):
# def layer(x):
# input_shape = K.int_shape(x)
# output_shape = (stride * input_shape[1], stride * input_shape[2])
# return tf.image.resize_bilinear(x, output_shape, align_corners=True)
# return Lambda(layer, **kwargs)
# def get_model(label_counts = 4, input_shape=(256,256,3)):
# K.clear_session()
# base_model = efn.EfficientNetB2(weights='imagenet',include_top=False, input_shape= input_shape)
# base_model.trainable = False
# base_out = base_model.output
# conv1 = Conv2DTranspose(8, (2, 2), strides=(2, 2), padding='same') (base_out) # (8, 16, 16)
# up = UpSampling2DBilinear(8 )(conv1) # (8, 128, 128)
# conv2 = Conv2DTranspose(1, (2, 2), strides=(2, 2), padding='same') (up) # (1, 256, 256)
# conv3 = Conv2D(label_counts, (1, 1))(conv2)
# conv4 = Activation('sigmoid')(conv3)
# model = Model(input=base_model.input, output=conv4)
# return model
# ACTIVATION = "relu"
def H(lst, name, use_gn=False):
# if use_gn:
# norm = GroupNormalization(groups=1, name=name+'_gn')
# else:
norm = BatchNormalization(name=name+'_bn')
x = concatenate(lst)
num_filters = int(x.shape.as_list()[-1]/2)
x = Conv2D(num_filters, (2, 2), padding='same', name=name)(x)
x = norm(x)
x = LeakyReLU(alpha = 0.1, name=name+'_activation')(x)
return x
def U(x, use_gn=False):
# if use_gn:
# norm = GroupNormalization(groups=1)
# else:
norm = BatchNormalization()
num_filters = int(x.shape.as_list()[-1]/2)
x = Conv2DTranspose(num_filters, (3, 3), strides=(2, 2), padding='same')(x)
x = norm(x)
x = LeakyReLU(alpha = 0.1 )(x)
return x
def get_model(label_counts = 4 , input_shape = (256,256,3)):
base_model = efn.EfficientNetB4(weights=None, include_top=False, input_shape=input_shape)
input = base_model.input
x00 = base_model.input # (256, 512, 3)
x10 = base_model.get_layer('stem_activation').output # (128, 256, 4)
x20 = base_model.get_layer('block2d_add').output # (64, 128, 32)
x30 = base_model.get_layer('block3d_add').output # (32, 64, 56)
x40 = base_model.get_layer('block5f_add').output # (16, 32, 160)
x50 = base_model.get_layer('block7b_add').output # (8, 16, 448)
x01 = H([x00, U(x10)], 'X01')
x11 = H([x10, U(x20)], 'X11')
x21 = H([x20, U(x30)], 'X21')
x31 = H([x30, U(x40)], 'X31')
x41 = H([x40, U(x50)], 'X41')
x02 = H([x00, x01, U(x11)], 'X02')
x12 = H([x11, U(x21)], 'X12')
x22 = H([x21, U(x31)], 'X22')
x32 = H([x31, U(x41)], 'X32')
x03 = H([x00, x01, x02, U(x12)], 'X03')
x13 = H([x12, U(x22)], 'X13')
x23 = H([x22, U(x32)], 'X23')
x04 = H([x00, x01, x02, x03, U(x13)], 'X04')
x14 = H([x13, U(x23)], 'X14')
x05 = H([x00, x01, x02, x03, x04, U(x14)], 'X05')
x_out = Concatenate(name='bridge')([x01, x02, x03, x04, x05])
x_out = Conv2D(label_counts, (3,3), padding="same", name='final_output', activation="sigmoid")(x_out)
return Model(inputs=input, outputs=x_out)
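# Usage sketch (added): a rough illustration of how the nested-U decoder above
# might be compiled with the dice utilities imported from metric.py. This is an
# assumption-laden example rather than the project's actual training entry
# point; it needs the efficientnet package and enough memory to build B4.
if __name__ == '__main__':
    model = get_model(label_counts=4, input_shape=(256, 256, 3))
    model.compile(optimizer=Adam(lr=1e-3), loss=bce_dice_loss, metrics=[dice_coef])
    model.summary()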
```
|
{
"source": "jeongeun980906/YOLOS_code_review",
"score": 3
}
|
#### File: YOLOS_code_review/util/scheduler.py
```python
import logging
import math
import numpy as np
import torch
from typing import Dict, Any

_logger = logging.getLogger(__name__)
class Scheduler:
"""
Parameter Scheduler Base Class
A scheduler base class that can be used to schedule any optimizer parameter groups.
Unlike the builtin PyTorch schedulers, this is intended to be consistently called
* At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value
* At the END of each optimizer update, after incrementing the update count, to calculate next update's value
The schedulers built on this should try to remain as stateless as possible (for simplicity).
This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch'
and -1 values for special behaviour. All epoch and update counts must be tracked in the training
code and explicitly passed in to the schedulers on the corresponding step or step_update call.
Based on ideas from:
* https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler
* https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers
"""
def __init__(self,
optimizer: torch.optim.Optimizer,
param_group_field: str,
noise_range_t=None,
noise_type='normal',
noise_pct=0.67,
noise_std=1.0,
noise_seed=None,
initialize: bool = True) -> None:
self.optimizer = optimizer
self.param_group_field = param_group_field
self._initial_param_group_field = f"initial_{param_group_field}"
if initialize:
for i, group in enumerate(self.optimizer.param_groups):
if param_group_field not in group:
raise KeyError(f"{param_group_field} missing from param_groups[{i}]")
group.setdefault(self._initial_param_group_field, group[param_group_field])
else:
for i, group in enumerate(self.optimizer.param_groups):
if self._initial_param_group_field not in group:
raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]")
self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups]
self.metric = None # any point to having this for all?
self.noise_range_t = noise_range_t
self.noise_pct = noise_pct
self.noise_type = noise_type
self.noise_std = noise_std
self.noise_seed = noise_seed if noise_seed is not None else 42
self.update_groups(self.base_values)
def state_dict(self) -> Dict[str, Any]:
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
self.__dict__.update(state_dict)
def get_epoch_values(self, epoch: int):
return None
def get_update_values(self, num_updates: int):
return None
def step(self, epoch: int, metric: float = None) -> None:
self.metric = metric
values = self.get_epoch_values(epoch)
if values is not None:
values = self._add_noise(values, epoch)
self.update_groups(values)
def step_update(self, num_updates: int, metric: float = None):
self.metric = metric
values = self.get_update_values(num_updates)
if values is not None:
values = self._add_noise(values, num_updates)
self.update_groups(values)
def update_groups(self, values):
if not isinstance(values, (list, tuple)):
values = [values] * len(self.optimizer.param_groups)
for param_group, value in zip(self.optimizer.param_groups, values):
param_group[self.param_group_field] = value
def _add_noise(self, lrs, t):
if self.noise_range_t is not None:
if isinstance(self.noise_range_t, (list, tuple)):
apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1]
else:
apply_noise = t >= self.noise_range_t
if apply_noise:
g = torch.Generator()
g.manual_seed(self.noise_seed + t)
if self.noise_type == 'normal':
while True:
# resample if noise out of percent limit, brute force but shouldn't spin much
noise = torch.randn(1, generator=g).item()
if abs(noise) < self.noise_pct:
break
else:
noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct
lrs = [v + v * noise for v in lrs]
return lrs
class CosineLRScheduler(Scheduler):
"""
Cosine decay with restarts.
This is described in the paper https://arxiv.org/abs/1608.03983.
Inspiration from
https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py
"""
def __init__(self,
optimizer: torch.optim.Optimizer,
t_initial: int,
t_mul: float = 1.,
lr_min: float = 0.,
decay_rate: float = 1.,
warmup_t=0,
warmup_lr_init=0,
warmup_prefix=False,
cycle_limit=0,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
initialize=True) -> None:
super().__init__(
optimizer, param_group_field="lr",
noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
initialize=initialize)
assert t_initial > 0
assert lr_min >= 0
if t_initial == 1 and t_mul == 1 and decay_rate == 1:
_logger.warning("Cosine annealing scheduler will have no effect on the learning "
"rate since t_initial = t_mul = eta_mul = 1.")
self.t_initial = t_initial
self.t_mul = t_mul
self.lr_min = lr_min
self.decay_rate = decay_rate
self.cycle_limit = cycle_limit
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
self.warmup_prefix = warmup_prefix
self.t_in_epochs = t_in_epochs
if self.warmup_t:
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
def _get_lr(self, t):
if t < self.warmup_t:
lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
else:
if self.warmup_prefix:
t = t - self.warmup_t
if self.t_mul != 1:
i = math.floor(math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul))
t_i = self.t_mul ** i * self.t_initial
t_curr = t - (1 - self.t_mul ** i) / (1 - self.t_mul) * self.t_initial
else:
i = t // self.t_initial
t_i = self.t_initial
t_curr = t - (self.t_initial * i)
gamma = self.decay_rate ** i
lr_min = self.lr_min * gamma
lr_max_values = [v * gamma for v in self.base_values]
if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit):
lrs = [
lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos(math.pi * t_curr / t_i)) for lr_max in lr_max_values
]
else:
lrs = [self.lr_min for _ in self.base_values]
return lrs
def get_epoch_values(self, epoch: int):
if self.t_in_epochs:
return self._get_lr(epoch)
else:
return None
def get_update_values(self, num_updates: int):
if not self.t_in_epochs:
return self._get_lr(num_updates)
else:
return None
def get_cycle_length(self, cycles=0):
if not cycles:
cycles = self.cycle_limit
cycles = max(1, cycles)
if self.t_mul == 1.0:
return self.t_initial * cycles
else:
return int(math.floor(-self.t_initial * (self.t_mul ** cycles - 1) / (1 - self.t_mul)))
def create_scheduler(args, optimizer):
num_epochs = args.epochs
if getattr(args, 'lr_noise', None) is not None:
lr_noise = getattr(args, 'lr_noise')
if isinstance(lr_noise, (list, tuple)):
noise_range = [n * num_epochs for n in lr_noise]
if len(noise_range) == 1:
noise_range = noise_range[0]
else:
noise_range = lr_noise * num_epochs
else:
noise_range = None
lr_scheduler = None
if args.sched == 'step':
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
elif args.sched == 'warmupcos':
lr_scheduler = CosineLRScheduler(
optimizer,
t_initial=num_epochs,
t_mul=getattr(args, 'lr_cycle_mul', 1.),
lr_min=args.min_lr,
decay_rate=args.decay_rate,
warmup_lr_init=args.warmup_lr,
warmup_t=args.warmup_epochs,
cycle_limit=getattr(args, 'lr_cycle_limit', 1),
t_in_epochs=True,
noise_range_t=noise_range,
noise_pct=getattr(args, 'lr_noise_pct', 0.67),
noise_std=getattr(args, 'lr_noise_std', 1.),
noise_seed=getattr(args, 'seed', 42),
)
num_epochs = lr_scheduler.get_cycle_length()
return lr_scheduler, num_epochs
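# Usage sketch (added): a minimal, hypothetical example of driving the cosine
# scheduler from a training loop. The argparse.Namespace fields mirror the
# attributes create_scheduler reads; real training code would supply them from
# its own argument parser.
if __name__ == '__main__':
    import argparse
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    args = argparse.Namespace(epochs=10, sched='warmupcos', min_lr=1e-5,
                              decay_rate=1.0, warmup_lr=1e-4, warmup_epochs=2,
                              lr_noise=None)
    lr_scheduler, num_epochs = create_scheduler(args, optimizer)
    for epoch in range(num_epochs):
        # ... one epoch of training would go here ...
        lr_scheduler.step(epoch + 1)  # called at the END of each epoch
        print(epoch, optimizer.param_groups[0]['lr'])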
```
|
{
"source": "jeonggwanlee/varibad",
"score": 2
}
|
#### File: ad_envs/online_adaptation_suite/mj_env.py
```python
import os
from gym import error, spaces
from gym.utils import seeding
import numpy as np
from os import path
import gym
import six
import time as timer
from mujoco_py import load_model_from_path, MjSim, MjViewer
import mujoco_py
BIG = 1e6
class MujocoEnv(gym.Env):
def __init__(self, model_path, frame_skip=1, action_noise=0.0, random_init_state=True):
if model_path.startswith("/"):
fullpath = model_path
else:
fullpath = os.path.join(os.path.dirname(__file__), "assets", model_path)
if not path.exists(fullpath):
raise IOError("File %s does not exist" % fullpath)
self.frame_skip = frame_skip
self.model = load_model_from_path(fullpath)
self.sim = MjSim(self.model)
self.data = self.sim.data
self.viewer = None
self.init_qpos = self.data.qpos.ravel().copy()
self.init_qvel = self.data.qvel.ravel().copy()
self.init_qacc = self.data.qacc.ravel().copy()
self.init_ctrl = self.data.ctrl.ravel().copy()
self.qpos_dim = self.init_qpos.size
self.qvel_dim = self.init_qvel.size
self.ctrl_dim = self.init_ctrl.size
self.action_noise = action_noise
self.random_init_state = random_init_state
"""
if "init_qpos" in self.model.numeric_names:
init_qpos_id = self.model.numeric_names.index("init_qpos")
addr = self.model.numeric_adr.flat[init_qpos_id]
size = self.model.numeric_size.flat[init_qpos_id]
init_qpos = self.model.numeric_data.flat[addr:addr + size]
self.init_qpos = init_qpos
"""
self.dcom = None
self.current_com = None
self.reset()
super(MujocoEnv, self).__init__()
@property
def action_space(self):
bounds = self.model.actuator_ctrlrange.copy()
lb = bounds[:, 0]
ub = bounds[:, 1]
return spaces.Box(lb, ub)
@property
def observation_space(self):
shp = self.get_current_obs().shape
ub = BIG * np.ones(shp)
return spaces.Box(ub * -1, ub)
@property
def action_bounds(self):
return self.action_space.low, self.action_space.high
def reset_mujoco(self, init_state=None):
if init_state is None:
if self.random_init_state:
qp = self.init_qpos.copy() + \
np.random.normal(size=self.init_qpos.shape) * 0.01
qv = self.init_qvel.copy() + \
np.random.normal(size=self.init_qvel.shape) * 0.1
else:
qp = self.init_qpos.copy()
qv = self.init_qvel.copy()
qacc = self.init_qacc.copy()
ctrl = self.init_ctrl.copy()
else:
pass
"""
start = 0
for datum_name in ["qpos", "qvel", "qacc", "ctrl"]:
datum = getattr(self.data, datum_name)
datum_dim = datum.shape[0]
datum = init_state[start: start + datum_dim]
setattr(self.model.data, datum_name, datum)
start += datum_dim
"""
self.set_state(qp, qv)
def reset(self, init_state=None):
#self.reset_mujoco(init_state)
self.sim.reset()
self.sim.forward()
self.current_com = self.data.subtree_com[0]
self.dcom = np.zeros_like(self.current_com)
return self.get_current_obs()
    def set_state(self, qpos, qvel, qacc=None):
        # qacc is accepted for backwards compatibility but not required;
        # reset_mujoco() only passes qpos and qvel.
        assert qpos.shape == (self.qpos_dim,) and qvel.shape == (self.qvel_dim,)
state = self.sim.get_state()
for i in range(self.model.nq):
state.qpos[i] = qpos[i]
for i in range(self.model.nv):
state.qvel[i] = qvel[i]
self.sim.set_state(state)
self.sim.forward()
def get_current_obs(self):
return self._get_full_obs()
def _get_full_obs(self):
data = self.data
cdists = np.copy(self.model.geom_margin).flat
        for c in self.data.contact:
cdists[c.geom2] = min(cdists[c.geom2], c.dist)
obs = np.concatenate([
data.qpos.flat,
data.qvel.flat,
# data.cdof.flat,
data.cinert.flat,
data.cvel.flat,
# data.cacc.flat,
data.qfrc_actuator.flat,
data.cfrc_ext.flat,
data.qfrc_constraint.flat,
cdists,
# data.qfrc_bias.flat,
# data.qfrc_passive.flat,
self.dcom.flat,
])
return obs
@property
def _state(self):
return np.concatenate([
self.data.qpos.flat,
self.data.qvel.flat
])
@property
def _full_state(self):
return np.concatenate([
self.data.qpos,
self.data.qvel,
self.data.qacc,
self.data.ctrl,
]).ravel()
def inject_action_noise(self, action):
# generate action noise
noise = self.action_noise * \
np.random.normal(size=action.shape)
# rescale the noise to make it proportional to the action bounds
lb, ub = self.action_bounds
noise = 0.5 * (ub - lb) * noise
return action + noise
def forward_dynamics(self, action):
ctrl = self.inject_action_noise(action)
for i in range(self.model.nu):
self.sim.data.ctrl[i] = ctrl[i]
for _ in range(self.frame_skip):
self.sim.step()
new_com = self.data.subtree_com[0]
self.dcom = new_com - self.current_com
self.current_com = new_com
def get_viewer(self, config=None):
if self.viewer is None:
self.viewer = MjViewer(self.sim)
#self.viewer.start()
#self.viewer.set_model(self.model)
if config is not None:
pass
# self.viewer.set_window_pose(config["xpos"], config["ypos"])
# self.viewer.set_window_size(config["width"], config["height"])
# self.viewer.set_window_title(config["title"])
return self.viewer
def render(self, close=False, mode='human', config=None):
if mode == 'human':
# viewer = self.get_viewer(config=config)
try:
self.viewer.render()
except:
self.get_viewer(config=config)
self.viewer.render()
elif mode == 'rgb_array':
viewer = self.get_viewer(config=config)
viewer.loop_once()
# self.get_viewer(config=config).render()
data, width, height = self.get_viewer(config=config).get_image()
return np.fromstring(data, dtype='uint8').reshape(height, width, 3)[::-1,:,:]
if close:
self.stop_viewer()
# def start_viewer(self):
# viewer = self.get_viewer()
# if not viewer.running:
# viewer.start()
#
# def stop_viewer(self):
# if self.viewer:
# self.viewer.finish()
# self.viewer = None
# def release(self):
# # temporarily alleviate the issue (but still some leak)
# from learning_to_adapt.mujoco_py.mjlib import mjlib
# mjlib.mj_deleteModel(self.model._wrapped)
# mjlib.mj_deleteData(self.data._wrapped)
def get_body_xmat(self, body_name):
idx = self.model.body_names.index(body_name)
return self.data.ximat[idx].reshape((3, 3))
def get_body_com(self, body_name):
idx = self.model.body_names.index(body_name)
return self.data.subtree_com[idx]
def get_body_comvel(self, body_name):
idx = self.model.body_names.index(body_name)
## _compute_subtree
body_vels = np.zeros((self.model.nbody, 6))
# bodywise quantities
mass = self.model.body_mass.flatten()
for i in range(self.model.nbody):
# body velocity
# Compute object 6D velocity in object-centered frame, world/local orientation.
#mj_objectVelocity(const mjModel* m, const mjData* d, int objtype, int objid, mjtMum* res, int flg_local)
mujoco_py.cymj._mj_objectVelocity(self.model, self.data, 1, i, body_vels[i], 0)
lin_moms = body_vels[:, 3:] * mass.reshape((-1, 1))
# init subtree mass
body_parentid = self.model.body_parentid
# subtree com and com_vel
for i in range(self.model.nbody - 1, -1, -1):
if i > 0:
parent = body_parentid[i]
# add scaled velocities
lin_moms[parent] += lin_moms[i]
# accumulate mass
mass[parent] += mass[i]
return_ = lin_moms / mass.reshape((-1, 1))
return return_[idx]
#return self.model.body_comvels[idx]
# def get_body_comvel(self, body_name):
# idx = self.model.body_names.index(body_name)
#
# return self.model.body_comvels[idx]
# def print_stats(self):
# super(MujocoEnv, self).print_stats()
# print("qpos dim:\t%d" % len(self.data.qpos))
def action_from_key(self, key):
raise NotImplementedError
# def set_state_tmp(self, state, restore=True):
# if restore:
# prev_pos = self.data.qpos
# prev_qvel = self.data.qvel
# prev_ctrl = self.data.ctrl
# prev_act = self.data.act
# qpos, qvel = self.decode_state(state)
# self.model.data.qpos = qpos
# self.model.data.qvel = qvel
# self.model.forward()
# yield
# if restore:
# self.data.qpos = prev_pos
# self.data.qvel = prev_qvel
# self.data.ctrl = prev_ctrl
# self.data.act = prev_act
# self.model.forward()
def get_param_values(self):
return {}
def set_param_values(self, values):
pass
```
#### File: environments/mujoco/half_cheetah_blocks_env.py
```python
import numpy as np
# from learning_to_adapt.utils.serializable import Serializable
# from learning_to_adapt.envs.mujoco_env import MujocoEnv
# from learning_to_adapt.logger import logger
import os
from environments.mujoco.mj_env import MujocoEnv
class HalfCheetahBlocksEnv(MujocoEnv):
#<<<<<<< ys
def __init__(self, task='damping',
max_episode_steps=200,
reset_every_episode=False,
ctrl_cost_weight=0.5,
contact_cost_weight=5e-4,
healthy_reward=1.0,
terminate_when_unhealthy=False,
healthy_z_range=(0.3, 0.85),
                 contact_force_range=(-1.0, 1.0),
                 frame_skip=1,
                 ):
# Serializable.quick_init(self, locals())
#=======
#
# def __init__(self, task='damping', max_episode_steps=200, reset_every_episode=False, frame_skip=1):
# #Serializable.quick_init(self, locals())
#>>>>>>> master
self.reset_every_episode = reset_every_episode
self.first = True
print("frame_skip :", frame_skip)
MujocoEnv.__init__(self, os.path.join(os.path.abspath(os.path.dirname(__file__)),
"assets", "half_cheetah_blocks.xml"), frame_skip=frame_skip)
task = None if task == 'None' else task
self.cripple_mask = np.ones(self.action_space.shape)
self._init_geom_rgba = self.model.geom_rgba.copy()
self._init_geom_contype = self.model.geom_contype.copy()
self._init_geom_size = self.model.geom_size.copy()
self._init_geom_pos = self.model.geom_pos.copy()
self.dt = self.model.opt.timestep
assert task in [None, 'damping']
self.task = task
self._max_episode_steps = max_episode_steps
#self.visualise_behaviour = True
#<<<<<<< ys
#reward
self._ctrl_cost_weight = ctrl_cost_weight
self._contact_cost_weight = contact_cost_weight
self._healthy_reward = healthy_reward
self._terminate_when_unhealthy = terminate_when_unhealthy
self._healthy_z_range = healthy_z_range
self._contact_force_range = contact_force_range
@property
def healthy_reward(self):
return float(
self.is_healthy
or self._terminate_when_unhealthy) \
* self._healthy_reward
@property
def is_healthy(self):
z = self.get_body_com("torso")[2].copy()
#print("z: {}".format(z))
min_z, max_z = self._healthy_z_range
is_healthy = (min_z <= z <= max_z)
return is_healthy
@property
def contact_forces(self):
raw_contact_forces = self.sim.data.cfrc_ext
min_value, max_value = self._contact_force_range
contact_forces = np.clip(raw_contact_forces, min_value, max_value)
return contact_forces
@property
def contact_cost(self):
contact_cost = self._contact_cost_weight * np.sum(
np.square(self.contact_forces))
return contact_cost
@property
def done(self):
done = (not self.is_healthy if self._terminate_when_unhealthy else False)
return done
#=======
#>>>>>>> master
def get_current_obs(self):
return np.concatenate([
self.data.qpos.flatten()[9:],
self.data.qvel.flat[8:],
self.get_body_com("torso").flat,
])
def get_task(self):
return 1 # dummy
def step(self, action):
xy_position_before = self.get_body_com("torso")[:2].copy()
self.forward_dynamics(action)
xy_position_after = self.get_body_com("torso")[:2].copy()
next_obs = self.get_current_obs()
xy_velocity = (xy_position_after - xy_position_before) / self.dt
x_velocity, y_velocity = xy_velocity
#<<<<<<< ys
# ctrl_cost = 1e-1 * 0.5 * np.sum(np.square(action))
ctrl_cost = 1e-1 * self._ctrl_cost_weight * np.sum(np.square(action))
contact_cost = self.contact_cost
costs = ctrl_cost + contact_cost
#forward_reward = x_velocity
forward_reward = self.get_body_comvel("torso")[0]
healthy_reward= self.healthy_reward
reward = forward_reward - costs + healthy_reward
done = self.done
#=======
# ctrl_cost = 1e-1 * 0.5 * np.sum(np.square(action))
# #forward_reward = x_velocity
# forward_reward = self.get_body_comvel("torso")[0]
# reward = forward_reward - ctrl_cost
# done = False
#>>>>>>> master
info = {
'reward_forward': forward_reward,
'reward_ctrl': -ctrl_cost,
'healthy_reward': healthy_reward,
'x_postiion': xy_position_after[0],
'y_position': xy_position_after[1],
'distance_from_origin': np.linalg.norm(xy_position_after, ord=2),
'x_velocity': x_velocity,
'y_velocity': y_velocity,
'forward_reward': forward_reward,
'task': 1, ## dummy ## TODO
}
return next_obs, reward, done, info
def reward(self, obs, action, next_obs):
assert obs.ndim == 2
assert obs.shape == next_obs.shape
assert obs.shape[0] == action.shape[0]
ctrl_cost = 1e-1 * 0.5 * np.sum(np.square(action), axis=1)
forward_reward = (next_obs[:, -3] - obs[:, -3]) / self.dt
reward = forward_reward - ctrl_cost
return reward
def reset_mujoco(self, init_state=None):
super(HalfCheetahBlocksEnv, self).reset_mujoco(init_state=init_state)
if self.reset_every_episode and not self.first:
self.reset_task()
if self.first:
self.first = False
def reset_task(self, value=None):
if self.task == 'damping':
damping = self.model.dof_damping.copy()
damping[:8] = value if value is not None else np.random.uniform(0, 10, size=8)
for idx in range(8):
self.model.dof_damping[idx] = damping[idx]
elif self.task is None:
pass
else:
raise NotImplementedError
self.sim.forward()
# def log_diagnostics(self, paths):
# progs = [
# path["observations"][-1][-3] - path["observations"][0][-3]
# for path in paths
# ]
# logger.logkv('AverageForwardProgress', np.mean(progs))
# logger.logkv('MaxForwardProgress', np.max(progs))
# logger.logkv('MinForwardProgress', np.min(progs))
# logger.logkv('StdForwardProgress', np.std(progs))
if __name__ == '__main__':
env = HalfCheetahBlocksEnv(task='damping')
while True:
env.reset()
env.reset_task()
for _ in range(1000):
env.step(env.action_space.sample())
env.render()
```
#### File: varibad/environments/parallel_envs.py
```python
import os
import gym
import numpy as np
import torch
from environments.wrappers import TimeLimitMask
from environments.wrappers import VariBadWrapper
from utils import bench
from utils.common.vec_env import VecEnvWrapper
from utils.common.vec_env.dummy_vec_env import DummyVecEnv
from utils.common.vec_env.subproc_vec_env import SubprocVecEnv
from utils.common.vec_env.vec_normalize import VecNormalize as VecNormalize_
def make_env(env_id, seed, rank, log_dir, allow_early_resets,
episodes_per_task, **kwargs):
def _thunk():
env = gym.make(env_id, **kwargs)
if seed is not None:
env.seed(seed + rank)
if str(env.__class__.__name__).find('TimeLimit') >= 0:
env = TimeLimitMask(env)
env = VariBadWrapper(env=env, episodes_per_task=episodes_per_task)
if log_dir is not None:
env = bench.Monitor(env, os.path.join(log_dir, str(rank)),
allow_early_resets=allow_early_resets)
return env
return _thunk
def make_vec_envs(env_name, seed, num_processes, gamma, log_dir,
device, allow_early_resets, episodes_per_task,
obs_rms, ret_rms, rank_offset=0,
**kwargs):
"""
:param obs_rms: running mean and std for observations
:param ret_rms: running return and std for rewards
"""
envs = [make_env(env_id=env_name, seed=seed, rank=rank_offset + i, log_dir=log_dir,
allow_early_resets=allow_early_resets,
episodes_per_task=episodes_per_task, **kwargs)
for i in range(num_processes)]
if len(envs) > 1:
envs = SubprocVecEnv(envs)
else:
envs = DummyVecEnv(envs)
if len(envs.observation_space.shape) == 1:
if gamma is None:
envs = VecNormalize(envs, obs_rms=obs_rms, ret_rms=ret_rms, ret=False)
else:
envs = VecNormalize(envs, obs_rms=obs_rms, ret_rms=ret_rms, gamma=gamma)
envs = VecPyTorch(envs, device)
return envs
class VecPyTorch(VecEnvWrapper):
def __init__(self, venv, device):
"""Return only every `skip`-th frame"""
super(VecPyTorch, self).__init__(venv)
self.device = device
# TODO: Fix data types
def reset_mdp(self, index=None):
obs = self.venv.reset_mdp(index=index)
if isinstance(obs, list):
obs = [torch.from_numpy(o).float().to(self.device) for o in obs]
else:
obs = torch.from_numpy(obs).float().to(self.device)
return obs
def reset(self, index=None, task=None):
if task is not None:
assert isinstance(task, list)
obs = self.venv.reset(index=index, task=task)
if isinstance(obs, list):
obs = [torch.from_numpy(o).float().to(self.device) for o in obs]
else:
obs = torch.from_numpy(obs).float().to(self.device)
return obs
def step_async(self, actions):
actions = actions.squeeze(1).cpu().numpy()
self.venv.step_async(actions)
def step_wait(self):
obs, reward, done, info = self.venv.step_wait()
if isinstance(obs, list): # raw + normalised
obs = [torch.from_numpy(o).float().to(self.device) for o in obs]
else:
obs = torch.from_numpy(obs).float().to(self.device)
if isinstance(reward, list): # raw + normalised
reward = [torch.from_numpy(r).unsqueeze(dim=1).float().to(self.device) for r in reward]
else:
reward = torch.from_numpy(reward).unsqueeze(dim=1).float().to(self.device)
return obs, reward, done, info
def __getattr__(self, attr):
"""
If env does not have the attribute then call the attribute in the wrapped_env
"""
if attr in ['num_states', '_max_episode_steps']:
return self.unwrapped.get_env_attr(attr)
try:
orig_attr = self.__getattribute__(attr)
except AttributeError:
orig_attr = self.unwrapped.__getattribute__(attr)
if callable(orig_attr):
def hooked(*args, **kwargs):
result = orig_attr(*args, **kwargs)
return result
return hooked
else:
return orig_attr
class VecNormalize(VecNormalize_):
def __init__(self, envs, obs_rms, ret_rms, *args, **kwargs):
super(VecNormalize, self).__init__(envs, obs_rms=obs_rms, ret_rms=ret_rms, *args, **kwargs)
self.training = True
def _obfilt(self, obs):
if self.training:
self.obs_rms.update(obs)
obs_norm = np.clip((obs - self.obs_rms.mean) / np.sqrt(self.obs_rms.var + self.epsilon), -self.clipobs,
self.clipobs)
return [obs, obs_norm]
def train(self):
self.training = True
def eval(self):
self.training = False
def __getattr__(self, attr):
"""
If env does not have the attribute then call the attribute in the wrapped_env
"""
try:
orig_attr = self.__getattribute__(attr)
except AttributeError:
orig_attr = self.unwrapped.__getattribute__(attr)
if callable(orig_attr):
def hooked(*args, **kwargs):
result = orig_attr(*args, **kwargs)
return result
return hooked
else:
return orig_attr
```
#### File: learning_to_adapt/dynamics/meta_mlp_dynamics.py
```python
from learning_to_adapt.dynamics.core.layers import MLP
from collections import OrderedDict
import tensorflow as tf
import numpy as np
from learning_to_adapt.utils.serializable import Serializable
from learning_to_adapt.utils import tensor_utils
from learning_to_adapt.logger import logger
import time
class MetaMLPDynamicsModel(Serializable):
"""
    Class for MLP continuous dynamics model
"""
_activations = {
None: None,
"relu": tf.nn.relu,
"tanh": tf.tanh,
"sigmoid": tf.sigmoid,
"softmax": tf.nn.softmax,
"swish": lambda x: x * tf.sigmoid(x)
}
def __init__(self,
name,
env,
hidden_sizes=(512, 512),
meta_batch_size=10,
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=None,
batch_size=500,
learning_rate=0.001,
inner_learning_rate=0.1,
normalize_input=True,
optimizer=tf.train.AdamOptimizer,
valid_split_ratio=0.2,
rolling_average_persitency=0.99,
):
Serializable.quick_init(self, locals())
self.normalization = None
self.normalize_input = normalize_input
self.next_batch = None
self.meta_batch_size = meta_batch_size
self.valid_split_ratio = valid_split_ratio
self.rolling_average_persitency = rolling_average_persitency
self.batch_size = batch_size
self.learning_rate = learning_rate
self.inner_learning_rate = inner_learning_rate
self.name = name
self._dataset_train = None
self._dataset_test = None
self._prev_params = None
self._adapted_param_values = None
# determine dimensionality of state and action space
self.obs_space_dims = obs_space_dims = env.observation_space.shape[0]
self.action_space_dims = action_space_dims = env.action_space.shape[0]
hidden_nonlinearity = self._activations[hidden_nonlinearity]
output_nonlinearity = self._activations[output_nonlinearity]
""" ------------------ Pre-Update Graph + Adaptation ----------------------- """
with tf.variable_scope(name):
# Placeholders
self.obs_ph = tf.placeholder(tf.float32, shape=(None, obs_space_dims))
self.act_ph = tf.placeholder(tf.float32, shape=(None, action_space_dims))
self.delta_ph = tf.placeholder(tf.float32, shape=(None, obs_space_dims))
# Concatenate action and observation --> NN input
self.nn_input = tf.concat([self.obs_ph, self.act_ph], axis=1)
# Create MLP
mlp = MLP(name,
output_dim=obs_space_dims,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=output_nonlinearity,
input_var=self.nn_input,
input_dim=obs_space_dims+action_space_dims)
self.delta_pred = mlp.output_var # shape: (batch_size, ndim_obs, n_models)
self.loss = tf.reduce_mean(tf.square(self.delta_ph - self.delta_pred))
self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
self.adaptation_sym = tf.train.GradientDescentOptimizer(self.inner_learning_rate).minimize(self.loss)
# Tensor_utils
self.f_delta_pred = tensor_utils.compile_function([self.obs_ph, self.act_ph], self.delta_pred)
""" --------------------------- Meta-training Graph ---------------------------------- """
nn_input_per_task = tf.split(self.nn_input, self.meta_batch_size, axis=0)
delta_per_task = tf.split(self.delta_ph, self.meta_batch_size, axis=0)
pre_input_per_task, post_input_per_task = zip(*[tf.split(nn_input, 2, axis=0) for nn_input in nn_input_per_task])
pre_delta_per_task, post_delta_per_task = zip(*[tf.split(delta, 2, axis=0) for delta in delta_per_task])
pre_losses = []
post_losses = []
self._adapted_params = []
for idx in range(self.meta_batch_size):
with tf.variable_scope(name + '/pre_model_%d' % idx, reuse=tf.AUTO_REUSE):
pre_mlp = MLP(name,
output_dim=obs_space_dims,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=output_nonlinearity,
input_var=pre_input_per_task[idx],
input_dim=obs_space_dims + action_space_dims,
params=mlp.get_params())
pre_delta_pred = pre_mlp.output_var
pre_loss = tf.reduce_mean(tf.square(pre_delta_per_task[idx] - pre_delta_pred))
adapted_params = self._adapt_sym(pre_loss, pre_mlp.get_params())
self._adapted_params.append(adapted_params)
with tf.variable_scope(name + '/post_model_%d' % idx, reuse=tf.AUTO_REUSE):
post_mlp = MLP(name,
output_dim=obs_space_dims,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=output_nonlinearity,
input_var=post_input_per_task[idx],
params=adapted_params,
input_dim=obs_space_dims + action_space_dims)
post_delta_pred = post_mlp.output_var
post_loss = tf.reduce_mean(tf.square(post_delta_per_task[idx] - post_delta_pred))
pre_losses.append(pre_loss)
post_losses.append(post_loss)
self.pre_loss = tf.reduce_mean(pre_losses)
self.post_loss = tf.reduce_mean(post_losses)
self.train_op = optimizer(self.learning_rate).minimize(self.post_loss)
""" --------------------------- Post-update Inference Graph --------------------------- """
with tf.variable_scope(name + '_ph_graph'):
self.post_update_delta = []
self.network_phs_meta_batch = []
nn_input_per_task = tf.split(self.nn_input, self.meta_batch_size, axis=0)
for idx in range(meta_batch_size):
with tf.variable_scope('task_%i' % idx):
network_phs = self._create_placeholders_for_vars(mlp.get_params())
self.network_phs_meta_batch.append(network_phs)
mlp_meta_batch = MLP(name,
output_dim=obs_space_dims,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=output_nonlinearity,
params=network_phs,
input_var=nn_input_per_task[idx],
input_dim=obs_space_dims + action_space_dims,
)
self.post_update_delta.append(mlp_meta_batch.output_var)
self._networks = [mlp]
def fit(self, obs, act, obs_next, epochs=1000, compute_normalization=True,
valid_split_ratio=None, rolling_average_persitency=None, verbose=False, log_tabular=False):
assert obs.ndim == 3 and obs.shape[2] == self.obs_space_dims
assert obs_next.ndim == 3 and obs_next.shape[2] == self.obs_space_dims
assert act.ndim == 3 and act.shape[2] == self.action_space_dims
if valid_split_ratio is None: valid_split_ratio = self.valid_split_ratio
if rolling_average_persitency is None: rolling_average_persitency = self.rolling_average_persitency
assert 1 > valid_split_ratio >= 0
sess = tf.get_default_session()
if (self.normalization is None or compute_normalization) and self.normalize_input:
self.compute_normalization(obs, act, obs_next)
if self.normalize_input:
# Normalize data
obs, act, delta = self._normalize_data(obs, act, obs_next)
assert obs.ndim == act.ndim == obs_next.ndim == 3
else:
delta = obs_next - obs
# Split into valid and test set
obs_train, act_train, delta_train, obs_test, act_test, delta_test = train_test_split(obs, act, delta,
test_split_ratio=valid_split_ratio)
if self._dataset_test is None:
self._dataset_test = dict(obs=obs_test, act=act_test, delta=delta_test)
self._dataset_train = dict(obs=obs_train, act=act_train, delta=delta_train)
else:
self._dataset_test['obs'] = np.concatenate([self._dataset_test['obs'], obs_test])
self._dataset_test['act'] = np.concatenate([self._dataset_test['act'], act_test])
self._dataset_test['delta'] = np.concatenate([self._dataset_test['delta'], delta_test])
self._dataset_train['obs'] = np.concatenate([self._dataset_train['obs'], obs_train])
self._dataset_train['act'] = np.concatenate([self._dataset_train['act'], act_train])
self._dataset_train['delta'] = np.concatenate([self._dataset_train['delta'], delta_train])
valid_loss_rolling_average = None
epoch_times = []
""" ------- Looping over training epochs ------- """
num_steps_per_epoch = max(int(np.prod(self._dataset_train['obs'].shape[:2])
/ (self.meta_batch_size * self.batch_size * 2)), 1)
num_steps_test = max(int(np.prod(self._dataset_test['obs'].shape[:2])
/ (self.meta_batch_size * self.batch_size * 2)), 1)
for epoch in range(epochs):
# preparations for recording training stats
pre_batch_losses = []
post_batch_losses = []
t0 = time.time()
""" ------- Looping through the shuffled and batched dataset for one epoch -------"""
for _ in range(num_steps_per_epoch):
obs_batch, act_batch, delta_batch = self._get_batch(train=True)
pre_batch_loss, post_batch_loss, _ = sess.run([self.pre_loss, self.post_loss, self.train_op],
feed_dict={self.obs_ph: obs_batch,
self.act_ph: act_batch,
self.delta_ph: delta_batch})
pre_batch_losses.append(pre_batch_loss)
post_batch_losses.append(post_batch_loss)
valid_losses = []
for _ in range(num_steps_test):
obs_test, act_test, delta_test = self._get_batch(train=False)
# compute validation loss
feed_dict = {self.obs_ph: obs_test,
self.act_ph: act_test,
self.delta_ph: delta_test}
valid_loss = sess.run(self.loss, feed_dict=feed_dict)
valid_losses.append(valid_loss)
valid_loss = np.mean(valid_losses)
if valid_loss_rolling_average is None:
valid_loss_rolling_average = 1.5 * valid_loss # set initial rolling to a higher value avoid too early stopping
valid_loss_rolling_average_prev = 2 * valid_loss
if valid_loss < 0:
valid_loss_rolling_average = valid_loss/1.5 # set initial rolling to a higher value avoid too early stopping
valid_loss_rolling_average_prev = valid_loss/2
valid_loss_rolling_average = rolling_average_persitency*valid_loss_rolling_average \
+ (1.0-rolling_average_persitency)*valid_loss
epoch_times.append(time.time() - t0)
if verbose:
logger.log("Training DynamicsModel - finished epoch %i - "
"train loss: %.4f valid loss: %.4f valid_loss_mov_avg: %.4f epoch time: %.2f"
% (epoch, np.mean(post_batch_losses), valid_loss, valid_loss_rolling_average,
time.time() - t0))
if valid_loss_rolling_average_prev < valid_loss_rolling_average or epoch == epochs - 1:
                logger.log('Stopping training of the dynamics model since its valid_loss_rolling_average stopped decreasing')
break
valid_loss_rolling_average_prev = valid_loss_rolling_average
""" ------- Tabular Logging ------- """
if log_tabular:
logger.logkv('AvgModelEpochTime', np.mean(epoch_times))
logger.logkv('Post-Loss', np.mean(post_batch_losses))
logger.logkv('Pre-Loss', np.mean(pre_batch_losses))
logger.logkv('Epochs', epoch)
def predict(self, obs, act):
assert obs.shape[0] == act.shape[0]
assert obs.ndim == 2 and obs.shape[1] == self.obs_space_dims
assert act.ndim == 2 and act.shape[1] == self.action_space_dims
obs_original = obs
if self.normalize_input:
obs, act = self._normalize_data(obs, act)
delta = np.array(self._predict(obs, act))
delta = denormalize(delta, self.normalization['delta'][0], self.normalization['delta'][1])
else:
delta = np.array(self._predict(obs, act))
assert delta.ndim == 2
pred_obs = obs_original + delta
return pred_obs
def _predict(self, obs, act):
if self._adapted_param_values is not None:
sess = tf.get_default_session()
obs, act = self._pad_inputs(obs, act)
feed_dict = {self.obs_ph: obs, self.act_ph: act}
feed_dict.update(self.network_params_feed_dict)
delta = sess.run(self.post_update_delta[:self._num_adapted_models], feed_dict=feed_dict)
delta = np.concatenate(delta, axis=0)
else:
delta = self.f_delta_pred(obs, act)
return delta
def _pad_inputs(self, obs, act, obs_next=None):
if self._num_adapted_models < self.meta_batch_size:
pad = int(obs.shape[0] / self._num_adapted_models * (self.meta_batch_size - self._num_adapted_models))
obs = np.concatenate([obs, np.zeros((pad,) + obs.shape[1:])], axis=0)
act = np.concatenate([act, np.zeros((pad,) + act.shape[1:])], axis=0)
if obs_next is not None:
obs_next = np.concatenate([obs_next, np.zeros((pad,) + obs_next.shape[1:])], axis=0)
if obs_next is not None:
return obs, act, obs_next
else:
return obs, act
def adapt(self, obs, act, obs_next):
self._num_adapted_models = len(obs)
assert len(obs) == len(act) == len(obs_next)
obs = np.concatenate([np.concatenate([ob, np.zeros_like(ob)], axis=0) for ob in obs], axis=0)
act = np.concatenate([np.concatenate([a, np.zeros_like(a)], axis=0) for a in act], axis=0)
obs_next = np.concatenate([np.concatenate([ob, np.zeros_like(ob)], axis=0) for ob in obs_next], axis=0)
obs, act, obs_next = self._pad_inputs(obs, act, obs_next)
assert obs.shape[0] == act.shape[0] == obs_next.shape[0]
assert obs.ndim == 2 and obs.shape[1] == self.obs_space_dims
assert act.ndim == 2 and act.shape[1] == self.action_space_dims
assert obs_next.ndim == 2 and obs_next.shape[1] == self.obs_space_dims
if self.normalize_input:
# Normalize data
obs, act, delta = self._normalize_data(obs, act, obs_next)
assert obs.ndim == act.ndim == obs_next.ndim == 2
else:
delta = obs_next - obs
self._prev_params = [nn.get_param_values() for nn in self._networks]
sess = tf.get_default_session()
self._adapted_param_values = sess.run(self._adapted_params[:self._num_adapted_models],
feed_dict={self.obs_ph: obs, self.act_ph: act, self.delta_ph: delta})
def switch_to_pre_adapt(self):
if self._prev_params is not None:
[nn.set_params(params) for nn, params in zip(self._networks, self._prev_params)]
self._prev_params = None
self._adapted_param_values = None
def _get_batch(self, train=True):
if train:
num_paths, len_path = self._dataset_train['obs'].shape[:2]
idx_path = np.random.randint(0, num_paths, size=self.meta_batch_size)
idx_batch = np.random.randint(self.batch_size, len_path - self.batch_size, size=self.meta_batch_size)
obs_batch = np.concatenate([self._dataset_train['obs'][ip,
ib - self.batch_size:ib + self.batch_size, :]
for ip, ib in zip(idx_path, idx_batch)], axis=0)
act_batch = np.concatenate([self._dataset_train['act'][ip,
ib - self.batch_size:ib + self.batch_size, :]
for ip, ib in zip(idx_path, idx_batch)], axis=0)
delta_batch = np.concatenate([self._dataset_train['delta'][ip,
ib - self.batch_size:ib + self.batch_size, :]
for ip, ib in zip(idx_path, idx_batch)], axis=0)
else:
num_paths, len_path = self._dataset_test['obs'].shape[:2]
idx_path = np.random.randint(0, num_paths, size=self.meta_batch_size)
idx_batch = np.random.randint(self.batch_size, len_path - self.batch_size, size=self.meta_batch_size)
obs_batch = np.concatenate([self._dataset_test['obs'][ip,
ib - self.batch_size:ib + self.batch_size, :]
for ip, ib in zip(idx_path, idx_batch)], axis=0)
act_batch = np.concatenate([self._dataset_test['act'][ip,
ib - self.batch_size:ib + self.batch_size, :]
for ip, ib in zip(idx_path, idx_batch)], axis=0)
delta_batch = np.concatenate([self._dataset_test['delta'][ip,
ib - self.batch_size:ib + self.batch_size, :]
for ip, ib in zip(idx_path, idx_batch)], axis=0)
return obs_batch, act_batch, delta_batch
def _normalize_data(self, obs, act, obs_next=None):
obs_normalized = normalize(obs, self.normalization['obs'][0], self.normalization['obs'][1])
actions_normalized = normalize(act, self.normalization['act'][0], self.normalization['act'][1])
if obs_next is not None:
delta = obs_next - obs
deltas_normalized = normalize(delta, self.normalization['delta'][0], self.normalization['delta'][1])
return obs_normalized, actions_normalized, deltas_normalized
else:
return obs_normalized, actions_normalized
def compute_normalization(self, obs, act, obs_next):
assert obs.shape[0] == obs_next.shape[0] == act.shape[0]
assert obs.shape[1] == obs_next.shape[1] == act.shape[1]
delta = obs_next - obs
assert delta.ndim == 3 and delta.shape[2] == obs_next.shape[2] == obs.shape[2]
# store means and std in dict
self.normalization = OrderedDict()
self.normalization['obs'] = (np.mean(obs, axis=(0, 1)), np.std(obs, axis=(0, 1)))
self.normalization['delta'] = (np.mean(delta, axis=(0, 1)), np.std(delta, axis=(0, 1)))
self.normalization['act'] = (np.mean(act, axis=(0, 1)), np.std(act, axis=(0, 1)))
def _adapt_sym(self, loss, params_var):
update_param_keys = list(params_var.keys())
grads = tf.gradients(loss, [params_var[key] for key in update_param_keys])
gradients = dict(zip(update_param_keys, grads))
# Gradient descent
adapted_policy_params = [params_var[key] - tf.multiply(self.inner_learning_rate, gradients[key])
for key in update_param_keys]
adapted_policy_params_dict = OrderedDict(zip(update_param_keys, adapted_policy_params))
return adapted_policy_params_dict
def _create_placeholders_for_vars(self, vars):
placeholders = OrderedDict()
for key, var in vars.items():
placeholders[key] = tf.placeholder(tf.float32, shape=var.shape, name=key + '_ph')
return OrderedDict(placeholders)
@property
def network_params_feed_dict(self):
return dict(list((self.network_phs_meta_batch[i][key], self._adapted_param_values[i][key])
for key in self._adapted_param_values[0].keys() for i in range(self._num_adapted_models)))
def __getstate__(self):
state = dict()
state['init_args'] = Serializable.__getstate__(self)
state['normalization'] = self.normalization
state['networks'] = [nn.__getstate__() for nn in self._networks]
return state
def __setstate__(self, state):
Serializable.__setstate__(self, state['init_args'])
self.normalization = state['normalization']
for i in range(len(self._networks)):
self._networks[i].__setstate__(state['networks'][i])
def normalize(data_array, mean, std):
return (data_array - mean) / (std + 1e-10)
def denormalize(data_array, mean, std):
return data_array * (std + 1e-10) + mean
def train_test_split(obs, act, delta, test_split_ratio=0.2):
assert obs.shape[0] == act.shape[0] == delta.shape[0]
dataset_size = obs.shape[0]
indices = np.arange(dataset_size)
np.random.shuffle(indices)
split_idx = int(dataset_size * (1-test_split_ratio))
idx_train = indices[:split_idx]
idx_test = indices[split_idx:]
assert len(idx_train) + len(idx_test) == dataset_size
return obs[idx_train, :], act[idx_train, :], delta[idx_train, :], \
obs[idx_test, :], act[idx_test, :], delta[idx_test, :]
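
# Minimal usage sketch (illustrative only; all shapes and sizes below are assumed,
# not taken from the original training pipeline): split a small random dataset and
# round-trip it through normalize/denormalize.
if __name__ == "__main__":
    demo_obs = np.random.randn(100, 17)    # 100 transitions, obs_dim=17 (assumed)
    demo_act = np.random.randn(100, 6)     # act_dim=6 (assumed)
    demo_delta = np.random.randn(100, 17)
    obs_tr, act_tr, delta_tr, obs_te, act_te, delta_te = \
        train_test_split(demo_obs, demo_act, demo_delta, test_split_ratio=0.2)
    mean, std = obs_tr.mean(axis=0), obs_tr.std(axis=0)
    obs_norm = normalize(obs_tr, mean, std)
    # denormalize should invert normalize up to numerical precision
    assert np.allclose(denormalize(obs_norm, mean, std), obs_tr, atol=1e-6)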
```
|
{
"source": "jeonggyukim/pyathena",
"score": 3
}
|
#### File: pyathena/feedback_test/profile_1d.py
```python
import numpy as np
from scipy import stats
from inspect import getsource
from ..load_sim import LoadSim
class Profile1D:
@LoadSim.Decorators.check_pickle
def get_profile1d(self, num, fields_y, field_x='r', bins=None, statistic='mean',
prefix='profile1d', savdir=None, force_override=False):
"""
Function to calculate 1D profile(s) and pickle using
scipy.stats.binned_statistic
Parameters
----------
num : int
vtk output number
fields_y : (list of) str
Fields to be profiled
field_x : str
Field for binning
bins : int or sequence of scalars, optional
If bins is an int, it defines the number of equal-width bins in the
given range. If bins is a sequence, it defines the bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
Values in x that are smaller than lowest bin edge are assigned to
bin number 0, values beyond the highest bin are assigned to
bins[-1]. If the bin edges are specified, the number of bins will
be, (nx = len(bins)-1). The default value is np.linspace(x.min(),
x.max(), 50)
statistic : (list of) string or callable
The statistic to compute (default is 'mean'). The following
statistics are available:
'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
'std' : compute the standard deviation within each bin. This is
implicitly calculated with ddof=0.
'median' : compute the median of values for points within each bin.
Empty bins will be represented by NaN.
'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. values array is not
referenced.
'sum' : compute the sum of values for points within each bin. This
is identical to a weighted histogram.
'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
function : a user-defined function which takes a 1D array of values,
and outputs a single numerical statistic. This function will be
called on the values in each bin. Empty bins will be represented by
function([]), or NaN if this returns an error.
savdir : str, optional
Directory to pickle results
prefix : str
Prefix for python pickle file
force_override : bool
Flag to force read of starpar_vtk file even when pickle exists
"""
fields_y = np.atleast_1d(fields_y)
statistic = np.atleast_1d(statistic)
ds = self.load_vtk(num)
ddy = ds.get_field(fields_y)
ddx = ds.get_field(field_x)
x1d = ddx[field_x].data.flatten()
if bins is None:
bins = np.linspace(x1d.min(), x1d.max(), 50)
res = dict()
res[field_x] = dict()
for y in fields_y:
res[y] = dict()
get_lambda_name = lambda l: getsource(l).split('=')[0].strip()
# Compute statistics
for y in fields_y:
y1d = ddy[y].data.flatten()
for st in statistic:
# Get name of statistic
if callable(st):
if st.__name__ == "<lambda>":
name = get_lambda_name(st)
else:
name = st.__name__
else:
name = st
stat_val, bine, _ = stats.binned_statistic(x1d, y1d, st, bins=bins)
# Store result
res[y][name] = stat_val
# bin edges
res[field_x]['bine'] = bine
# bin centers
res[field_x]['binc'] = 0.5*(bine[1:] + bine[:-1])
# Time of the snapshot
res['time_code'] = ds.domain['time']
res['time'] = ds.domain['time']*self.u.Myr
return res
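
# Standalone sketch of the binned-statistic call used above (made-up data, not
# tied to any simulation output): bin y-values by x and take the per-bin mean.
def _example_profile1d():
    x = np.random.uniform(0.0, 10.0, size=1000)
    y = x**2 + np.random.randn(1000)
    bins = np.linspace(x.min(), x.max(), 50)
    mean_y, bine, _ = stats.binned_statistic(x, y, 'mean', bins=bins)
    binc = 0.5*(bine[1:] + bine[:-1])   # bin centers, as in get_profile1d
    return binc, mean_y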
```
#### File: pyathena/microphysics/rec_rate.py
```python
import os
import os.path as osp
import numpy as np
import matplotlib.pyplot as plt
import pathlib
class RecRate(object):
"""Class to compute Badnell (radiative/dielectronic) recombination rates,
Draine (2011)'s recombination rates
"""
def __init__(self):
# read data
self._read_data()
def _read_data(self):
basedir = osp.join(pathlib.Path(__file__).parent.absolute(),
'../../data/microphysics')
self.fname_dr_C = os.path.join(basedir, 'badnell_dr_C.dat')
self.fname_dr_E = os.path.join(basedir, 'badnell_dr_E.dat')
self.fname_rr = os.path.join(basedir, 'badnell_rr.dat')
# Read dielectronic recombination rate data
with open(self.fname_dr_C, 'r') as fp:
lines1 = fp.readlines()
with open(self.fname_dr_E, 'r') as fp:
lines2 = fp.readlines()
i0 = 4
nline = len(lines1) - i0
if len(lines1) != len(lines2):
raise ValueError('Check data file (lines1, lines2) = {0:d}, {1:d}'.format(
len(lines1), len(lines2)))
self.Zd = np.zeros(nline, dtype='uint8')
self.Nd = np.zeros(nline, dtype='uint8')
self.Md = np.zeros(nline, dtype='uint8')
self.Wd = np.zeros(nline, dtype='uint8')
self.Cd = np.zeros((nline, 9))
self.Ed = np.zeros((nline, 9))
self.nd = np.zeros(nline, dtype='uint8')
for i, (l1, l2) in enumerate(zip(lines1[i0:i0 + nline],
lines2[i0:i0 + nline])):
l1 = l1.split()
l2 = l2.split()
# Make sure that Z, N, M, W all match
if int(l1[0]) == int(l2[0]) and \
int(l1[1]) == int(l2[1]) and \
int(l1[2]) == int(l2[2]) and \
int(l1[3]) == int(l2[3]):
self.Zd[i] = int(l1[0])
self.Nd[i] = int(l1[1])
self.Md[i] = int(l1[2])
self.Wd[i] = int(l1[3])
for j, l1_ in enumerate(l1[4:]):
self.Cd[i, j] = float(l1_)
for j, l2_ in enumerate(l2[4:]):
self.Ed[i, j] = float(l2_)
self.nd[i] = j + 1
else:
print("Columns do not match!")
raise
del lines1, lines2
# Read radiative recombination rate data
with open(self.fname_rr, 'r') as fp:
lines = fp.readlines()
i0 = 4
nline = len(lines) - i0
self.Zr = np.zeros(nline, dtype='uint8')
self.Nr = np.zeros(nline, dtype='uint8')
self.Mr = np.zeros(nline, dtype='uint8')
self.Wr = np.zeros(nline, dtype='uint8')
self.Ar = np.zeros(nline)
self.Br = np.zeros(nline)
self.T0r = np.zeros(nline)
self.T1r = np.zeros(nline)
self.Cr = np.zeros(nline)
self.T2r = np.zeros(nline)
# Use modified B for low-charge ions
self.modr = np.zeros(nline, dtype='bool')
for i, l1 in enumerate(lines[i0:i0 + nline]):
l1 = l1.split()
self.Zr[i] = int(l1[0])
self.Nr[i] = int(l1[1])
self.Mr[i] = int(l1[2])
self.Wr[i] = int(l1[3])
self.Ar[i] = float(l1[4])
self.Br[i] = float(l1[5])
self.T0r[i] = float(l1[6])
self.T1r[i] = float(l1[7])
try:
self.Cr[i] = float(l1[8])
self.T2r[i] = float(l1[9])
self.modr[i] = True
except IndexError:
self.modr[i] = False
def get_rr_rate(self, Z, N, T, M=1):
"""
Calculate radiative recombination rate coefficient
Parameters
----------
Z : int
Nuclear Charge
N : int
Number of electrons of the initial target ion
T : array of floats
Temperature [K]
M : int
Initial metastable levels (M=1 for the ground state) of the ground
and metastable terms. The default value is 1.
Returns
-------
rr: array of floats
Radiative recombination coefficients [cm^3 s^-1]
"""
c1 = self.Zr == Z
c2 = self.Nr == N
c3 = self.Mr == M
idx = np.where(c1 & c2 & c3)
i = idx[0][0]
sqrtTT0 = np.sqrt(T/self.T0r[i])
sqrtTT1 = np.sqrt(T/self.T1r[i])
if self.modr[i]:
B = self.Br[i] + self.Cr[i]*np.exp(-self.T2r[i]/T)
else:
B = self.Br[i]
rr = self.Ar[i] / (sqrtTT0 * (1.0 + sqrtTT0)**(1.0 - B) * \
(1.0 + sqrtTT1)**(1.0 + B))
return rr
def get_dr_rate(self, Z, N, T, M=1):
"""
Calculate dielectronic recombination rate coefficient
Parameters
----------
Z : int
Nuclear Charge
N : int
Number of electrons of the initial target ion (before recombination)
T : array of floats
Temperature [K]
M : int
Initial metastable levels (M=1 for the ground state) of the ground
and metastable terms. The default value is 1.
Returns
-------
dr : array of floats
Dielectronic recombination coefficients [cm^3 s^-1]
"""
c1 = self.Zd == Z
c2 = self.Nd == N
c3 = self.Md == M
idx = np.where(c1 & c2 & c3)
i = idx[0][0]
dr = 0.0
for m in range(self.nd[i]):
dr += self.Cd[i, m]*np.exp(-self.Ed[i, m]/T)
dr *= T**(-1.5)
return dr
def get_rec_rate(self, Z, N, T, M=1, kind='badnell'):
"""
Calculate radiative + dielectronic recombination rate coefficient
Parameters
----------
Z : int
Nuclear Charge
N : int
Number of electrons of the initial target ion (before recombination)
T : array of floats
Temperature [K]
M : int
Initial metastable levels (M=1 for the ground state) of the ground
and metastable terms. The default value is 1.
kind : str
Set to 'badnell' to use Badnell fits or 'dr11' to use
Draine (2011)'s formula.
Returns
-------
rrate: array of floats
Recombination rate coefficient [cm^3 s^-1]
"""
if kind == 'badnell':
if Z == 1: # No dielectronic recombination
return self.get_rr_rate(Z, N, T, M=M)
else:
return self.get_rr_rate(Z, N, T, M=M) + \
self.get_dr_rate(Z, N, T, M=M)
elif kind == 'dr11':
if Z == 1:
return self.get_rec_rate_H_caseA(T)
else:
raise ValueError('Z > 1 is not supported for dr11 recombination rate.')
@staticmethod
def get_rec_rate_H_caseA(T):
"""Compute case A recombination rate coefficient for H
Table 14.1 in Draine (2011)
"""
T4 = T*1e-4
return 4.13e-13*T4**(-0.7131 - 0.0115*np.log(T4))
@staticmethod
def get_rec_rate_H_caseB(T):
"""Compute case B recombination rate coefficient for H
Table 14.1 in Draine (2011)
"""
T4 = T*1e-4
return 2.54e-13*T4**(-0.8163 - 0.0208*np.log(T4))
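# Quick numerical check (comment only): at T = 1e4 K, T4 = 1 and the case B fit
# above reduces to alpha_B ~ 2.54e-13 cm^3 s^-1, the familiar textbook value,
# while the case A fit gives ~ 4.13e-13 cm^3 s^-1 at the same temperature.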
@staticmethod
def get_alpha_gr(T, psi, Z):
# Parameters for Fit (14.37) to Grain Recombination Rate coefficients
# alpha_gr(X +) for selected ions. (Draine 2011)
C = dict()
C['H'] = np.array([12.25, 8.074e-6, 1.378, 5.087e2, 1.586e-2, 0.4723, 1.102e-5])
C['He']= np.array([5.572, 3.185e-7, 1.512, 5.115e3, 3.903e-7, 0.4956, 5.494e-7])
C['C'] = np.array([45.58, 6.089e-3, 1.128, 4.331e2, 4.845e-2, 0.8120, 1.333e-4])
C['Mg']= np.array([2.510, 8.116e-8, 1.864, 6.170e4, 2.169e-6, 0.9605, 7.232e-5])
C['S'] = np.array([3.064, 7.769e-5, 1.319, 1.087e2, 3.475e-1, 0.4790, 4.689e-2])
C['Ca']= np.array([1.636, 8.208e-9, 2.289, 1.254e5, 1.349e-9, 1.1506, 7.204e-4])
if Z == 1:
e = 'H'
elif Z == 2:
e = 'He'
elif Z == 6:
e = 'C'
elif Z == 12:
e = 'Mg'
elif Z == 16:
e = 'S'
elif Z == 20:
e = 'Ca'
return 1e-14*C[e][0]/(1.0 + C[e][1]*psi**C[e][2]*\
(1.0 + C[e][3]*T**C[e][4]*psi**(-C[e][5]-C[e][6]*np.log(T))))
@staticmethod
def get_rec_rate_grain(ne, G0, T, Z):
"""Compute grain assisted recombination coefficient
Ch 14.8 in Draine (2011)
"""
psi = G0*T**0.5/ne
return RecRate.get_alpha_gr(T, psi, Z)
def plt_rec_rate(self, Z, N, M=1):
T = np.logspace(3, 6)
# Z = ct.EnumAtom.He.value
# N = Z - 1
# M = M
plt.loglog(T, self.get_rec_rate(Z, N, T, M=M), '-')
plt.loglog(T, self.get_rr_rate(Z, N, T, M=M), ':')
plt.loglog(T, self.get_dr_rate(Z, N, T, M=M), '--')
plt.ylim(1e-14, 1e-10)
return plt.gca()
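
# Minimal usage sketch (assumes the badnell_* data files exist under
# data/microphysics as hard-coded in _read_data above; numbers are illustrative):
if __name__ == '__main__':
    rec = RecRate()
    T = np.logspace(3, 6, 10)
    # Total (RR + DR) rate for He+ -> He0 (Z=2, N=1 electron before recombination)
    alpha_He = rec.get_rec_rate(2, 1, T)
    # Case B rate for hydrogen from the Draine (2011) fit
    alpha_B = RecRate.get_rec_rate_H_caseB(T)
    print(alpha_He, alpha_B)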
```
#### File: pyathena/sf_cloud_rad/pdf.py
```python
import os.path as osp
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import astropy.units as au
import astropy.constants as ac
import pandas as pd
from mpl_toolkits.axes_grid1 import ImageGrid
from ..plt_tools.cmap import cmap_apply_alpha
from ..util.scp_to_pc import scp_to_pc
from ..load_sim import LoadSim
class PDF:
bins=dict(nH=np.logspace(-2,5,71),
nHI=np.logspace(-2,5,71),
nH2=np.logspace(-2,5,71),
nHII=np.logspace(-2,5,71),
T=np.logspace(0,5,51),
pok=np.logspace(0,7,71),
chi_PE_tot=np.logspace(-4,5,91),
chi_FUV_tot=np.logspace(-4,5,91),
Bmag=np.logspace(-7,-4,91),
Erad_LyC=np.logspace(-17,-8,91),
)
@LoadSim.Decorators.check_pickle
def read_pdf2d(self, num,
bin_fields=None, bins=None, prefix='pdf2d',
savdir=None, force_override=False):
if bins is not None:
self.bins = bins
bin_fields_def = [['nH', 'pok'], ['nH', 'T']]
if bin_fields is None:
bin_fields = bin_fields_def
ds = self.load_vtk(num=num)
res = dict()
for bf in bin_fields:
k = '-'.join(bf)
res[k] = dict()
dd = ds.get_field(bf)
xdat = dd[bf[0]].data.flatten()
ydat = dd[bf[1]].data.flatten()
# Volume weighted hist
weights = None
H, xe, ye = np.histogram2d(xdat, ydat, (self.bins[bf[0]], self.bins[bf[1]]),
weights=weights)
res[k]['H'] = H
res[k]['xe'] = xe
res[k]['ye'] = ye
# Density weighted hist
weights = (ds.get_field('nH'))['nH'].data.flatten()
Hw, xe, ye = np.histogram2d(xdat, ydat, (self.bins[bf[0]], self.bins[bf[1]]),
weights=weights)
res[k]['Hw'] = Hw
res['domain'] = ds.domain
return res
@LoadSim.Decorators.check_pickle
def read_pdf2d_phase(self, num, prefix='pdf2d_phase',
savdir=None, force_override=False):
"""
Read 2d pdf of density, chi_FUV, pok
"""
r = dict()
ds = self.load_vtk(num)
fields = ['nH','xH2','xHII','xHI','pok','T','Bmag','Erad_LyC']
self.logger.info('Reading fields {0:s}'.format(', '.join(fields)))
dd = self.get_chi(ds, fields=fields, freq=['LW','PE']) # see ./fields.py
#bins = (np.logspace(-2,5,71), np.logspace(-4,5,91))
# Masked array
idx_HII = dd['xHII'].data.flatten() > 0.5
idx_HI = (dd['xHI'].data.flatten() > 0.5)
idx_H2 = (dd['xH2'].data.flatten() > 0.25)
#idx_HI = ~idx_HII & ~idx_H2
dat_all = {
'nH-chi_PE_tot': (dd['nH'].data.flatten(),
(dd['chi_PE_ext'] + dd['chi_PE']).data.flatten(),
dd['nH'].data.flatten()),
'nH2-chi_PE_tot': (dd['nH'].data.flatten()[idx_H2],
(dd['chi_PE_ext'] + dd['chi_PE']).data.flatten()[idx_H2],
dd['nH'].data.flatten()[idx_H2]),
'nHI-chi_PE_tot': (dd['nH'].data.flatten()[idx_HI],
(dd['chi_PE_ext'] + dd['chi_PE']).data.flatten()[idx_HI],
dd['nH'].data.flatten()[idx_HI]),
'nHII-chi_PE_tot': (dd['nH'].data.flatten()[idx_HII],
(dd['chi_PE_ext'] + dd['chi_PE']).data.flatten()[idx_HII],
dd['nH'].data.flatten()[idx_HII]),
'nH-chi_FUV_tot': (dd['nH'].data.flatten(),
(dd['chi_FUV_ext'] + dd['chi_FUV']).data.flatten(),
dd['nH'].data.flatten()),
'nH2-chi_FUV_tot': (dd['nH'].data.flatten()[idx_H2],
(dd['chi_FUV_ext'] + dd['chi_FUV']).data.flatten()[idx_H2],
dd['nH'].data.flatten()[idx_H2]),
'nHI-chi_FUV_tot': (dd['nH'].data.flatten()[idx_HI],
(dd['chi_FUV_ext'] + dd['chi_FUV']).data.flatten()[idx_HI],
dd['nH'].data.flatten()[idx_HI]),
'nHII-chi_FUV_tot': (dd['nH'].data.flatten()[idx_HII],
(dd['chi_FUV_ext'] + dd['chi_FUV']).data.flatten()[idx_HII],
dd['nH'].data.flatten()[idx_HII]),
'nH-pok': (dd['nH'].data.flatten(),
dd['pok'].data.flatten(),
dd['nH'].data.flatten()),
'nH2-pok': (dd['nH'].data.flatten()[idx_H2],
dd['pok'].data.flatten()[idx_H2],
dd['nH'].data.flatten()[idx_H2]),
'nHI-pok': (dd['nH'].data.flatten()[idx_HI],
dd['pok'].data.flatten()[idx_HI],
dd['nH'].data.flatten()[idx_HI]),
'nHII-pok': (dd['nH'].data.flatten()[idx_HII],
dd['pok'].data.flatten()[idx_HII],
dd['nH'].data.flatten()[idx_HII]),
'nH-Bmag': (dd['nH'].data.flatten(),
dd['Bmag'].data.flatten(),
dd['nH'].data.flatten()),
'nH2-Bmag': (dd['nH'].data.flatten()[idx_H2],
dd['Bmag'].data.flatten()[idx_H2],
dd['nH'].data.flatten()[idx_H2]),
'nHI-Bmag': (dd['nH'].data.flatten()[idx_HI],
dd['Bmag'].data.flatten()[idx_HI],
dd['nH'].data.flatten()[idx_HI]),
'nHII-Bmag': (dd['nH'].data.flatten()[idx_HII],
dd['Bmag'].data.flatten()[idx_HII],
dd['nH'].data.flatten()[idx_HII]),
'nH-T': (dd['nH'].data.flatten(),
dd['T'].data.flatten(),
dd['nH'].data.flatten()),
'nH2-T': (dd['nH'].data.flatten()[idx_H2],
dd['T'].data.flatten()[idx_H2],
dd['nH'].data.flatten()[idx_H2]),
'nHI-T': (dd['nH'].data.flatten()[idx_HI],
dd['T'].data.flatten()[idx_HI],
dd['nH'].data.flatten()[idx_HI]),
'nHII-T': (dd['nH'].data.flatten()[idx_HII],
dd['T'].data.flatten()[idx_HII],
dd['nH'].data.flatten()[idx_HII]),
'nH-Erad_LyC': (dd['nH'].data.flatten(),
dd['Erad_LyC'].data.flatten(),
dd['nH'].data.flatten()),
'nHII-Erad_LyC': (dd['nH'].data.flatten()[idx_HII],
dd['Erad_LyC'].data.flatten()[idx_HII],
dd['nH'].data.flatten()[idx_HII]),
}
for k, (xdat,ydat,wdat) in dat_all.items():
r[k] = dict()
kx, ky = k.split('-')
bins = (self.bins[kx], self.bins[ky])
H, xe, ye = np.histogram2d(xdat, ydat, bins=bins, weights=None)
Hw, _, _ = np.histogram2d(xdat, ydat, bins=bins, weights=wdat)
r[k]['H'] = H
r[k]['Hw'] = Hw
r[k]['xe'] = xe
r[k]['ye'] = ye
return r
@LoadSim.Decorators.check_pickle
def read_density_pdf_all(self, prefix='density_pdf_all',
savdir=None, force_override=False):
rr = dict()
# nums = self.nums
#nums = [0,10,20]
nums = range(0, self.get_num_max_virial())
print('density_pdf_all: {0:s} nums:'.format(self.basename), nums, end=' ')
for i in nums:
print(i, end=' ')
r = self.read_density_pdf(num=i, force_override=False)
if i == 0:
for k in r.keys():
rr[k] = []
for k in r.keys():
try:
rr[k].append(r[k].value.item())
except:
rr[k].append(r[k])
rr = pd.DataFrame(rr)
return rr
@LoadSim.Decorators.check_pickle
def read_density_pdf(self, num, prefix='density_pdf',
savdir=None, force_override=False):
"""
Read 1d pdf of density
"""
bins = np.logspace(-3, 7, 101)
ds = self.load_vtk(num)
dd = ds.get_field(['nH','specific_scalar_CL','xn'])
# Select neutral cloud gas
idx = np.logical_and(dd['xn'].data > 0.5, dd['specific_scalar_CL'].data > 5e-1)
nH_cl = (dd['nH']*dd['specific_scalar_CL']).data[idx]
x = np.log(nH_cl)
res = dict()
res['time_code'] = ds.domain['time']
try:
res['nH_cl_meanV'] = np.mean(nH_cl)
res['nH_cl_meanM'] = np.average(nH_cl, weights=nH_cl)
res['muV'] = np.sum(x)/len(nH_cl)
res['muM'] = np.sum(x*nH_cl)/np.sum(nH_cl)
res['sigmaV'] = np.std(x)
res['sigmaM'] = np.sqrt(np.sum((x - res['muM'])**2*nH_cl)/np.sum(nH_cl))
res['histV'], res['bineV'] = np.histogram(nH_cl, bins=bins)
res['histM'], res['bineM'] = np.histogram(nH_cl, bins=bins, weights=nH_cl)
except ZeroDivisionError:
pass
return res
def plt_pdf2d_one_model(s, dt_Myr=[-0.2,2,5,8], yvar='chi_PE_tot', alpha=1.0,
force_override=False):
"""Function to plot 2d histograms at different snapshots
"""
minmax = dict(chi_PE_tot=(1e-4,1e4),
chi_FUV_tot=(1e-4,1e4),
pok=(1e2,1e7),
nH=(1e-2,3e4),
T=(1e1,3e4),
Bmag=(1e-7,1e-4),
Erad_LyC=(1e-4,1e4),
)
ylabels = dict(chi_PE_tot=r'$\chi_{\rm PE}$',
chi_FUV_tot=r'$\chi_{\rm FUV}$',
pok=r'$P/k_{\rm B}\;[{\rm cm}^{-3}\,{\rm K}]$',
T=r'$T\;[{\rm K}]$',
Bmag=r'$|\mathbf{B}|\;[\mu{\rm G}]$',
Erad_LyC=r'$\mathcal{E}_{\rm LyC}\;[10^{-13}\,{\rm erg}\,{\rm cm}^{-3}]$',
)
pcargs = dict(edgecolor='face', linewidth=0, rasterized=True)
norm = [mpl.colors.LogNorm(1e-6,5e-2),
mpl.colors.LogNorm(1e-5,5e-2),
mpl.colors.LogNorm(1e-5,5e-2),
mpl.colors.LogNorm(1e-5,5e-2)]
nums = s.get_nums(dt_Myr=dt_Myr)
cm0 = plt.cm.viridis
# cm1 = cmap_apply_alpha('Blues')
# cm2 = cmap_apply_alpha('Greens')
# cm3 = cmap_apply_alpha('Oranges')
cm1 = plt.cm.Blues
cm2 = plt.cm.Greens
cm3 = plt.cm.Oranges
fig = plt.figure(figsize=(15, 12))
nr = 4
nc = len(dt_Myr)
imgrid_args = dict(nrows_ncols=(nr,nc), direction='row', aspect=False,
label_mode='L', axes_pad=0.2, cbar_mode='edge', cbar_location='right')
g1 = ImageGrid(fig, [0.02, 0.05, 0.90, 0.90], **imgrid_args)
for ic,num in enumerate(nums):
print(num, end=' ')
rr = s.read_pdf2d_phase(num, force_override=force_override)
k0 = f'nH-{yvar}'
k = f'nH-{yvar}'
im0 = g1[ic].pcolormesh(rr[k]['xe'], rr[k]['ye'],
rr[k]['Hw'].T/rr[k0]['Hw'].T.sum(),
norm=norm[0], cmap=cm0, alpha=alpha, **pcargs)
k = f'nH2-{yvar}'
im1 = g1[nc+ic].pcolormesh(rr[k]['xe'], rr[k]['ye'],
rr[k]['Hw'].T/rr[k0]['Hw'].T.sum(),
norm=norm[1], cmap=cm1, alpha=alpha, **pcargs)
k = f'nHI-{yvar}'
im2 = g1[2*nc+ic].pcolormesh(rr[k]['xe'], rr[k]['ye'],
rr[k]['Hw'].T/rr[k0]['Hw'].T.sum(),
norm=norm[2], cmap=cm2, alpha=alpha, **pcargs)
if yvar == 'chi_FUV_tot':
k0 = r'nH-Erad_LyC'
k = r'nHII-Erad_LyC'
im3 = g1[3*nc+ic].pcolormesh(rr[k]['xe'], rr[k]['ye']*1e13,
rr[k]['Hw'].T/rr[k0]['Hw'].T.sum(),
norm=norm[3], cmap=cm3, alpha=alpha, **pcargs)
else:
k = f'nHII-{yvar}'
im3 = g1[3*nc+ic].pcolormesh(rr[k]['xe'], rr[k]['ye'],
rr[k]['Hw'].T/rr[k0]['Hw'].T.sum(),
norm=norm[3], cmap=cm3, alpha=alpha, **pcargs)
for i, ax in enumerate(g1):
if yvar == 'pok':
# Plot lines of constant temperature 8000/40K for ionized/molecular gas
nH = np.logspace(np.log10(minmax['nH'][0]), np.log10(minmax['nH'][1]))
for T,xe,xH2,c,label in zip((20.0,8000.0),(0.0,1.0),\
(0.5,0.0),('blue','orange'),
(r'$T=20\,{\rm K} (x_{\rm H_2}=0.5)$',
r'$T=8000\,{\rm K} (x_{\rm H^+}=1)$')):
l, = ax.loglog(nH, (1.1 + xe - xH2)*nH*T, c=c,
lw=0.75, ls='-', label=label)
if yvar == 'chi_FUV_tot' and i >= (nr - 1)*nc:
# Plot lines of constant ionization parameter
hnui = (s.par['radps']['hnu_PH']*au.eV).cgs.value
Uion = (1e0, 1e-2, 1e-4)
nH = np.logspace(np.log10(minmax['nH'][0]), np.log10(minmax['nH'][1]))
for U in Uion:
Erad = hnui*U*nH
ax.loglog(nH, Erad*1e13, c='grey', lw=0.75, ls='--')
ax.set(xscale='log', yscale='log', xlim=minmax['nH'], ylim=minmax['Erad_LyC'],
xlabel=r'$n_{\rm H}\;[{\rm cm^{-3}}]$', ylabel=ylabels['Erad_LyC'])
else:
ax.set(xscale='log', yscale='log', xlim=minmax['nH'], ylim=minmax[yvar],
xlabel=r'$n_{\rm H}\;[{\rm cm^{-3}}]$', ylabel=ylabels[yvar])
ax.grid()
# Annotate time
for ic, dt_ in zip(range(nc),dt_Myr):
if dt_ < 0.0:
g1[ic].set_title(r'$t_{*,0}-$' + r'{0:.1f}'.format(np.abs(dt_)) + r' Myr')
else:
g1[ic].set_title(r'$t_{*,0}+$' + r'{0:.1f}'.format(dt_) + r' Myr')
for i,(im,cm) in enumerate(zip((im0,im1,im2,im3),(cm0,cm1,cm2,cm3))):
plt.colorbar(im, cax=g1[(i+1)*nc-1].cax, label='mass fraction',
norm=norm[i], cmap=cm)
savefig = True
if savefig:
basedir = '/tigress/jk11/figures/GMC/paper/pdf/'
name = 'pdf2d-{0:s}-{1:s}.png'.format('nH', yvar)
savname = osp.join(basedir, name)
fig.savefig(savname, dpi=200, bbox_inches='tight')
scp_to_pc(savname, target='GMC-AB')
print('saved to', savname)
return fig
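
# Hedged usage sketch (comment only): `s` is expected to be a simulation object
# from this package's LoadSim machinery exposing read_pdf2d_phase, get_nums and
# par; the exact constructor and model path below are hypothetical.
# s = LoadSimSFCloudRad('/path/to/model')   # hypothetical class/path
# fig = plt_pdf2d_one_model(s, dt_Myr=[-0.2, 2, 5, 8], yvar='pok')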
```
#### File: pyathena/sf_cloud/sfe_raskutti.py
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as sp
import astropy.units as au
import astropy.constants as ac
def Sigma_E(SFE, Psi):
"""Eddington surface density"""
return SFE/(1.0 + SFE)*Psi/(2.0*np.pi*ac.c.cgs.value*ac.G.cgs.value)
def mu_M(Sigma_cl, SFE, x, sigma):
return np.log(Sigma_cl*(1.0 - SFE)/(4.0*x**2)) + 0.5*sigma**2
def y_E(Sigma_cl, SFE, x, sigma, Psi):
"""
Returns the random variable with a standard normal distribution
for mass-weighted surface density distribution corresponding to Sigma_E
y_E = (ln(Sigma_E) - mu_M)/(sqrt(2)*sigma_lnSigma)
"""
Sigma_E_ = Sigma_E(SFE,Psi)
muM_ = mu_M(Sigma_cl, SFE, x, sigma)
return (np.log(Sigma_E_) - muM_)/(np.sqrt(2.0)*sigma)
def argmax_eps_of(Sigmacl, Psi=2000.0, sigma=1.0, x=1.0):
SFE=np.linspace(0.0001, 0.9999, num=1000)
yE = y_E(Sigmacl, SFE, x, sigma, Psi)
eps_of = 0.5*(1.0 - SFE)*(1.0 + sp.erf(yE))
return SFE, yE, eps_of, SFE[np.argmax(eps_of)]
def eps_min_max(Sigmacl, Psi=2000.0, sigma=1.0, x=1.0):
"""
Compute final SF efficiency given Sigmacl, Psi, sigma_lnSigma, x
"""
if not isinstance(Sigmacl, (np.ndarray, np.generic)):
if isinstance(Sigmacl, float):
Sigmacl = np.asarray([Sigmacl])
else:
Sigmacl = np.asarray(Sigmacl)
eps_min = np.zeros_like(Sigmacl)
eps_max = np.zeros_like(Sigmacl)
for i, Sigmacl_ in enumerate(Sigmacl):
SFE, yE, eps_of, SFE_min = argmax_eps_of(Sigmacl_, Psi=Psi, sigma=sigma, x=x)
eps_min[i] = SFE_min
eps_max[i] = 1.0 - max(eps_of)
return eps_min, eps_max
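
# Minimal usage sketch (assumed inputs): Sigma_cl must be a surface density in
# cgs (g cm^-2) to be consistent with Sigma_E above; 100 Msun pc^-2 is
# ~ 2.09e-2 g cm^-2. Psi is the light-to-mass ratio in cgs (erg s^-1 g^-1).
if __name__ == '__main__':
    Sigma_cl = (100.0*au.M_sun/au.pc**2).to('g cm-2').value  # ~2.09e-2 g cm^-2
    eps_min, eps_max = eps_min_max(Sigma_cl, Psi=2000.0, sigma=1.0, x=1.0)
    print(eps_min, eps_max)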
```
#### File: pyathena/util/units.py
```python
import astropy.units as au
import astropy.constants as ac
import numpy as np
class Units(object):
"""Simple class for simulation unit.
Msun, Lsun, etc.: physical constants in code unit
"""
def __init__(self, kind='LV', muH=1.4271):
"""
Parameters
----------
kind: string
"LV" for (pc, km/s) or "LT" for (pc, Myr)
muH: float
mean particle mass per H (for neutral gas).
Default value is 1.4271 (assuming solar metallicity).
"""
mH = 1.008*au.u
if kind == 'LV':
self.muH = muH
self.length = (1.0*au.pc).to('pc')
self.velocity = (1.0*au.km/au.s).to('km/s')
self.time = (self.length/self.velocity).cgs
elif kind == 'LT':
self.muH = muH
self.length = (1.0*au.pc).to('pc')
self.time = (1.0*au.Myr).to('Myr')
self.velocity = (self.length/self.time).to('km/s')
elif kind == 'cgs':
# Assumption: take 1 cm / 1 s as the base units on this branch so that the
# attributes used below (length, muH) are defined; the source left them unset.
self.muH = muH
self.length = (1.0*au.cm).to('cm')
self.time = 1.0*au.s
self.velocity = (self.length/self.time).to('km/s')
self.mH = mH.to('g')
self.mass = (self.muH*mH*(self.length.to('cm').value)**3).to('Msun')
self.density = (self.mass/self.length**3).cgs
self.momentum = (self.mass*self.velocity).to('Msun km s-1')
self.energy = (self.mass*self.velocity**2).cgs
self.pressure = (self.density*self.velocity**2).cgs
self.energy_density = self.pressure.to('erg/cm**3')
self.mass_flux = (self.density*self.velocity).to('Msun kpc-2 yr-1')
self.momentum_flux = (self.density*self.velocity**2).to('Msun km s-1 kpc-2 yr-1')
self.energy_flux = (self.density*self.velocity**3).to('erg kpc-2 yr-1')
# Define (physical constants in code units)^-1
#
# Opposite to the convention chosen by set_units function in
# athena/src/units.c This is because in post-processing we want to
# convert from code units to more convenient ones by "multiplying" these
# constants
self.cm = self.length.to('cm').value
self.pc = self.length.to('pc').value
self.kpc = self.length.to('kpc').value
self.Myr = self.time.to('Myr').value
self.kms = self.velocity.to('km/s').value
self.Msun = self.mass.to('Msun').value
self.Lsun = (self.energy/self.time).to('Lsun').value
self.erg = self.energy.to('erg').value
self.eV = self.energy.to('eV').value
self.s = self.time.to('s').value
self.pok = ((self.pressure/ac.k_B).to('cm**-3*K')).value
self.muG = np.sqrt(4*np.pi*self.energy_density.cgs.value)/1e-6
# For yt
self.units_override = dict(length_unit=(self.length.to('pc').value, 'pc'),
time_unit=(self.time.to('Myr').value, 'Myr'),
mass_unit=(self.mass.to('Msun').value, 'Msun'))
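
# Hedged usage sketch: convert a code-unit pressure to P/k_B in cgs by
# multiplying with the conversion constants defined above (P_code is made up).
if __name__ == '__main__':
    u = Units(kind='LV')
    P_code = 2.5e3
    print(P_code*u.pok, 'cm^-3 K')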
```
|
{
"source": "jeonggyukim/pyglet",
"score": 3
}
|
#### File: pyglet/utils/units.py
```python
import astropy.units as au
import astropy.constants as ac
# import numpy as np
class Units(object):
"""Simple class for simulation unit.
Msun, Lsun, etc.: physical constants in code unit
"""
def __init__(self, kind='tigris', muH=1.4):
"""
Parameters
----------
kind : str
"tigris" for mass=1.4*muH*mH*(pc/cm)**3,
length=pc
velocity=km/s
with physical constants defined in tigris source code
muH : float
mean particle mass per H (for neutral gas).
"""
if kind == 'tigris':
# Physical constants defined in cgs units (src/units.hpp)
self.mH = 1.6733e-24*au.g
self.pc = 3.085678e+18*au.cm
self.kms = 1.0e+5*au.cm/au.s
self.kpc = 3.085678e+21*au.cm
self.Myr = 3.155815e+13*au.s
self.yr = 3.155815e+7*au.s
self.c = 2.99792458e+10*au.cm/au.s
self.k_B = 1.380658e-16*au.erg/au.K
self.G = 6.67259e-8*au.cm**3/au.g/au.s**2
self.M_sun = 1.9891e+33*au.g
self.L_sun = 3.8268e+33*au.erg/au.s
self.e = 4.80320427e-10*au.cm**1.5*au.g**0.5/au.s
self.aR = 7.5646e-15*au.erg/au.cm**3/au.K**4
self.muH = muH
self.length = self.pc
self.mass = ((self.mH*self.muH)*(self.length/au.cm)**3).to('Msun')
self.density = (self.mH*self.muH)*au.cm**-3
self.velocity = (self.kms).to('km s-1')
self.time = (self.length/self.velocity).to('Myr')
self.momentum = self.mass*self.velocity
self.pressure = (self.density*self.velocity**2).to('erg cm-3')
self.energy = self.mass*self.velocity**2
# For yt
self.units_override = dict(length_unit=(1.0, 'pc'),
time_unit=(1.0, 'pc/km*s'),
mass_unit=(muH*self.mH.value*self.length.value**3,
'g'))
# self.density = (self.mass/self.length**3).cgs
# self.momentum = (self.mass*self.velocity).to('Msun km s-1')
# self.energy = (self.mass*self.velocity**2).cgs
# self.pressure = (self.density*self.velocity**2).cgs
# self.energy_density = self.pressure.to('erg/cm**3')
# self.mass_flux = (self.density*self.velocity).to('Msun pc-2 Myr-1')
# self.momentum_flux = (self.density*self.velocity**2).to('Msun km s-1 pc-2 Myr-1')
# Define (physical constants in code units)^-1
#
# Opposite to the convention chosen by set_units function in
# athena/src/units.c This is because in post-processing we want to
# convert from code units to more convenient ones by "multiplying" these
# constants
# self.pc = self.length.to('pc').value
# self.kpc = self.length.to('kpc').value
# self.Myr = self.time.to('Myr').value
# self.kms = self.velocity.to('km/s').value
# self.Msun = self.mass.to('Msun').value
# self.Lsun = (self.energy/self.time).to('Lsun').value
# self.erg = self.energy.to('erg').value
# self.eV = self.energy.to('eV').value
# self.s = self.time.to('s').value
# self.pok = ((self.pressure/ac.k_B).to('cm**-3*K')).value
# self.muG = np.sqrt(4*np.pi*self.energy_density.cgs.value)/1e-6
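
# Hedged usage sketch for the 'tigris' unit system defined above:
if __name__ == '__main__':
    u = Units(kind='tigris')
    print(u.time)       # one code time unit, pc/(km/s) ~ 0.978 Myr
    print(u.pressure)   # code pressure unit in erg cm^-3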
```
|
{
"source": "Jeonghan57/A-Study-on-Patch-Wise-Deepfake-Image-Detection",
"score": 2
}
|
#### File: A-Study-on-Patch-Wise-Deepfake-Image-Detection/Patch Selection/check_npy_img.py
```python
import torch
import numpy as np
from torch.utils.data import TensorDataset, DataLoader
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
patch_size = 128
img_per_patch = int((256 / patch_size) ** 2)
data_size = img_per_patch * 2000
def get_data(batch_size):
path = "./Dataset(npy)/Church(LSUN)/"
data_holder = np.zeros([data_size*2, 3, patch_size, patch_size], dtype=np.uint8)
labels = np.zeros([data_size*2, 2])
temp = np.load(path + "test(StyleGAN2)_real.npy")
# temp = np.load(path + "test(StyleGAN2)("+ str(patch_size) +")_real.npy") # data_size, patch_size, patch_size, 3
temp = np.reshape(temp, [data_size, 3, patch_size, patch_size])
data_holder[:data_size, :, :, :] = temp[:data_size]
temp = np.load(path + "test(StyleGAN2)_fake.npy")
# temp = np.load(path + "test(StyleGAN2)("+ str(patch_size) +")_fake.npy") # data_size, patch_size, patch_size, 3
temp = np.reshape(temp, [data_size, 3, patch_size, patch_size])
data_holder[data_size:, :, :, :] = temp[:data_size]
# data_holder.shape -> (data_size*2, 3, patch_size, patch_size)
labels[:data_size, 0] = 1
labels[data_size:, 1] = 1
# labels.shape -> (data_size*2, 2)
# Convert the numpy arrays to torch tensors.
data_holder = torch.from_numpy(data_holder).float()
labels = torch.from_numpy(labels).long()
ds = TensorDataset(data_holder, labels)
del temp, labels
data_loader = DataLoader(ds, batch_size=batch_size, shuffle=False)
return data_loader
from XceptionNet import Xception
net = Xception(num_classes = 2)
"""
# Evaluate how often a whole image is classified correctly by a patch-wise
# majority vote (ties are counted as unknown).
def Evaluate_whole_image(Net):
save_path = "./result/Xception(" + str(patch_size) + ")/"
for i in range(50):
Net.load_state_dict(torch.load(save_path + "epoch_" + str(i) + ".pth"), strict=False)
Net = Net.to(device).eval()
test_data = get_data(2048)
# Test
ys = []
ypreds = []
for X, Y in test_data:
X = X.to(device)
Y = Y.to(device)
with torch.no_grad():
_, y_pred = Net(X).max(1)
ys.append(Y.max(1)[1])
ypreds.append(y_pred)
ys = torch.cat(ys)
ypreds = torch.cat(ypreds)
correct = 0
unknown = 0
incorrect = 0
acc = 0
for j in range(0, data_size*2, img_per_patch):
whole = (ys[j:j+img_per_patch] == ypreds[j:j+img_per_patch]).float().sum()
if whole > (img_per_patch/2):
correct += 1
elif whole == (img_per_patch/2):
unknown += 1
elif whole < (img_per_patch/2):
incorrect += 1
del whole
acc = ((correct) / (correct + unknown + incorrect)) * 100
print("epoch " + str(i))
print("Correct:{0} / Unknown:{1} / Incorrect:{2} / Accuracy:{3:.2f}%" .format(correct, unknown, incorrect, acc))
"""
"""
# Accuracy analysis where an image whose real/fake patch votes are tied is
# judged as fake.
def Evaluate_whole_image(Net):
# save_path = "./result/Xception/"
save_path = "./result/Xception(" + str(patch_size) + ")/"
for i in range(50):
Net.load_state_dict(torch.load(save_path + "epoch_" + str(i) + ".pth"), strict=False)
Net = Net.to(device).eval()
test_data = get_data(512)
# Test
ys = []
ypreds = []
for X, Y in test_data:
X = X.to(device)
Y = Y.to(device)
with torch.no_grad():
_, y_pred = Net(X).max(1)
ys.append(Y.max(1)[1])
ypreds.append(y_pred)
ys = torch.cat(ys)
ypreds = torch.cat(ypreds)
correct = 0
incorrect = 0
acc = 0
for j in range(0, data_size*2, img_per_patch):
whole = (ys[j:j+img_per_patch] == ypreds[j:j+img_per_patch]).float().sum()
if j < data_size:
if whole > (img_per_patch/2):
correct += 1
else:
incorrect += 1
else:
if whole >= (img_per_patch/2):
correct += 1
else:
incorrect += 1
acc = ((correct) / (correct + incorrect)) * 100
print("epoch " + str(i))
print("Correct:{0} / Incorrect:{1} / Accuracy:{2:.2f}%" .format(correct, incorrect, acc))
Evaluate_whole_image(net)
def Evaluate_Random_image(Net):
save_path = "./result/Xception(" + str(patch_size) + ")/"
for i in range(50):
Net.load_state_dict(torch.load(save_path + "epoch_" + str(i) + ".pth"), strict=False)
Net = Net.to(device).eval()
test_data = get_data(512)
# Test
ys = []
ypreds = []
for X, Y in test_data:
X = X.to(device)
Y = Y.to(device)
with torch.no_grad():
_, y_pred = Net(X).max(1)
ys.append(Y.max(1)[1])
ypreds.append(y_pred)
ys = torch.cat(ys)
ypreds = torch.cat(ypreds)
correct = 0
incorrect = 0
acc = 0
for j in range(0, data_size*2, img_per_patch):
whole = (ys[j:j+img_per_patch] == ypreds[j:j+img_per_patch]).float().sum()
if whole > (img_per_patch/2):
correct += 1
else:
incorrect += 1
acc = ((correct) / (correct + incorrect)) * 100
print("epoch " + str(i))
print("Correct:{0} / Incorrect:{1} / Accuracy:{2:.2f}%" .format(correct, incorrect, acc))
# Evaluate_Random_image(net)
"""
```
#### File: A-Study-on-Patch-Wise-Deepfake-Image-Detection/Patch Selection/check_npy_img_threshold.py
```python
import torch
import numpy as np
from torch.utils.data import TensorDataset, DataLoader
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
################# Adjustable Parameter #################
batch_size = 64
patch_size = 128
delta = 0.8 # upper threshold
########################################################
img_per_patch = int((256 / patch_size) ** 2)
data_size = img_per_patch * 2000
def get_data(batch_size):
path = "./Dataset(npy)/Celeb/"
data_holder = np.zeros([data_size*2, 3, patch_size, patch_size], dtype=np.uint8)
labels = np.zeros([data_size*2, 2])
# temp = np.load(path + "test(StyleGAN2)_real.npy")
temp = np.load(path + "test(PROGAN)("+ str(patch_size) +")_real.npy") # data_size, patch_size, patch_size, 3
temp = np.reshape(temp, [data_size, 3, patch_size, patch_size])
data_holder[:data_size, :, :, :] = temp[:data_size]
# temp = np.load(path + "test(StyleGAN2)_fake.npy")
temp = np.load(path + "test(PROGAN)("+ str(patch_size) +")_fake.npy") # data_size, patch_size, patch_size, 3
temp = np.reshape(temp, [data_size, 3, patch_size, patch_size])
data_holder[data_size:, :, :, :] = temp[:data_size]
labels[:data_size, 0] = 1
labels[data_size:, 1] = 1
data_holder = torch.from_numpy(data_holder).float()
labels = torch.from_numpy(labels).long()
ds = TensorDataset(data_holder, labels)
del temp, labels
data_loader = DataLoader(ds, batch_size=batch_size, shuffle=False)
return data_loader
from XceptionNet import Xception
net = Xception(num_classes = 2)
# Evaluate images using only the patches whose score gap exceeds the upper threshold
def Evaluate_whole_image_ver1(Net):
# save_path = "./result/Xception/"
save_path = "./result/Xception(" + str(patch_size) + ")/"
for i in range(50):
Net.load_state_dict(torch.load(save_path + "epoch_" + str(i) + ".pth"), strict=False)
Net = Net.to(device).eval()
test_data = get_data(batch_size)
# Test
ys = []
ypreds = []
y_tt = []
for X, Y in test_data:
X = X.to(device)
Y = Y.to(device)
with torch.no_grad():
y_predict = Net(X)
_, y_pred = Net(X).max(1)
for p in range(len(y_predict)):
z = float(y_predict[p, 1]) - float(y_predict[p, 0])
y_tt.append(z)
ys.append(Y.max(1)[1])
ypreds.append(y_pred)
ys = torch.cat(ys)
ypreds = torch.cat(ypreds)
y_tt_a = np.abs(y_tt)
threshold = (max(y_tt_a) - min(y_tt_a)) * (1 - delta)
lib = []
for l in range(len(y_tt_a)):
if y_tt_a[l] > threshold:
lib.append(l)
# print(lib)
# print("์ ํ๋ ํจ์น ๊ฐ์ :", len(lib), "/", str(data_size * 2))
correct_tt = 0
incorrect_tt = 0
acc = 0
for j in range(0, data_size*2, img_per_patch):
if j < data_size:
for k in range(0, img_per_patch):
correct = 0
incorrect = 0
if j+k in lib:
if (ys[j+k] == ypreds[j+k]):
correct += 1
else:
incorrect += 1
if correct > incorrect:
correct_tt += 1
elif correct == 0 and incorrect == 0:
continue
elif correct <= incorrect:
incorrect_tt +=1
else:
for k in range(0, img_per_patch):
correct = 0
incorrect = 0
if j+k in lib:
if (ys[j+k] == ypreds[j+k]):
correct += 1
else:
incorrect += 1
if correct >= incorrect:
correct_tt += 1
elif correct == 0 and incorrect == 0:
continue
elif correct < incorrect:
incorrect_tt +=1
acc = ((correct_tt) / (correct_tt + incorrect_tt)) * 100
print("epoch " + str(i))
print("Correct:{0} / Incorrect:{1} / Accuracy:{2:.2f}%" .format(correct_tt, incorrect_tt, acc))
# Evaluate all images. If an image has no patches above the threshold,
# then fall back to using all patches in that image
def Evaluate_whole_image_ver2(Net):
# save_path = "./result/Xception/"
save_path = "./result/Xception(" + str(patch_size) + ")/"
for i in range(50):
Net.load_state_dict(torch.load(save_path + "epoch_" + str(i) + ".pth"), strict=False)
Net = Net.to(device).eval()
test_data = get_data(batch_size)
# Test
ys = []
ypreds = []
y_tt = []
for X, Y in test_data:
X = X.to(device)
Y = Y.to(device)
with torch.no_grad():
y_predict = Net(X)
_, y_pred = Net(X).max(1)
for p in range(len(y_predict)):
z = float(y_predict[p, 1]) - float(y_predict[p, 0])
y_tt.append(z)
ys.append(Y.max(1)[1])
ypreds.append(y_pred)
ys = torch.cat(ys)
ypreds = torch.cat(ypreds)
y_tt_a = np.abs(y_tt)
threshold = (max(y_tt_a) - min(y_tt_a)) * (1 - delta)
lib = []
for l in range(len(y_tt_a)):
if y_tt_a[l] > threshold:
lib.append(l)
# print(lib)
# print("์ ํ๋ ํจ์น ๊ฐ์ :", len(lib), "/", str(data_size * 2))
correct_tt = 0
incorrect_tt = 0
acc = 0
for j in range(0, data_size*2, img_per_patch):
if j < data_size:
correct = 0
incorrect = 0
for k in range(0, img_per_patch):
if j+k in lib:
if (ys[j+k] == ypreds[j+k]):
correct += 1
else:
incorrect += 1
if correct > incorrect:
correct_tt += 1
elif correct == 0 and incorrect == 0:
whole = (ys[j:j+img_per_patch] == ypreds[j:j+img_per_patch]).float().sum()
if whole > (img_per_patch/2):
correct_tt += 1
else:
incorrect_tt += 1
elif correct <= incorrect:
incorrect_tt +=1
else:
correct = 0
incorrect = 0
for k in range(0, img_per_patch):
if j+k in lib:
if (ys[j+k] == ypreds[j+k]):
correct += 1
else:
incorrect += 1
if correct >= incorrect:
correct_tt += 1
elif correct == 0 and incorrect == 0:
whole = (ys[j:j+img_per_patch] == ypreds[j:j+img_per_patch]).float().sum()
if whole >= (img_per_patch/2):
correct_tt += 1
else:
incorrect_tt += 1
elif correct < incorrect:
incorrect_tt +=1
acc = ((correct_tt) / (correct_tt + incorrect_tt)) * 100
print("epoch " + str(i))
print("Correct:{0} / Incorrect:{1} / Accuracy:{2:.2f}%" .format(correct_tt, incorrect_tt, acc))
###################### Recall Functions ######################
# Evaluate_whole_image_ver1(net)
# Evaluate_whole_image_ver2(net)
##############################################################
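
# Illustrative sketch of the patch-selection rule used above (made-up logit gaps):
# a patch is kept only if |logit_fake - logit_real| exceeds
# (max - min of all gaps) * (1 - delta).
if __name__ == "__main__":
    gaps = np.abs(np.array([0.1, 2.5, 0.4, 3.0, 1.2]))
    thr = (gaps.max() - gaps.min()) * (1 - delta)
    print("selected patch indices:", np.where(gaps > thr)[0])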
```
|
{
"source": "Jeonghan57/legendary-potato",
"score": 2
}
|
#### File: legendary-potato/Li et al/classifier_chrominance.py
```python
import torch
from torch import nn, optim
import numpy as np
from torch.utils.data import TensorDataset, DataLoader
import tqdm
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # GPU available?
import pandas as pd
# File paths
location_train = './csv/LSUN/cat/PGGAN/YCbCr'
location_test = './csv/LSUN/church/PGGAN/YCbCr'
channel = 3
param = 75 * channel
# Load the extracted features and convert them to numpy
#train_fake
train_fake_pd = pd.read_csv('{}/{}'.format(location_train, 'train_features_fake_all.csv'),
header=None, index_col=None, names=None)
train_fake_np = pd.DataFrame.to_numpy(train_fake_pd)
#train_real
train_real_pd = pd.read_csv('{}/{}'.format(location_train, 'train_features_real_all.csv'),
header=None, index_col=None, names=None)
train_real_np = pd.DataFrame.to_numpy(train_real_pd)
#test_fake
test_fake_pd = pd.read_csv('{}/{}'.format(location_test, 'test_features_fake.csv'),
header=None, index_col=None, names=None)
test_fake_np = pd.DataFrame.to_numpy(test_fake_pd)
#test_real
test_real_pd = pd.read_csv('{}/{}'.format(location_test, 'test_features_real.csv'),
header=None, index_col=None, names=None)
test_real_np = pd.DataFrame.to_numpy(test_real_pd)
def get_data(batch_size, train=True):
if train:
temp_real = train_real_np[:25000] #20000
temp_real = np.reshape(temp_real, [25000, param])
temp_fake = train_fake_np[:25000] #20000
temp_fake = np.reshape(temp_fake, [25000, param])
else:
temp_real = test_real_np[:2000] #2000
temp_real = np.reshape(temp_real, [temp_real.shape[0], param])
temp_fake = test_fake_np[:2000] #2000
temp_fake = np.reshape(temp_fake, [temp_fake.shape[0], param])
data_holder = np.concatenate((temp_real, temp_fake))
size_real, size_fake = temp_real.shape[0], temp_fake.shape[0]
del temp_real, temp_fake
print("\nData Loading Complete")
labels = np.zeros([size_real + size_fake, 2])
labels[:size_real, 0] = 1
labels[size_real:, 1] = 1
data_holder = torch.from_numpy(data_holder).float()
labels = torch.from_numpy(labels).long()
ds = TensorDataset(data_holder, labels)
del data_holder, labels
data_loader=DataLoader(ds, batch_size=batch_size, shuffle=train)
return data_loader
def train(Net, batch_size):
Net = Net.to(device)
lr = 0.001
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.SGD(Net.parameters(), lr=lr, momentum=0.9)
print("Training Start")
train_data = get_data(batch_size, True)
for epoch in range(50):
count = 0
for X, Y in train_data:
X = X.to(device)
Y = Y.to(device)
y_pred = Net(X)
loss = loss_fn(y_pred, torch.max(Y, 1)[1])
optimizer.zero_grad()
loss.backward()
optimizer.step()
count +=1
if count % 100 == 0:
print(f"epoch:{epoch}, loss={loss}")
torch.save(Net.state_dict(), f"./result/LSUN/epoch(cat(PGGAN)_YCbCr)_{epoch}_1.pth")
def Evaluate_Networks(Net):
save_path = "./result/LSUN/"
# data
Net.load_state_dict(torch.load(save_path + "epoch(cat(PGGAN)_YCbCr)_49_1.pth"), strict=False)
Net = Net.to(device).eval()
test_data = get_data(64, train=False)
# Test
ys = []
ypreds = []
for X, Y in tqdm.tqdm(test_data):
X = X.to(device)
Y = Y.to(device)
with torch.no_grad():
_, y_pred = Net(X).max(1)
ys.append(Y.max(1)[1])
ypreds.append(y_pred)
ys = torch.cat(ys)
ypreds = torch.cat(ypreds)
acc_real = (ys[:2000] == ypreds[:2000]).float().sum() / len(ys[:2000])
acc_fake = (ys[2000:] == ypreds[2000:]).float().sum() / len(ys[2000:])
acc = (ys == ypreds).float().sum() / len(ys)
print('\nReal Accuracy : ', acc_real.item())
print('Fake Accuracy : ', acc_fake.item())
print('Total AVG : ', acc.item())
net = nn.Linear(param, 2)
# train(net , 64)
Evaluate_Networks(net)
```
|
{
"source": "Jeongheum/BalancingBike",
"score": 3
}
|
#### File: Jeongheum/BalancingBike/dqn_replaybuffer.py
```python
import numpy as np
from collections import deque
import random
class ReplayBuffer(object):
"""
Replay Buffer
"""
def __init__(self, buffer_size):
self.buffer_size = buffer_size
self.buffer = deque()
self.count = 0
## save to buffer
def add_buffer(self, state, action, reward, next_state, done):
transition = (state, action, reward, next_state, done)
# check if buffer is full
if self.count < self.buffer_size:
self.buffer.append(transition)
self.count += 1
else:
self.buffer.popleft()
self.buffer.append(transition)
## sample a batch
def sample_batch(self, batch_size):
if self.count < batch_size:
batch = random.sample(self.buffer, self.count)
else:
batch = random.sample(self.buffer, batch_size)
# return a batch of transitions
states = np.asarray([i[0] for i in batch])
actions = np.asarray([i[1] for i in batch])
rewards = np.asarray([i[2] for i in batch])
next_states = np.asarray([i[3] for i in batch])
dones = np.asarray([i[4] for i in batch])
return states, actions, rewards, next_states, dones
## Current buffer occupation
def buffer_count(self):
return self.count
## Clear buffer
def clear_buffer(self):
self.buffer = deque()
self.count = 0
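
# Minimal usage sketch (state/action shapes below are arbitrary):
if __name__ == "__main__":
    buf = ReplayBuffer(buffer_size=1000)
    for t in range(5):
        buf.add_buffer(np.zeros(4), 0, 1.0, np.ones(4), False)
    states, actions, rewards, next_states, dones = buf.sample_batch(batch_size=3)
    print(buf.buffer_count(), states.shape)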
```
|
{
"source": "JeongHoLim/practice",
"score": 3
}
|
#### File: practice/boj/1439.py
```python
def func(s):
one = 0
zero = 0
for i in range(len(s)):
if i > 0 and s[i] == s[i-1] : continue
if s[i] == "1":
one += 1
else :
zero += 1
return min(one,zero)
s = input()
print(func(s))
```
#### File: practice/boj/23290.py
```python
from copy import deepcopy
m,s = map(int,input().split())
grid = [[[] for _ in range(4)] for _ in range(4)]
smell = [[0 for _ in range(4)] for _ in range(4)]
directions = {
0: [0,-1],
1: [-1,-1],
2: [-1,0],
3: [-1,1],
4: [0,1],
5: [1,1],
6: [1,0],
7: [1,-1]
}
for i in range(m):
fx,fy,d = map(int,input().split())
grid[fx-1][fy-1].append(d-1)
s_xy = list(map(lambda x : int(x)-1,input().split()))
def get_valid_move(smell,x,y,d,directions,s_xy):
od = d
for _ in range(8):
dx,dy = directions[d]
f_xy = [x+dx,y+dy]
if f_xy[0]<0 or f_xy[0] >= 4 or f_xy[1] <0 or f_xy[1] >=4:
d = (d+7)%8
elif s_xy == f_xy:
d = (d+7)%8
elif smell[f_xy[0]][f_xy[1]] >0:
d = (d+7)%8
else:
return f_xy[0],f_xy[1],d
return x,y,od
def move_fish(grid,smell,directions,s_xy):
new_grid = [[[] for _ in range(4)] for _ in range(4)]
for i in range(4):
for j in range(4):
if len(grid[i][j]) == 0: continue
for d in grid[i][j]:
dx,dy,nd = get_valid_move(smell,i,j,d,directions,s_xy)
new_grid[dx][dy].append(nd)
return new_grid
def is_valid_move(x,y):
return (0<=x<4) and (0<=y<4)
def compare(c1,c2,paths):
global path
t1,t2 = sum(map(lambda x: x[2],c1)),sum(map(lambda x : x[2],c2))
if t1 > t2:
return True
elif t1 == t2:
return "".join(map(str,path)) > "".join(map(str,paths))
# Encode the shark's moves as up=1, left=2, down=3, right=4.
def move_shark(grid,smell,s_xy,path,caught=[],step = 0):
if step == 3:
if compare(caught,globals()["caught"],path):
globals()["caught"] = caught[:]
globals()["next_move"] = s_xy[:]
globals()["path"] = path[:]
return
possible_move = [[-1,0],[0,-1],[1,0],[0,1]]
x,y = s_xy
for k,dxy in enumerate(possible_move,1):
temp = []
dx,dy = dxy
if not is_valid_move(x+dx,y+dy): continue
if len(grid[x+dx][y+dy]) > 0:
temp = grid[x+dx][y+dy][:]
grid[x+dx][y+dy] = []
caught.append([x+dx,y+dy,len(temp)])
move_shark(grid,smell,[x+dx,y+dy],path + [k],caught,step+1)
if temp:
grid[x+dx][y+dy] = temp[:]
caught.pop()
return
def remove_smell(smell,cnt):
for i in range(4):
for j in range(4):
if cnt-smell[i][j] == 2:
smell[i][j] = 0
def duplicate_fish(duplicate,grid):
for i in range(4):
for j in range(4):
if len(duplicate[i][j]) > 0 :
grid[i][j].extend(duplicate[i][j])
def remove_fish(grid,smell,caught,index):
for c in caught:
grid[c[0]][c[1]] = []
smell[c[0]][c[1]] = index
next_move = []
for i in range(s):
duplicate = deepcopy(grid)
caught,path = [],[5,5,5]
grid = move_fish(grid,smell,directions,s_xy)
move_shark(grid,smell,s_xy,[])
s_xy = next_move[:]
remove_fish(grid,smell,caught,i+1)
duplicate_fish(duplicate,grid)
remove_smell(smell,i+1)
k = 0
for i in range(4):
for j in range(4):
k += len(grid[i][j])
print(k)
```
#### File: practice/boj/2747.py
```python
n = int(input())
fibs = dict()
def fib(n):
if n not in fibs:
if n <= 1:
fibs[n] = n
return n
else:
fibs[n-1],fibs[n-2] = fib(n-1),fib(n-2)
fibs[n] = fibs[n-1] + fibs[n-2]
return fibs[n]
else:
return fibs[n]
print(fib(n))
```
#### File: leetcode/Hard/332.py
```python
from collections import defaultdict
class List(list): pass
class Solution:
def findItinerary(self, tickets: List[List[str]]) -> List[str]:
graph = defaultdict(list)
answer = []
for f,t in sorted(tickets,reverse=True):
graph[f].append(t)
def dfs(f):
while graph[f]:
dfs(graph[f].pop())
answer.append(f)
dfs("JFK")
return answer[::-1]
s = Solution()
tickets = [["MUC","LHR"],["JFK","MUC"],["SFO","SJC"],["LHR","SFO"]]
print(s.findItinerary(tickets))
```
#### File: leetcode/Medium/17.py
```python
class List(list): pass
from itertools import product
class Solution:
def letterCombinations(self, digits: str) -> List[str]:
if not digits: return []
phone = {
"2" : "abc",
"3" : "def",
"4" : "ghi",
"5" : "jkl",
"6" : "mno",
"7" : "pqrs",
"8" : "tuv",
"9" : "wxyz"
}
return list(map("".join, product(*(phone[x] for x in digits))))
s = Solution()
digit = "23"
print(s.letterCombinations(digit))
```
#### File: leetcode/Medium/39.py
```python
class List(list): pass
class Solution:
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
n = len(candidates)
answer = []
def makeAnswer(idx,path,temp_sum):
if idx == n: return
if temp_sum == target:
answer.append(path[:])
return
for i in range(idx,n):
check = temp_sum + candidates[i]
if check > target:
return
path.append(candidates[i])
makeAnswer(i,path,check)
path.pop()
candidates.sort()
makeAnswer(0,[],0)
return answer
s = Solution()
candidates = [2,3,6,7]
target = 7
print(s.combinationSum(candidates,target))
```
#### File: programmers/Lv2/14.py
```python
def solution(p):
answer = ''
def is_balanced(p):
return p.count("(") == p.count(")")
def is_correct(p):
count = 0
if is_balanced(p):
for x in p:
if x =='(': count += 1
else: count -=1
if count <0: return False
return True
return False
def remove_and_transform(p):
ret = ""
for c in p[1:-1]:
if c==')': ret += "("
else: ret += ")"
return ret
def func(p):
if len(p) ==0: return ""
for i in range(2,len(p)+2,2):
if is_balanced(p[:i]) and is_balanced(p[i:]):
u,v = p[:i],p[i:]
if is_correct(u):
u += func(v)
return u
else:
nc = "(" + func(v) + ")"
return nc + remove_and_transform(u)
answer = func(p)
return answer
```
#### File: programmers/Lv2/16.py
```python
import collections
import bisect
import itertools
def solution(info, queries):
answer = []
default_val=[
["java","python","cpp"],
["backend","frontend"],
["junior","senior"],
["chicken","pizza"]
]
def push(combi,my_dict,l,p,c,f,s):
my_dict[(l,p,c,f)].append(s)
for cm in combi:
for t in list(cm):
copy = [l,p,c,f]
for i in t:
copy[i] = '-'
my_dict[tuple(copy)].append(s)
combinations = []
examples = [0,1,2,3]
for i in range(1,5):
combinations.append(list(itertools.combinations(examples,i)))
my_dict = collections.defaultdict(list)
for x in info:
l,p,c,f,s = x.split()
push(combinations,my_dict,l,p,c,f,int(s))
for x in my_dict.items():
x[1].sort()
for query in queries:
l,p,c,fs = map(str.strip,query.split('and'))
f,s = fs.split()
s = int(s)
t = my_dict[l,p,c,f]
answer.append(len(t) - bisect.bisect_left(t,s))
return answer
```
#### File: programmers/Lv2/19.py
```python
from itertools import combinations,permutations
import math
def solution(numbers):
answer = 0
def is_prime(x):
if x <= 1: return False
for i in range(2,int(math.sqrt(x))+1):
if x % i ==0:
return False
return True
found = set()
for i in range(1,len(numbers)+1):
combi = list(combinations(numbers,i))
for c in combi:
for p in permutations(c):
value = int("".join(p))
if value not in found and is_prime(value):
answer += 1
found.add(value)
return answer
```
#### File: programmers/Lv2/1.py
```python
import collections
def solution(record):
answer = []
my_dict = collections.defaultdict(str)
action_dict = {"Enter": "님이 들어왔습니다.", "Leave": "님이 나갔습니다."}
for info in record[::-1]:
parsed = info.split()
if len(parsed) == 2: continue
action,user_id,nick_name = parsed
if my_dict[user_id]=="":
my_dict[user_id] = nick_name
for info in record:
parsed = info.split()
if parsed[0] == "Change": continue
action,user_id = parsed[0],parsed[1]
answer.append(my_dict[user_id]+action_dict[action])
return answer
```
#### File: programmers/Lv2/3.py
```python
def solution(n):
answer = ''
my_dict = ['4','1','2']
r = n
while r>0:
r,q = divmod(r,3)
answer += my_dict[q]
if q== 0:
r -= 1
return answer[::-1]
```
#### File: programmers/Lv2/6.py
```python
def solution(numbers, target):
answer = 0
def func(nums,i,n,t,res):
if i == n:
if sum(nums) == t:
res[0] += 1
return
func(nums,i+1,n,t,res)
nums[i] *= -1
func(nums,i+1,n,t,res)
res = [0]
func(numbers[:],0,len(numbers),target,res)
answer = res[0]
return answer
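# Example from the problem statement (comment only):
# solution([1, 1, 1, 1, 1], 3) should return 5.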
```
#### File: programmers/Lv2/7.py
```python
def solution(s):
check,stack = {},[]
alpha = "abcdefghijklmnopqrstuvwxyz"
reversed_s = s[::-1]
for a in alpha:
check[a] = len(s) - reversed_s.find(a) + 1
i = 0
while True:
if i == len(s):
if stack: answer = 0
else: answer = 1
break
cur = s[i]
if check[cur] < i or len(stack) > len(s)-i:
answer = 0
break
if stack and stack[-1] == cur:
stack.pop()
else:
stack.append(cur)
i += 1
return answer
```
#### File: programmers/Lv2/8.py
```python
import re
import itertools
def solution(expression):
answer = 0
p1 = re.compile('\d+')
p2 = re.compile("[-*+]")
nums = list(map(int,p1.findall(expression)))
ops = p2.findall(expression)
def cal(op,n1,n2):
if op == "-": return n1-n2
elif op == "+" : return n1 + n2
else : return n1*n2
pd = {}
per = list(itertools.permutations([str(i) for i in range(3)]))
for pd['+'],pd['-'],pd['*'] in per:
print(f"+:{pd['+']}, - : {pd['-']}, * : {pd['*']}")
n_stack = [nums[0],nums[1]]
op_stack = [ops[0]]
equation = f"({nums[0]} {ops[0]} {nums[1]}"
for index in range(1,len(nums)-1):
op1 = op_stack[-1]
op2 = ops[index]
n3 = nums[index+1]
print(equation)
if pd[op1] > pd[op2]:
temp = eval(equation+")")
equation = f"({temp}"
op_stack.append(op2)
equation += f"{op2} {n3}"
answer = max(answer,abs(eval(equation+")")))
return answer
```
#### File: programmers/Lv3/14.py
```python
from collections import defaultdict,deque
def solution(gems):
answer = []
s = e = 0
count = defaultdict(int)
window = deque()
current,n = 0, len(set(gems))
length = float('inf')
while e < len(gems):
cur = gems[e]
count[cur] += 1
window.append(e)
if count[cur] == 1:
current += 1
if current == n:
while True:
if length > e-s+1:
answer = [s+1,e+1]
length = min(length,e-s+1)
if length == n:
return [s+1,e+1]
peek = gems[s]
if count[peek] == 1:
break
count[gems[window.popleft()]] -= 1
s += 1
e += 1
while count[gems[s]] > 1:
length = min(length,e-s)
count[window.popleft()] -= 1
s += 1
return answer
```
#### File: programmers/Lv3/4.py
```python
from collections import defaultdict,deque
def solution(n, edges):
def bfs(graph,n):
queue = deque()
queue.append(1)
visited = [-1]*(n+1)
visited[1] = 0
while queue:
node = queue.popleft()
for v in graph[node]:
if visited[v] == -1:
queue.append(v)
visited[v] = visited[node]+1
return visited.count(max(visited))
graph = defaultdict(list)
for f,t in edges:
graph[f].append(t)
graph[t].append(f)
return bfs(graph,n)
```
#### File: programmers/Lv3/5.py
```python
from collections import defaultdict,deque
def solution(n, computers):
answer = 0
def bfs(graph,start,found):
queue = deque()
queue.append(start)
visited = []
while queue:
f = queue.popleft()
found.append(f)
for t in graph[f]:
if t not in visited:
queue.append(t)
visited.append(t)
def get_answer(grpah):
found = []
ret = 0
for k,v in graph.items():
if len(v) == 1:
ret += 1
found.append(k)
continue
if k not in found:
bfs(graph,k,found)
ret += 1
return ret
graph = defaultdict(list)
for i,com in enumerate(computers):
for ci,c in enumerate(com):
if c == 1: graph[i].append(ci)
answer = get_answer(graph)
return answer
```
#### File: programmers/Lv3/6.py
```python
from collections import defaultdict,deque
def solution(n, results):
answer = 0
graph = defaultdict(list)
win = [0]+[set() for _ in range(n)]
lose = [0]+[set() for _ in range(n)]
for i in range(1,n+1):
graph[i].extend([])
for a,b in results:
graph[a].append(b)
win[a].add(b)
lose[b].add(a)
for f,s in graph.items():
queue = deque([f])
visited = []
while queue:
v = queue.popleft()
win[f] |= win[v]
lose[v] |= lose[f]
for t in graph[v]:
if t not in visited:
queue.append(t)
visited.append(t)
for i in range(1,n+1):
if len(win[i]) + len(lose[i]) == n-1: answer += 1
return answer
```
#### File: programmers/Lv3/9.py
```python
import itertools
def solution(N, number):
if N == number: return 1
answer = -1
dp = [{0},{N}]
def add(dp,l,r,idx,N):
dp[idx].add(l+r)
dp[idx].add(l-r)
dp[idx].add(-l+r)
dp[idx].add(l*r)
if str(l).count(N) == len(str(l)) and str(r).count(N) == len(str(r)):
dp[idx].add(eval(f"{l}{r}"))
if r != 0:
dp[idx].add(l//r)
if l != 0:
dp[idx].add(r//l)
for idx in range(2,9):
dp.append(set())
i,j = 1,idx-1
while i <= j:
pd = list(itertools.product(dp[i],dp[j]))
for l,r in pd:
add(dp,l,r,idx,str(N))
i,j = i+1,j-1
if number in dp[idx]:
answer = idx
break
if answer != -1:
break
return answer
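# Example from the problem statement (comment only):
# solution(5, 12) should return 4, e.g. 12 = (55 + 5) / 5.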
```
|
{
"source": "jeonghoonkang/BerePi",
"score": 3
}
|
#### File: apps/co2/co2_t110.py
```python
import serial,os,time
import sys
import RPi.GPIO as GPIO
import logging
import logging.handlers
import json
import requests
import fcntl, socket, struct
DEBUG_PRINT = 1
SERIAL_READ_BYTE = 12
FILEMAXBYTE = 1024 * 1024 * 100 #100MB
LOG_PATH = '/home/pi/log_tos.log'
# important, sensorname should be pre-defined, unique sensorname
sensorname = "co2.ws"
url = "http://xxx.xxx.xxx.xxx/api/put"
def getHwAddr(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
return ':'.join(['%02x' %ord(char) for char in info[18:24]])
macAddr = getHwAddr('eth0')
macAddr = macAddr.replace(':','.')
level = 0
ppm = 0
def led0On():
GPIO.output(18, True)
def led1On():
GPIO.output(23, True)
def led2On():
GPIO.output(24, True)
def led3On():
GPIO.output(25, True)
def led0Off():
GPIO.output(18, False)
def led1Off():
GPIO.output(23, False)
def led2Off():
GPIO.output(24, False)
def led3Off():
GPIO.output(25, False)
def ledAllOff():
led0Off()
led1Off()
led2Off()
led3Off()
def ledAllOn():
led0On()
led1On()
led2On()
led3On()
def rled0On():
led0Off()
def rled1On():
led1Off()
def rled2On():
led2Off()
def rled3On():
led3Off()
def rled0Off():
led0On()
def rled1Off():
led1On()
def rled2Off():
led2On()
def rled3Off():
led3On()
def rledAllOff():
ledAllOn()
def rledAllOn():
ledAllOff()
def rled0Blink():
led0On()
time.sleep(0.5)
led0Off()
time.sleep(0.3)
led0On()
time.sleep(0.5)
led0Off()
time.sleep(0.3)
led0On()
def rled1Blink():
led1On()
time.sleep(0.5)
led1Off()
time.sleep(0.3)
led1On()
time.sleep(0.5)
led1Off()
time.sleep(0.3)
led1On()
def rled2Blink():
led2On()
time.sleep(0.5)
led2Off()
time.sleep(0.3)
led2On()
time.sleep(0.5)
led2Off()
time.sleep(0.3)
led2On()
def rled3Blink():
led3On()
time.sleep(0.5)
led3Off()
time.sleep(0.3)
led3On()
time.sleep(0.5)
led3Off()
time.sleep(0.3)
led3On()
# check length, alignment of incoming packet string
def syncfind():
index = 0
alignment = 0
while 1:
in_byte = serial_in_device.read(1)
# packet[8] should be 'm'
# end of packet is packet[10]
if in_byte is 'm' :
#print 'idx =', index, in_byte
alignment = 8
if alignment is 10 :
alignment = 1
index = 0
break
elif alignment > 0 :
alignment += 1
index += 1
def checkAlignment(incoming):
idxNum = incoming.find('m')
# idxNum is 9, correct
offset = idxNum - 9
if offset > 0 :
new_str = incoming[offset:]
new_str = new_str + incoming[:offset]
if offset < 0 :
offset = 12 + offset
new_str = incoming[offset:]
new_str = new_str + incoming[:offset]
return new_str
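# Illustrative sketch (assumption, not from the original source): assuming the
# sensor emits 12-byte frames like "  1234 ppm\r\n" (ppm digits at index 2-5,
# 'm' at index 9), a frame read two bytes late, e.g. "\r\n  1234 ppm", has 'm'
# at index 11, so offset = 11 - 9 = 2 and checkAlignment() rotates the string
# back into "  1234 ppm\r\n" before the digits are parsed in the main loop.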
def init_process():
print " "
print "MSG - [S100, T110 CO2 Sensor Driver on RASPI2, Please check log file : ", LOG_PATH
print "MSG - now starting to read SERIAL PORT"
print " "
# HW setup, GPIO
GPIO.setwarnings(False)
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
GPIO.setup(23, GPIO.OUT)
GPIO.setup(24, GPIO.OUT)
GPIO.setup(25, GPIO.OUT)
logger.info(' *start* GPIO all set, trying to open serial port, SW starting ')
rledAllOn()
######################################################################
# START Here. Main
######################################################################
# set logger file
logger = logging.getLogger(sensorname)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fileHandler = logging.handlers.RotatingFileHandler(LOG_PATH, maxBytes=FILEMAXBYTE,backupCount=10)
fileHandler.setLevel(logging.DEBUG)
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
#consoleHandler = logging.StreamHandler()
#consoleHandler.setLevel(logging.DEBUG)
#consoleHandler.setFormatter(formatter)
#logger.addHandler(consoleHandler)
# call raspi init...
init_process()
# open RASPI serial device, 38400
try:
serial_in_device = serial.Serial('/dev/ttyAMA0',38400)
except serial.SerialException, e:
logger.error("Serial port open error")
rled0Off()
rled1Off()
rled2Off()
rled3Off()
while True:
ppm = 0
try:
in_byte = serial_in_device.read(SERIAL_READ_BYTE)
pos = 0
except serial.SerialException, e:
rled0Off()
rled1Off()
rled2Off()
rled3Off()
if not (len(in_byte) is SERIAL_READ_BYTE) :
logger.error("Serial packet size is strange, %d, expected size is %d" % (len(in_byte),SERIAL_READ_BYTE))
print 'serial byte read count error'
continue
# sometimes, 12 byte alighn is in-correct
# espacially run on /etc/rc.local
if not in_byte[9] is 'm':
shift_byte = checkAlignment(in_byte)
in_byte = shift_byte
if ('ppm' in in_byte):
if DEBUG_PRINT :
print '-----\/---------\/------ DEBUG_PRINT set -----\/---------\/------ '
for byte in in_byte :
print "serial_in_byte[%d]: " %pos,
pos += 1
if ord(byte) is 0x0d :
print "escape:", '0x0d'," Hex: ", byte.encode('hex')
continue
elif ord(byte) is 0x0a :
print "escape:", '0x0a'," Hex: ", byte.encode('hex')
continue
print " String:", byte, " Hex: ", byte.encode('hex')
if not (in_byte[2] is ' ') :
ppm += (int(in_byte[2])) * 1000
if not (in_byte[3] is ' ') :
ppm += (int(in_byte[3])) * 100
if not (in_byte[4] is ' ') :
ppm += (int(in_byte[4])) * 10
if not (in_byte[5] is ' ') :
ppm += (int(in_byte[5]))
logline = sensorname + ' CO2 Level is '+ str(ppm) + ' ppm'
#now = time.localtime()
#now_str = "%04d-%02d-%02d %02d:%02d:%02d" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)
#logline += now_str
if DEBUG_PRINT :
print logline
if ppm > 2500 :
logger.error("%s", logline)
continue
else :
logger.info("%s", logline)
data = {
"metric": "rc1.co2.ppm",
"timestamp": time.time(),
"value": ppm,
"tags": {
"eth0": macAddr,
"stalk": "VOLOSSH" ,
"sensor" : "co2.t110",
"name" : sensorname,
"floor_room": "10fl_min_room",
"building": "woosung",
"owner": "kang",
"country": "kor"
}
#tags should be less than 9, 8 is alright, 9 returns http error
}
try :
ret = requests.post(url, data=json.dumps(data))
logger.info("http ret %s", ret)
if DEBUG_PRINT :
print "http return : %s" %ret
except requests.exceptions.Timeout :
logger.error("http connection error, Timeout %s", ret)
continue
except requests.exceptions.ConnectionError :
logger.error("http connection error, Too many requests %s")
continue
#except requests.exceptions :
# print " --------------"
# continue
# LED indication by CO2 level (matches the thresholds used below):
# ppm < 800         : LED0 blinks
# 800  <= ppm < 1000: LED0 on
# 1000 <= ppm < 1300: LED1 on
# 1300 <= ppm < 1600: LED2 on
# 1600 <= ppm < 1900: LED2 blinks
# ppm >= 1900       : all LEDs blink
if ppm < 800 :
rled0Blink()
rled0Blink()
rled0Blink()
led1Off()
led2Off()
led3Off()
elif ppm < 1000 :
led0On()
led1Off()
led2Off()
led3Off()
elif ppm < 1300 :
led0Off()
led1On()
led2Off()
led3Off()
elif ppm < 1600:
led0Off()
led1Off()
led2On()
led3Off()
elif ppm < 1900:
led0Off()
led1Off()
rled2Blink()
rled2Blink()
rled2Blink()
led3Off()
elif ppm >= 1900 :
rled0Blink()
rled0Blink()
rled0Blink()
rled1Blink()
rled1Blink()
rled1Blink()
rled2Blink()
rled2Blink()
rled2Blink()
rled3Blink()
rled3Blink()
rled3Blink()
GPIO.cleanup()
```
#### File: excel/extract_ids/save_xlsx_list_df.py
```python
from __future__ import print_function
import argparse
import pandas as pd
import os
import sys
import math
import xlsxwriter
def brush_argparse():
parser = argparse.ArgumentParser()
#parser.add_argument("-xlsx", help="xlsx ํ์ผ ์ด๋ฆ", action="store_true")
parser.add_argument("-xlsx", help="xlsx ํ์ผ ์ด๋ฆ")
args = parser.parse_args()
return args
if __name__ =='__main__':
filename = 'out_put_list.py'
_args_pack_ = brush_argparse()
#print (_args_pack_)
_args_ = vars(_args_pack_)
#print (_args_) #key xlsx : value filename.extension
_input_xlsx = _args_['xlsx']
if _input_xlsx == None:
print ('Please input xlsx filename on cli run argument')
sys.exit(0)
# feature to add: show the list of files and let the user choose one
# todo : take the currently hard-coded Excel sheet/column names as user input
dframe = pd.read_excel(_input_xlsx, sheet_name='201906')
dframe = dframe.sort_values(["COUNT"], ascending=[False]).reset_index(drop=True)
carID = dframe["PHONE_NUM"].unique()
id_list = carID.tolist()
_len = len(id_list)
print ("len of list = ", _len)
#print (type(id_list))
# to String, save list object to string
__ofile = open(filename,"w")
print ("writing list to file ")
print ("id_list=", file=__ofile)
print (id_list, file=__ofile)
print ('#', 'length=', _len, file=__ofile)
print ("finish : file write ")
__ofile.close()
```
#### File: apps/lcd_berepi/Baseball.py
```python
from gluon import *
from gluon import current
import urllib2
global g_myScore, g_otScore, g_playState
g_Score_1 = 0
g_Score_2 = 0
g_playState = "init"
def getBaseballinfo():
global g_Score_1, g_Score_2, g_playState
baseballinfo = ["Baseball","Information"]
try:
page = urllib2.urlopen("http://sports.news.naver.com/schedule/index.nhn?category=kbo")
text = page.read()
except httplib.IncompleteRead, e:
baseballinfo[0] = "Sorry,"
baseballinfo[1] = "Now Loading..."
return baseballinfo
if '<ul class="sch_vs" id="todaySchedule">' not in text:
baseballinfo[0] = "Sorry,"
baseballinfo[1] = "Not Season."
return baseballinfo
cut_1 = text.split('<ul class="sch_vs" id="todaySchedule">')[1].split('<form name="monthlyScheduleForm"')[0]
cut_2 = cut_1.split('<div class="vs_cnt ">')
myTeamInfo = " "
for i in cut_2:
if teamName(getMyTeamfromDB(),0) in i:
myTeamInfo = i
if '<div class="cancel">' in myTeamInfo:
baseballinfo[0] = "Sorry,"
baseballinfo[1] = "Game Cancel."
return baseballinfo
play_state = myTeamInfo.split('<em class="state">')[1].split('</em>')[0].strip()
if g_playState == "init":
g_playState = play_state
if g_playState in ['18:30']:
if play_state not in ['18:30','종료']:
th = threading.Thread(target=printAlarm_game_start_end, args=())
th.start()
g_playState = play_state
if g_playState not in ['18:30','종료']:
if play_state in ['종료']:
th = threading.Thread(target=printAlarm_game_start_end, args=())
th.start()
g_playState = 'init'
if play_state in ['18:30']:
baseballinfo[0] = "Sorry,"
baseballinfo[1] = "Not Game Time."
return baseballinfo
team_1=["name1","score1"]
team_2=["name2","score2"]
team_1[0] = myTeamInfo.split('alt="')[1].split('" title=')[0]
team_2[0] = myTeamInfo.split('alt="')[2].split('" title=')[0]
team_1[1] = myTeamInfo.split('<strong class="vs_num">')[1].split('<')[0]
team_2[1] = myTeamInfo.split('<strong class="vs_num">')[2].split('<')[0]
if (g_Score_1 != int(team_1[1])) or (g_Score_2 != int(team_2[1])):
th = threading.Thread(target=printAlarm_game_score, args=())
th.start()
g_Score_1 = int(team_1[1])
g_Score_2 = int(team_2[1])
baseballinfo[0] = teamName(team_1[0],1) + " vs " + teamName(team_2[0],1)
baseballinfo[1] = team_1[1] + " : " + team_2[1] + " " + convertState(play_state)
return baseballinfo
#def getMyTeamfromDB():
# db = current.db
# if db(db.tablebar_baseball.team_name).isempty():
# return "์ผ์ฑ"
# else:
# return db(db.tablebar_baseball.id>0).select().first().team_name
def teamName(teamcode, code): #code - 0:searching , 1:printing
if teamcode == "์ผ์ฑ":
return "SS"
elif teamcode == "NC":
return "NC"
elif teamcode == "๋์ฐ":
if code == 0:
return "OB"
elif code == 1:
return "DB"
elif teamcode == "๋ฅ์ผ":
if code == 0:
return "WO"
elif code == 1:
return "NX"
elif teamcode == "ํํ":
return "HH"
elif teamcode == "SK":
return "SK"
elif teamcode == "KIA":
if code == 0:
return "HT"
elif code == 1:
return "KIA"
elif teamcode == "๋กฏ๋ฐ":
return "LT"
elif teamcode == "LG":
return "LG"
elif teamcode == "KT":
return "KT"
else:
return "default"
def convertState(state):
if state in ['종료']:
return 'end'
else:
if '초' in state:
return '%sTOP'%state[:state.find('회초')]
else:
return '%sBOT'%state[:state.find('회말')]
def printAlarm_game_start_end():
for i in range(0,10):
redLCDon()
time.sleep(0.2)
whiteLCDon()
time.sleep(0.2)
def printAlarm_game_score():
for i in range(0,3):
redLCDon()
time.sleep(0.2)
yellowLCDon()
time.sleep(0.2)
pinkLCDon()
time.sleep(0.2)
whiteLCDon()
time.sleep(0.2)
```
#### File: apps/led_berepi/led1_test.py
```python
import serial,os,time
import sys
import RPi.GPIO as GPIO
import logging
import json
import requests
import socket
import fcntl
import struct
debug_print = 0
level = 0
ppm = 0
# important, sensorname should be pre-defined, unique sensorname
bdname = "led.00"
url = "http://xx.xx.xx.xx:4242/api/put"
def getHwAddr(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
return ':'.join(['%02x' %ord(char) for char in info[18:24]])
macAddr = getHwAddr('eth0')
macAddr = macAddr.replace(':','.')
logging.basicConfig(filename='/home/pi/log_led1_test.log',level=logging.DEBUG)
logging.info("Start------------------------------- ")
bled = 16
gled = 20
rled = 21
# HW setup, GPIO
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
GPIO.setup(bled, GPIO.OUT)
GPIO.setup(gled, GPIO.OUT)
GPIO.setup(rled, GPIO.OUT)
GPIO.output(bled, True)
GPIO.output(gled, True)
GPIO.output(rled, True)
time.sleep(1)
logging.info('---->>>> GPIO all set ')
def ledb_on():
GPIO.output(bled, True)
def ledg_on():
GPIO.output(gled, True)
def ledr_on():
GPIO.output(rled, True)
def ledb_off():
GPIO.output(bled, False)
def ledg_off():
GPIO.output(gled, False)
def ledr_off():
GPIO.output(rled, False)
def ledall_off():
GPIO.output(bled, False)
GPIO.output(gled, False)
GPIO.output(rled, False)
led_time_idx = 0
while True:
led_time_idx += 1
if ((led_time_idx % 3) == 0) :
logline = bdname + 'LED is '
now = time.localtime()
now_str = "%04d-%02d-%02d %02d:%02d:%02d" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)
logline += now_str
logging.warning("logline = %s", logline)
logline = ""
ledall_off()
ledb_on()
elif ((led_time_idx % 3) == 1) :
ledall_off()
ledg_on()
elif ((led_time_idx % 3) == 2) :
ledall_off()
ledr_on()
time.sleep(0.5)
GPIO.cleanup()
```
#### File: apps/mail/event_shot.py
```python
import requests
# Try running this locally.
def send_email():
print "email sending....."
# temperature check
# when out range of temp. send email
if __name__ == "__main__" :
send_email()
```
#### File: meter/portable/put_sensor.py
```python
import serial,os,time
import sys
#import RPi.GPIO as GPIO
import logging
import logging.handlers
import json
import requests
import socket
import fcntl
import struct
debug_print = 1
FILEMAXBYTE = 1024 * 1024 * 100 #100MB
LOG_PATH = '/home/pi/log_tos.log'
level = 0
ppm = 777
import urllib
try:
import http.client as http_client
except ImportError:
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
# important, sensorname should be pre-defined, unique sensorname
sensorname = "KETI_1"
url = "http://172.16.17.32:8000/svc/PointService.asmx/SetPointValue"
#def getHwAddr(ifname):
# s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
# return ':'.join(['%02x' %ord(char) for char in info[18:24]])
macAddr = 'eth0'
#logger = logging.getLogger(sensorname)
#logger.setLevel(logging.DEBUG)
#formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
#s
#fileHandler = logging.handlers.RotatingFileHandler(LOG_PATH, maxBytes=FILEMAXBYTE,backupCount=10)
#fileHandler.setLevel(logging.DEBUG)
#fileHandler.setFormatter(formatter)
#logger.addHandler(fileHandler)
#try:
logline = sensorname + ' log ' + str(ppm) + ' ppm'
params = urllib.urlencode({'value': 7})
#headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
#headers = {'Content-type': 'application/x-www-form-urlencoded'}
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept':'text/plain'}
data = {
#"metric": 'keti' ,
#"timestamp": 'tea',
"value": '0'
# "tags": {
# "hw": "raspberrypi2" ,
# "sensor" : "sensor.xxx",
# "name" : sensorname,
# "floor_room": "10fl_min_room",
# "zone":"1st zone",
# "building": "name",
# "owner": "me",jjjj
# "country": "kor"
# }
#tags should be less than 9, 8 is alright, 9 returns http error
}
print data
print headers
#conn = httplib.HTTPConnection("172.16.17.32:8000")
#conn.request("POST", "/svc/PointService.asmx/SetPointValue",params,headers)
ret = requests.post(url, data=json.dumps(data), headers=headers)
print ret.text
#ret = requests.post(url, data=data, headers=headers)
#ret = conn.getresponse()
print ret.status_code, ret.reason
#except requests.exceptions.Timeout :
#except requests.exceptions.ConnectionError :
# logger.info("%s",logline)
```
#### File: otsdb/put/put_test.py
```python
devel_dir="/home/pi/devel"
tmp_dir=devel_dir+"/BerePi/apps"
from types import *
import sys
import time
import datetime
import requests
import json
import subprocess
import ketidatetime
import logging
logger = logging.getLogger(__name__)
''' example : url = "http://10.0.0.43:4242/api/put"
warning : keep it to 50 JSON records per put to OpenTSDB on the first stage.
if you add more, you should test the amount of TX packets '''
def otsdb_restful_put(url, metric=None, ts=None, val=None, tags=None, iter_n=1 ):
if tags == None :
sname = "kang.tinyos.test.000"
sensor = "keti.put.test"
tags = {
"sensor" : "keti.put_test",
"name" : sname
} #tags should be less than 9, 8 is alright, 9 returns http error
mname = metric
if metric == None :
mname = "__keti.tinyos.test.0001__"
print ts
if ts == None or ts == 'now' : uts = int(time.time())
else :
ts = ketidatetime._check_time_len(ts)
print ts
uts = ketidatetime.datetime2ts(ts)
print uts
print " metric name = ", mname
print tags
if isinstance(tags, basestring): tags = eval(tags)
print type(tags)
''' if you want to add iteration, use the iter_n variable '''
for i in range(0,iter_n):
if val == None : exit('can not make forward val = None')
data = {
"metric": mname, #alphabet and number . _ /
"timestamp": int(uts),
"value": val, #integer
"tags": tags
}
print data
''' if you want to check inserted POINT on TSDB server,
use below URL to check, you should modify URL PORT to proper IP address
http://URL:PORT/api/query?start=2018/06/25-00:00:00&end=2018/06/26-00:00:00&m=none:keti.tinyos.packet.test
'''
try :
#s = requests.Session()
ret = requests.post(url, data=json.dumps(data))
print ret.content
print "\n return is ", ret
outstring = "\n now trying to put below data to TSDB, url %s " %(url)
outstring += str(data)
outstring += "\n try %d / %d " % (i, iter_n-1)
sys.stdout.write(outstring)
sys.stdout.flush()
except requests.exceptions.Timeout :
logger.error("http connection error, Timeout %s", ret)
pass
except requests.exceptions.ConnectionError :
logger.error("http connection error, Too many requests %s")
pass
return
def helpmsg():
volubility = len(sys.argv)
# merge list word items to one string line
_stringargv = " ".join(sys.argv)
print "\n ********************************************** "
print " *** TEST OpenTSDB put *** "
print " *** By 3POKang *** "
print " ********************************************** "
timestamp=time.localtime()
print " Thanks for the try, time : ", time.asctime(timestamp) , \
" >> Volubility, Arg length = ", volubility
if volubility > 1:
argv1 = sys.argv[1]
print " sys.argv[%d] = %s" % (1, argv1) ,
else :
exit (" you should input the IP address of openTSDB server like 10.0.0.1:4242")
return argv1
import argparse
def parse_args():
story = 'OpenTSDB needs many arguments URL, start time, end time, port '
usg = '\n python tsdb_read.py -url x.x.x.x \
-port 4242 -start 2016110100 -end 2016110222 \
-rdm metric_name, -wm write_metric_name -tags="{id:911}" --help for more info'
parser=argparse.ArgumentParser(description=story,
usage=usg,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-url", default="127.0.0.1",
help="URL input, or run fails")
parser.add_argument("-start", default='2016070100',
help="start time input, like 2016110100")
parser.add_argument("-port", default=4242,
help="port input, like 4242")
parser.add_argument("-val", default=802,
help="value which will be inserted to OpenTSDB")
parser.add_argument("-wtm", default='__keti_test__',
help="write-metric ")
parser.add_argument("-tags", default="{'sensor':'_test_sensor_', 'desc':'_test_'}",
help="tags ")
args = parser.parse_args()
#check args if valid
url = args.url
_ht = 'http://'
if ( url[:7] != _ht ) : url = _ht + url
port = args.port
if port == 80 : port = ''
else : port = ":"+ str(port)
url = url + port +'/api/put'
wm = args.wtm
if wm == None :
print usg
exit("... I can not do anything without metric")
return url, wm, args.start, args.val, args.tags
def put_tsdb(url, write_metric, time, val, tags):
if url.find('http') == -1 :
url = 'http://' + url + '/api/put' #+ ':4242'
otsdb_restful_put(url, write_metric, time, val, tags)
#python put_test.py -url 192.168.0.200 -start 2018081800 -val 21766000 -wtm rc01.t_power.WH -tags "{'id':'911'}"
def put_now_tsdb(url, write_metric, time, val, tags):
if url.find('http') == -1 :
url = 'http://' + url + '/api/put' #+ ':4242'
otsdb_restful_put(url, write_metric, 'now', val, tags)
if __name__== "__main__" :
print "...starting..."
args = parse_args()
print args
otsdb_restful_put(args[0], args[1], args[2], args[3], args[4])
time.sleep(0.1)
print "\n ...ending..."
```
#### File: apps/sht20/sht20.py
```python
import smbus
import time
SHT20_ADDR = 0x40 # SHT20 register address
#SHT20_CMD_R_T = 0xE3 # hold Master Mode (Temperature)
#SHT20_CMD_R_RH = 0xE5 # hold Master Mode (Humidity)
SHT20_CMD_R_T = 0xF3 # no hold Master Mode (Temperature)
SHT20_CMD_R_RH = 0xF5 # no hold Master Mode (Humidity)
#SHT20_WRITE_REG = 0xE6 # write user register
#SHT20_READ_REG = 0xE7 # read user register
SHT20_CMD_RESET = 0xFE # soft reset
bus = smbus.SMBus(1) # 0 = /dev/i2c-0 (port I2C0), 1 = /dev/i2c-1 (port I2C1)
def reading(v):
#bus.write_quick(SHT20_ADDR)
if v == 1:
bus.write_byte(SHT20_ADDR, SHT20_CMD_R_T)
elif v == 2:
bus.write_byte(SHT20_ADDR, SHT20_CMD_R_RH)
else:
return False
time.sleep(.1)
b = (bus.read_byte(SHT20_ADDR)<<8)
b += bus.read_byte(SHT20_ADDR)
return b
def calc(temp, humi):
tmp_temp = -46.85 + 175.72 * float(temp) / pow(2,16)
tmp_humi = -6 + 125 * float(humi) / pow(2,16)
return tmp_temp, tmp_humi
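# Worked example (illustrative raw readings, added for clarity): a raw 16-bit
# temperature word of 26000 gives -46.85 + 175.72 * 26000 / 65536 ~= 22.86 degC
# and a raw humidity word of 30000 gives -6 + 125 * 30000 / 65536 ~= 51.22 %RH,
# matching the SHT20 datasheet conversion formulas implemented in calc().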
if __name__== "__main__" :
while True:
temp = reading(1)
humi = reading(2)
if not temp or not humi:
print "register error"
break
value = calc(temp, humi)
print "temp : %s\thumi : %s" % (value[0], value[1])
time.sleep(1)
```
#### File: apps/sht20/sht20_ubuntu64.py
```python
import lgpio
import time
SHT20_ADDR = 0x40 # SHT20 register address
#SHT20_CMD_R_T = 0xE3 # hold Master Mode (Temperature)
#SHT20_CMD_R_RH = 0xE5 # hold Master Mode (Humidity)
SHT20_CMD_R_T = 0xF3 # no hold Master Mode (Temperature)
SHT20_CMD_R_RH = 0xF5 # no hold Master Mode (Humidity)
#SHT20_WRITE_REG = 0xE6 # write user register
#SHT20_READ_REG = 0xE7 # read user register
SHT20_CMD_RESET = 0xFE # soft reset
#bus = smbus.SMBus(1) # 0 = /dev/i2c-0 (port I2C0), 1 = /dev/i2c-1 (port I2C1)
bus = lgpio.i2c_open(1, SHT20_ADDR)
def reading(v):
if v == 1:
lgpio.i2c_write_byte(bus, SHT20_CMD_R_T)
elif v == 2:
lgpio.i2c_write_byte(bus, SHT20_CMD_R_RH)
else:
return False
time.sleep(.1)
b = (lgpio.i2c_read_byte(bus)<<8)
b += lgpio.i2c_read_byte(bus)
return b
def calc(temp, humi):
tmp_temp = -46.85 + 175.72 * float(temp) / pow(2,16)
tmp_humi = -6 + 125 * float(humi) / pow(2,16)
return tmp_temp, tmp_humi
if __name__== "__main__" :
while True:
temp = reading(1)
humi = reading(2)
if not temp or not humi:
print ("register error")
break
value = calc(temp, humi)
print ("temp : %s\thumi : %s" % (value[0], value[1]))
time.sleep(1)
```
#### File: telegram/diskreport/main.py
```python
import json
import time
import socket
import fcntl
import struct
import os
import datetime
import telegram
import requests
from pytz import timezone
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl (s.fileno(), 0x8915,
struct.pack('256s', bytes(ifname[:15], 'utf-8')))
return ''.join(['%d.' % b for b in info[20:24]])[:-1]
def get_free_space():
ret = os.statvfs('./')
free_space = ret.f_frsize * ret.f_bfree / 1024 / 1024 / 1024 # gigabytes
return free_space
def send_message(token, chat_id, message):
bot = telegram.Bot(token=token)
bot.sendMessage(chat_id=chat_id, text=message, parse_mode="markdown")
if __name__ == "__main__" :
monitoring_time = datetime.datetime.now(timezone("Asia/Seoul"))
message = f"""{monitoring_time}"""
message += ' \n(****) server running, HDD free space ' + str(int(get_free_space())) + ' GByte, IP address: '
local_ip = get_ip_address('enp1s0')
message += local_ip
with open("telegramconfig.json") as f: #์ ๋๊ฒฝ๋ก ์ฌ์ฉ ์ถ์ฒ
settings = json.load(f)
for x,y in settings.items():
#print (x, y)
if x == "telegram_bot_setting":
for sub in y:
#print (sub)
token = sub["token"]
chat_id = sub["chat_id"]
try:
send_message(token=token, chat_id=chat_id, message=message)
except telegram.error.TelegramError as e:
None
print ("finish end of sending telegram message via Bot, good bye .... ")
# 10 */8 * * * python3 /home/---/devel/crontab/diskreport/main.py > /home/---/devel/crontab/diskreport/err.txt 2>&1
```
#### File: apps/tinyosGW/index.py
```python
import os
import cgi
#Check the .log file and return to just file name
def chkFileExt(filename):
if filename.find('.log') > 0:
filename = filename.split('.')[0]
else:
return None
return str(filename)
#convert from escape sequence to html (/n > <br>, /r > <br>)
def fileToHTML(content):
content = content.replace('\n', '<br>')
content = content.replace('\r', '<br>')
return content
# use the current working directory if no path is given
def printFilesList(path_dir=os.getcwd()+'/out/'):
return os.listdir(path_dir)
#start to cgi
form = cgi.FieldStorage()
if 'parameter' in form:
fileContent = form['parameter'].value
webContent = open('out/'+fileContent, 'r').read()
else:
fileContent = None
webContent = ""
print("Content-type:text/html\r\n\r\n")
print('<html>')
print('<head>')
print('<title>tinyosGW status</title>')
print('</head>')
print('<body>')
print('<h2>tinyosGW status</h2>')
print('<ol>')
for file_name in printFilesList():
if chkFileExt(file_name): print('<li><a href="index.py?parameter='+file_name+'">'+str(chkFileExt(file_name))+'</a></li>')
print('</ol>')
print('<p>{webcontent}</p>'.format(webcontent=fileToHTML(webContent)))
print('</body>')
print('</html>')
```
#### File: apps/TTS/sample.py
```python
def speak(text):
from gtts import gTTS
import os
tts = gTTS(text=text, lang='ko')
tts.save("tmp_talk.mp3")
os.system("omxplayer tmp_talk.mp3")
os.system("rm -f tmp_talk.mp3")
text = "์๋
ํ์ธ์, ์ข์ ์์นจ์
๋๋ค"
speak(text)
```
|
{
"source": "jeonghoonkang/keti-tinyos-CSV2JSON",
"score": 3
}
|
#### File: keti-tinyos-CSV2JSON/CSV2JSON/CSVJSON_main.py
```python
from __future__ import print_function
#import requests
import time
import json
from collections import OrderedDict
import pandas as pd
import sys
import os
import argparse
import shutil
import copy
from datetime import datetime
import pcs
# rename the output directory to "output"
# Result, changed JSON, etc. are stored under the output directory
# the write-related functions/modules would be better split into a separate file
ARG= 50 #argument
def dprint(s): # debug_print
global g_DEBUG
if (g_DEBUG):
print (' ', s)
else : return None
def brush_argparse():
global g_DEBUG # flag that enables the dprint function
parser = argparse.ArgumentParser()
parser.add_argument("-debug", help="debug mode run", action="store_true")
parser.add_argument("-jsonpack", help="how many json's in one output file", type=int)
parser.add_argument("-filetype", help="csv or xlsx")
parser.add_argument("-filekind", help="select file type number you'll store")
parser.add_argument("-field", help="select field idx you'll store")
parser.add_argument("-ts", help="select timestamp field idx you'll store")
parser.add_argument("-carid", help="select carid field idx you'll store")
parser.add_argument("-metric", help="select metric you'll store")
parser.add_argument("-outdir", help="select outdir", type=str, default='./output')
parser.add_argument("-pn", help="select producer num", default='4')
parser.add_argument("-cn", help="select consumer num", default='2')
args = parser.parse_args()
if (args.debug) :
g_DEBUG = True
dprint ('DPRINT Enabled ************************************** ' + __file__ )
return args
def make_result_dirctory_tree(_savepath, filepath, _carid):
_savepath = _savepath+"/resultData"
if not(os.path.isdir(_savepath)):
os.makedirs(os.path.join(_savepath))
filepath = filepath.split('originalCSVData')[-1]
if filepath[0] == '/':
filepath = filepath[1:]
filepath = filepath.split('/')[0:-1]
for sub in filepath:
_savepath = _savepath + '/' + sub
if not(os.path.isdir(_savepath)):
os.makedirs(os.path.join(_savepath))
if _savepath[-1] == '/':
_savepath = _savepath[0:-1]
_savepath = _savepath + '/' + _carid
if not(os.path.isdir(_savepath)):
os.makedirs(os.path.join(_savepath))
return _savepath
# make a file
def writeJson(_buffer, _json_title):
with open(_json_title+'.json', 'a') as f:
json.dump(_buffer, f, ensure_ascii=False, indent=4)
# create folder & make files
def writeJsonfiles(_buffer, _json_title, _num, _fname, _carid, _outdir):
#try:
savepath = os.path.dirname(os.path.realpath(__file__))
savepath = savepath + _outdir[1:]
_indx = savepath.find('//')
if _indx != -1:
savepath = savepath[:_indx+1] + savepath[_indx+2:]
if not(os.path.isdir(savepath)):
os.makedirs(os.path.join(savepath))
#print ('-->',savepath)
savepath = make_result_dirctory_tree(savepath, _fname, _carid)
# handle a Unicode decode error that occurred with some files
with open(str(savepath + '/' +_json_title+'_'+str(_num)+'.json'), 'w') as f:
try:
json.dump(_buffer, f, ensure_ascii=False, indent=4)
# some files raise an ascii-related DecodeError
except:
json.dump(_buffer, f, ensure_ascii=True, indent=4)
print ('[' + _json_title + '_'+str(_num)+'.json] saved')
# convert Time to Epoch
def convertTimeToEpoch(_time):
date_time = "%s.%s.%s %s:%s:%s" %(_time[8:10], _time[5:7], _time[:4], _time[11:13], _time[14:16], _time[17:])
pattern = "%d.%m.%Y %H:%M:%S"
epoch = int (time.mktime(time.strptime(date_time, pattern)))
return epoch
# YYYYmmddHHMMSS -> dd.mm.YY HH:MM:SS
def convertTimeToEpoch_v2(_time):
date_time = "%s.%s.%s %s:%s:%s" %(_time[6:8], _time[4:6], _time[:4], _time[8:10], _time[10:12], _time[12:])
pattern = "%d.%m.%Y %H:%M:%S"
epoch = int (time.mktime(time.strptime(date_time, pattern)))
return epoch
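# Usage sketch (timestamps are illustrative only):
#   convertTimeToEpoch("2019-06-01 12:34:56")   # expects "YYYY-MM-DD HH:MM:SS"
#   convertTimeToEpoch_v2("20190601123456")     # expects "YYYYmmddHHMMSS"
# both rearrange the string into "dd.mm.YYYY HH:MM:SS" and return the same
# local-time epoch seconds via time.mktime().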
# display progress bar
def printProgressBar(iteration, total, prefix = u'처리중', suffix = u'완료',\
decimals = 1, length = 60, fill = '█'):
# show the progress of the current work
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' %(prefix, bar, percent, suffix), end='\r')
sys.stdout.flush()
if iteration == total:
None
#print()
# build _dataInfo, a summary of the data, before saving it to file
def initDataInfo(_dataInfo, __list):
_dataInfo["1.metric"]= __list[0]["metric"]
vallist = [d["value"] for d in __list]
tslist = [int(d["timestamp"]) for d in __list]
_dataInfo["2.mints"]=min(tslist)
_dataInfo["3.maxts"]=max(tslist)
_dataInfo["4.mindt"]=str(datetime.fromtimestamp(_dataInfo["2.mints"]))
_dataInfo["5.maxdt"]=str(datetime.fromtimestamp(_dataInfo["3.maxts"]))
_dataInfo["6.totalCnt"]=len(__list)
_dataInfo["7.minval"]=min(vallist)
_dataInfo["8.maxval"]=max(vallist)
_dataInfo["9.tags"]=__list[0]["tags"]
def ToJsonFormat(_list, _args_pack_, _json_title, _filename):
_list = _list.sort_values(by=[_args_pack_.ts.decode('utf-8')], axis=0)
Car_id = str(_list[_args_pack_.carid.decode('utf-8')].iloc[0])
dftime = _list[_args_pack_.ts.decode('utf-8')].tolist()
dfval = _list[_args_pack_.field.decode('utf-8')].tolist()
data_len = len(_list)
_buffer = []
count=0
perCount=0
num=0 #
for i in range(len(dftime)):
perCount += 1
value = dfval[i]
# skip NaN value & ts
if value == 'nan' or dftime[i] == 'nan':
continue
elif value == 'NaN' or dftime[i] == 'NaN':
continue
ts = convertTimeToEpoch(dftime[i])
ts = str(ts)
csv_data = dict()
csv_data['metric'] = _args_pack_.metric
csv_data["tags"] = dict()
csv_data['timestamp'] = ts
csv_data["value"] = value
csv_data["tags"]['VEHICLE_NUM'] = str(Car_id)
csv_data["tags"]["fieldname"] = _args_pack_.field
count += 1
_buffer.append(csv_data)
if count >= _args_pack_.jsonpack:
dataInfo={}
initDataInfo(dataInfo, _buffer)
dataInfo = OrderedDict(sorted(dataInfo.items(), key=lambda t: t[0]))
_buffer.insert(0, dataInfo)
num +=1
writeJsonfiles(_buffer, _json_title, num, _filename, Car_id, _args_pack_.outdir) #save files by bundle
_buffer = []
count = 0
#printProgressBar(perCount, data_len)
if len(_buffer) != 0:
# write the data remaining in the buffer to a json file
#writeJson(_buffer, _json_title)# make a file
dataInfo={}
initDataInfo(dataInfo, _buffer)
dataInfo = OrderedDict(sorted(dataInfo.items(), key=lambda t: t[0]))
_buffer.insert(0, dataInfo)
num +=1
writeJsonfiles(_buffer, _json_title, num, _filename, Car_id, _args_pack_.outdir) #save files by bundle
def field_IndextoStr(_fieldnum, _collist):
return _collist[_fieldnum]
def ts_IndextoStr(_tsnum, _collist):
return _collist[_tsnum]
def carid_IndextoStr(_caridnum, _collist):
return _collist[_caridnum]
def CSVtoDF(_filename, _args_pack_, fieldidx, tsidx, carididx):
print("\nreading %s" %_filename)
if _args_pack_.filetype == 'xlsx' :
df = pd.read_excel(_filename)
elif _args_pack_.filetype == 'csv' :
try:
chunks = pd.read_csv(_filename, usecols = [fieldidx, tsidx, carididx] ,low_memory=False, chunksize=10000, encoding='utf-8')
except UnicodeDecodeError:
try:
chunks = pd.read_csv(_filename,usecols = [fieldidx, tsidx, carididx], low_memory=False, chunksize=10000, encoding='euc-kr')
except UnicodeDecodeError:
chunks = pd.read_csv(_filename, usecols = [fieldidx, tsidx, carididx], low_memory=False, chunksize=10000, encoding='cp949')
df = pd.concat(chunks, ignore_index=True)
#print(df.columns)
if _args_pack_.filetype == 'xlsx' :
jsonTitle = (_filename.split('/'))[-1][:-5]+'_'+_args_pack_.field
elif _args_pack_.filetype == 'csv' :
jsonTitle = (_filename.split('/'))[-1][:-4]+'_'+_args_pack_.field
print("%s -> DF" %_filename)
return df, jsonTitle
def pack_to_meta(pack):
ret = {}
ret['field']=pack.field
ret['timestamp']=pack.ts
ret['carid']=pack.carid
ret['metric']=pack.metric
ret['pn']=pack.pn
ret['cn']=pack.cn
ret['field']=pack.field
ret['bundle']=pack.jsonpack
ret['outdir']=pack.outdir
return ret
if __name__ == "__main__":
global g_DEBUG
g_DEBUG = False
#gFile_type, bundle = brush_args()
_args_pack_ = brush_argparse()
_args_pack_.pn = int(_args_pack_.pn)
_args_pack_.cn = int(_args_pack_.cn)
dprint (vars(_args_pack_))
import type_file
file_type = type_file.file_type
file_list = file_type['type_'+_args_pack_.filekind]['files']
col_list = file_type['type_'+_args_pack_.filekind]['columns']
file_list = [i.encode('utf-8') for i in file_list] #convert unicode (u' ~ ') to plain strings
col_list = [i.encode('utf-8') for i in col_list]
fieldidx = int(_args_pack_.field)
tsidx = int(_args_pack_.ts)
carididx = int(_args_pack_.carid)
# convert the input index values into the actual column-name strings
_args_pack_.field = field_IndextoStr(fieldidx, col_list)
_args_pack_.ts = ts_IndextoStr(tsidx, col_list)
_args_pack_.carid = carid_IndextoStr(carididx, col_list)
#print(_args_pack_.field)
print('List of files to convert')
for f in file_list:
print(f)
np = _args_pack_.pn
nc = _args_pack_.cn
meta = pack_to_meta(_args_pack_)
# create the subprocess manager, producers, and consumers
workers = pcs.Workers(np, nc)
works_basket_list = workers.start_work(meta)
for file_name in file_list:
# csv -> df
df, title = CSVtoDF(file_name, _args_pack_, fieldidx, tsidx, carididx)
if len(df) == 0:
continue
# if df is smaller than np, handle it in the main process
if len(df) < np:
ToJsonFormat(df, _args_pack_, title, file_name)
# split df into np chunks and send each chunk to a producer via its queue
else :
start=0
end=start+len(df)/np
for idx in range(np):
if idx == np-1:
end = len(df)
while (works_basket_list[idx].full()):
time.sleep(0.5)
works_basket_list[idx].put([df[start:end], title, file_name])
start = end
end = start+len(df)/np
print("\nmain : [csv -> df] done")
print("work basket์ ๋ชจ๋ data ์ ์ก ์๋ฃ")
print("subprocess๊ฐ ์์ง ์คํ ์ค ์
๋๋ค...\n")
lines = workers.report()
totallines=0
for line in lines:
totallines += line
print("total processed lines : %d" %totallines)
```
|
{
"source": "jeonghopark/ddsp",
"score": 2
}
|
#### File: ddsp/ddsp/spectral_ops_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ddsp import spectral_ops
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
class STFTTest(tf.test.TestCase):
def test_tf_and_np_are_consistent(self):
amp = 1e-2
audio = amp * (np.random.rand(64000).astype(np.float32) * 2.0 - 1.0)
frame_size = 2048
hop_size = 128
overlap = 1.0 - float(hop_size) / frame_size
pad_end = True
s_np = spectral_ops.stft_np(
audio, frame_size=frame_size, overlap=overlap, pad_end=pad_end)
sess = tf.Session()
with self.cached_session() as sess:
s_tf = sess.run(
spectral_ops.stft(
audio, frame_size=frame_size, overlap=overlap, pad_end=pad_end))
# TODO(jesseengel): The phase comes out a little different, figure out why.
self.assertAllClose(np.abs(s_np), np.abs(s_tf), rtol=1e-3, atol=1e-3)
class LoudnessTest(tf.test.TestCase):
def test_tf_and_np_are_consistent(self):
amp = 1e-2
audio = amp * (np.random.rand(64000).astype(np.float32) * 2.0 - 1.0)
frame_size = 2048
frame_rate = 250
with self.cached_session() as sess:
ld_tf = sess.run(
spectral_ops.compute_loudness(
audio, n_fft=frame_size, frame_rate=frame_rate, use_tf=True))
ld_np = spectral_ops.compute_loudness(
audio, n_fft=frame_size, frame_rate=frame_rate, use_tf=False)
self.assertAllClose(np.abs(ld_np), np.abs(ld_tf), rtol=1e-3, atol=1e-3)
if __name__ == '__main__':
tf.test.main()
```
#### File: ddsp/training/eval_util.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import io
import itertools
import os
import time
from absl import logging
from ddsp import spectral_ops
import gin
import librosa
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import wavfile
import tensorflow.compat.v1 as tf
# Global values for evaluation.
MIN_F0_CONFIDENCE = 0.85
OUTLIER_MIDI_THRESH = 12
def check_and_squeeze_to_vector(input_vector):
"""Ensure vector only has one axis of dimensionality."""
if input_vector.ndim > 1:
return np.squeeze(input_vector)
else:
return input_vector
def l1_distance(prediction, ground_truth):
"""L1 distance difference between two vectors."""
if prediction.shape != ground_truth.shape:
prediction, ground_truth = np.squeeze(prediction), np.squeeze(ground_truth)
min_length = min(prediction.size, ground_truth.size)
return np.abs(prediction[:min_length] - ground_truth[:min_length])
def compute_audio_features(audio,
n_fft=2048,
sample_rate=16000,
frame_rate=250):
"""Compute features from audio."""
audio_feats = {'audio': audio}
audio = check_and_squeeze_to_vector(audio)
audio_feats['loudness_db'] = spectral_ops.compute_loudness(
audio, sample_rate, frame_rate, n_fft)
audio_feats['f0_hz'], audio_feats['f0_confidence'] = spectral_ops.compute_f0(
audio, sample_rate, frame_rate)
return audio_feats
def is_outlier(ground_truth_f0_conf):
"""Determine if ground truth f0 for audio sample is an outlier."""
ground_truth_f0_conf = check_and_squeeze_to_vector(ground_truth_f0_conf)
return np.max(ground_truth_f0_conf) < MIN_F0_CONFIDENCE
def f0_dist_conf_thresh(gen_f0,
gen_f0_confidence,
ground_truth_f0,
ground_truth_f0_confidence,
f0_confidence_thresh=MIN_F0_CONFIDENCE):
"""Compute L1 between gen audio and ground truth audio.
Calculating F0 distance is more complicated than calculating loudness
distance because of inherent inaccuracies in pitch tracking.
We take the following steps:
- Define a `keep_mask` that only select f0 values above when f0_confidence in
the GENERATED AUDIO (not ground truth) exceeds a minimum threshold.
Experimentation by jessengel@ and hanoih@ found this to be optimal way to
filter out bad f0 pitch tracking.
- Compute `delta_f0` between generated audio and ground truth audio.
- Only select values in `delta_f0` based on this `keep_mask`
- Compute mean on this selection
- At the start of training, audio samples will sound bad and thus have no
pitch content. If the `f0_confidence` is all below the threshold, we keep a
count of it. A better performing model will have a smaller count of
"untrackable pitch" samples.
Args:
gen_f0: generated audio f0 [MB,:]
gen_f0_confidence: generated audio f0 confidence [MB,:]
ground_truth_f0: ground truth audio f0 confidence [MB,:]
ground_truth_f0_confidence: ground truth audio f0 confidence [MB,:]
f0_confidence_thresh: confidence threshold above which f0 metrics will be
computed
Returns:
delta_f0_mean: float or None if entire generated sample had
f0_confidence below threshold.
"""
if np.max(gen_f0_confidence) < f0_confidence_thresh:
# Generated audio is not good enough for reliable pitch tracking.
return None
else:
keep_mask = ground_truth_f0_confidence >= f0_confidence_thresh
# Report mean error in midi space for easier interpretation.
gen_f0_midi = librosa.core.hz_to_midi(gen_f0)
ground_truth_f0_midi = librosa.core.hz_to_midi(ground_truth_f0)
# Set -infs introduced by hz_to_midi to 0.
gen_f0_midi[gen_f0_midi == -np.inf] = 0
ground_truth_f0_midi[ground_truth_f0_midi == -np.inf] = 0
delta_f0_midi = l1_distance(gen_f0_midi, ground_truth_f0_midi)
delta_f0_midi_filt = delta_f0_midi[keep_mask]
delta_f0_midi_mean = np.mean(delta_f0_midi_filt)
return delta_f0_midi_mean
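# Worked example (illustrative numbers, not from the tests): with
#   gen_f0_midi                = [60.0, 61.0]
#   ground_truth_f0_midi       = [60.0, 63.0]
#   ground_truth_f0_confidence = [0.9, 0.5] and a 0.85 threshold,
# keep_mask = [True, False], delta_f0_midi = [0.0, 2.0] is filtered to [0.0],
# and the returned delta_f0_midi_mean is 0.0; the low-confidence frame is
# ignored entirely.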
# ---------------------- WAV files --------------------------------------------
def get_wav_file(wav_data, sample_rate):
mem_file = io.BytesIO()
try:
wavfile.write(mem_file, sample_rate, wav_data)
except: # pylint: disable=bare-except
logging.warning('error in writing WAV file')
return mem_file
# ---------------------- Spectrogram ------------------------------------------
def spectrogram(audio, sess=None, rotate=False, size=2048):
"""Compute logmag spectrogram."""
if sess is None:
sess = tf.Session()
mag = sess.run(
spectral_ops.compute_logmag(
tf.convert_to_tensor(audio, tf.float32), size=size))
if rotate:
mag = np.rot90(mag)
return mag
# ---------------------- Summary Writers --------------------------------------
class Writer(object):
"""Base Class for writing tensorboard summaries dataset."""
def __init__(self, batch_size, summary_dir, global_step, verbose=True):
"""Initializes the result writer."""
self._batch_size = batch_size
self._summary_dir = summary_dir
self._global_step = global_step
self._file_writer = tf.summary.FileWriter(self._summary_dir)
self._verbose = verbose
def update(self, gen_audio_outputs, ground_truth_feats, tensor_dict):
raise NotImplementedError('update() not defined')
def flush(self):
raise NotImplementedError('flush() not defined')
class Writers(object):
"""Result writer that wraps a list of writers."""
def __init__(self, writers=None):
"""Initializes the result writer.
Args:
writers: list of `eval_utils.Writer`
"""
self._writers = writers or []
def add(self, writer):
self._writers.append(writer)
def update(self, gen_audio_outputs, ground_truth_feats, tensor_dict=None):
for writer in self._writers:
writer.update(gen_audio_outputs, ground_truth_feats, tensor_dict)
def flush(self):
for writer in self._writers:
writer.flush()
class MetricsWriter(Writer):
"""Class for writing WaveRNN metrics in Dataset to tensorboard."""
def __init__(self, batch_size, summary_dir, global_step):
super(MetricsWriter, self).__init__(batch_size, summary_dir, global_step)
self._metrics_dict = {
'ld_metric': 0,
'ld_dist_sum': 0,
'ld_count': 0,
'f0_metric': 0,
'f0_outlier_ratio': 0,
'f0_dist_sum': 0,
'f0_ground_truth_untrackable_pitch_count': 0,
'f0_gen_pitch_outlier_count': 0,
'f0_gen_untrackable_pitch_count': 0,
'f0_gen_trackable_pitch_count': 0,
}
def _compute_ld_dist_and_update_counts(self, gen_ld, ground_truth_ld):
metrics_d = self._metrics_dict
ld_dist = np.mean(l1_distance(gen_ld, ground_truth_ld))
metrics_d['ld_dist_sum'] += ld_dist
metrics_d['ld_count'] += 1
return ld_dist
def _compute_f0_dist_and_update_counts(self, gen_f0, gen_f0_confidence,
ground_truth_f0,
ground_truth_f0_confidence):
"""Compute f0 dist and update corresponding counts."""
metrics_d = self._metrics_dict
if is_outlier(ground_truth_f0_confidence):
# Ground truth f0 was unreliable to begin with. Discard.
metrics_d['f0_ground_truth_untrackable_pitch_count'] += 1
f0_dist = None
else:
# Gound truth f0 was reliable, compute f0 distance with generated audio
f0_dist = f0_dist_conf_thresh(gen_f0, gen_f0_confidence, ground_truth_f0,
ground_truth_f0_confidence)
if f0_dist is not None and f0_dist > OUTLIER_MIDI_THRESH:
# Generated audio had trackable pitch content but is an outlier
metrics_d['f0_gen_pitch_outlier_count'] += 1
elif f0_dist is not None and f0_dist <= OUTLIER_MIDI_THRESH:
# Generated audio had trackable pitch content and is within tolerance
metrics_d['f0_dist_sum'] += f0_dist
metrics_d['f0_gen_trackable_pitch_count'] += 1
elif f0_dist is None:
# Generated audio had untrackable pitch content
metrics_d['f0_gen_untrackable_pitch_count'] += 1
return f0_dist
def _compute_update_ld_metric(self):
"""Compute and update ld metric."""
metrics_d = self._metrics_dict
if metrics_d['ld_count'] == 0:
ld_metric = np.nan
else:
ld_metric = metrics_d['ld_dist_sum'] / metrics_d['ld_count']
metrics_d['ld_metric'] = ld_metric
return ld_metric
def _compute_update_f0_metric(self):
"""Compute and update f0 metric."""
metrics_d = self._metrics_dict
if metrics_d['f0_gen_trackable_pitch_count'] == 0:
f0_metric = np.nan
else:
f0_metric = metrics_d['f0_dist_sum'] / metrics_d[
'f0_gen_trackable_pitch_count']
metrics_d['f0_metric'] = f0_metric
return f0_metric
def _compute_update_outlier_ratio(self):
"""Compute and update the outlier ratio.
Outlier ratio distinguishes the number of poorly generated audio by the
model vs.audio with poor pitch tracking to begin with.
The lowest (best) possible ratio is `f0_ground_truth_untrackable_pitch_count
/ total_count` = 0.02. Indicating all generated samples were of good
quality, and only ground truth samples with poor pitch content to begin with
had to be omitted from evaluation.
The outlier ratio is computed using:
f0_ground_truth_untrackable_pitch_count +
f0_gen_pitch_outlier_count +
f0_gen_untrackable_pitch_count
/
f0_ground_truth_untrackable_pitch_count +
f0_gen_pitch_outlier_count +
f0_gen_untrackable_pitch_count +
f0_gen_trackable_pitch_count
As the model improves in performance `f0_gen_pitch_outlier_count` and
`f0_gen_untrackable_pitch_count` should decrease, causing a lower ratio.
Args: None
Returns:
outlier_ratio: float or np.nan if division by 0
"""
metrics_d = self._metrics_dict
numerator = metrics_d['f0_ground_truth_untrackable_pitch_count']
numerator += metrics_d['f0_gen_pitch_outlier_count']
numerator += metrics_d['f0_gen_untrackable_pitch_count']
denominator = copy.copy(numerator)
denominator += metrics_d['f0_gen_trackable_pitch_count']
if denominator == 0:
outlier_ratio = np.nan
else:
outlier_ratio = numerator / denominator
metrics_d['f0_outlier_ratio'] = outlier_ratio
return outlier_ratio
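# Worked example (illustrative counts only): with
#   f0_ground_truth_untrackable_pitch_count = 2, f0_gen_pitch_outlier_count = 1,
#   f0_gen_untrackable_pitch_count = 1, f0_gen_trackable_pitch_count = 16,
# the numerator is 4 and the denominator is 20, so f0_outlier_ratio = 0.2;
# the ratio shrinks as more generated samples have trackable, in-tolerance f0.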
def update(self, gen_audio_outputs, ground_truth_feats, tensor_dict=None):
"""Update metrics dictionary given a batch of audio."""
# Compute metrics per sample. No batch operations possible.
for sample_idx in range(self._batch_size):
# Extract generated audio
gen_audio = check_and_squeeze_to_vector(gen_audio_outputs[sample_idx])
gen_feats = compute_audio_features(gen_audio)
ld_dist = self._compute_ld_dist_and_update_counts(
gen_feats['loudness_db'],
ground_truth_feats['loudness_db'][sample_idx])
f0_dist = self._compute_f0_dist_and_update_counts(
gen_feats['f0_hz'], gen_feats['f0_confidence'],
ground_truth_feats['f0_hz'][sample_idx],
ground_truth_feats['f0_confidence'][sample_idx])
if self._verbose:
log_string = 'sample {} | ld_dist: {:.3f} | '.format(
sample_idx, ld_dist)
if f0_dist:
# Only log f0 distance if it was calculated
log_string = log_string + 'f0_dist(midi): {:.3f}'.format(f0_dist)
logging.info(log_string)
def get_current_metrics(self):
_ = self._compute_update_ld_metric()
_ = self._compute_update_f0_metric()
_ = self._compute_update_outlier_ratio()
return self._metrics_dict
def flush(self):
"""Output metrics to tensorboard for global step."""
metrics_d = self.get_current_metrics()
if self._verbose:
logging.info('COMPUTING METRICS COMPLETE. FLUSHING ALL METRICS')
metric_keys = ['ld_metric', 'f0_metric', 'f0_outlier_ratio']
metrics_str = ' | '.join(
'{}: {:0.3f}'.format(m, metrics_d[m]) for m in metric_keys)
logging.info(metrics_str)
counts_keys = [
'f0_gen_trackable_pitch_count', 'f0_gen_pitch_outlier_count',
'f0_gen_untrackable_pitch_count',
'f0_ground_truth_untrackable_pitch_count'
]
counts_str = ' | '.join(
'{}: {}'.format(c, metrics_d[c]) for c in counts_keys)
logging.info(counts_str)
summary = tf.Summary()
for value_name in [
'ld_metric',
'f0_metric',
'f0_outlier_ratio',
]:
summary.value.add(
tag='rt_metrics/' + value_name, simple_value=metrics_d[value_name])
for value_name in [
'f0_gen_trackable_pitch_count', 'f0_gen_pitch_outlier_count',
'f0_gen_untrackable_pitch_count',
'f0_ground_truth_untrackable_pitch_count'
]:
summary.value.add(
tag='counts/' + value_name, simple_value=metrics_d[value_name])
self._file_writer.add_summary(summary, self._global_step)
self._file_writer.flush()
logging.info('Wrote metric summaries for step %s to %s', self._global_step,
self._summary_dir)
spectral_ops.reset_crepe() # Reset CREPE global state
class WaveformImageWriter(Writer):
"""Class for writing waveform tensorboard summaries."""
def __init__(self, batch_size, summary_dir, global_step):
super(WaveformImageWriter, self).__init__(batch_size, summary_dir,
global_step)
def update(self, gen_audio_outputs, ground_truth_feats, tensor_dict):
"""Update metrics dictionary given a batch of audio."""
gt = ground_truth_feats['audio']
a1 = tensor_dict.get('additive_audio')
a2 = tensor_dict.get('noise_audio')
a3 = gen_audio_outputs
def _plot(summary, sample_idx, length=None, prefix='waveform'):
"""Plots all waveforms."""
waveforms = []
labels = []
for a, label in zip([gt, a1, a2, a3],
['gt', 'additive', 'noise', 'synthesized']):
if a is not None:
x = check_and_squeeze_to_vector(a[sample_idx])
if length is not None:
x = x[:length]
waveforms.append(x)
labels.append(label)
# Manually specify exact size of fig for tensorboard
num_subplots = len(labels)
fig = plt.figure(figsize=(2.5 * num_subplots, 10))
for i in range(num_subplots):
ax = fig.add_subplot(num_subplots, 1, i + 1)
ax.plot(waveforms[i])
ax.set_title(labels[i])
# Format and save plot to image
buf = io.BytesIO()
fig.savefig(buf, format='png')
image_summary = tf.Summary.Image(encoded_image_string=buf.getvalue())
plt.close(fig)
summary.value.add(
tag='{}/{}'.format(prefix, sample_idx), image=image_summary)
summary = tf.Summary()
for sample_idx in range(self._batch_size):
_plot(summary, sample_idx, length=None, prefix='waveform_4s')
_plot(summary, sample_idx, length=2000, prefix='waveform_125ms')
self._file_writer.add_summary(summary, self._global_step)
def flush(self):
"""Output metrics to tensorboard for global step."""
self._file_writer.flush()
logging.info('Wrote image summaries for step %s to %s', self._global_step,
self._summary_dir)
class SpectrogramImageWriter(Writer):
"""Class for writing spectrogram tensorboard summaries."""
def __init__(self, batch_size, summary_dir, global_step):
super(SpectrogramImageWriter, self).__init__(batch_size, summary_dir,
global_step)
def update(self,
gen_audio_outputs,
ground_truth_feats,
unused_tensor_dict,
sess=None):
"""Update metrics dictionary given a batch of audio."""
# Batch spectrogram operations
gtr_spectrograms = spectrogram(ground_truth_feats['audio'], sess=sess)
gen_spectrograms = spectrogram(gen_audio_outputs, sess=sess)
logging.info('spec writer')
summary = tf.Summary()
for sample_idx in range(self._batch_size):
# Manually specify exact size of fig for tensorboard
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(211)
ax.set_title('original')
ax.set_xticks([0, 1000])
ax.matshow(gtr_spectrograms[sample_idx], vmin=-5, vmax=1)
ax = fig.add_subplot(212)
ax.set_title('synthesized')
ax.set_xticks([])
ax.set_yticks([])
ax.matshow(gen_spectrograms[sample_idx], vmin=-5, vmax=1)
# Format and save plot to image
buf = io.BytesIO()
fig.savefig(buf, format='png')
image_summary = tf.Summary.Image(encoded_image_string=buf.getvalue())
plt.close(fig)
summary.value.add(
tag='spectrogram/{}'.format(sample_idx), image=image_summary)
logging.info('spec writer 2')
self._file_writer.add_summary(summary, self._global_step)
def flush(self):
"""Output metrics to tensorboard for global step."""
self._file_writer.flush()
logging.info('Wrote image summaries for step %s to %s', self._global_step,
self._summary_dir)
class AudioWriter(Writer):
"""Class for writing audio samples to tensorboard."""
def __init__(self, batch_size, summary_dir, global_step, sample_rate=16000):
super(AudioWriter, self).__init__(batch_size, summary_dir, global_step)
self._sample_rate = sample_rate
def update(self, gen_audio_outputs, ground_truth_feats, unused_tensor_dict):
"""Update metrics dictionary given a batch of audio."""
# Compute metrics per sample. No batch operations possible.
summary = tf.Summary()
for sample_idx in range(self._batch_size):
# Ground truth audio
gtr_audio = check_and_squeeze_to_vector(
ground_truth_feats['audio'][sample_idx])
gtr_audio_summary = tf.Summary.Audio(
sample_rate=self._sample_rate,
num_channels=1,
length_frames=len(gtr_audio),
encoded_audio_string=get_wav_file(gtr_audio,
self._sample_rate).getvalue(),
content_type='wav')
summary.value.add(
tag='ground_truth_audio/{}'.format(sample_idx),
audio=gtr_audio_summary)
# Synthesized audio
gen_audio = check_and_squeeze_to_vector(gen_audio_outputs[sample_idx])
gen_audio_summary = tf.Summary.Audio(
sample_rate=self._sample_rate,
num_channels=1,
length_frames=len(gen_audio),
encoded_audio_string=get_wav_file(gen_audio,
self._sample_rate).getvalue(),
content_type='wav')
summary.value.add(
tag='gen_audio/{}'.format(sample_idx), audio=gen_audio_summary)
self._file_writer.add_summary(summary, self._global_step)
def flush(self):
"""Output metrics to tensorboard for global step."""
self._file_writer.flush()
logging.info('Wrote audio summaries for step %s to %s', self._global_step,
self._summary_dir)
# ---------------------- Evaluation --------------------------------------------
def evaluate_or_sample(data_provider,
model,
mode='eval',
model_dir='~/tmp/ddsp/training',
master='',
batch_size=32,
num_batches=50,
keys_to_fetch='additive_audio,noise_audio',
ckpt_delay_secs=0,
run_once=False):
"""Run evaluation loop.
Args:
data_provider: DataProvider instance.
model: Model instance.
mode: Whether to 'eval' with metrics or create 'sample' s.
model_dir: Path to directory with checkpoints and summary events.
master: Name of TensorFlow runtime to use.
batch_size: Size of each eval/sample batch.
num_batches: How many batches to eval from dataset. -1 denotes all batches.
keys_to_fetch: Additional tensors to fetch from model outputs.
ckpt_delay_secs: Time to wait when a new checkpoint was not detected.
run_once: Only run evaluation or sampling once.
"""
# Set up dataset.
input_fn = data_provider.get_input_fn(shuffle=False, repeats=1)
model_dir = os.path.expanduser(model_dir)
params = {'batch_size': batch_size, 'model_dir': model_dir}
dataset = input_fn(params)
dataset = dataset.take(num_batches)
dataset = dataset.make_one_shot_iterator()
features_tf = dataset.get_next()[0]
# Load model checkpoint
predictions = model.get_outputs(features_tf, training=False)
# additional tensors to fetch during eval
tensor_dict_tf = {}
for k in keys_to_fetch.split(','):
v = predictions.get(k)
if v is not None:
tensor_dict_tf[k] = v
trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
saver = tf.train.Saver(var_list=trainable_variables)
# Sample continuously and load the newest checkpoint each time
checkpoints_iterator = tf.train.checkpoints_iterator(model_dir,
ckpt_delay_secs)
for checkpoint in checkpoints_iterator:
# Set up writers before calling Sess() so computations run in same sess
base_summary_dir = os.path.join(model_dir, 'summaries')
if not tf.gfile.IsDirectory(base_summary_dir):
tf.gfile.MakeDirs(base_summary_dir)
global_step = int(checkpoint.split('-')[-1])
writers_list = []
if mode == 'eval':
writers_list.append(
MetricsWriter(
batch_size=batch_size,
summary_dir=base_summary_dir,
global_step=global_step))
elif mode == 'sample':
writers_list.append(
SpectrogramImageWriter(
batch_size=batch_size,
summary_dir=base_summary_dir,
global_step=global_step))
writers_list.append(
WaveformImageWriter(
batch_size=batch_size,
summary_dir=base_summary_dir,
global_step=global_step))
writers_list.append(
AudioWriter(
batch_size=batch_size,
summary_dir=base_summary_dir,
global_step=global_step))
writers = Writers(writers_list)
# Setup session.
sess = tf.Session(target=master)
sess.run(tf.global_variables_initializer())
start_time = time.time()
saver.restore(sess, checkpoint)
logging.info('Loading model took %.1f seconds', time.time() - start_time)
# Iterate through dataset and make predictions
for batch_idx in itertools.count(1, 1):
try:
start_time = time.time()
logging.info('Predicting batch %d', batch_idx)
audio_gen, ground_truth_feats, tensor_dict = sess.run(
(predictions['audio_gen'], features_tf, tensor_dict_tf))
logging.info('Prediction took %.1f seconds', time.time() - start_time)
writers.update(audio_gen, ground_truth_feats, tensor_dict=tensor_dict)
logging.info('Batch index %i with size %i took %.1f seconds', batch_idx,
batch_size,
time.time() - start_time)
except tf.errors.OutOfRangeError:
logging.info('End of dataset.')
break
writers.flush()
if run_once:
break
@gin.configurable
def evaluate(data_provider,
model,
model_dir='~/tmp/ddsp/training',
master='',
batch_size=32,
num_batches=50,
keys_to_fetch='additive_audio,noise_audio',
ckpt_delay_secs=0,
run_once=False):
"""Run evaluation loop.
Args:
data_provider: DataProvider instance.
model: Model instance.
model_dir: Path to directory with checkpoints and summary events.
master: Name of TensorFlow runtime to use.
batch_size: Size of each eval/sample batch.
num_batches: How many batches to eval from dataset. -1 denotes all batches.
keys_to_fetch: Additional tensors to fetch from model outputs.
ckpt_delay_secs: Time to wait when a new checkpoint was not detected.
run_once: Only run evaluation or sampling once.
"""
evaluate_or_sample(
data_provider=data_provider,
model=model,
mode='eval',
model_dir=model_dir,
master=master,
batch_size=batch_size,
num_batches=num_batches,
keys_to_fetch=keys_to_fetch,
ckpt_delay_secs=ckpt_delay_secs,
run_once=run_once)
@gin.configurable
def sample(data_provider,
model,
model_dir='~/tmp/ddsp/training',
master='',
batch_size=32,
num_batches=50,
keys_to_fetch='additive_audio,noise_audio',
ckpt_delay_secs=0,
run_once=False):
"""Run sampling loop.
Args:
data_provider: DataProvider instance.
model: Model instance.
model_dir: Path to directory with checkpoints and summary events.
master: Name of TensorFlow runtime to use.
batch_size: Size of each eval/sample batch.
num_batches: How many batches to eval from dataset. -1 denotes all batches.
keys_to_fetch: Additional tensors to fetch from model outputs.
ckpt_delay_secs: Time to wait when a new checkpoint was not detected.
run_once: Only run evaluation or sampling once.
"""
evaluate_or_sample(
data_provider=data_provider,
model=model,
mode='sample',
model_dir=model_dir,
master=master,
batch_size=batch_size,
num_batches=num_batches,
keys_to_fetch=keys_to_fetch,
ckpt_delay_secs=ckpt_delay_secs,
run_once=run_once)
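# A minimal usage sketch for the entry points above (a sketch, not part of the
# original training code): it assumes `data_provider` and `model` instances are
# built elsewhere, e.g. bound through gin, and only uses the signatures defined
# in this file.
def run_eval_once(data_provider, model, model_dir):
    """Run a single evaluation pass over a couple of batches and return."""
    evaluate(
        data_provider=data_provider,
        model=model,
        model_dir=model_dir,
        batch_size=8,    # small batch for a quick smoke test
        num_batches=2,   # only evaluate two batches
        run_once=True)   # do not wait for further checkpoints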
```
|
{
"source": "Jeonghun-Ban/corona-bot",
"score": 3
}
|
#### File: corona-bot/crawler/hospital_info.py
```python
import requests
from bs4 import BeautifulSoup
import json
hospital = {}
# dictionary of all screening clinics, keyed by city
hospital_dic = {}
# retry get raw without timeout exception
def get_raw(url):
while True:
try:
return requests.get(url, timeout=10, headers={"User-Agent": "Mozilla/5.0"})
except:
print("internet not connected")
# ๊ฒ์ฒด์ฑ์ทจ๊ฐ๋ฅ ์ง๋ฃ์
possible_hospital=[]
raw_hospital = get_raw("http://www.mohw.go.kr/react/popup_200128.html")
# print(raw_hospital.encoding)
# raw_hospital.encoding = None
html_hospital = BeautifulSoup(raw_hospital.content, 'html.parser', from_encoding='euc-kr')
hospitals = html_hospital.select("tbody.tb_center tr")
# 546
city_dic = {}
item_list = []
for h in hospitals:
id = h.select_one("th").text
city = h.select_one("td:nth-of-type(1)").text
region = h.select_one("td:nth-of-type(2)").text
selected = h.select_one("td:nth-of-type(3)").text.replace(" ","").replace("(๊ฒ์ฒด์ฑ์ทจ ๊ฐ๋ฅ)", "")
number = h.select_one("td:nth-of-type(4)").text.replace(",","/")
# city_dic = {}
# item_list = []
if "*" in selected:
possible_hospital.append(selected)
if city in hospital_dic.keys():
if region in city_dic.keys():
print(region, "added another clinic to existing region")
item_list.append([selected, number])
city_dic[region] = item_list
else:
print(region, "newly added region")
item_list = list()
item_list.append([selected, number])
city_dic[region] = item_list
else:
print(city, "city added")
item_list = list()
city_dic = dict()
item_list.append([selected, number])
city_dic[region] = item_list
hospital_dic[city] = city_dic
print(hospital_dic)
json_hospital = json.dumps(hospital_dic, indent=4)
print(json_hospital)
with open('hospital.json', 'w', encoding="utf-8") as hospital_file:
json.dump(hospital_dic, hospital_file, ensure_ascii=False, indent="\t")
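# For reference, hospital_dic built above has the nested shape
# {city: {region: [[clinic_name, phone_number], ...]}}. The literal below is a
# made-up illustration of that structure (hypothetical names, not real data).
example_structure = {
    "CityName": {
        "DistrictName": [
            ["Example Clinic", "02-000-0000"],
        ],
    },
}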
```
#### File: corona-bot/crawler/summary_info.py
```python
import requests
from bs4 import BeautifulSoup
import json
import boto3
import os
# TODO: confirmed cases by region
def set_summary_info(event, context):
data = dict()
raw_status = get_raw(os.environ['summary_url'])
html_status = BeautifulSoup(raw_status.text, 'html.parser')
statusbox = html_status.select("span.num")
checkbox = html_status.select_one("span.num_rnum").text
updatebox = html_status.select_one("span.livedate").text[1:].split(".")
data['update_date'] = updatebox[0] + "์ " + updatebox[1] + "์ผ" + updatebox[2].split(",")[0]
data['confirmator_num'] = statusbox[0].text.replace("(๋์ )", "")
data['discharged_num'] = statusbox[1].text
data['charged_num'] = statusbox[2].text
data['death_num'] = statusbox[3].text
data['check_num'] = checkbox.split()[0]
data['cured_rate'] = round(int(data['discharged_num'].replace(",", ""))/int(data['confirmator_num'].replace(",", ""))*100, 1)
json_data = json.dumps(data)
s3 = boto3.resource('s3')
bucket = s3.Bucket('facebook-coronabot')
bucket.put_object(Key='summary.json', Body=json_data)
return {
'statusCode': 200,
'body': json_data
}
# retry get raw without timeout exception
def get_raw(url):
while True:
try:
return requests.get(url, timeout=10, headers={"User-Agent": "Mozilla/5.0"})
except:
pass
```
#### File: corona-bot/facebook/webhook.py
```python
import json
import os
import re
import requests
from parser import summary_info
from parser import hospital_info
CHATBOT_RESPONSE = {
'์ผ์ผํ์ง์': '',
'ํ์งํ์์': '',
'ํด์์กฐ์น์': '',
'์ฌ๋ง์์': '',
'์ ๋ณ์ง๋ฃ์': """์๋ ๋ฐ ์๊ตฐ๊ตฌ๋ฅผ ์
๋ ฅํ๋ฉด\n์ ๋ณ์ง๋ฃ์ ์กฐํ๋ฅผ ์์ํฉ๋๋ค. \n(๊ฒ์์ด ์์: '์์ธ ์ข
๋ก๊ตฌ')""",
'๋ฐ๋จ': """2019๋
12์, ์ค๊ตญ ์ฐํ์์ ์ฒ์ ๋ฐ์ํ์ต๋๋ค. ๊ฐ์ผ์์ ๋๋ฌผ๋ก ์ถ์ ๋๊ณ ์์ผ๋ฉฐ, ๋๋ฌผ์๊ฒ์ ์ฌ๋์ผ๋ก ์ ํ๋ ๊ฒ์ผ๋ก ์ถ์ ๋ฉ๋๋ค.""",
'์ฆ์': """๊ฐ์ผ๋๋ฉด ์ต๋ 2์ฃผ๊ฐ์ ์ ๋ณต๊ธฐ๋ฅผ ๊ฑฐ์น ํ, ๋ฐ์ด/๊ธฐ์นจ/ํธํก๊ณค๋์ ๋น๋กฏํ ํ๋ ด ์ฆ์์ด ์ฃผ๋ก ๋ํ๋ฉ๋๋ค. ๋ค๋ง, ์ฆ์์ด ๋ํ๋์ง ์๋ ๋ฌด์ฆ์ ๊ฐ์ผ ์ฌ๋ก๋ ์กด์ฌํฉ๋๋ค.""",
'์ ์ผ๊ฒฝ๋ก': """์ฝ๋ก๋19๋ ์ฌ๋ ๊ฐ ์ ํ๊ฐ ํ์ธ๋ ๋ฐ์ด๋ฌ์ค์
๋๋ค. ์ฃผ๋ ๊ฐ์ผ๊ฒฝ๋ก๋ ๋น๋ง๊ฐ์ผ์ผ๋ก, ๊ฐ์ผ์์ ์นจ๋ฐฉ์ธ์ด ํธํก๊ธฐ๋ ๋/์ฝ/์
์ ์ ๋ง์ผ๋ก ์นจํฌ๋ ๋ ์ ์ผ๋ฉ๋๋ค.""",
'์๋ฐฉ๋ฒ': """1. ์ฐ์ , ๋น๋์ ๋ฌผ๋ก ์์ ์์ฃผ ์ป์ต๋๋ค. ์ ์๋
์ ์ฌ์ฉ๋ ์ข์ ๋์์
๋๋ค.
\n2. ์ป์ง ์์ ์์ผ๋ก ๋์ด๋ ์ฝ, ์
์ ๋ง์ง์ง ์์ต๋๋ค.
\n3. ๊ธฐ์นจ์ด๋ ์ฌ์ฑ๊ธฐ๋ฅผ ํ ๋ ํฐ์๋ ์๋งค๋ก ์
/์ฝ๋ฅผ ๊ฐ๋ฆฝ๋๋ค.
\n4. ์ํ ๋๋ ์๊ฐ๊ฒฉ๋ฆฌ๋ฅผ ํตํด ๋ค๋ฅธ ์ฌ๋๊ณผ์ ์ ์ด์ ํผํฉ๋๋ค.""",
'์น๋ฃ': """์ฝ๋ก๋19 ์น๋ฃ๋ ํ์์ ์ฆ์์ ๋์ํ๋ ์น๋ฃ๋ก ์ด๋ฃจ์ด์ง๋๋ค.\n๊ธฐ์นจ/์ธํํต/ํ๋ ด ๋ฑ ์ฃผ์ ์ฆ์์ ๋ฐ๋ผ ํญ๋ฐ์ด๋ฌ์ค์ ๋ ํญ์์ ํฌ์ฌ๊ฐ ํด๋น๋ฉ๋๋ค.""",
}
cities = ('서울', '부산', '대구', '인천', '광주', '대전', '울산', '세종', '경기', '강원', '충북', '충남', '전북', '전남', '경북', '경남', '제주')
def event_handler(event, context):
# TODO implement
if event["httpMethod"] == "GET":
hub_challenge = event["queryStringParameters"]["hub.challenge"]
hub_verify_token = event["queryStringParameters"]["hub.verify_token"]
if hub_verify_token == os.environ['VERIFY_TOKEN']:
return {'statusCode': '200', 'body': hub_challenge, 'headers': {'Content-Type': 'application/json'}}
else:
return {'statusCode': '401', 'body': 'Incorrect verify token', 'headers': {'Content-Type': 'application/json'}}
elif event["httpMethod"] == "POST":
incoming_message = json.loads(event['body'])
for entry in incoming_message['entry']:
for message in entry['messaging']:
if 'message' in message:
send_dots(message['sender']['id'])
try:
send_text(
message['sender']['id'], message['message']['text'])
except:
send_text(
message['sender']['id'], '.')
return {'statusCode': '200', 'body': 'Success' , 'headers': {'Content-Type': 'application/json'}}
def send_text(fbid, received_message):
reply = ''
quick_replies = list()
# add crawler data in dict
if '์ผ์ผํ์ง์' in received_message:
CHATBOT_RESPONSE['์ผ์ผํ์ง์'] = summary_info.get_daily_num()
if 'ํ์งํ์์' in received_message:
CHATBOT_RESPONSE['ํ์งํ์์'] = summary_info.get_confirmator_num()
if 'ํด์์กฐ์น์' in received_message:
CHATBOT_RESPONSE['ํด์์กฐ์น์'] = summary_info.get_discharged_num()
if '์ฌ๋ง์์' in received_message:
CHATBOT_RESPONSE['์ฌ๋ง์์'] = summary_info.get_death_num()
for key in CHATBOT_RESPONSE.keys():
quick_replies.append({
"content_type": "text",
"title": key,
"payload": 'DEVELOPER_DEFINED_PAYLOAD'})
if key in received_message:
reply += CHATBOT_RESPONSE[key] + '\n\n'
# parse hospital_list
for city in cities:
if re.compile(city).search(received_message):
reply += hospital_info.get_hospital_list(city, received_message)
break
if not reply:
reply = "안녕하세요,\n코로나 알리미입니다!\n\n아래 제시된 키워드를 포함하여 질문해주세요."
send_message(json.dumps({
"recipient": {"id": fbid}, "message": {"text": reply, "quick_replies": quick_replies}}))
def send_dots(fbid):
send_message(json.dumps({
"recipient": {"id": fbid}, "sender_action": "typing_on"
}))
def send_message(response_msg):
endpoint = 'https://graph.facebook.com/v6.0/me/messages?access_token=%s' % os.environ['PAGE_ACCESS_TOKEN']
requests.post(endpoint, headers={
"Content-Type": "application/json"}, data=response_msg)
```
|
{
"source": "Jeonghun-Ban/likelion-mju.com",
"score": 2
}
|
#### File: likelion-mju.com/account/models.py
```python
from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser, PermissionsMixin
)
class UserManager(BaseUserManager):
def create_user(self, email, number, name, gender, phone, college, department, grade, password=None):
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
number = number,
name = name,
gender = gender,
phone = phone,
college = college,
department = department,
grade = grade,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, number, name, gender, phone, college, department, grade, password):
user = self.create_user(
email=email,
password=password,
number = number,
name = name,
gender = gender,
phone = phone,
college = college,
department = department,
grade = grade,
)
user.is_superuser = True
user.is_active = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
number = models.CharField(
verbose_name = 'student number',
max_length=15,
unique=True
)
name = models.CharField(
verbose_name = 'name',
max_length=15,
null=True
)
gender = models.CharField(
verbose_name = 'gender',
max_length=15,
null=True
)
email = models.EmailField(
verbose_name = 'email',
max_length=255,
unique=True,
null=True
)
phone = models.CharField(
verbose_name = 'phone number',
max_length=15,
null=True
)
college = models.CharField(
verbose_name = 'college',
max_length=15,
null=True
)
department = models.CharField(
verbose_name = 'major',
max_length=15,
null=True
)
grade = models.IntegerField(
verbose_name = 'grade',
null=True
)
is_active = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'number'
REQUIRED_FIELDS = [
'name',
'gender',
'phone',
'email',
'college',
'department',
'grade'
]
def __str__(self):
return self.name
def has_perm(self, perm, obj=None):
return True
def has_module_perms(self, app_label):
return True
@property
def is_staff(self):
return self.is_superuser
```
|
{
"source": "Jeonghun-Ban/likelionmyongji_lotto",
"score": 3
}
|
#### File: likelionmyongji_lotto/lotto/views.py
```python
from django.shortcuts import render, redirect
import random
# Create your views here.
def home(request):
return render(request, 'home.html')
def result(request):
# input number
number_list = list()
for i in range(0,6):
number = request.GET['number'+str(i+1)]
# if number is null, redirect home.html
if number == '':
return redirect('home')
number_list.append(int(number))
# random number
rand_list = list()
for i in range(0,7):
rand = random.randrange(1,46)
# when rand in rand_list
while rand in rand_list:
rand = random.randrange(1,46)
rand_list.append(rand)
# count same number
count=0
for i in range(0,6):
for j in range(0,7):
if number_list[i]==rand_list[j]:
count+=1
return render(request, 'result.html', {'number_list':number_list, 'rand_list':rand_list, 'count':count })
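# A small, self-contained sketch of the same match-counting idea as the nested
# loops above, using set intersection instead; it is for illustration only and
# is not wired into the view.
def _count_matches(picked, drawn):
    """Return how many of the user's numbers appear among the drawn numbers."""
    return len(set(picked) & set(drawn))
# e.g. _count_matches([1, 2, 3, 4, 5, 6], [3, 5, 7, 11, 23, 40, 45]) == 2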
```
|
{
"source": "jeonghunn/EverytimeBot",
"score": 3
}
|
#### File: jeonghunn/EverytimeBot/bot.py
```python
import logging
import telegram
import requests, json
import traceback
from time import sleep
import model as ml
import tensorflow as tf
from earlystop import EarlyStopping
import random
import math
import os, sys
import data
import datetime
from configs import DEFINES
update_id = 0
def __del__(self):
bot = telegram.Bot('auth')
bot.send_message(chat_id = -116418298, text="The Penta service has stopped.")
def main():
"""Run the bot."""
global update_id
# Telegram Bot Authorization Token
bot = telegram.Bot('auth')
URL = "https://unopenedbox.com/develop/square/api.php"
last_message = ""
bootcount = 0
lcount = 0
readingold = False
readingold_lastcount = 0
now = datetime.datetime.now()
# get the first pending update_id, this is so we can skip over it in case
# we get an "Unauthorized" exception.
# Build the vocabulary from the data.
char2idx, idx2char, vocabulary_length = data.load_vocabulary()
# Set up the estimator.
classifier = tf.estimator.Estimator(
model_fn=ml.Model, # register the model function
model_dir=DEFINES.check_point_path, # checkpoint directory
params={ # parameters passed through to the model
'hidden_size': DEFINES.hidden_size, # hidden (weight) size
'layer_size': DEFINES.layer_size, # number of stacked layers
'learning_rate': DEFINES.learning_rate, # learning rate
'teacher_forcing_rate': DEFINES.teacher_forcing_rate, # teacher-forcing rate for the decoder input during training
'vocabulary_length': vocabulary_length, # vocabulary size
'embedding_size': DEFINES.embedding_size, # embedding size
'embedding': DEFINES.embedding, # whether to use embeddings
'multilayer': DEFINES.multilayer, # whether to use multiple layers
'attention': DEFINES.attention, # whether to apply attention
'teacher_forcing': DEFINES.teacher_forcing, # whether to apply teacher forcing to the decoder input during training
'loss_mask': DEFINES.loss_mask, # limit the loss with a mask over PAD tokens
'serving': DEFINES.serving # whether to save/export the model for serving
})
while 1:
sleep(3)
now = datetime.datetime.now()
bootcount = bootcount + 1
lcount = lcount + 1
try:
#data = {'a': 'penta_check', 'auth': '<PASSWORD>', 'start_num' : '0', 'number' : '15'}
#res = requests.post(URL, data=data)
#answer = "[Report]" + res.json()[0]['description'];
answer = ""
if bootcount == 1 :
#answer = "Restarted. Penta version 1.0.625 is reading the chats it missed..."
readingold = True
readingold_lastcount = bootcount
if readingold_lastcount < bootcount and readingold is True :
readingold = False
#bot.send_message(chat_id = -116418298, text="Finished reading old messages.")
if last_message != answer and answer != "" :
bot.send_message(chat_id = -116418298, text=answer)
last_message = answer
if last_message == answer :
tlm = ""
last_user = 0
last_talk = ""
updates = bot.get_updates(offset=update_id)
for i in updates:
if i.message:
if last_user != i.message.from_user.id :
last_talk = tlm
tlm = ""
last_user = i.message.from_user.id
# with open("./data_in/ChatBotData.csv", "a") as myfile:
# myfile.write("\n")
if i.message.text is not None and tlm != "" :
tlm = tlm + " " + i.message.text
# with open("./data_in/ChatBotData.csv", "a") as myfile:
# myfile.write(" " + i.message.text)
if i.message.text is not None and tlm == "" :
tlm = i.message.text
# with open("./data_in/ChatBotData.csv", "a") as myfile:
# myfile.write(i.message.text)
update_id = i.update_id + 1
now_last_id = updates[-1].update_id
if tlm != "" and tlm is not None and now_last_id + 1 <= update_id:
readingold_lastcount = readingold_lastcount +1
lcount = 0
if not readingold :
predic_input_enc, predic_input_enc_length = data.enc_processing([tlm], char2idx)
predic_target_dec, _ = data.dec_target_processing([""], char2idx)
# Run the prediction.
predictions = classifier.predict(
input_fn=lambda:data.eval_input_fn(predic_input_enc, predic_target_dec, DEFINES.batch_size))
# Convert the predicted values back into text so they can be read.
aimessage = data.pred2string(predictions, idx2char)
if aimessage != "" :
bot.send_message(chat_id = -116418298, text=aimessage)
except IndexError:
update_id = None
except:
if last_message != traceback.format_exc() and "Message text is empty" not in traceback.format_exc():
bot.send_message(chat_id = -116418298, text="[Error] An error occurred. Please check the service. \n"+ traceback.format_exc())
last_message = traceback.format_exc()
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
def echo(bot):
"""Echo the message the user sent."""
global update_id
# Request updates after the last update_id
for update in bot.get_updates(offset=update_id, timeout=10):
update_id = update.update_id + 1
if update.message: # your bot can receive updates without messages
# Reply to the message
update.message.reply_text("HI")
if __name__ == '__main__':
main()
```
|
{
"source": "jeonghunseo1/github_test",
"score": 3
}
|
#### File: github_test/gui_basic/4_text_entry.py
```python
from tkinter import *
root = Tk()
root.title("Nado GUI")
root.geometry("640x480") # width x height
txt = Text(root, width=30, height=5)
txt.pack()
txt.insert(END, "Enter some text")
e = Entry(root, width=30)
e.pack()
e.insert(0, "Enter a single line only") # 0: index where the default value is inserted
def btncmd():
# print the contents
print(txt.get("1.0",END)) # 1: first line, 0: column index 0
print(e.get())
# clear the contents
txt.delete("1.0",END)
e.delete(0, END)
btn = Button(root, text="Click", command=btncmd)
btn.pack()
root.mainloop()
```
#### File: github_test/gui_basic/menu.py
```python
from tkinter import *
root = Tk()
root.title("Nado GUI")
root.geometry("640x480") # width x height
def create_new_file():
print("Creating a new file")
menu = Menu(root)
# File menu
menu_file = Menu(menu, tearoff=0)
menu_file.add_command(label="New File", command=create_new_file)
menu_file.add_command(label="New Window")
menu_file.add_separator()
menu_file.add_command(label="Open File...")
menu_file.add_separator()
menu_file.add_command(label="Save All",state="disabled") # disabled
menu_file.add_separator()
menu_file.add_command(label="Exit", command=root.quit)
menu.add_cascade(label="File", menu=menu_file)
# Edit menu (empty)
menu.add_cascade(label="Edit")
# Add Language menu (choose one via radio buttons)
menu_lang = Menu(menu, tearoff=0)
menu_lang.add_radiobutton(label="Python")
menu_lang.add_radiobutton(label="Java")
menu_lang.add_radiobutton(label="C++")
menu.add_cascade(label="Language", menu= menu_lang)
# View menu
menu_view = Menu(menu, tearoff=0)
menu_view.add_checkbutton(label="Show Minimap")
menu.add_cascade(label="View", menu=menu_view)
root.config(menu= menu)
root.mainloop()
```
#### File: jeonghunseo1/github_test/main1.py
```python
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class Watcher:
DIRECTORY_WATCH = "./logDir"
def __init__(self):
self.observer = Observer()
def run(self):
event_handler = Handler()
self.observer.schedule(event_handler, self.DIRECTORY_WATCH, recursive=True)
self.observer.start()
try:
while True:
time.sleep(5)
except:
self.observer.stop()
print("Error")
self.observer.join()
class Handler(FileSystemEventHandler):
@staticmethod
def on_moved(event):
print("move event occurred")
def on_created(self, event):
print("create event occurred")
def on_deleted(self, event):
print("delete event occurred")
def on_modified(self, event):
print("modified event occurred")
def on_any_event(self, event):
print("some event occurred")
print("test ")
if __name__ == "__main__":
w = Watcher()
w.run()
```
#### File: jeonghunseo1/github_test/tray_icon.py
```python
import PySimpleGUI as sg
from psgtray import SystemTray
def main():
menu = ['', ['Show Window', 'Hide Window', '---', '!Disabled Item', 'Change Icon', ['Happy', 'Sad', 'Plain'], 'Exit']]
tooltip = 'Tooltip'
layout = [
[sg.Multiline(size=(60,10), reroute_stdout=False, reroute_cprint=True, write_only=True, key='-OUT-')],
[sg.B('Hide Window'), sg.Button('Exit')]]
window = sg.Window('Window Title', layout, finalize=True, enable_close_attempted_event=True)
tray = SystemTray(menu, single_click_events=False, window=window, tooltip=tooltip, icon=sg.DEFAULT_BASE64_ICON)
tray.show_message('System Tray', 'System Tray Icon Started!')
#sg.cprint(sg.get_versions())
while True:
event, values = window.read()
# IMPORTANT step. It's not required, but convenient. Set event to value from tray
# if it's a tray event, change the event variable to be whatever the tray sent
if event == tray.key:
#sg.cprint(f'System Tray Event = ', values[event], c='white on red')
event = values[event] # use the System Tray's event as if was from the window
if event in (sg.WIN_CLOSED, 'Exit'):
break
#sg.cprint(event, values)
#tray.show_message(title=event, message=values)
if event in ('Show Window', sg.EVENT_SYSTEM_TRAY_ICON_DOUBLE_CLICKED):
window.un_hide()
window.bring_to_front()
elif event in ('Hide Window', sg.WIN_CLOSE_ATTEMPTED_EVENT):
window.hide()
tray.show_icon() # if hiding window, better make sure the icon is visible
tray.notify('System Tray Item Chosen', f'You chose {event}')
elif event == 'Happy':
tray.change_icon(sg.EMOJI_BASE64_HAPPY_JOY)
elif event == 'Sad':
tray.change_icon(sg.EMOJI_BASE64_FRUSTRATED)
elif event == 'Plain':
tray.change_icon(sg.DEFAULT_BASE64_ICON)
elif event == 'Hide Icon':
tray.hide_icon()
elif event == 'Show Icon':
tray.show_icon()
elif event == 'Change Tooltip':
tray.set_tooltip(values['-IN-'])
tray.close() # optional but without a close, the icon may "linger" until moused over
window.close()
if __name__ == '__main__':
main()
```
|
{
"source": "jeonghwaYoo/high-res-mapping",
"score": 3
}
|
#### File: keras_contrib/metrics/segmentation_metrics.py
```python
import sys
from keras import metrics
import keras.backend as K
def _end_mean(x, axis=-1):
""" Same as K.mean, but defaults to the final axis.
"""
return K.mean(x, axis=axis)
def _metric_2d_adaptor(y_true, y_pred, metric=None, summary=_end_mean, **kwargs):
""" Adapt a one dimensional loss function to work with 2d segmentation data.
"""
if metric is None:
raise ValueError("You must provide a metric function such as binary_crossentropy")
pred_shape = K.shape(y_pred)
true_shape = K.shape(y_true)
y_pred_reshaped = K.reshape(y_pred, (-1, pred_shape[-1]))
y_true_reshaped = K.reshape(y_true, (-1, true_shape[-1]))
result = metric(y_true_reshaped, y_pred_reshaped, **kwargs)
if summary is not None:
result = summary(result)
# if len(true_shape) >= 3:
if true_shape.get_shape()[0].value >= 3:
return K.reshape(result, true_shape[:-1])
else:
return result
def categorical_pixel_accuracy(y_true, y_pred):
pred_shape = K.shape(y_pred)
true_shape = K.shape(y_true)
# reshape such that w and h dim are multiplied together
y_pred_reshaped = K.reshape(y_pred, (-1, pred_shape[-1]))
y_true_reshaped = K.reshape(y_true, (-1, true_shape[-1]))
# correctly classified
clf_pred = K.one_hot(K.argmax(y_pred_reshaped), num_classes=true_shape[-1])
correct_pixels_per_class = K.cast(
K.equal(clf_pred, y_true_reshaped), dtype='float32')
return K.mean(correct_pixels_per_class)
def mean_accuracy(y_true, y_pred):
pred_shape = K.shape(y_pred)
true_shape = K.shape(y_true)
# reshape such that w and h dim are multiplied together
y_pred_reshaped = K.reshape(y_pred, (-1, pred_shape[-1]))
y_true_reshaped = K.reshape(y_true, (-1, true_shape[-1]))
# correctly classified
clf_pred = K.one_hot(K.argmax(y_pred_reshaped), num_classes=true_shape[-1])
equal_entries = K.cast(
K.equal(clf_pred, y_true_reshaped), dtype='float32') * y_true_reshaped
correct_pixels_per_class = K.sum(equal_entries, axis=1)
n_pixels_per_class = K.sum(y_true_reshaped, axis=1)
# epsilon added to avoid dividing by zero
acc = (correct_pixels_per_class + K.epsilon()) / (n_pixels_per_class + K.epsilon())
return K.mean(acc)
def binary_accuracy(y_true, y_pred):
""" Same as keras.metrics.binary_accuracy for 2d label data.
"""
return _metric_2d_adaptor(y_true, y_pred, metric=metrics.binary_accuracy, summary=_end_mean)
def categorical_accuracy(y_true, y_pred):
""" Same as keras.metrics.categorical_accuracy for 2d label data.
"""
return _metric_2d_adaptor(y_true, y_pred, metric=metrics.categorical_accuracy, summary=_end_mean)
def top_k_categorical_accuracy(y_true, y_pred):
""" Same as keras.metrics.top_k_categorical_accuracy for 2d label data.
"""
return _metric_2d_adaptor(y_true, y_pred, metric=metrics.top_k_categorical_accuracy, summary=_end_mean)
def sparse_top_k_categorical_accuracy(y_true, y_pred):
""" Same as keras.metrics.categorical_accuracy for 2d label data.
"""
return _metric_2d_adaptor(y_true, y_pred, metric=metrics.sparse_top_k_categorical_accuracy, summary=_end_mean)
def mean_intersection_over_union(y_true, y_pred, smooth=None, axis=-1):
"""Jaccard distance for semantic segmentation, also known as the intersection-over-union loss.
This loss is useful when you have unbalanced numbers of pixels within an image
because it gives all classes equal weight. However, it is not the defacto
standard for image segmentation.
For example, assume you are trying to predict if each pixel is cat, dog, or background.
You have 80% background pixels, 10% dog, and 10% cat. If the model predicts 100% background
should it be be 80% right (as with categorical cross entropy) or 30% (with this loss)?
The loss has been modified to have a smooth gradient as it converges on zero.
This has been shifted so it converges on 0 and is smoothed to avoid exploding
or disappearing gradient.
Also see jaccard which takes a slighty different approach.
Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)
= sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))
# References
Csurka, Gabriela & Larlus, Diane & Perronnin, Florent. (2013).
What is a good evaluation measure for semantic segmentation?.
IEEE Trans. Pattern Anal. Mach. Intell.. 26. . 10.5244/C.27.32.
https://en.wikipedia.org/wiki/Jaccard_index
"""
if smooth is None:
smooth = K.epsilon()
pred_shape = K.shape(y_pred)
true_shape = K.shape(y_true)
# reshape such that w and h dim are multiplied together
y_pred_reshaped = K.reshape(y_pred, (-1, pred_shape[-1]))
y_true_reshaped = K.reshape(y_true, (-1, true_shape[-1]))
# correctly classified
clf_pred = K.one_hot(K.argmax(y_pred_reshaped), num_classes=true_shape[-1])
equal_entries = K.cast(
K.equal(clf_pred, y_true_reshaped), dtype='float32') * y_true_reshaped
intersection = K.sum(equal_entries, axis=1)
union_per_class = K.sum(
y_true_reshaped, axis=1) + K.sum(
y_pred_reshaped, axis=1)
# smooth added to avoid dividing by zero
iou = (intersection + smooth) / (
(union_per_class - intersection) + smooth)
return K.mean(iou)
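# A plain-NumPy sketch (not used by the Keras metrics above) of the
# reshape-and-compare idea they share: flatten the spatial dimensions, turn the
# prediction into a hard one-hot mask via argmax, then compare with the one-hot
# target.
def _numpy_pixel_accuracy_sketch():
    import numpy as np
    y_true = np.zeros((1, 2, 2, 3))
    y_true[0, :, :, 0] = 1                              # every pixel belongs to class 0
    y_pred = np.random.rand(1, 2, 2, 3)                 # random class scores
    true_flat = y_true.reshape(-1, 3)
    pred_flat = y_pred.reshape(-1, 3)
    pred_onehot = np.eye(3)[pred_flat.argmax(axis=1)]   # hard one-hot predictions
    return (pred_onehot == true_flat).mean()            # same idea as categorical_pixel_accuracy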
```
#### File: high-res-mapping/utils/plotting.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import ConnectionPatch
from matplotlib.transforms import Bbox
import seaborn as sns
import utils
from utils import filters, maxima, segment, merge
import warnings
def pipeline(img, low, high, roi_percentile=85, focal_scope='global', maxima_areas='small', merge_type='blend',
merge_alpha=0.5, filter_type='percentage', filter_percentage=15, filter_threshold=0.6):
"""
Visualization of the whole workflow. Requires the original image and the high and low res CAMs to work. Performs
the following steps:
1. Applies a filter to blur the high-res map.
2. Extracts the ROI from the low-res map through a percentile.
3. Identifies the focal points of the low-res map by locating it's local maxima.
4. Computes the gradient of the high-res map through a sobel filter.
5. Draws a histogram of the gradient. Only considers areas corresponding to the ROI extracted from the low-res map.
6. Calculates a 'lower' and 'upper' bound on the 25th and 75th percentile, respectively.
7. Performs a region-growing segmentation algorithm on the gradient. The boundaries are the previous percentiles,
while the focal points are set as the initial seeds (from where to start growing).
8. Merges the result of the segmentation with the low-res map.
9. Segments the original image according to the result of the previous merger.
Note: it would be more efficient and elegant if I went for 'axes fraction' instead of 'data' for the coordinates
of the ConnectionPatches, but it's too much of a hassle to change.
:param img: Original RBG image, default shape=(224, 224, 3).
:param low: Low-resolution CAM, default shape=(14, 14).
:param high: High-resolution CAM, default shape=(224, 224).
:param roi_percentile: Percentile based on which the ROI will be extracted. The default percentile=85 means that
the ROI will include the 15% highest-intensity pixels from the low-res map.
:param focal_scope: The scope in which the focal points will be identified. 'global' looks for global maxima, while
'local' looks for local maxima. Accepted values: ['global', 'local']
:param maxima_areas: Specifies the size of the focal points. Two options available: 'small' and 'large'.
:param merge_type: Specifies the method of merging the high-res segment map with the low-res map.
Two methods available: 'blend' and 'multiply'. The first is a possibly weighted linear
combination of the two, while the second simply multiplies them.
:param merge_alpha: If merge_type='blend', alpha regulates the importance of each of the two images (i.e. the low
and the high-res maps). Should be a float in [0, 1]. High values result in more influence from
the high-res map.
:param filter_type: Specifies the method of segmenting the original image based on the combined CAM. Two methods are
available: 'percentage' and 'threshold'. The first keeps a percentage of the original image's
pixels while the second relies solely on the values of the combined CAM exceeding a threshold.
:param filter_percentage: Selects the percentage of pixels to be included in the final segment. Only relevant if
filter_type='percentage'. Should be a number between 0 and 100.
:param filter_threshold: Selects the threshold based on which the final segmentation will be performed. Only pixels
of the combined CAM that have an intensity greater than this threshold will be included.
Based on this mask, the original image will be segmented. Should be a float in [0, 1].
"""
# Value checks
# Categorical arguments
if maxima_areas not in ('small', 'large'):
raise ValueError("available options for maxima_areas are: 'small' and 'large'.")
if merge_type not in ('blend', 'multiply'):
raise ValueError("available options for merge_type are: 'blend' and 'multiply'.")
if filter_type not in ('percentage', 'threshold'):
raise ValueError("available options for filter_type are: 'percentage' and 'threshold'.")
# Percentage arguments
if roi_percentile <= 0 or roi_percentile >= 100:
raise ValueError('roi_percentile should be a percentage in (0, 100)')
elif roi_percentile < 1:
warnings.warn('roi_percentile value in [0, 1). Should be defined as a percentage in (0, 100), '
'e.g. If the desired percentage is 33%, pass 33 instead of 0.33!')
if filter_percentage <= 0 or filter_percentage >= 100:
raise ValueError('filter_percentage should be a percentage in (0, 100)')
elif filter_percentage < 1:
warnings.warn('filter_percentage value in [0, 1). Should be defined as a percentage in (0, 100), '
'e.g. If the desired percentage is 33%, pass 33 instead of 0.33!')
# Value arguments
if merge_alpha < 0 or merge_alpha > 1:
raise ValueError('merge_alpha should be a float in [0, 1]')
if filter_threshold < 0 or filter_threshold > 1:
raise ValueError('filter_threshold should be a float in [0, 1]')
# Coordinates of the top/bottom/left/right/middle of the input image
left = (0, img.shape[1] / 2)
right = (img.shape[1], img.shape[1] / 2)
bottom = (img.shape[1] / 2, img.shape[1])
top = (img.shape[1] / 2, 0)
midpoint = (img.shape[1] / 2, img.shape[1] / 2)
# Create two 'blank' images for filling empty positions
blank = np.ones(img[0].shape, dtype=np.uint8)
half_blank = blank[::2]
# Initialize 5x7 grid
fig, ax = plt.subplots(5, 7, figsize=(16, 16))
##############################
######## First column ########
##############################
# Fill first, second, fourth and fifth rows with blank images
ax[0, 0].imshow(blank, alpha=0)
ax[0, 0].axis('off')
ax[1, 0].imshow(blank, alpha=0)
ax[1, 0].axis('off')
ax[3, 0].imshow(blank, alpha=0)
ax[3, 0].axis('off')
ax[4, 0].imshow(half_blank, alpha=0)
ax[4, 0].axis('off')
# Add original image to the third row
ax[2, 0].imshow(img[0], zorder=3)
ax[2, 0].axis('off')
ax[2, 0].set_title('Original image', backgroundcolor='white', zorder=2)
# Three crooked lines starting from the first row, represented by thirteen (!) connection patches
# Connection of 'original image' to 'high-res map'
con1a = ConnectionPatch(xyA=top, xyB=midpoint, coordsA='data', coordsB='data',
axesA=ax[2, 0], axesB=ax[1, 0], color='black', lw=2, zorder=1)
con1b = ConnectionPatch(xyA=midpoint, xyB=left, coordsA='data', coordsB='data',
axesA=ax[1, 0], axesB=ax[1, 1], color='black', lw=2, arrowstyle='->')
# Connection of 'original image' to 'low-res map'
con2a = ConnectionPatch(xyA=bottom, xyB=midpoint, coordsA='data', coordsB='data',
axesA=ax[2, 0], axesB=ax[3, 0], color='black', lw=2)
con2b = ConnectionPatch(xyA=midpoint, xyB=left, coordsA='data', coordsB='data',
axesA=ax[3, 0], axesB=ax[3, 1], color='black', lw=2, arrowstyle='->')
# Connection of 'original image' to 'result'
con3b = ConnectionPatch(xyA=midpoint, xyB=bottom, coordsA='data', coordsB='data',
axesA=ax[1, 0], axesB=ax[0, 0], color='black', lw=2)
con3c = ConnectionPatch(xyA=bottom, xyB=bottom, coordsA='data', coordsB='data',
axesA=ax[0, 0], axesB=ax[0, 1], color='black', lw=2)
con3d = ConnectionPatch(xyA=bottom, xyB=bottom, coordsA='data', coordsB='data',
axesA=ax[0, 1], axesB=ax[0, 2], color='black', lw=2)
con3e = ConnectionPatch(xyA=bottom, xyB=bottom, coordsA='data', coordsB='data',
axesA=ax[0, 2], axesB=ax[0, 3], color='black', lw=2)
con3f = ConnectionPatch(xyA=bottom, xyB=bottom, coordsA='data', coordsB='data',
axesA=ax[0, 3], axesB=ax[0, 4], color='black', lw=2)
con3g = ConnectionPatch(xyA=bottom, xyB=bottom, coordsA='data', coordsB='data',
axesA=ax[0, 4], axesB=ax[0, 5], color='black', lw=2)
con3h = ConnectionPatch(xyA=bottom, xyB=bottom, coordsA='data', coordsB='data',
axesA=ax[0, 5], axesB=ax[0, 6], color='black', lw=2)
con3i = ConnectionPatch(xyA=bottom, xyB=midpoint, coordsA='data', coordsB='data',
axesA=ax[0, 6], axesB=ax[1, 6], color='black', lw=2)
con3k = ConnectionPatch(xyA=midpoint, xyB=midpoint, coordsA='data', coordsB='data',
axesA=ax[1, 6], axesB=ax[2, 6], color='black', lw=2)
con3l = ConnectionPatch(xyA=midpoint, xyB=top, coordsA='data', coordsB='data',
axesA=ax[2, 6], axesB=ax[3, 6], color='black', lw=2, arrowstyle='->', zorder=1)
# Add each patch to its respective axis
ax[2, 0].add_artist(con1a)
ax[1, 0].add_artist(con1b)
ax[2, 0].add_artist(con2a)
ax[3, 0].add_artist(con2b)
ax[1, 0].add_artist(con3b)
ax[0, 0].add_artist(con3c)
ax[0, 1].add_artist(con3d)
ax[0, 2].add_artist(con3e)
ax[0, 3].add_artist(con3f)
ax[0, 4].add_artist(con3g)
ax[0, 5].add_artist(con3h)
ax[0, 6].add_artist(con3i)
ax[1, 6].add_artist(con3k)
ax[2, 6].add_artist(con3l)
###############################
######## Second column ########
###############################
# High-res map on the second line
ax[1, 1].imshow(high)
ax[1, 1].axis('off')
ax[1, 1].set_title('High-res CAM')
# Low-res map on the fourth line
ax[3, 1].imshow(utils.resize(low), zorder=3)
ax[3, 1].axis('off')
ax[3, 1].set_title('Low-res CAM', backgroundcolor='white', zorder=2)
# Fill the first, third and fifth lines with blank images
ax[0, 1].imshow(blank, alpha=0)
ax[0, 1].axis('off')
ax[2, 1].imshow(blank, alpha=0)
ax[2, 1].axis('off')
ax[4, 1].imshow(half_blank, alpha=0)
ax[4, 1].axis('off')
# Four lines represented by eleven (!) connection patches
# Connection of 'high-res map' to 'gradient'
con4 = ConnectionPatch(xyA=right, xyB=left, coordsA='data', coordsB='data',
axesA=ax[1, 1], axesB=ax[1, 2], color='black', lw=2, arrowstyle='->')
# Connection of 'low-res map' to 'roi'
con5a = ConnectionPatch(xyA=top, xyB=midpoint, coordsA='data', coordsB='data',
axesA=ax[3, 1], axesB=ax[2, 1], color='black', lw=2, zorder=1)
con5b = ConnectionPatch(xyA=midpoint, xyB=left, coordsA='data', coordsB='data',
axesA=ax[2, 1], axesB=ax[2, 2], color='black', lw=2, arrowstyle='->')
# Connection of 'low-res map' to 'focal points'
con6 = ConnectionPatch(xyA=right, xyB=left, coordsA='data', coordsB='data',
axesA=ax[3, 1], axesB=ax[3, 2], color='black', lw=2, arrowstyle='->')
# Connection of 'low-res map' to 'merger'
con7a = ConnectionPatch(xyA=bottom, xyB=top, coordsA='data', coordsB='data',
axesA=ax[3, 1], axesB=ax[4, 1], color='black', lw=2, zorder=1)
con7b = ConnectionPatch(xyA=top, xyB=top, coordsA='data', coordsB='data',
axesA=ax[4, 1], axesB=ax[4, 2], color='black', lw=2, zorder=1)
con7c = ConnectionPatch(xyA=top, xyB=top, coordsA='data', coordsB='data',
axesA=ax[4, 2], axesB=ax[4, 3], color='black', lw=2, zorder=1)
con7d = ConnectionPatch(xyA=top, xyB=top, coordsA='data', coordsB='data',
axesA=ax[4, 3], axesB=ax[4, 4], color='black', lw=2, zorder=1)
con7e = ConnectionPatch(xyA=top, xyB=top, coordsA='data', coordsB='data',
axesA=ax[4, 4], axesB=ax[4, 5], color='black', lw=2, zorder=1)
con7f = ConnectionPatch(xyA=top, xyB=bottom, coordsA='data', coordsB='data',
axesA=ax[4, 5], axesB=ax[3, 5], color='black', lw=2, zorder=1, arrowstyle='->')
# Add the patches to their respective axes
ax[1, 1].add_artist(con4)
ax[3, 1].add_artist(con5a)
ax[2, 1].add_artist(con5b)
ax[3, 1].add_artist(con6)
ax[3, 1].add_artist(con7a)
ax[4, 1].add_artist(con7b)
ax[4, 2].add_artist(con7c)
ax[4, 3].add_artist(con7d)
ax[4, 4].add_artist(con7e)
ax[4, 5].add_artist(con7f)
##############################
######## Third column ########
##############################
# High-res blur
blurred = filters.blur(high)
ax[1, 2].imshow(blurred)
ax[1, 2].axis('off')
ax[1, 2].set_title('Blurred')
# Region of Interest
roi = utils.resize(low) > utils.percentile(utils.resize(low), roi_percentile)
a = ax[2, 2].imshow(roi)
ax[2, 2].axis('off')
ax[2, 2].set_title('Region of Interest')
# Focal Points
focal_points = maxima.find_focal_points(low, scope=focal_scope, maxima_areas=maxima_areas)
bg, dots = a.get_cmap().colors[0], a.get_cmap().colors[-1]
ax[3, 2].imshow((blank.reshape(-1, 3) * bg).reshape(img.shape[1], img.shape[1], 3))
ax[3, 2].scatter([x[0] for x in focal_points], [x[1] for x in focal_points], marker='x', s=30, c=dots)
ax[3, 2].axis('off')
ax[3, 2].set_title('Focal Points')
# Fill first and fifth rows with blank images
ax[0, 2].imshow(blank, alpha=0)
ax[0, 2].axis('off')
ax[4, 2].imshow(half_blank, alpha=0)
ax[4, 2].axis('off')
# Three lines represented by five connection patches
con8 = ConnectionPatch(xyA=right, xyB=left, coordsA='data', coordsB='data',
axesA=ax[1, 2], axesB=ax[1, 3], color='black', lw=2, arrowstyle='->')
con9 = ConnectionPatch(xyA=right, xyB=(0, 0.5), coordsA='data', coordsB='axes fraction',
axesA=ax[2, 2], axesB=ax[2, 3], color='black', lw=2, arrowstyle='->')
con10a = ConnectionPatch(xyA=right, xyB=midpoint, coordsA='data', coordsB='data',
axesA=ax[3, 2], axesB=ax[3, 3], color='black', lw=2)
con10b = ConnectionPatch(xyA=midpoint, xyB=midpoint, coordsA='data', coordsB='data',
axesA=ax[3, 3], axesB=ax[3, 4], color='black', lw=2)
con10c = ConnectionPatch(xyA=midpoint, xyB=left, coordsA='data', coordsB='data',
axesA=ax[3, 4], axesB=ax[3, 5], color='black', lw=2, arrowstyle='->')
# Add the patches to their respective axes
ax[1, 2].add_artist(con8)
ax[2, 2].add_artist(con9)
ax[3, 2].add_artist(con10a)
ax[3, 3].add_artist(con10b)
ax[3, 4].add_artist(con10c)
###############################
######## Fourth column ########
###############################
# High-res edge detection
grad = utils.normalize_image(filters.sobel(blurred))
ax[1, 3].imshow(grad)
ax[1, 3].axis('off')
ax[1, 3].set_title('Edge detection')
# Gradient percentiles
roi_grad = grad[roi]
lower = utils.percentile(roi_grad, 25)
upper = utils.percentile(roi_grad, 75)
ax[2, 3] = sns.distplot(roi_grad.ravel(), ax=ax[2, 3])
ax[2, 3].plot([lower, lower], [0, 4], c='C1')
ax[2, 3].plot([upper, upper], [0, 4], c='C1')
ax[2, 3].text(lower, -0.5, 'lower', color='C1', horizontalalignment='center')
ax[2, 3].text(upper, 4.5, 'upper', color='C1', horizontalalignment='center')
ax[2, 3].axis('off')
ttl = ax[2, 3].set_title('Edge Histogram')
ttl.set_bbox(dict(color='white', alpha=0.5, zorder=2))
square_axes(ax[2, 3]) # custom function that shrinks the axis object to a square box
# Fill first, fourth and fifth rows
ax[0, 3].imshow(blank, alpha=0)
ax[0, 3].axis('off')
ax[3, 3].imshow(blank, alpha=0)
ax[3, 3].axis('off')
ax[4, 3].imshow(half_blank, alpha=0)
ax[4, 3].axis('off')
# Three lines represented by four connection patches
con11 = ConnectionPatch(xyA=bottom, xyB=(0.5, 1), coordsA='data', coordsB='axes fraction',
axesA=ax[1, 3], axesB=ax[2, 3], color='black', lw=2, arrowstyle='->')
con12a = ConnectionPatch(xyA=right, xyB=midpoint, coordsA='data', coordsB='data',
axesA=ax[1, 3], axesB=ax[1, 4], color='black', lw=2)
con12b = ConnectionPatch(xyA=midpoint, xyB=top, coordsA='data', coordsB='data',
axesA=ax[1, 4], axesB=ax[2, 4], color='black', lw=2, arrowstyle='->', zorder=1)
con13 = ConnectionPatch(xyA=(1, 0.5), xyB=left, coordsA='axes fraction', coordsB='data',
axesA=ax[2, 3], axesB=ax[2, 4], color='black', lw=2, arrowstyle='->')
# Add the patches to their respective axes
ax[1, 3].add_artist(con11)
ax[1, 3].add_artist(con12a)
ax[1, 4].add_artist(con12b)
ax[2, 3].add_artist(con13)
##############################
######## Fifth column ########
##############################
# Region Growing Segmentation
segm = segment.region_growing(grad, seeds=focal_points, lower=lower, upper=upper)
ax[2, 4].imshow(segm, zorder=3)
ax[2, 4].axis('off')
ttl = ax[2, 4].set_title('Region Growing\nSegmentation')
ttl.set_bbox(dict(color='white', alpha=0.5, zorder=2))
# Fill first, second fourth and fifth rows
ax[0, 4].imshow(blank, alpha=0)
ax[0, 4].axis('off')
ax[1, 4].imshow(blank, alpha=0)
ax[1, 4].axis('off')
ax[3, 4].imshow(blank, alpha=0)
ax[3, 4].axis('off')
ax[4, 4].imshow(half_blank, alpha=0)
ax[4, 4].axis('off')
# Just one connection! :)
con14 = ConnectionPatch(xyA=right, xyB=left, coordsA='data', coordsB='data',
axesA=ax[2, 4], axesB=ax[2, 5], color='black', lw=2, arrowstyle='->')
ax[2, 4].add_artist(con14)
##############################
######## Sixth column ########
##############################
# Add edges and fill small holes
edges = (grad >= upper).astype(float)
roi_edges = edges * roi
segm_with_edges = segm + roi_edges
filled = maxima.remove_small_holes(segm_with_edges)
ax[2, 5].imshow(filled)
ax[2, 5].axis('off')
ax[2, 5].set_title('Remove small holes')
# High-Low merger
merged = merge.merge_images(filled, low, method=merge_type, alpha=merge_alpha)
ax[3, 5].imshow(merged)
ax[3, 5].axis('off')
ttl = ax[3, 5].set_title('High-Low Merger')
ttl.set_bbox(dict(color='white', alpha=0.5, zorder=2))
# Fill remaining rows
ax[0, 5].imshow(blank, alpha=0)
ax[0, 5].axis('off')
ax[1, 5].imshow(blank, alpha=0)
ax[1, 5].axis('off')
ax[3, 5].imshow(blank, alpha=0)
ax[3, 5].axis('off')
ax[4, 5].imshow(half_blank, alpha=0)
ax[4, 5].axis('off')
# Last connection patches...
con15 = ConnectionPatch(xyA=bottom, xyB=top, coordsA='data', coordsB='data',
axesA=ax[2, 5], axesB=ax[3, 5], color='black', lw=2, zorder=-1, arrowstyle='->')
con16 = ConnectionPatch(xyA=right, xyB=left, coordsA='data', coordsB='data',
axesA=ax[3, 5], axesB=ax[3, 6], color='black', lw=2, zorder=-1, arrowstyle='->')
ax[2, 5].add_artist(con15)
ax[3, 5].add_artist(con16)
################################
######## Seventh column ########
################################
# Result
if filter_type == 'percentage':
result = merge.keep_percentage(img, merged, percentage=filter_percentage/100)
else:
result = merge.filter_image(img, merged, threshold=filter_threshold)
ax[3, 6].imshow(result, zorder=3)
ax[3, 6].axis('off')
ttl = ax[3, 6].set_title('Result')
ttl.set_bbox(dict(color='white', alpha=0.5, zorder=2))
# Fill remaining rows
ax[0, 6].imshow(blank, alpha=0)
ax[0, 6].axis('off')
ax[1, 6].imshow(blank, alpha=0)
ax[1, 6].axis('off')
ax[2, 6].imshow(blank, alpha=0)
ax[2, 6].axis('off')
ax[4, 6].imshow(half_blank, alpha=0)
ax[4, 6].axis('off')
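# A minimal usage sketch for pipeline(). The arrays below are random
# placeholders (assumptions, not project data); the real inputs are an RGB
# batch of shape (1, 224, 224, 3), a 14x14 low-res CAM and a 224x224 high-res
# CAM, as described in the docstring above.
def _pipeline_example():
    img = np.random.rand(1, 224, 224, 3)   # placeholder image batch
    low = np.random.rand(14, 14)           # placeholder low-resolution CAM
    high = np.random.rand(224, 224)        # placeholder high-resolution CAM
    pipeline(img, low, high, roi_percentile=85, merge_type='blend',
             filter_type='percentage', filter_percentage=15)
    plt.show()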
def plt_to_static(axes):
"""
Should convert an axis object to an image in a numpy.array. Doesn't work as intended!
:param axes: A matplotlib.axes.Axes object
:return: The same object as a numpy.array
"""
fig = plt.figure()
fig.axes.append(axes)
fig.canvas.draw()
buf = fig.canvas.tostring_rgb()
width, height = fig.canvas.get_width_height()
return np.frombuffer(buf, dtype=np.uint8).reshape(height, width, 3)
def square_axes(axes):
"""
Takes a matplotlib.axes.Axes object, finds its height and width and shrinks the largest dimension to match the
smallest one. Caution: it actually changes the object (in-place)!
:param axes: A matplotlib.axes.Axes object.
:return: The new Bbox coordinates.
"""
bbox = axes.get_position()._points.copy()
width = bbox[1, 0] - bbox[0, 0]
height = bbox[1, 1] - bbox[0, 1]
if width < height:
center = bbox[0, 1] + height / 2
bbox[0, 1] = center - width / 2
bbox[1, 1] = center + width / 2
else:
center = bbox[0, 0] + width / 2
bbox[0, 0] = center - height / 2
bbox[1, 0] = center + height / 2
axes.set_position(Bbox(bbox))
return bbox
```
|
{
"source": "jeonghyukpark/msi_highlow",
"score": 2
}
|
#### File: msi_highlow/msi/maskrcnn_gland.py
```python
import os
import time
import random
import collections
import numpy as np
import pandas as pd
from PIL import Image
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import torch
import torchvision
from torchvision.transforms import ToPILImage
from torchvision.transforms import functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
import os
import pickle
from tqdm import tqdm
import cv2
# Fix randomness
def fix_all_seeds(seed):
np.random.seed(seed)
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
fix_all_seeds(2021)
class Compose:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
class Normalize:
def __call__(self, image, target):
image = F.normalize(image, RESNET_MEAN, RESNET_STD)
return image, target
class ToTensor:
def __call__(self, image, target):
image = F.to_tensor(image.copy())
return image, target
def get_transform(train):
transforms = [ToTensor()]
NORMALIZE = False
if NORMALIZE:
transforms.append(Normalize())
# Data augmentation for train (HorizontalFlip and VerticalFlip are custom
# transform classes assumed to be defined elsewhere in the project)
if train:
transforms.append(HorizontalFlip(0.5))
transforms.append(VerticalFlip(0.5))
return Compose(transforms)
def get_model():
NUM_CLASSES = 1+2
BOX_DETECTIONS_PER_IMG = 100
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True,
box_detections_per_img=BOX_DETECTIONS_PER_IMG)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, NUM_CLASSES)
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, NUM_CLASSES)
return model
def result_nms_pp(result, conf_thrs = 0.8, seg_ths = 0.5, nms_ths = 0.5):
masks = result['masks'].cpu()
labels = result['labels'].cpu()
mask_scores_raw = np.asarray(result['scores'].cpu())
mask_scores = mask_scores_raw>=conf_thrs
masks = np.asarray(masks[mask_scores].cpu())
labels = labels[mask_scores]
mask_scores_raw = mask_scores_raw[mask_scores]
len_pred = len(masks)
buffer = np.zeros((len_pred, len_pred))
class_ind = 0
for ind_pred_1 in range(len_pred):
for ind_pred_2 in range(ind_pred_1+1, len_pred):
intersection = ((masks[ind_pred_1, class_ind]>=seg_ths) * (masks[ind_pred_2, class_ind]>=seg_ths)).sum()
union = ((masks[ind_pred_1, class_ind]>=seg_ths) + (masks[ind_pred_2, class_ind]>=seg_ths)>=1).sum()
buffer[ind_pred_1, ind_pred_2] = intersection/union
buffer = buffer >= nms_ths
overlapped_idx = []
sorted_idx = list(np.argsort(mask_scores_raw)[::-1])
for idx in sorted_idx:
overlapped = np.where(buffer[idx] == 1)
if len(overlapped[0]) > 0:
#print(overlapped[0])
overlapped_idx.extend(overlapped[0])
nms_post_idx = np.delete(sorted_idx, overlapped_idx)
if len(nms_post_idx) == 0:
return [], [], []
masks = masks[nms_post_idx]
mask_scores_raw = mask_scores_raw[nms_post_idx]
labels = labels[nms_post_idx]
return masks, mask_scores_raw, labels
def metric_counter(masks_gt, masks_pred, class_ind = 0, seg_ths = 0.5):
len_gt, len_pred = len(masks_gt), len(masks_pred)
buffer = np.zeros((len_gt, len_pred))
for ind_gt in range(len_gt):
for ind_pred in range(len_pred):
buffer[ind_gt, ind_pred] = (masks_gt[ind_gt] * masks_pred[ind_pred, class_ind]>=seg_ths).sum()/masks_gt[ind_gt].sum()
TP = ((buffer.max(axis=1) >= 0.5) == 1).sum()
FN = ((buffer.max(axis=1) >= 0.5) == 0).sum()
FP = ((buffer.max(axis=0) >= 0.5) == 0).sum()
return {'TP':TP, 'FN':FN, 'FP':FP}
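# A tiny synthetic check of metric_counter(): one ground-truth mask that is well
# covered by a prediction (a TP) and one prediction overlapping nothing (a FP).
# The arrays are toy data, only meant to show the expected shapes:
# masks_gt is (N_gt, H, W) and masks_pred is (N_pred, C, H, W).
def _metric_counter_sketch():
    gt = np.zeros((1, 8, 8))
    gt[0, :4, :4] = 1                  # one ground-truth object
    pred = np.zeros((2, 1, 8, 8))
    pred[0, 0, :4, :4] = 0.9           # detection matching the object
    pred[1, 0, 6:, 6:] = 0.9           # spurious detection
    return metric_counter(gt, pred)    # -> {'TP': 1, 'FN': 0, 'FP': 1}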
class SingleImageDataset(Dataset):
def __init__(self, imgs, transforms=None):
self.transforms = transforms
self.imgs = imgs
#print(self.imgs)
def __getitem__(self, idx):
img_path = self.imgs[idx]
image_name = img_path.split('/')[-1]
image = cv2.imread(img_path)[:,:,::-1]
if self.transforms is not None:
image, _ = self.transforms(image=image, target=None)
return {'image': image, 'image_name': image_name}
def __len__(self):
return len(self.imgs)
def inference(image_paths, model_path, device, save_dir=None):
SAVE_NPZ = False
DEVICE=device
model = get_model()
model.to(DEVICE)
for param in model.parameters():
param.requires_grad = True
model.load_state_dict(torch.load(model_path))
model.eval();
# Note: the image_paths argument is currently ignored; these bundled sample patches are used instead.
image_sample_paths = ['./data/patch_sample/blk-TMIGEPVTWTVC-TCGA-AA-3837-01Z-00-DX1.png',
'./data/patch_sample/blk-EYIAGQICRLWL-TCGA-CM-4743-01Z-00-DX1.png']
simage = SingleImageDataset(image_sample_paths, transforms=get_transform(train=False))
predictions = {}
for sample in tqdm(simage):
img = sample['image']
img_id = sample['image_name'].split('.')[0]
with torch.no_grad():
result = model([img.to(DEVICE)])[0]
masks, mask_scores_raw, labels = result_nms_pp(result)
predictions[img_id] = {}
predictions[img_id]['masks'] = np.asarray(masks)
predictions[img_id]['scores'] = np.asarray(mask_scores_raw)
predictions[img_id]['labels'] = np.asarray(labels)
if SAVE_NPZ == True:
np.savez(f'{save_dir}/{img_id}.npz', masks=np.asarray(masks), scores=np.asarray(mask_scores_raw), labels = np.asarray(labels))
return predictions
```
#### File: msi_highlow/utils/visualization.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
from pathlib import Path
def plot(ax, df, idx, patch_sample_base = './data/patch_sample', alpha=0, fontsize=12):
cell_labels = ['no-label', 'neoplastic', 'inflammatory', 'connective', 'necrosis', 'non-neoplastic']
tissue_labels = ['ADI', 'BACK', 'DEB', 'LYM', 'MUC', 'MUS', 'NORM', 'STR', 'TUM']
gland_labels = ['benign_gland', 'malignant_gland']
gland_colors = {1:(246,232,195), 2:(128,205,193)}
# tissue_colors = {'LYM':[30,148,189], 'ADI':[141,141,141], 'BACK':[53,53,53], 'DEB': [186,39,154],
# 'MUC':[252,215,203], 'MUS':[67,172,34], 'NORM':[200,103,1], 'STR':[245,184,9],
# 'TUM':[231,71,23]}
tissue_colors = {'LYM': (0.11764705882352941, 0.5803921568627451, 0.7411764705882353),
'ADI': (0.5529411764705883, 0.5529411764705883, 0.5529411764705883),
'BACK': (0.20784313725490197, 0.20784313725490197, 0.20784313725490197),
'DEB': (0.7294117647058823, 0.15294117647058825, 0.6039215686274509),
'MUC': (0.9882352941176471, 0.8431372549019608, 0.796078431372549),
'MUS': (0.2627450980392157, 0.6745098039215687, 0.13333333333333333),
'NORM': (0.7843137254901961, 0.403921568627451, 0.00392156862745098),
'STR': (0.9607843137254902, 0.7215686274509804, 0.03529411764705882),
'TUM': (0.9058823529411765, 0.2784313725490196, 0.09019607843137255)}
patch_name = str(Path(df.loc[idx, 'filename']).stem)
tissue_argmax = df.loc[idx, 'tissue_type_ind']
cell_overlay_path = f'{patch_sample_base}/cell_overlay_{patch_name}.png'
gland_npz_path = f'{patch_sample_base}/gland_{patch_name}.npz'
with np.load(gland_npz_path, 'rb') as a:
masks = a['masks']
labels = a['labels']
cell_overlay_img = cv2.imread(cell_overlay_path)
cell_overlay_img = cv2.cvtColor(cell_overlay_img, cv2.COLOR_BGR2RGB)
cell_overlay_img = cv2.resize(cell_overlay_img, (224,224), interpolation = cv2.INTER_AREA)
for mask_ind in range(len(masks)):
mask_bin = 255*(masks[mask_ind][0]>0.5).astype(np.uint8)
contours, hierarchy = cv2.findContours(mask_bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
gland_class = labels[mask_ind]
for cnt in contours:
cv2.drawContours(cell_overlay_img, [cnt], 0, gland_colors[gland_class], 5) # blue
tissue_label = tissue_labels[tissue_argmax]
ax.imshow(cell_overlay_img)
ax.text(4, -8, tissue_label, color='w', backgroundcolor=tissue_colors[tissue_label], fontsize=fontsize)
```
|
{
"source": "JeonghyunJoo/dynamic-arg-parser",
"score": 3
}
|
#### File: JeonghyunJoo/dynamic-arg-parser/dynamicargparse.py
```python
import yaml
import sys
import itertools
def bool_converter(s):
if isinstance(s, str):
s = s.lower()
if s == 'true':
return True
elif s == 'false':
return False
else:
raise ValueError
elif isinstance(s, bool):
return s
elif isinstance(s, int):
return s != 0
else:
raise ValueError
#Check a type consistency b/w two types, typ1 and typ2
#If their types disagree but one type can cover the other, then return the more general type among them.
#If their types are contradictory, then raise exception
def type_consistency(typ1, typ2):
if typ1 == typ2:
return typ1
terminal_typ1 = typ1.replace('list_', '') if typ1.startswith('list') else typ1
terminal_typ2 = typ2.replace('list_', '') if typ2.startswith('list') else typ2
if terminal_typ1 == terminal_typ2:
#this case happens, only when two types have the same terminal type but one is a list type and the other is not
return 'list_' + terminal_typ1
#relationship
#String <- float, int, bool
#Float <- int
#int <- bool
num = {'str' : 3, 'float' : 2, 'int' : 1, 'bool' : 0}
M = max(num[terminal_typ1], num[terminal_typ2])
m = min(num[terminal_typ1], num[terminal_typ2])
if M == 3:
unified_terminal_typ = 'str'
elif M == 2 and m == 1:
unified_terminal_typ = 'float'
elif M == 1 and m == 0:
unified_terminal_typ = 'int'
else:
raise Exception()
if typ1.startswith('list') or typ2.startswith('list'):
return 'list_' + unified_terminal_typ
return unified_terminal_typ
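# A few concrete cases of the promotion rule implemented above
# (str > float > int > bool, and a list on either side makes the result a list
# type). Wrapped in a helper so nothing runs at import time.
def _type_consistency_examples():
    assert type_consistency('int', 'float') == 'float'            # int promoted to float
    assert type_consistency('bool', 'int') == 'int'               # bool promoted to int
    assert type_consistency('list_int', 'float') == 'list_float'  # list side wins, then promote
    assert type_consistency('str', 'bool') == 'str'               # str absorbs everything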
class DynamicArgumentParser():
converters = {
'list_int' : int,
'int' : int,
'list_float' : float,
'float' : float,
'list_bool' : bool_converter,
'bool' : bool_converter,
'list_str' : str,
'str' : str
}
@classmethod
def _convert(cls, v):
if isinstance(v, list):
result_list = []
unified_terminal_typ = None
for e in v:
e, typ = DynamicArgumentParser._convert(e)
unified_terminal_typ = type_consistency(unified_terminal_typ, typ) if unified_terminal_typ else typ
result_list.append(e)
return result_list, 'list_' + unified_terminal_typ
if isinstance(v, bool):
return v, 'bool'
if isinstance(v, int):
return v, 'int'
elif isinstance(v, float):
return v, 'float'
elif isinstance(v, str):
#inspired by 'https://github.com/bruth/strconv'
for typ in ['int', 'float', 'bool']:
try:
converter = cls.converters[typ]
v = converter(v)
return v, typ
except ValueError:
pass
return v, 'str'
else:
raise Exception("Can not handle the conversion of the type {}".format(type(v)))
def __init__(self, staticparser = None, check_type_consistency = True):
# - check_type_consistency : Check whether a data type is matched for the same argument
super(DynamicArgumentParser, self).__init__()
self.staticparser = staticparser
self.arg_dict = {} # Key: arg name, Value: (arg value #converted python data, arg type #string, terminal type)
self.check_type_consistency = check_type_consistency
@classmethod
def dict_to_arg_dict(cls, dic, arg_dict = {}, prefix = []):
for k, v in dic.items():
if v is None:
continue
if isinstance(v, dict):
argname = '.'.join(prefix + [k])
arg_dict[argname] = ({}, 'dict')
cls.dict_to_arg_dict(v, arg_dict, prefix = prefix + [k])
else:
argname = '.'.join(prefix + [k])
_v, typ = cls._convert(v)
arg_dict[argname] = (v, typ)
return arg_dict
def update(self, add_dict, overwrite = True):
# - overwrite:
# If true, add_dict has priority over self.arg_dict when a duplicate key occurs
# if not overwrite:
# filtered = filter(lambda item: item[0] not in self.arg_dict, add_dict.items())
# else:
# filtered = add_dict.items()
for arg, v in add_dict.items():
if arg in self.arg_dict:
value = self.arg_dict[arg][0]
typ = self.arg_dict[arg][1]
if self.check_type_consistency:
typ1 = self.arg_dict[arg][1]
typ2 = v[1]
try:
typ = type_consistency(typ1, typ2)
except KeyError:
#it happens when one type is 'dict' and the other type is 'dictionary'
msg = "Type Consistency check Error\n"\
"If you want to overwrite the argument value anyway, then set 'check_type_consistency = False'\n"\
"Error Caused by:\n"
if typ2 == 'dict':
msg += "{} can not be extended, because the terminal value {} is already assigned".format(arg, value)
elif typ1 == 'dict':
msg += "The terminal value {} can not be assigned to {}, because it has its children".format(v[0], arg)
raise Exception(msg)
except Exception:
msg = "Type Consistency check Error\n"\
"If you want to overwrite the argument value anyway, then set 'check_type_consistency = False'\n"\
"Error Caused by:\n"
raise Exception(msg + "Contradictory types {} and {} for {}".format(typ1, typ2, arg))
if overwrite:
value = v[0]
self.arg_dict[arg] = (value, typ)
else:
self.arg_dict[arg] = v
#add_mode:
# - 'o' : Overwrite if a new value is given for the existing argument
# - 'a' : Add values only for new values
# - 'n' : Remove an old parsing result
def parse_config_file(self, file, add_mode = ['o', 'a', 'n'][0]):
if add_mode == 'n':
self.arg_dict = {}
with open(file, 'r') as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
arg_dict = DynamicArgumentParser.dict_to_arg_dict(cfg, arg_dict = {})
self.update(arg_dict, add_mode == 'o')
def static_parse_cmd_args(self, args = None, add_mode = ['o', 'a', 'n'][0]):
if add_mode == 'n':
self.arg_dict = {}
if args is None:
args = sys.argv[1:]
if self.staticparser is not None:
static_args, args = self.staticparser.parse_known_args(args)
arg_dict = DynamicArgumentParser.dict_to_arg_dict(static_args.__dict__, arg_dict = {})
self.update(arg_dict, add_mode == 'o')
return args
def dynamic_parse_cmd_args(self, args = None, add_mode = ['o', 'a', 'n'][0]):
if add_mode == 'n':
self.arg_dict = {}
if args is None:
args = sys.argv[1:]
arg_dict = {}
argvalue = []
argname = None
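# Illustrative example (assumed invocation, not from the original source):
#   args = ['--model.lr=0.1,0.2', '--debug'] would yield
#     arg_dict['model']    = ({}, 'dict')
#     arg_dict['model.lr'] = ([0.1, 0.2], 'list_float')
#     arg_dict['debug']    = (True, 'bool')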
for arg in args + ["-"]: #Append a dummy argument to keep the logic simple
if arg.startswith(("-", "--")):
if argname is not None:
v = None
typ = None
if len(argvalue) == 0:
v, typ = True, 'bool'
elif len(argvalue) == 1:
v, typ = self._convert(argvalue[0])
elif len(argvalue) > 1:
v, typ = self._convert(argvalue)
lastindex = 0
while True:
lastindex = argname.find('.', lastindex)
if lastindex == -1:
break
arg_dict[ argname[:lastindex] ] = ({}, 'dict')
lastindex = lastindex + 1
arg_dict[argname] = (v, typ)
argname = arg.lstrip('-')
argvalue = []
assign_symbol = argname.find('=')
if assign_symbol != -1:
argname, argvalue = argname[:assign_symbol], argname[assign_symbol+1:]
argvalue = [x for x in argvalue.split(',') if len(x) > 0]
else:
argvalue.append(arg)
self.update(arg_dict, add_mode == 'o')
def parse_argument(self, args = None, cfgfile_arg = ''):
if args is None:
args = sys.argv[1:]
#renew old parsing results and parse command line arguments with a static parser
args_yet_to_be_parsed = self.static_parse_cmd_args(args, add_mode = 'n')
#handle arguments unrecognized by the static parser
self.dynamic_parse_cmd_args(args_yet_to_be_parsed, add_mode = 'a')
if cfgfile_arg != '' and cfgfile_arg in self.arg_dict:
cfg_filepath = self.arg_dict.get(cfgfile_arg)[0]
#Load arguments from the configuration file
self.parse_config_file(cfg_filepath, 'a') # 'a' is no-overwrite mode; command-line args take priority
def convert_2_recursive_dict(arg_dict):
root_dir = {}
for k in sorted(arg_dict.keys()):
v, _ = arg_dict[k] #Drop type information
key_chain = k.split('.')
def get_parent_dict(keys, parent):
if len(keys) == 1:
return parent
else:
return get_parent_dict(keys[1:], parent[keys[0]])
p_dict = get_parent_dict(key_chain, root_dir)
p_dict[key_chain[-1]] = {} if isinstance(v, dict) else v
return root_dir
rdict = convert_2_recursive_dict(self.arg_dict)
#args_namespace = argdict_to_namespace(self.arg_dict)
args_tree = AugmentedNameSpace(rdict)
args_tree.activate(True)
return args_tree#args_namespace
class AugmentedNameSpace():
MEMBER_ATTRIBUTE = {'_mem_parent', '_mem_children', '_mem_activate', '_mem_argument_dict', '_mem_absorbing_node', '_mem_key_chain_buffer'}
def __init__(self, arg_dict, p = None, activate = False):
super(AugmentedNameSpace, self).__init__()
self._mem_parent = p
self._mem_children = {}
self._mem_activate = activate
self._mem_argument_dict = {} #key: arg name , value: {'value': value, 'ref_count': ref count}
self._mem_absorbing_node = NoneLike(self)
self._mem_key_chain_buffer = []
self._build(arg_dict)
def keys(self):
return itertools.chain(self._mem_argument_dict.keys(), self._mem_children)
def __getitem__(self, item):
if item in self._mem_argument_dict:
return self._mem_argument_dict[item]['value']
elif item in self._mem_children:
return self._mem_children[item].todict()
def toyaml(self, save_path = None):
if save_path != None:
with open(save_path, 'w') as f:
yaml.dump(self.todict(), f, sort_keys=True)
else:
yaml_str = yaml.dump(self.todict(), sort_keys=True)
return yaml_str
#def asdict(self):
def todict(self, include_ref_count = False):
root_dir = {}
for k,v in self._mem_argument_dict.items():
arg_value = v['value']
if include_ref_count:
root_dir[k] = (arg_value, v['ref_count'])
else:
root_dir[k] = arg_value
for k,v in self._mem_children.items():
root_dir[k] = v.todict(include_ref_count)
return root_dir
def activate(self, v = True):
self._mem_activate = v
for c in self._mem_children.values():
c.activate(v)
def trim(self, min_ref_count = 1):
del_keys = []
for k,v in self._mem_argument_dict.items():
if v['ref_count'] < min_ref_count:
del_keys.append(k)
for k in del_keys:
del self._mem_argument_dict[k]
del_keys = []
for k, c in self._mem_children.items():
if c.trim(min_ref_count) is None:
del_keys.append(k)
for k in del_keys:
del self._mem_children[k]
if len(self._mem_argument_dict) + len(self._mem_children) == 0:
return None
else:
return self
def _build(self, arg_dict):
for k,v in arg_dict.items():
if isinstance(v, dict):
self._add_child(k, v)
else:
setattr(self, k, v)
# setattr(self, k, v)
def _add_child(self, k, arg_dict):
self._mem_children[k] = AugmentedNameSpace(arg_dict, self, self._mem_activate)
def __repr__(self, as_str = True):
root_dir = {}
for k,v in self._mem_argument_dict.items():
root_dir[k] = '(value: {}, ref_count: {})'.format(v['value'], v['ref_count']) #('value: ' + str(arg_value), 'ref_count: ' + str(v['ref_count']))
for k,v in self._mem_children.items():
root_dir[k] = v.__repr__(as_str = False)
if as_str:
return str(root_dir)
else:
return root_dir
#return str(self.todict(True))
def _get_key_chain(self, key_chain = [], terminal_node = True):
assert isinstance(self._mem_key_chain_buffer[0], str)
key_chain.append( self._mem_key_chain_buffer[0] )
if self._mem_parent is None:
return
else:
self._mem_parent._get_key_chain(key_chain, terminal_node = False)
if terminal_node:
key_chain.reverse()
return
def __setattr__(self, key, value):
if key in AugmentedNameSpace.MEMBER_ATTRIBUTE:
super().__setattr__(key, value)
return
#Should I check the type of value?
#The value should satisfy:
# it can be stored in yaml-format
# it can be recovered from yaml-format
#Otherwise, An error might occur when it is saved or it might be loaded from the saved file in a wrong way
if key in self._mem_children:
self._mem_key_chain_buffer.clear()
self._mem_parent._get_key_chain( self._mem_key_chain_buffer )
self._mem_key_chain_buffer.append(key)
print("Warning: You just tried to assign the value '{}' to '{}', which is already taken by AugmentedNamespace node.".format(value, '.'.join(self._mem_key_chain_buffer)),
"This attempt will be ignored")
return
#raise Exception("It tries to assign a value by replacing AugmentedNode")
#Handle argument assignment
if key not in self._mem_argument_dict:
self._mem_argument_dict[key] = {'value': value, 'ref_count': 0}
else:
self._mem_argument_dict[key]['value'] = value
self._stack_ref_count(key)
def __getattr__(self, key):
if key in AugmentedNameSpace.MEMBER_ATTRIBUTE:
# Unreachable in practice: __getattribute__() already handles these names because they are stored in the instance __dict__
raise Exception()
if key in self._mem_children:
#Go through deeper level
self._mem_key_chain_buffer.clear()
self._mem_key_chain_buffer.append(key)
return self._mem_children[key]
elif key in self._mem_argument_dict:
#The terminal value get return
self._stack_ref_count(key)
return self._mem_argument_dict[key]['value']
else:
#The referenced key does not exist in the namespace. Move to the absorbing state
self._clear_key_chain_buffer()
self._append_key_chain_buffer(key)
return self._mem_absorbing_node
def _stack_ref_count(self, key):
additional_ref_count = 1 if self._mem_activate else 0
self._mem_argument_dict[key]['ref_count'] = self._mem_argument_dict[key]['ref_count'] + additional_ref_count
def _clear_key_chain_buffer(self):
self._mem_key_chain_buffer.clear()
def _append_key_chain_buffer(self, key):
self._mem_key_chain_buffer.append(key)
class NoneLike():
#Member = ['_momp']
def __init__(self, p):
super(NoneLike, self).__init__()
self._mem_p = p
def __bool__(self):
return False
def __str__(self):
return "None"
def __eq__(self, other):
return (other is None) or isinstance(other, NoneLike)
def __getattr__(self, key):
self._mem_p._append_key_chain_buffer(key)
return self
def __setattr__(self, key, value):
if key.startswith('_mem_'):
super().__setattr__(key, value)
else:
root_dict = {}
ptr = root_dict
for k in self._mem_p._mem_key_chain_buffer[1:]:
ptr[k] = {}
ptr = ptr[k]
ptr[key] = value
root_key = self._mem_p._mem_key_chain_buffer[0]
self._mem_p._add_child(root_key, root_dict)
#setattr(self._mem_p, root_key, root_dict)
```
|
{
"source": "jeonghyunkeem/cs492h",
"score": 2
}
|
#### File: models/retrieval/collect_embedding.py
```python
import os, sys, time
import numpy as np
import torch
import torch.nn.functional as F
from plyfile import PlyData, PlyElement
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
# from chamfer_distance.chamfer_distance import ChamferDistance
from autoencoder import PointNetAE
from Data.dataset import Dataset
import pickle
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import KDTree
from pdb import set_trace
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
BASE_DIR = os.path.dirname(__file__)
DUMP_DIR = os.path.join(BASE_DIR, 'dump')
sys.path.append(os.path.join(BASE_DIR, 'chamfer_distance'))
# Hyperparam
BATCH_SIZE = 32
data = 'shapenetcorev2'
n_points = 2048
latent = 512
NUM_NEIGHBORS = 3
dump = True
# Set CUDA
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# Dataset/loader
TRAIN_DATASET = Dataset(dataset_name=data, num_points=n_points, split='train', class_choice=True)
TEST_DATASET = Dataset(dataset_name=data, num_points=n_points, split='test', class_choice=True)
EVAL_DATASET = Dataset(dataset_name=data, num_points=n_points, split='val', class_choice=True)
TRAIN_DATALOADER = torch.utils.data.DataLoader(TRAIN_DATASET,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=4)
TEST_DATALOADER = torch.utils.data.DataLoader(TEST_DATASET,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=4)
# Set Network
n_dims = 3
net = PointNetAE(latent, n_dims, n_points)
net.to(device)
model = os.path.join(BASE_DIR, 'outputs') + "/model512_50.pth"
net.load_state_dict(torch.load(model))
net.eval()
# # Set Loss function
# chamfer_distance = ChamferDistance()
# criterion = chamfer_distance
def collect_embedding():
n = len(TRAIN_DATASET)
pbar = tqdm(total=n, leave=False)
# Evaluate
total_loss = 0.0
net.eval()
# Set embeddings
check = True
for i, data in enumerate(TRAIN_DATALOADER):
# Parse data
points, label, category, filename = data
points = points.cuda()
# if check: return
with torch.no_grad():
recon_ret, embedding = net(points)
embedding = np.array(embedding.cpu())
label = np.array(label.cpu())
category = np.array(category.cpu()).reshape(-1, 1)
filename = np.array(list(filename)).reshape(-1, 1)
# if check: return
if i == 0:
all_embeddings = np.array(embedding)
all_labels = np.array(label)
all_category = np.array(category)
all_filenames = np.array(filename)
else:
all_embeddings = np.vstack((all_embeddings, embedding))
all_labels = np.vstack((all_labels, label))
all_category = np.vstack((all_category, category))
all_filenames = np.vstack((all_filenames, filename))
# dist1, dist2 = criterion(points, recon_ret)
# loss = (torch.mean(dist1)) + (torch.mean(dist2))
batch_size = list(data[0].size())[0]
# total_loss += loss * batch_size
# pbar.set_description('Train Loss: {:f}'.format(loss))
pbar.update(batch_size)
pbar.close()
np.save(DUMP_DIR + '/all_embeddings', all_embeddings)
np.save(DUMP_DIR + '/all_labels', all_labels)
np.save(DUMP_DIR + '/all_category', all_category)
np.save(DUMP_DIR + '/all_filenames', all_filenames)
####For Retrieval
if dump:
database_kdtree = KDTree(all_embeddings)
pickle_out = open(os.path.join(DUMP_DIR, "shapenet_kdtree.pickle"),"wb")
pickle.dump(database_kdtree, pickle_out)
pickle_out.close()
def tsne_visualization():
all_embeddings = np.load(DUMP_DIR + '/all_embeddings.npy')
all_labels = np.load(DUMP_DIR + '/all_labels.npy')
time_start = time.time()
tsne = TSNE(n_components=2, random_state=0)
embeddings_2d = tsne.fit_transform(all_embeddings)
print('t-SNE done! Time elapsed: {} seconds'.format(time.time()-time_start))
plt.scatter(embeddings_2d[:, 0], embeddings_2d[:, 1], c=all_labels, label=all_labels, s=0.2, alpha=0.5)
plt.legend()
plt.savefig(os.path.join(DUMP_DIR, 'tsne.png'))
def retrieval(q, test=False):
# Set data
all_embeddings = np.load(DUMP_DIR + '/all_embeddings.npy')
all_labels = np.load(DUMP_DIR + '/all_labels.npy')
all_filenames = np.load(DUMP_DIR + '/all_filenames.npy')
with open(DUMP_DIR + '/shapenet_kdtree.pickle', 'rb') as pickle_file:
database_kdtree = pickle.load(pickle_file)
if not test:
# Result dict
pred = {}
# Target data
points, label, _, filename = TRAIN_DATASET[q]
points = points.cuda().unsqueeze(0)
embedding = net(points, r=True).detach().cpu()
# Search nearest neighbor in embedding space
dist, idx = database_kdtree.query(embedding, k=1)
pred['label'] = all_labels[idx]
pred['filename'] = all_filenames[idx]
else:
n = len(TRAIN_DATASET)
for i in range(n):
# Target data
points, label, category, filename = TRAIN_DATASET[i]
points = points.cuda().unsqueeze(0)
embedding = net(points, r=True).detach().cpu()
# Search nearest neighbor in embedding space
label = label.cpu().item()
filename = filename
dist, q_pred_idx = database_kdtree.query(embedding, k=1)
dist, e_pred_idx = database_kdtree.query(np.array([all_embeddings[i]]), k=1)
# Output
query_filename = all_filenames[q_pred_idx][0][0][0]
emb_filename = all_filenames[e_pred_idx][0][0][0]
if filename != emb_filename or filename != query_filename:
print(i)
print(filename)
print(emb_filename)
print(query_filename)
pickle_file.close()
return 0
print('success!')
pickle_file.close()
return 0
pickle_file.close()
return pred
if __name__ == "__main__":
collect_embedding()
# tsne_visualization()
output = retrieval(0, test=True)
```
#### File: retrieval/Data/dataset.py
```python
import os
import torch
import json
import h5py
from glob import glob
import numpy as np
import torch.utils.data as data
# import nyuName2ID as nyu
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
RET_DIR = os.path.dirname(BASE_DIR)
ROOT_DIR = os.path.dirname(RET_DIR)
ROOT2_DIR = os.path.dirname(ROOT_DIR)
HOME_DIR = os.path.dirname(ROOT2_DIR)
DATA_PATH = os.path.join(HOME_DIR, 'Dataset/ShapeNet')
shapenetpart_seg_num = [4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]
shapenetpart_seg_start_index = [0, 4, 6, 8, 12, 16, 19, 22, 24, 28, 30, 36, 38, 41, 44, 47]
CARED_CATEGORY = {'03001627': 'chair',
'04379243': 'table',
'02933112': 'cabinet',
'02747177': 'trash bin',
'02871439': 'bookshelf',
'03211117': 'display',
'04256520': 'sofa',
'02808440': 'bathtub',
"02818832": 'bed',
"03337140": 'file cabinet',
"02773838": 'bag',
"04004475": 'printer',
"04554684": 'washer',
"03636649": 'lamp',
"03761084": 'microwave',
"04330267": 'stove',
"02801938": 'basket',
"02828884": 'bench',
"03642806": 'laptop',
"03085013": 'keyboard'}
NAME2CLASS = {CARED_CATEGORY[key]:i for i, (key, item) in enumerate(CARED_CATEGORY.items())}
NAME2CLASS['other'] = 20
def translate_pointcloud(pointcloud):
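# Augmentation: random per-axis scaling in [2/3, 3/2] followed by a random per-axis shift in [-0.2, 0.2].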
xyz1 = np.random.uniform(low=2./3., high=3./2., size=[3])
xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])
translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')
return translated_pointcloud
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
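# Augmentation: add Gaussian noise with std sigma to every coordinate, clipped to [-clip, clip].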
N, C = pointcloud.shape
pointcloud += np.clip(sigma * np.random.randn(N, C), -1*clip, clip)
return pointcloud
def rotate_pointcloud(pointcloud):
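# Augmentation: rotate by a random angle about the y-axis (mixes the x and z coordinates).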
theta = np.pi*2 * np.random.rand()
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
pointcloud[:,[0,2]] = pointcloud[:,[0,2]].dot(rotation_matrix) # random rotation (x,z)
return pointcloud
class Dataset(data.Dataset):
def __init__(self, dataset_name='shapenetcorev2',
class_choice=None, num_points=2048, split='train',
load_name=True, load_file=True,
segmentation=False,
random_rotate=False, random_jitter=False, random_translate=False):
assert dataset_name.lower() in ['shapenetcorev2', 'shapenetpart',
'modelnet10', 'modelnet40', 'shapenetpartpart']
assert num_points <= 2048
if dataset_name in ['shapenetcorev2', 'shapenetpart', 'shapenetpartpart']:
assert split.lower() in ['train', 'test', 'val', 'trainval', 'all']
else:
assert split.lower() in ['train', 'test', 'all']
if dataset_name not in ['shapenetcorev2', 'shapenetpart'] and segmentation == True:
raise AssertionError
# self.root = os.path.join(root, dataset_name + '_' + '*hdf5_2048')
self.root = os.path.join(DATA_PATH, dataset_name + '_' + 'hdf5_2048') #*hdf5_2048')
self.dataset_name = dataset_name
self.class_choice = class_choice
self.num_points = num_points
self.split = split
self.load_name = load_name
self.load_file = load_file
self.segmentation = segmentation
self.random_rotate = random_rotate
self.random_jitter = random_jitter
self.random_translate = random_translate
self.path_h5py_all = []
self.path_name_all = []
self.path_file_all = []
if class_choice:
class_choice_list = []
for i, (key, value) in enumerate(CARED_CATEGORY.items()):
class_choice_list.append(value)
if self.split in ['train','trainval','all']:
self.get_path('train')
if self.dataset_name in ['shapenetcorev2', 'shapenetpart', 'shapenetpartpart']:
if self.split in ['val','trainval','all']:
self.get_path('val')
if self.split in ['test', 'all']:
self.get_path('test')
self.path_h5py_all.sort()
data, label, seg = self.load_h5py(self.path_h5py_all)
if self.load_name or self.class_choice != None:
self.path_name_all.sort()
self.name = self.load_json(self.path_name_all) # load label name
if self.load_file:
self.path_file_all.sort()
self.file = self.load_json(self.path_file_all) # load file name
self.data = np.concatenate(data, axis=0)
self.label = np.concatenate(label, axis=0)
if self.segmentation:
self.seg = np.concatenate(seg, axis=0)
if self.class_choice != None:
indices = np.where(np.isin(self.name, class_choice_list) == True)[0]
self.name = np.array(self.name)
self.name = self.name[indices].tolist()
self.data = self.data[indices]
self.label = self.label[indices]
# if self.segmentation:
# self.seg = self.seg[indices]
# self.seg_num_all = shapenetpart_seg_num[id_choice]
# self.seg_start_index = shapenetpart_seg_start_index[id_choice]
if self.load_file:
self.file = np.array(self.file)
self.file = self.file[indices].tolist()
elif self.segmentation:
self.seg_num_all = 50
self.seg_start_index = 0
def get_path(self, type):
path_h5py = os.path.join(self.root, '*%s*.h5'%type)
self.path_h5py_all += glob(path_h5py)
if self.load_name:
path_json = os.path.join(self.root, '%s*_id2name.json'%type)
self.path_name_all += glob(path_json)
if self.load_file:
path_json = os.path.join(self.root, '%s*_id2file.json'%type)
self.path_file_all += glob(path_json)
return
def load_h5py(self, path):
all_data = []
all_label = []
all_seg = []
for h5_name in path:
f = h5py.File(h5_name, 'r+')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
if self.segmentation:
seg = f['seg'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
if self.segmentation:
all_seg.append(seg)
return all_data, all_label, all_seg
def load_json(self, path):
all_data = []
for json_name in path:
j = open(json_name, 'r+')
data = json.load(j)
all_data += data
return all_data
def __getitem__(self, item):
point_set = self.data[item][:self.num_points]
label = self.label[item]
if self.load_name:
name = self.name[item] # get label name
name = NAME2CLASS[name]
if self.load_file:
file = self.file[item] # get file name
if self.random_rotate:
point_set = rotate_pointcloud(point_set)
if self.random_jitter:
point_set = jitter_pointcloud(point_set)
if self.random_translate:
point_set = translate_pointcloud(point_set)
# convert numpy array to pytorch Tensor
point_set = torch.from_numpy(point_set)
label = torch.from_numpy(np.array([label]).astype(np.int64))
label = label.squeeze(0)
if self.segmentation:
seg = self.seg[item]
seg = torch.from_numpy(seg)
return point_set, label, seg, name, file
else:
return point_set, label, name, file
def __len__(self):
return self.data.shape[0]
if __name__ == '__main__':
root = os.getcwd()
# choose dataset name from 'shapenetcorev2', 'shapenetpart', 'modelnet40' and 'modelnet10'
dataset_name = 'shapenetcorev2'
batch_size = 32
# choose split type from 'train', 'test', 'all', 'trainval' and 'val'
# only shapenetcorev2 and shapenetpart dataset support 'trainval' and 'val'
split = 'train' # 'test', 'all', 'val'
data = Dataset(dataset_name='shapenetcorev2', num_points=2048, split=split, class_choice=True)
print("datasize:", data.__len__())
n = len(data)
for i in range(n):
ps, lb, cat, fn = data[i]
if cat not in NAME2CLASS:
print(cat, fn)
break
# item = 0
# ps, lb, n, f = data[item]
# print(ps.size(), ps.type(), lb, lb.size(), lb.type(), n, f)
# item = 1
# ps, lb, n, f = data[item]
# print(ps.size(), ps.type(), lb, lb.size(), lb.type(), n, f)
# item = 2
# ps, lb, n, f = data[item]
# print(ps.size(), ps.type(), lb, lb.size(), lb.type(), n, f)
```
#### File: cs492h/models/RPCADnet.py
```python
import torch
import torch.nn as nn
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
from backbone_module import Pointnet2Backbone
from voting_module import VotingModule
from models.cad_proposal_module import ProposalModule
from dump_helper import dump_results
from loss_helper_rpcad import get_loss
class RPCADNet(nn.Module):
def __init__(self, num_class, num_heading_bin,
input_feature_dim=0, num_proposal=128, vote_factor=1, sampling='vote_fps'):
super().__init__()
self.num_class = num_class
self.num_heading_bin = num_heading_bin
self.input_feature_dim = input_feature_dim
self.num_proposal = num_proposal
self.vote_factor = vote_factor
self.sampling=sampling
# Backbone point feature learning
self.backbone_net = Pointnet2Backbone(input_feature_dim=self.input_feature_dim)
# Hough voting
self.vgen = VotingModule(self.vote_factor, 256)
# Vote aggregation and detection
self.pnet = ProposalModule(num_class, num_heading_bin, num_proposal, sampling)
def forward(self, inputs):
""" Forward pass of the network
Args:
inputs: dict
{point_clouds}
point_clouds: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
Point cloud to run predicts on
Each point in the point-cloud MUST
be formated as (x, y, z, features...)
Returns:
end_points: dict
"""
end_points = {}
point_cloud = inputs['point_clouds']
batch_size = inputs['point_clouds'].shape[0]
end_points = self.backbone_net(inputs['point_clouds'], end_points)
# --------- HOUGH VOTING ---------
xyz = end_points['fp2_xyz']
features = end_points['fp2_features']
end_points['seed_inds'] = end_points['fp2_inds']
end_points['seed_xyz'] = xyz
end_points['seed_features'] = features
xyz, features = self.vgen(xyz, features)
features_norm = torch.norm(features, p=2, dim=1)
features = features.div(features_norm.unsqueeze(1))
end_points['vote_xyz'] = xyz
end_points['vote_features'] = features
# --------- BOUNDING BOX & CAD ALIGNMENT PROPOSAL ---------
end_points = self.pnet(xyz, features, point_cloud, end_points)
return end_points
# if __name__=='__main__':
# sys.path.append(os.path.join(ROOT_DIR, 'sunrgbd'))
# from sunrgbd_detection_dataset import SunrgbdDetectionVotesDataset, DC
# from loss_helper import get_loss
# # Define model
# model = VoteNet(10,12,10,np.random.random((10,3))).cuda()
# try:
# # Define dataset
# TRAIN_DATASET = SunrgbdDetectionVotesDataset('train', num_points=20000, use_v1=True)
# # Model forward pass
# sample = TRAIN_DATASET[5]
# inputs = {'point_clouds': torch.from_numpy(sample['point_clouds']).unsqueeze(0).cuda()}
# except:
# print('Dataset has not been prepared. Use a random sample.')
# inputs = {'point_clouds': torch.rand((20000,3)).unsqueeze(0).cuda()}
# end_points = model(inputs)
# for key in end_points:
# print(key, end_points[key])
# try:
# # Compute loss
# for key in sample:
# end_points[key] = torch.from_numpy(sample[key]).unsqueeze(0).cuda()
# loss, end_points = get_loss(end_points, DC)
# print('loss', loss)
# end_points['point_clouds'] = inputs['point_clouds']
# end_points['pred_mask'] = np.ones((1,128))
# dump_results(end_points, 'tmp', DC)
# except:
# print('Dataset has not been prepared. Skip loss and dump.')
```
#### File: cs492h/scan2cad/s2c_config.py
```python
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
class Scan2CADDatasetConfig(object):
def __init__(self):
self.num_class = 21 # 9
self.num_heading_bin = 12
# Top 20 categories
self.ShapenetIDToName = {
'03001627': 'chair',
'04379243': 'table',
'02933112': 'cabinet',
'02747177': 'trash bin',
'02871439': 'bookshelf',
'03211117': 'display',
'04256520': 'sofa',
'02808440': 'bathtub',
"02818832": 'bed',
"03337140": 'file cabinet',
"02773838": 'bag',
"04004475": 'printer',
"04554684": 'washer',
"03636649": 'lamp',
"03761084": 'microwave',
"04330267": 'stove',
"02801938": 'basket',
"02828884": 'bench',
"03642806": 'laptop',
"03085013": 'keyboard'
}
self.ShapenetNameToClass = {
'chair': 0, 'table': 1, 'cabinet': 2, 'trash bin': 3, 'bookshelf': 4,'display': 5,'sofa': 6,
'bathtub': 7,'bed': 8, 'file cabinet': 9, 'bag': 10, 'printer': 11, 'washer': 12, 'lamp': 13,
'microwave': 14, 'stove': 15, 'basket': 16, 'bench': 17, 'laptop': 18, 'keyboard': 19, 'other': 20
}
self.ClassToName = {self.ShapenetNameToClass[t]:t for t in self.ShapenetNameToClass}
def ShapenetIDtoClass(self, id):
if id in self.ShapenetIDToName:
cad_category = self.ShapenetIDToName[id]
else:
cad_category = 'other'
cad_class = self.ShapenetNameToClass[cad_category]
return cad_class
def angle2class(self, angle):
''' Convert continuous angle to discrete class
[optional] also returns a small regression number from
the class center angle to the current angle.
angle is from 0-2pi (or -pi~pi), class center at 0, 1*(2pi/N), 2*(2pi/N) ... (N-1)*(2pi/N)
return is class of int32 of 0,1,...,N-1 and a number such that
class*(2pi/N) + number = angle
'''
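# Worked example (illustrative, assuming num_heading_bin=12 so each bin spans 2*pi/12 ~= 0.5236 rad):
#   angle=0.3 -> shifted_angle ~= 0.5618 -> class_id=1, residual_angle ~= -0.2236
#   check: 1*(2*pi/12) + (-0.2236) ~= 0.3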
num_class = self.num_heading_bin
angle = angle%(2*np.pi)
assert(angle>=0 and angle<=2*np.pi)
angle_per_class = 2*np.pi/float(num_class)
shifted_angle = (angle+angle_per_class/2)%(2*np.pi)
class_id = int(shifted_angle/angle_per_class)
residual_angle = shifted_angle - (class_id*angle_per_class+angle_per_class/2)
return class_id, residual_angle
def class2angle(self, pred_cls, residual, to_label_format=True):
''' Inverse function to angle2class '''
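# Round-trip check with the angle2class example above: class2angle(1, -0.2236) ~= 0.5236 - 0.2236 ~= 0.3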
num_class = self.num_heading_bin
angle_per_class = 2*np.pi/float(num_class)
angle_center = pred_cls * angle_per_class
angle = angle_center + residual
if to_label_format and angle>np.pi:
angle = angle - 2*np.pi
return angle
```
#### File: cs492h/scan2cad/s2c_eval.py
```python
import torch
import numpy as np
import quaternion
import os
import s2c_utils
import pickle
ShapenetNameToClass = {'chair': 0, 'table': 1, 'cabinet': 2, 'trash bin': 3, 'bookshelf': 4,'display': 5,'sofa': 6, 'bathtub': 7, 'other': 8}
ShapenetClassToName = {ShapenetNameToClass[k]: k for k in ShapenetNameToClass}
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
RET_DIR = os.path.join(ROOT_DIR, 'models/retrieval/dump')
from models.retrieval.autoencoder import PointNetAE
def from_6d_to_mat(v):
v1 = v[:,:,:3].unsqueeze(-1) # (B, K, 3, 1)
v2 = v[:,:,3:].unsqueeze(-1) # (B, K, 3, 1)
v3 = torch.cross(v1, v2, dim=2)
M = torch.stack([v1, v2, v3], dim=3).squeeze(-1)
return M
def from_mat_to_q(M):
R = M[:, :, 0:3, 0:3].detach().cpu().numpy().copy()
q = quaternion.from_rotation_matrix(R[:, :, 0:3, 0:3])
return q
def from_6d_to_q(v):
M = from_6d_to_mat(v)
q = from_mat_to_q(M)
return q
def softmax(x):
''' Numpy function for softmax'''
shape = x.shape
probs = np.exp(x - np.max(x, axis=len(shape)-1, keepdims=True))
probs /= np.sum(probs, axis=len(shape)-1, keepdims=True)
return probs
# helper function to calculate difference between two quaternions
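# The returned angular difference is 2*arccos(|<q, q00>|) converted to degrees; the absolute
# value accounts for the double cover of rotation space (q and -q encode the same rotation).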
def calc_rotation_diff(q, q00):
rotation_dot = np.dot(quaternion.as_float_array(q00), quaternion.as_float_array(q))
rotation_dot_abs = np.abs(rotation_dot)
try:
error_rotation_rad = 2 * np.arccos(rotation_dot_abs)
except:
return 0.0
error_rotation = np.rad2deg(error_rotation_rad)
return error_rotation
def get_top_8_category(cat_id):
if cat_id > 7:
cat = 8
else:
cat = cat_id
return cat
class Evaluation:
def __init__(self, nms_iou=0.25):
self.class_total = {}
self.pred_total = {}
self.acc_per_scan = {}
self.acc_proposal_per_class = {}
self.acc_translation_per_class = {}
self.acc_rotation_per_class = {}
self.acc_scale_per_class = {}
for i in ShapenetClassToName:
self.class_total[i] = 0
self.pred_total[i]= 0
self.acc_proposal_per_class[i] = 0
self.acc_translation_per_class[i] = 0
self.acc_rotation_per_class[i] = 0
self.acc_scale_per_class[i] = 0
self.validate_idx_per_scene = {}
self.nms_iou = nms_iou
self.extra_dict = {}
# CAD Retrieval
self.sem_clses = np.load(RET_DIR + '/all_category.npy')
print(np.unique(self.sem_clses))
self.filenames = np.load(RET_DIR + '/all_filenames.npy')
self.CADnet = PointNetAE(latent=512, in_dims=3, n_points=2048)
self.CADnet.load_state_dict(torch.load(RET_DIR + '/model512_50.pth'))
self.CADnet.cuda()
self.CADnet.eval()
def NMS(self, B, K, center, size, obj_prob, sem_cls):
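# Builds axis-aligned (min/max) corner boxes per proposal, then runs class-aware 3D NMS
# with IoU threshold self.nms_iou; returns a (B, K) keep mask of 0/1 values.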
pred_crnrs_3d_upright_cam = np.zeros((B, K, 8, 3))
for b in range(B):
for k in range(K):
crnrs_3d_upright_cam = s2c_utils.get_3d_box(size[b,k,:3], 0, center[b,k,:3])
pred_crnrs_3d_upright_cam[b,k] = crnrs_3d_upright_cam
# ---------- NMS input: pred_with_prob in (B,K,8) -----------
pred_mask = np.zeros((B, K))
nonempty_box_mask = np.ones((B, K))
for i in range(B):
boxes_3d_with_prob = np.zeros((K,8))
for j in range(K):
boxes_3d_with_prob[j,0] = np.min(pred_crnrs_3d_upright_cam[i,j,:,0])
boxes_3d_with_prob[j,1] = np.min(pred_crnrs_3d_upright_cam[i,j,:,1])
boxes_3d_with_prob[j,2] = np.min(pred_crnrs_3d_upright_cam[i,j,:,2])
boxes_3d_with_prob[j,3] = np.max(pred_crnrs_3d_upright_cam[i,j,:,0])
boxes_3d_with_prob[j,4] = np.max(pred_crnrs_3d_upright_cam[i,j,:,1])
boxes_3d_with_prob[j,5] = np.max(pred_crnrs_3d_upright_cam[i,j,:,2])
boxes_3d_with_prob[j,6] = obj_prob[i,j]
boxes_3d_with_prob[j,7] = sem_cls[i,j] # only suppress if the two boxes are of the same class!!
nonempty_box_inds = np.where(nonempty_box_mask[i,:]==1)[0]
pick = s2c_utils.nms_3d_faster_samecls(boxes_3d_with_prob[nonempty_box_mask[i,:]==1,:], self.nms_iou)
assert(len(pick)>0)
pred_mask[i, nonempty_box_inds[pick]] = 1
return pred_mask
# ---------- NMS output: pred_mask in (B,K) -----------
def step(self, end_points, batch_iter, pcd=None):
gt_center = end_points['center_label']
pred_center = end_points['center']
B = gt_center.shape[0] # Batch size
K = pred_center.shape[1] # Num proposals
K2 = gt_center.shape[1] # Max obj num
# Ground-truth
gt_center = end_points['center_label'].detach().cpu().numpy() # (B, K2, 3)
gt_quaternion = end_points['rotation_label'].detach().cpu().numpy() # (B, K2, 4)
gt_scale = end_points['scale_label'].detach().cpu().numpy() # (B, K2, 3)
gt_class = end_points['sem_cls_label'].reshape(B, K2, 1) # (B, K2, 1)
gt_cad_total = end_points['n_total'].reshape(B, 1) # (B, 1)
gt_sym_label = end_points['cad_sym_label'].reshape(B, K2, 1)
# prediction
pred_class = torch.argmax(end_points['sem_cls_scores'], -1).reshape(B, K, 1)
pred_center = end_points['center'].detach().cpu().numpy()
pred_size = end_points['box_size'].detach().cpu().numpy()
pred_rot_6d = end_points['rot_6d_scores'] # (B, K, 6)
pred_quaternion = from_6d_to_q(pred_rot_6d)
# pred_quaternion = end_points['rotation_scores'].detach().cpu().numpy() # (B, K, 4)
pred_scale = end_points['scale_scores'].detach().cpu().numpy()
pred_obj_logits = end_points['objectness_scores'].detach().cpu().numpy()
pred_obj = softmax(pred_obj_logits)[:,:,1] # (B,K)
pred_mask = self.NMS(B, K, pred_center, pred_scale, pred_obj, pred_class)
# Threshold
threshold_translation = 0.2 # <-- in meter
threshold_rotation = 20 # <-- in deg
threshold_scale = 20 # <-- in %
class_total = {}
pred_total = {}
acc_proposal_per_class = {}
acc_translation_per_class = {}
acc_rotation_per_class = {}
acc_scale_per_class = {}
for i in ShapenetClassToName:
class_total[i] = 0
pred_total[i] = 0
acc_proposal_per_class[i] = 0
acc_translation_per_class[i] = 0
acc_rotation_per_class[i] = 0
acc_scale_per_class[i] = 0
# Change category
for b in range(B):
for k in range(K):
pred_class[b,k,:] = get_top_8_category(pred_class[b,k,:])
for k2 in range(K2):
gt_class[b,k2,:] = get_top_8_category(gt_class[b,k2,:])
acc_per_scan = {}
for b in range(B):
acc_per_scan[batch_iter + b] = {}
acc_per_scan[batch_iter + b]['n_total'] = gt_cad_total[b]
acc_per_scan[batch_iter + b]["n_good"] = 0
acc_per_scan[batch_iter + b]["n_files"] = []
self.validate_idx_per_scene[b] = []
K2 = gt_cad_total[b] # GT_cad_in_one_scene
for k_gt in range(K2):
# ------ Update total ------
gt_sem_cls = gt_class[b,k_gt,:].item()
if gt_sem_cls not in class_total:
class_total[gt_sem_cls] = 1
else:
class_total[gt_sem_cls] += 1
# CAD Retrieval
if pcd is not None:
batch_pc = pcd.cpu().numpy()[:,:,0:3] # (B, N, 3)
for b in range(B): # loop in scenes
K2 = gt_cad_total[b] # GT_cad_in_one_scene
pred_gt = []
with open(RET_DIR + '/shapenet_kdtree.pickle', 'rb') as pickle_file:
database_kdtree = pickle.load(pickle_file)
for k in np.where(pred_mask[b, :] == 1)[0]: # loop in proposals
# Class prediction
if pcd is not None:
box3d = s2c_utils.get_3d_box(pred_size[b,k,:3], 0, pred_center[b,k,:3])
box3d = s2c_utils.flip_axis_to_depth(box3d)
pc_in_box, inds = s2c_utils.extract_pc_in_box3d(batch_pc[b,:,:3], box3d)
if len(pc_in_box) < 5:
continue
cad_inds = np.where(inds == True)
cad_pc = pcd[b, cad_inds, :3]
embedding = self.CADnet(cad_pc, r=True)
embedding = embedding.detach().cpu()
dist, pred_idx = database_kdtree.query(embedding, k=5)
# Output
pred_sem_clses = self.sem_clses[pred_idx].squeeze(0)
cad_files = self.filenames[pred_idx].squeeze(0)
else:
pred_sem_cls = pred_class[b, k, :][0]
for k_gt in range(K2):
# Pass predicted ground-truth
if k_gt in pred_gt: continue
# ------ Compare Prediction with GT ------
gt_sem_cls = gt_class[b,k_gt,:].item()
if pcd is not None:
gt_sem_cls += 1
pred_sem_cls = -1
# Only compare with same class
for i in range(5):
if pred_sem_clses[i] > 8:
pred_sem_clses[i] = 8
if pred_sem_clses[i] == gt_sem_cls:
pred_sem_cls = pred_sem_clses[i, 0:1]
cad_file = cad_files[i, 0:1]
is_same_class = pred_sem_cls == gt_sem_cls
if is_same_class:
pred_total[gt_sem_cls] += 1
# Predicted Transformation
c = pred_center[b,k,:]
# q0 = pred_quaternion[b,k]
# q = np.quaternion(q0[0], q0[1], q0[2], q0[3])
q = pred_quaternion[b,k]
# q = np.quaternion(q0[0], q0[1], q0[2], q0[3])
s = pred_scale[b,k,:]
# Ground-truth Transformation
c_gt = gt_center[b,k_gt,:]
q_gt0 = gt_quaternion[b,k_gt,:]
q_gt = np.quaternion(q_gt0[0], q_gt0[1], q_gt0[2], q_gt0[3])
s_gt = gt_scale[b,k_gt,:]
# ---- Compute Error ----
# CENTER
error_translation = np.linalg.norm(c-c_gt, ord=2)
if error_translation <= threshold_translation:
acc_translation_per_class[gt_sem_cls] += 1
# SCALE
error_scale = 100.0*np.abs(np.mean(s/s_gt) - 1)
if error_scale <= threshold_scale:
acc_scale_per_class[gt_sem_cls] += 1
# ROTATION
sym = gt_sym_label[b, k_gt].item()
if sym == 1:
m = 2
tmp = [calc_rotation_diff(q, q_gt*quaternion.from_rotation_vector([0, (i*2.0/m)*np.pi, 0])) for i in range(m)]
error_rotation = np.min(tmp)
elif sym == 2:
m = 4
tmp = [calc_rotation_diff(q, q_gt*quaternion.from_rotation_vector([0, (i*2.0/m)*np.pi, 0])) for i in range(m)]
error_rotation = np.min(tmp)
elif sym == 3:
m = 36
tmp = [calc_rotation_diff(q, q_gt*quaternion.from_rotation_vector([0, (i*2.0/m)*np.pi, 0])) for i in range(m)]
error_rotation = np.min(tmp)
else:
error_rotation = calc_rotation_diff(q, q_gt)
if error_rotation <= threshold_rotation:
acc_rotation_per_class[gt_sem_cls] += 1
# CHECK ANSWER
is_valid_transformation = error_rotation <= threshold_rotation and error_translation <= threshold_translation and error_scale <= threshold_scale
if is_valid_transformation:
acc_per_scan[batch_iter + b]["n_good"] += 1
if gt_sem_cls not in acc_proposal_per_class:
acc_proposal_per_class[gt_sem_cls] = 1
else:
acc_proposal_per_class[gt_sem_cls] += 1
if pcd is not None:
acc_per_scan[batch_iter + b]["n_files"].append(cad_file)
self.validate_idx_per_scene[b].append(k)
pred_gt.append(k_gt)
break
# print(acc_per_scan)
# Update
for b in range(B):
b_id_scan = batch_iter + b
self.acc_per_scan[b_id_scan] = {}
self.acc_per_scan[b_id_scan]["n_total"] = acc_per_scan[b_id_scan]["n_total"].item()
self.acc_per_scan[b_id_scan]["n_good"] = acc_per_scan[b_id_scan]["n_good"]
if pcd is not None:
self.acc_per_scan[b_id_scan]["n_files"] = acc_per_scan[b_id_scan]["n_files"]
for sem_cls, n_total in class_total.items():
self.class_total[sem_cls] += n_total
self.pred_total[sem_cls] += pred_total[sem_cls]
self.acc_proposal_per_class[sem_cls] += acc_proposal_per_class[sem_cls]
self.acc_translation_per_class[sem_cls] += acc_translation_per_class[sem_cls]
self.acc_rotation_per_class[sem_cls] += acc_rotation_per_class[sem_cls]
self.acc_scale_per_class[sem_cls] += acc_scale_per_class[sem_cls]
def summary(self):
eval_dict = {}
accuracy_per_class = {}
good_t_per_class = {}
good_r_per_class = {}
good_s_per_class = {}
# Per scan
total_accuracy = {"n_total": 0, "n_good": 0}
for id_scan in self.acc_per_scan:
total_accuracy["n_total"] += self.acc_per_scan[id_scan]["n_total"]
total_accuracy["n_good"] += self.acc_per_scan[id_scan]["n_good"]
instance_mean_accuracy = float(total_accuracy["n_good"])/total_accuracy["n_total"]
# Per class
for sem_cls, n_total in self.class_total.items():
cat_name = ShapenetClassToName[sem_cls]
prediction = self.acc_proposal_per_class[sem_cls]
accuracy_per_class[sem_cls] = float(prediction / (n_total + 1e-6))
pred_total = self.pred_total[sem_cls]
center = self.acc_translation_per_class[sem_cls]
rotation = self.acc_rotation_per_class[sem_cls]
scale = self.acc_scale_per_class[sem_cls]
good_t_per_class[sem_cls] = float(center / (pred_total + 1e-6))
good_r_per_class[sem_cls] = float(rotation / (pred_total + 1e-6))
good_s_per_class[sem_cls] = float(scale / (pred_total + 1e-6))
eval_dict[cat_name] = [accuracy_per_class[sem_cls], prediction, n_total, center, rotation, scale, pred_total]
# Mean scores
class_mean_accuracy = np.mean([v for k,v in accuracy_per_class.items()])
class_mean_translation = np.mean([v for k,v in good_t_per_class.items()])
class_mean_rotation = np.mean([v for k,v in good_r_per_class.items()])
class_mean_scale = np.mean([v for k,v in good_s_per_class.items()])
return instance_mean_accuracy, class_mean_accuracy, class_mean_translation, class_mean_rotation, class_mean_scale, eval_dict
```
|
{
"source": "jeonghyunkeem/MLCVNet",
"score": 2
}
|
#### File: MLCVNet/models/loss_helper.py
```python
import torch
import torch.nn as nn
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
from nn_distance import nn_distance, huber_loss
FAR_THRESHOLD = 0.6
NEAR_THRESHOLD = 0.3
GT_VOTE_FACTOR = 3 # number of GT votes per point
OBJECTNESS_CLS_WEIGHTS = [0.2,0.8] # put larger weights on positive objectness
def compute_vote_loss(end_points):
""" Compute vote loss: Match predicted votes to GT votes.
Args:
end_points: dict (read-only)
Returns:
vote_loss: scalar Tensor
Overall idea:
If the seed point belongs to an object (votes_label_mask == 1),
then we require it to vote for the object center.
Each seed point may vote for multiple translations v1,v2,v3
A seed point may also be in the boxes of multiple objects:
o1,o2,o3 with corresponding GT votes c1,c2,c3
Then the loss for this seed point is:
min(d(v_i,c_j)) for i=1,2,3 and j=1,2,3
"""
# Load ground truth votes and assign them to seed points
batch_size = end_points['seed_xyz'].shape[0]
num_seed = end_points['seed_xyz'].shape[1] # B,num_seed,3
vote_xyz = end_points['vote_xyz'] # B,num_seed*vote_factor,3
seed_inds = end_points['seed_inds'].long() # B,num_seed in [0,num_points-1]
# Get groundtruth votes for the seed points
# vote_label_mask: Use gather to select B,num_seed from B,num_point
# non-object point has no GT vote mask = 0, object point has mask = 1
# vote_label: Use gather to select B,num_seed,9 from B,num_point,9
# with inds in shape B,num_seed,9 and 9 = GT_VOTE_FACTOR * 3
seed_gt_votes_mask = torch.gather(end_points['vote_label_mask'], 1, seed_inds)
seed_inds_expand = seed_inds.view(batch_size,num_seed,1).repeat(1,1,3*GT_VOTE_FACTOR)
seed_gt_votes = torch.gather(end_points['vote_label'], 1, seed_inds_expand)
seed_gt_votes += end_points['seed_xyz'].repeat(1,1,3)
# Compute the min of min of distance
vote_xyz_reshape = vote_xyz.view(batch_size*num_seed, -1, 3) # from B,num_seed*vote_factor,3 to B*num_seed,vote_factor,3
seed_gt_votes_reshape = seed_gt_votes.view(batch_size*num_seed, GT_VOTE_FACTOR, 3) # from B,num_seed,3*GT_VOTE_FACTOR to B*num_seed,GT_VOTE_FACTOR,3
# A predicted vote to no where is not penalized as long as there is a good vote near the GT vote.
dist1, _, dist2, _ = nn_distance(vote_xyz_reshape, seed_gt_votes_reshape, l1=True)
votes_dist, _ = torch.min(dist2, dim=1) # (B*num_seed,vote_factor) to (B*num_seed,)
votes_dist = votes_dist.view(batch_size, num_seed)
vote_loss = torch.sum(votes_dist*seed_gt_votes_mask.float())/(torch.sum(seed_gt_votes_mask.float())+1e-6)
return vote_loss
def compute_objectness_loss(end_points):
""" Compute objectness loss for the proposals.
Args:
end_points: dict (read-only)
Returns:
objectness_loss: scalar Tensor
objectness_label: (batch_size, num_seed) Tensor with value 0 or 1
objectness_mask: (batch_size, num_seed) Tensor with value 0 or 1
object_assignment: (batch_size, num_seed) Tensor with long int
within [0,num_gt_object-1]
"""
# Associate proposal and GT objects by point-to-point distances
aggregated_vote_xyz = end_points['aggregated_vote_xyz']
gt_center = end_points['center_label'][:,:,0:3]
B = gt_center.shape[0]
K = aggregated_vote_xyz.shape[1]
K2 = gt_center.shape[1]
dist1, ind1, dist2, _ = nn_distance(aggregated_vote_xyz, gt_center) # dist1: BxK, dist2: BxK2
# Generate objectness label and mask
# objectness_label: 1 if pred object center is within NEAR_THRESHOLD of any GT object
# objectness_mask: 0 if pred object center is in gray zone (DONOTCARE), 1 otherwise
euclidean_dist1 = torch.sqrt(dist1+1e-6)
objectness_label = torch.zeros((B,K), dtype=torch.long).cuda()
objectness_mask = torch.ones((B,K)).cuda()
objectness_label[euclidean_dist1<NEAR_THRESHOLD] = 1
objectness_mask[euclidean_dist1<NEAR_THRESHOLD] = 1
objectness_mask[euclidean_dist1>FAR_THRESHOLD] = 1
# Compute objectness loss
objectness_scores = end_points['objectness_scores']
criterion = nn.CrossEntropyLoss(torch.Tensor(OBJECTNESS_CLS_WEIGHTS).cuda(), reduction='none')
objectness_loss = criterion(objectness_scores.transpose(2,1), objectness_label)
objectness_loss = torch.sum(objectness_loss * objectness_mask)/(torch.sum(objectness_mask)+1e-6)
# Set assignment
object_assignment = ind1 # (B,K) with values in 0,1,...,K2-1
return objectness_loss, objectness_label, objectness_mask, object_assignment
def compute_box_and_sem_cls_loss(end_points, config):
""" Compute 3D bounding box and semantic classification loss.
Args:
end_points: dict (read-only)
Returns:
center_loss
heading_cls_loss
heading_reg_loss
size_cls_loss
size_reg_loss
sem_cls_loss
"""
num_heading_bin = config.num_heading_bin
num_size_cluster = config.num_size_cluster
num_class = config.num_class
mean_size_arr = config.mean_size_arr
object_assignment = end_points['object_assignment']
batch_size = object_assignment.shape[0]
# Compute center loss
pred_center = end_points['center']
gt_center = end_points['center_label'][:,:,0:3]
dist1, ind1, dist2, _ = nn_distance(pred_center, gt_center) # dist1: BxK, dist2: BxK2
box_label_mask = end_points['box_label_mask']
objectness_label = end_points['objectness_label'].float()
centroid_reg_loss1 = \
torch.sum(dist1*objectness_label)/(torch.sum(objectness_label)+1e-6)
centroid_reg_loss2 = \
torch.sum(dist2*box_label_mask)/(torch.sum(box_label_mask)+1e-6)
center_loss = centroid_reg_loss1 + centroid_reg_loss2
# Compute heading loss
heading_class_label = torch.gather(end_points['heading_class_label'], 1, object_assignment) # select (B,K) from (B,K2)
criterion_heading_class = nn.CrossEntropyLoss(reduction='none')
heading_class_loss = criterion_heading_class(end_points['heading_scores'].transpose(2,1), heading_class_label) # (B,K)
heading_class_loss = torch.sum(heading_class_loss * objectness_label)/(torch.sum(objectness_label)+1e-6)
heading_residual_label = torch.gather(end_points['heading_residual_label'], 1, object_assignment) # select (B,K) from (B,K2)
heading_residual_normalized_label = heading_residual_label / (np.pi/num_heading_bin)
# Ref: https://discuss.pytorch.org/t/convert-int-into-one-hot-format/507/3
heading_label_one_hot = torch.cuda.FloatTensor(batch_size, heading_class_label.shape[1], num_heading_bin).zero_()
heading_label_one_hot.scatter_(2, heading_class_label.unsqueeze(-1), 1) # src==1 so it's *one-hot* (B,K,num_heading_bin)
heading_residual_normalized_loss = huber_loss(torch.sum(end_points['heading_residuals_normalized']*heading_label_one_hot, -1) - heading_residual_normalized_label, delta=1.0) # (B,K)
heading_residual_normalized_loss = torch.sum(heading_residual_normalized_loss*objectness_label)/(torch.sum(objectness_label)+1e-6)
# Compute size loss
size_class_label = torch.gather(end_points['size_class_label'], 1, object_assignment) # select (B,K) from (B,K2)
criterion_size_class = nn.CrossEntropyLoss(reduction='none')
size_class_loss = criterion_size_class(end_points['size_scores'].transpose(2,1), size_class_label) # (B,K)
size_class_loss = torch.sum(size_class_loss * objectness_label)/(torch.sum(objectness_label)+1e-6)
size_residual_label = torch.gather(end_points['size_residual_label'], 1, object_assignment.unsqueeze(-1).repeat(1,1,3)) # select (B,K,3) from (B,K2,3)
size_label_one_hot = torch.cuda.FloatTensor(batch_size, size_class_label.shape[1], num_size_cluster).zero_()
size_label_one_hot.scatter_(2, size_class_label.unsqueeze(-1), 1) # src==1 so it's *one-hot* (B,K,num_size_cluster)
size_label_one_hot_tiled = size_label_one_hot.unsqueeze(-1).repeat(1,1,1,3) # (B,K,num_size_cluster,3)
predicted_size_residual_normalized = torch.sum(end_points['size_residuals_normalized']*size_label_one_hot_tiled, 2) # (B,K,3)
mean_size_arr_expanded = torch.from_numpy(mean_size_arr.astype(np.float32)).cuda().unsqueeze(0).unsqueeze(0) # (1,1,num_size_cluster,3)
mean_size_label = torch.sum(size_label_one_hot_tiled * mean_size_arr_expanded, 2) # (B,K,3)
size_residual_label_normalized = size_residual_label / mean_size_label # (B,K,3)
size_residual_normalized_loss = torch.mean(huber_loss(predicted_size_residual_normalized - size_residual_label_normalized, delta=1.0), -1) # (B,K,3) -> (B,K)
size_residual_normalized_loss = torch.sum(size_residual_normalized_loss*objectness_label)/(torch.sum(objectness_label)+1e-6)
# 3.4 Semantic cls loss
sem_cls_label = torch.gather(end_points['sem_cls_label'], 1, object_assignment) # select (B,K) from (B,K2)
criterion_sem_cls = nn.CrossEntropyLoss(reduction='none')
sem_cls_loss = criterion_sem_cls(end_points['sem_cls_scores'].transpose(2,1), sem_cls_label) # (B,K)
sem_cls_loss = torch.sum(sem_cls_loss * objectness_label)/(torch.sum(objectness_label)+1e-6)
return center_loss, heading_class_loss, heading_residual_normalized_loss, size_class_loss, size_residual_normalized_loss, sem_cls_loss
def get_loss(end_points, config):
""" Loss functions
Args:
end_points: dict
{
seed_xyz, seed_inds, vote_xyz,
center,
heading_scores, heading_residuals_normalized,
size_scores, size_residuals_normalized,
sem_cls_scores, #seed_logits,#
center_label,
heading_class_label, heading_residual_label,
size_class_label, size_residual_label,
sem_cls_label,
box_label_mask,
vote_label, vote_label_mask
}
config: dataset config instance
Returns:
loss: pytorch scalar tensor
end_points: dict
"""
# Vote loss
vote_loss = compute_vote_loss(end_points)
end_points['vote_loss'] = vote_loss
# Obj loss
objectness_loss, objectness_label, objectness_mask, object_assignment = \
compute_objectness_loss(end_points)
end_points['objectness_loss'] = objectness_loss
end_points['objectness_label'] = objectness_label
end_points['objectness_mask'] = objectness_mask
end_points['object_assignment'] = object_assignment
total_num_proposal = objectness_label.shape[0]*objectness_label.shape[1]
end_points['pos_ratio'] = \
torch.sum(objectness_label.float().cuda())/float(total_num_proposal)
end_points['neg_ratio'] = \
torch.sum(objectness_mask.float())/float(total_num_proposal) - end_points['pos_ratio']
# Box loss and sem cls loss
center_loss, heading_cls_loss, heading_reg_loss, size_cls_loss, size_reg_loss, sem_cls_loss = \
compute_box_and_sem_cls_loss(end_points, config)
end_points['center_loss'] = center_loss
end_points['heading_cls_loss'] = heading_cls_loss
end_points['heading_reg_loss'] = heading_reg_loss
end_points['size_cls_loss'] = size_cls_loss
end_points['size_reg_loss'] = size_reg_loss
end_points['sem_cls_loss'] = sem_cls_loss
box_loss = center_loss + 0.1*heading_cls_loss + heading_reg_loss + 0.1*size_cls_loss + size_reg_loss
end_points['box_loss'] = box_loss
# Final loss function
loss = vote_loss + 0.5*objectness_loss + box_loss + 0.1*sem_cls_loss
loss *= 10
end_points['loss'] = loss
# --------------------------------------------
# Some other statistics
obj_pred_val = torch.argmax(end_points['objectness_scores'], 2) # B,K
obj_acc = torch.sum((obj_pred_val==objectness_label.long()).float()*objectness_mask)/(torch.sum(objectness_mask)+1e-6)
end_points['obj_acc'] = obj_acc
return loss, end_points
```
#### File: MLCVNet/scan2cad/s2c_utils.py
```python
import math
import quaternion
import numpy as np
from plyfile import PlyData, PlyElement
def write_ply(points, filename, text=True):
""" input: Nx3, write points to filename as PLY format. """
points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
def random_sampling(pc, num_sample, replace=None, return_choices=False):
""" Input is NxC, output is num_samplexC
"""
if replace is None: replace = (pc.shape[0]<num_sample)
choices = np.random.choice(pc.shape[0], num_sample, replace=replace)
if return_choices:
return pc[choices], choices
else:
return pc[choices]
def euler_from_quaternion(x, y, z, w):
"""
Convert a quaternion into euler angles (roll, pitch, yaw)
roll is rotation around x in radians (counterclockwise)
pitch is rotation around y in radians (counterclockwise)
yaw is rotation around z in radians (counterclockwise)
"""
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
roll_x = math.atan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
pitch_y = math.asin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
yaw_z = math.atan2(t3, t4)
return roll_x, pitch_y, yaw_z # in radians
def make_M_from_tqs(t, q, s):
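# Composes a 4x4 affine transform from translation t, quaternion q = (w, x, y, z) and
# per-axis scale s, applied as M = T @ R @ S (scale first, then rotate, then translate).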
q = np.quaternion(q[0], q[1], q[2], q[3])
T = np.eye(4)
T[0:3, 3] = t
R = np.eye(4)
R[0:3, 0:3] = quaternion.as_rotation_matrix(q)
S = np.eye(4)
S[0:3, 0:3] = np.diag(s)
M = T.dot(R).dot(S)
return M
def flip_axis_to_camera(pc):
''' Flip X-right,Y-forward,Z-up to X-right,Y-down,Z-forward
Input and output are both (N,3) array
'''
pc2 = np.copy(pc)
pc2[...,[0,1,2]] = pc2[...,[0,2,1]] # cam X,Y,Z = depth X,-Z,Y
pc2[...,1] *= -1
return pc2
def flip_axis_to_depth(pc):
pc2 = np.copy(pc)
pc2[...,[0,1,2]] = pc2[...,[0,2,1]] # depth X,Y,Z = cam X,Z,-Y
pc2[...,2] *= -1
return pc2
def in_hull(p, hull, check):
from scipy.spatial import Delaunay
if not isinstance(hull,Delaunay):
hull = Delaunay(hull)
return hull.find_simplex(p)>=0
def extract_pc_in_box3d(pc, box3d, check):
''' pc: (N,3), box3d: (8,3)
Order of indices:
(3)-----(2)
/ | / |
(4)-+---(1) |
| (7)---+-(6)
| / | /
(8)-----(5)
-: l (x)
|: h (y)
/: w (z)
'''
box3d_roi_inds = in_hull(pc[:,0:3], box3d, check)
return pc[box3d_roi_inds,:], box3d_roi_inds
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def rotate_aligned_boxes(centers, lengths, rot_mat):
# centers, lengths = input_boxes[:,0:3], input_boxes[:,3:6]
new_centers = np.dot(centers, np.transpose(rot_mat))
dx, dy = lengths[:,0]/2.0, lengths[:,1]/2.0
new_x = np.zeros((dx.shape[0], 4))
new_y = np.zeros((dx.shape[0], 4))
for i, crnr in enumerate([(-1,-1), (1, -1), (1, 1), (-1, 1)]):
crnrs = np.zeros((dx.shape[0], 3))
crnrs[:,0] = crnr[0]*dx
crnrs[:,1] = crnr[1]*dy
crnrs = np.dot(crnrs, np.transpose(rot_mat))
new_x[:,i] = crnrs[:,0]
new_y[:,i] = crnrs[:,1]
new_dx = 2.0*np.max(new_x, 1)
new_dy = 2.0*np.max(new_y, 1)
new_lengths = np.stack((new_dx, new_dy, lengths[:,2]), axis=1)
# return np.concatenate([new_centers, new_lengths], axis=1)
return new_centers, new_lengths
def filter_dominant_cls(points, sem_cls, not_cared_id):
'''
args:
points: (N, 3)
sem_cls: (N, 1)
returns:
dom_points: (M, 3)
'''
cls_book = {'max_total': 0, 'dominant': 0}
for cls_id in np.unique(sem_cls):
if cls_id in not_cared_id:
continue
cls_sum = np.sum(np.where(sem_cls == cls_id)[0])
if cls_sum > cls_book['max_total']:
cls_book['dominant'] = cls_id
cls_book['max_total'] = cls_sum
choices = np.where(sem_cls == cls_book['dominant'])[0]
return points[choices], choices
def get_3d_box_rotated(box_size, rot_mat, padding=None):
''' @<NAME>, KAIST
box_size is array(l,w,h), heading_angle is radius clockwise from pos x axis, center is xyz of box center
output (8,3) array for 3D box cornders
Similar to utils/compute_orientation_3d
Order of indices:
(3)-----(2)
/ | / |
(4)-+---(1) |
| (7)---+-(6)
| / | /
(8)-----(5)
-: l (x)
|: h (y)
/: w (z)
args:
box_size: float (3)
rot_mat: float (4, 4)
returns:
corners_3d: float (8, 3)
'''
R = rot_mat # (4, 4)
l,h,w = box_size * 2
x_corners = [l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2];
y_corners = [h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2];
z_corners = [w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2];
hcoord = np.ones(8, dtype=np.float32)
corners_3d = np.vstack([x_corners,y_corners,z_corners, hcoord])
if padding:
p = padding
corners_3d[0:1, :] += [p, p, -p, -p, p, p, -p, -p]
corners_3d[1:2, :] += [p, p, p, p, -p, -p, -p, -p]
corners_3d[2:3, :] += [p, -p, -p, p, p, -p, -p, p]
corners_3d = np.dot(R, corners_3d)
corners_3d = np.transpose(corners_3d)
corners_3d = corners_3d[:, :3]
assert(corners_3d.shape[0] * corners_3d.shape[1] == 24)
return corners_3d
def batch_get_3d_box_rotated(box_size, rot_mat):
''' box_size: [x1,x2,...,xn,3] => (B, 3)
heading_angle: [x1,x2,...,xn,4,4] => (B, 4, 4)
center: [x1,x2,...,xn,3] => (B, 3)
Return:
[x1,x3,...,xn,8,3] => (B, 8, 3)
'''
input_shape = box_size.shape[0] # B
R = rot_mat # (B, 4, 4)
l = np.expand_dims(box_size[...,0], -1) # [x1,...,xn,1]
w = np.expand_dims(box_size[...,1], -1)
h = np.expand_dims(box_size[...,2], -1)
corners_3d = np.zeros(tuple(list(input_shape)+[8,3]))
corners_3d[...,:,0] = np.concatenate((l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2, 1), -1)
corners_3d[...,:,1] = np.concatenate((h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2, 1), -1)
corners_3d[...,:,2] = np.concatenate((w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2, 1), -1)
tlist = [i for i in range(len(input_shape))]
tlist += [len(input_shape)+1, len(input_shape)]
corners_3d = np.matmul(corners_3d, np.transpose(R, tuple(tlist)))
assert(corners_3d.shape[1] * corners_3d.shape[2] == 24)
return corners_3d
def get_3d_box(box_size, heading_angle, center):
''' box_size is array(l,w,h), heading_angle is radius clockwise from pos x axis, center is xyz of box center
output (8,3) array for 3D box cornders
Similar to utils/compute_orientation_3d
'''
R = roty(heading_angle)
l,w,h = box_size
x_corners = [l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2];
y_corners = [h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2];
z_corners = [w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2];
corners_3d = np.dot(R, np.vstack([x_corners,y_corners,z_corners]))
corners_3d[0,:] = corners_3d[0,:] + center[0];
corners_3d[1,:] = corners_3d[1,:] + center[1];
corners_3d[2,:] = corners_3d[2,:] + center[2];
corners_3d = np.transpose(corners_3d)
return corners_3d
def nms_3d_faster_samecls(boxes, overlap_threshold, old_type=False):
x1 = boxes[:,0]
y1 = boxes[:,1]
z1 = boxes[:,2]
x2 = boxes[:,3]
y2 = boxes[:,4]
z2 = boxes[:,5]
score = boxes[:,6]
cls = boxes[:,7]
area = (x2-x1)*(y2-y1)*(z2-z1)
I = np.argsort(score)
pick = []
while (I.size!=0):
last = I.size
i = I[-1]
pick.append(i)
xx1 = np.maximum(x1[i], x1[I[:last-1]])
yy1 = np.maximum(y1[i], y1[I[:last-1]])
zz1 = np.maximum(z1[i], z1[I[:last-1]])
xx2 = np.minimum(x2[i], x2[I[:last-1]])
yy2 = np.minimum(y2[i], y2[I[:last-1]])
zz2 = np.minimum(z2[i], z2[I[:last-1]])
cls1 = cls[i]
cls2 = cls[I[:last-1]]
l = np.maximum(0, xx2-xx1)
w = np.maximum(0, yy2-yy1)
h = np.maximum(0, zz2-zz1)
if old_type:
o = (l*w*h)/area[I[:last-1]]
else:
inter = l*w*h
o = inter / (area[i] + area[I[:last-1]] - inter)
o = o * (cls1==cls2)
I = np.delete(I, np.concatenate(([last-1], np.where(o>overlap_threshold)[0])))
return pick
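# Hedged usage sketch (not part of the original file): two overlapping boxes of the same
# class plus one far-away box; at IoU threshold 0.25 the lower-scoring overlapping box
# should be suppressed.
if __name__ == '__main__':
    demo_boxes = np.array([
        [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.9, 3],   # x1, y1, z1, x2, y2, z2, score, cls
        [0.1, 0.1, 0.1, 1.1, 1.1, 1.1, 0.8, 3],
        [5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 0.7, 3],
    ])
    kept = nms_3d_faster_samecls(demo_boxes, overlap_threshold=0.25)
    print(kept)  # expected: the index of the 0.9-score box and of the far-away box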
```
|
{
"source": "jeonghyunkeem/PointGroup",
"score": 2
}
|
#### File: dataset/scan2cad/prepare_data_inst_gttxt.py
```python
import numpy as np
import glob
import torch
import os
from s2c_map import CARED_CLASS_MASK
semantic_label_idxs = [sem_idx+1 for sem_idx in CARED_CLASS_MASK]
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
META_DATA = os.path.join(BASE_DIR, 'meta_data')
DATA_ROOT = os.path.join(BASE_DIR, 'data3')
FILENAME = '_inst.pth'
def load_data_split(split):
all_scan_names = list(set(os.path.basename(scan)[:12] \
for scan in os.listdir(DATA_ROOT) if scan.startswith('scene')))
split_file = os.path.join(META_DATA, 'scan2cad_{}.txt'.format(split))
with open(split_file, 'r') as f:
split_names = []
split_names = f.read().splitlines()
error_meta = os.path.join(DATA_ROOT, 'error_scan.txt')
with open(error_meta, 'r') as f:
error_list = []
error_list = f.read().splitlines()
split_data_names = []
for i, scan_name in enumerate(split_names):
if scan_name not in all_scan_names or scan_name in error_list: continue
split_data_names.append(os.path.join(DATA_ROOT, scan_name + FILENAME))
return split_data_names
if __name__ == '__main__':
split = 'val'
data_names = load_data_split(split)
files = sorted(data_names)
rooms = [torch.load(i) for i in files if os.path.exists(i)]
SPLIT_DIR = os.path.join(BASE_DIR, '{}'.format(split)) + '_gt'
if not os.path.exists(SPLIT_DIR):
os.mkdir(SPLIT_DIR)
for i in range(len(rooms)):
xyz, rgb, label, instance_label = rooms[i] # label 0~19 (-1); instance_label 0~instance_num-1 (-1)
scene_name = files[i].split('/')[-1][:12]
print(f'{(i+1):4d}/{len(rooms):4d}: {scene_name:12s}')
instance_label_new = np.zeros(instance_label.shape, dtype=np.int32) # 0 for unannotated, xx00y: x for semantic_label, y for inst_id (1~instance_num)
instance_num = int(instance_label.max()) + 1
for inst_id in range(instance_num):
instance_mask = np.where(instance_label == inst_id)[0]
sem_id = int(label[instance_mask[0]])
if(sem_id == -1): sem_id = 0
semantic_label = semantic_label_idxs[sem_id]
instance_label_new[instance_mask] = semantic_label * 1000 + inst_id + 1
np.savetxt(os.path.join(SPLIT_DIR, scene_name + '.txt'), instance_label_new, fmt='%d')
```
#### File: dataset/scan2cad/s2c_collect_pgroup.py
```python
import os, sys
import json
import h5py
import numpy as np
import quaternion
import torch
from torch.utils.data import Dataset
BASE_DIR_1 = os.path.dirname(os.path.abspath(__file__)) # scan2cad
BASE_DIR = os.path.dirname(BASE_DIR_1) # dataset
ROOT_DIR = os.path.dirname(BASE_DIR) # PointGroup
DATA_DIR = os.path.dirname(ROOT_DIR) # /root/
DATA_DIR = os.path.join(DATA_DIR, 'Dataset') # /root/Dataset
DUMP_DIR = os.path.join(ROOT_DIR, 'data')
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
from s2c_map import CLASS_MAPPING, ID2NAME, CARED_CLASS_MASK
from s2c_config import Scan2CADDatasetConfig
import s2c_utils
sys.path.append(os.path.join(ROOT_DIR, 'models/retrieval/'))
DC = Scan2CADDatasetConfig()
MAX_NUM_POINT = 50000
MAX_NUM_OBJ = 64
INS_NUM_POINT = 2048
FEATURE_DIMENSION = 512
MAX_DATA_SIZE = 15000
CHUNK_SIZE = 1000
INF = 9999
NOT_CARED_ID = np.array([INF]) # wall, floor
# Thresholds
PADDING = 0.05
SCALE_THRASHOLD = 0.05
SEG_THRESHOLD = 1
REMAPPER = np.ones(35, dtype=np.int64) * (-1)
for i, x in enumerate(CARED_CLASS_MASK):
REMAPPER[x] = i
print(f'REMAPPER[{x:2d}] => {i:2d}')
SYM2CLASS = {"__SYM_NONE": 0, "__SYM_ROTATE_UP_2": 1, "__SYM_ROTATE_UP_4": 2, "__SYM_ROTATE_UP_INF": 3}
# functions ==============================================================================================
def from_q_to_6d(q):
q = np.quaternion(q[0], q[1], q[2], q[3])
mat = quaternion.as_rotation_matrix(q) # 3x3
rep6d = mat[:, 0:2].transpose().reshape(-1, 6) # 6
return rep6d
def nn_search(p, ps):
target = torch.from_numpy(ps.copy())
p = torch.from_numpy(p.copy())
p_diff = target - p
p_dist = torch.sum(p_diff**2, dim=-1)
dist, idx = torch.min(p_dist, dim=-1)
return dist.item(), idx.item()
def make_M_from_tqs(t, q, s):
q = np.quaternion(q[0], q[1], q[2], q[3])
T = np.eye(4)
T[0:3, 3] = t
R = np.eye(4)
R[0:3, 0:3] = quaternion.as_rotation_matrix(q)
S = np.eye(4)
S[0:3, 0:3] = np.diag(s)
M = T.dot(R).dot(S)
return M
def compose_mat4(t, q, s, center=None):
if not isinstance(q, np.quaternion):
q = np.quaternion(q[0], q[1], q[2], q[3])
T = np.eye(4)
T[0:3, 3] = t
R = np.eye(4)
R[0:3, 0:3] = quaternion.as_rotation_matrix(q)
S = np.eye(4)
S[0:3, 0:3] = np.diag(s)
C = np.eye(4)
if center is not None:
C[0:3, 3] = center
M = T.dot(R).dot(S).dot(C)
return M
def decompose_mat4(M):
R = M[0:3, 0:3].copy()
sx = np.linalg.norm(R[0:3, 0])
sy = np.linalg.norm(R[0:3, 1])
sz = np.linalg.norm(R[0:3, 2])
s = np.array([sx, sy, sz])
R[:,0] /= sx
R[:,1] /= sy
R[:,2] /= sz
q = quaternion.from_rotation_matrix(R[0:3, 0:3])
t = M[0:3, 3]
return t, q, s
# ========================================================================================================
LOG_N = 100
def print_log(log):
print('-'*LOG_N+'\n'+log+' \n'+'-'*LOG_N)
class Scan2CADCollect(Dataset):
def __init__(self, split_set='train', distr_check=False):
self.data_path = os.path.join(DATA_DIR, 'Scan2CAD/export')
self.out_path = os.path.join(BASE_DIR_1, 'data4')
if not os.path.exists(self.out_path):
os.mkdir(self.out_path)
print("Create export directory: {}".format(self.out_path))
all_scan_names = list(set([os.path.basename(x)[0:12] \
for x in os.listdir(self.data_path) if x.startswith('scene')]))
self.scan_names = []
if split_set in ['all', 'train', 'val', 'test']:
split_filenames = os.path.join(BASE_DIR_1, 'meta_data',
'scan2cad_{}.txt'.format(split_set))
with open(split_filenames, 'r') as f:
self.scan_list = f.read().splitlines()
# remove unavailiable scans
num_scans = len(self.scan_list)
self.scan_list = [sname for sname in self.scan_list \
if sname in all_scan_names]
print_log('Dataset for {}: kept {} scans out of {}'.format(split_set, len(self.scan_list), num_scans))
num_scans = len(self.scan_list)
else:
print('illegal split name')
return
filename_json = BASE_DIR_1 + "/full_annotations.json"
assert filename_json
self.dataset = {}
cat_summary = dict.fromkeys(DC.ClassToName, 0)
cat_ids = []
with open(filename_json, 'r') as f:
data = json.load(f)
d = {}
i = -1
for idx, r in enumerate(data):
i_scan = r["id_scan"]
if i_scan not in self.scan_list:
continue
self.scan_names.append(i_scan)
i += 1
d[i] = {}
d[i]['id_scan'] = i_scan
d[i]['trs'] = r["trs"]
n_model = r["n_aligned_models"]
d[i]['n_total'] = n_model
d[i]['models'] = {}
for j in range(n_model):
d[i]['models'][j] = {}
d[i]['models'][j]['trs'] = r["aligned_models"][j]['trs']
d[i]['models'][j]['center'] = r["aligned_models"][j]['center']
d[i]['models'][j]['bbox'] = r["aligned_models"][j]['bbox']
d[i]['models'][j]['sym'] = SYM2CLASS[r["aligned_models"][j]['sym']]
d[i]['models'][j]['fname'] = r["aligned_models"][j]['id_cad']
cat_id = r["aligned_models"][j]['catid_cad']
cat_ids.append(cat_id)
d[i]['models'][j]['cat_id'] = cat_id
cat_class = DC.ShapenetIDtoClass(cat_id)
d[i]['models'][j]['sem_cls'] = cat_class
# category summary
cat_summary[cat_class]+=1
self.dataset = d
self.cat_ids = np.unique(cat_ids)
if distr_check:
for k, v in sorted(cat_summary.items(), key=lambda item:item[1], reverse=True):
print(f'{k:2d}: {DC.ClassToName[k]:12s} => {v:4d}')
def __len__(self):
return len(self.dataset)
def size_check(self, scale, id_scan, sem_cls):
check = False
if scale[0] < SCALE_THRASHOLD:
scale[0] = SCALE_THRASHOLD
check = True
if scale[1] < SCALE_THRASHOLD:
scale[1] = SCALE_THRASHOLD
check = True
if scale[2] < SCALE_THRASHOLD:
scale[2] = SCALE_THRASHOLD
check = True
return scale
def collect(self, N, dump=False):
""" Return dictionary of {verts(x,y,z): cad filename}
Note:
NK = a total number of instances in dataset
V = a number of vertices
args:
N: int
a size of dataset
return:
dict: (NK, 1, V, 3)
a dictionary for verts-cad_file pairs
"""
# ======= GLOBAL LABEL VARIABLES =======
error_scan = {} # Text
# Anchor collection (for detection)
print_log(" LOADING SCENES")
collect_path = os.path.join(BASE_DIR, 'collect')
for index in range(N):
data = self.dataset[index]
id_scan = data['id_scan']
K = data['n_total']
assert(K <= MAX_NUM_OBJ)
# Point Cloud
mesh_vertices = np.load(os.path.join(self.data_path, id_scan) + '_vert.npy') # (N, 3)
semantic_labels = np.load(os.path.join(self.data_path, id_scan) + '_sem_label.npy') # (N, sem_cls(0, 1~35, 36~MAX, INF))
point_cloud = mesh_vertices[:,0:3]
colors = mesh_vertices[:,3:6] / 127.5 - 1
instance_vertices = np.ones((point_cloud.shape[0]), dtype=np.int64) * (-1)
semantic_vertices = np.ones((point_cloud.shape[0]), dtype=np.int64) * (-1)
# Sorting points cropping order to avoid overlapping
sort_by_scale = {}
for model in range(K):
obj_scale = np.array(data['models'][model]['trs']['scale'])
sort_by_scale[model] = np.sum(obj_scale)
model_scale_order = {model: scale for model, scale in sorted(sort_by_scale.items(), key=(lambda item:item[1]), reverse=True)}
K = len(model_scale_order.keys())
# Iterate on scale_order
checked = False
k = -1
for i, model in enumerate(model_scale_order.keys()):
k += 1
# semantics ()
sem_cls = data['models'][model]['sem_cls'] # (0~num_classes-1)
# Transform
obj_center = np.array(data['models'][model]['center'])
obj_translation = np.array(data['models'][model]['trs']['translation'])
obj_rotation = np.array(data['models'][model]['trs']['rotation'])
obj_scale = np.array(data['models'][model]['trs']['scale'])
obj_scale = self.size_check(obj_scale, id_scan, sem_cls)
Mobj = compose_mat4(obj_translation, obj_rotation, obj_scale, obj_center)
# Instance vertices
# - (1) Region Crop & Axis-aligned Bounding Box
vert_choices = np.array([])
ins_bbox = np.array(data['models'][model]['bbox'])
obj_corners = s2c_utils.get_3d_box_rotated(ins_bbox, Mobj, padding=PADDING)
ex_points, obj_vert_ind = s2c_utils.extract_pc_in_box3d(point_cloud, obj_corners)
nx = ex_points.shape[0]
# - (2) Instance Segments Crop
seg_points, vert_choices = \
s2c_utils.filter_dominant_cls(point_cloud, obj_vert_ind, semantic_labels, sem_cls+1, NOT_CARED_ID)
seg_nx = seg_points.shape[0]
# ======= Semantic/Instance vertices =======
if seg_nx < SEG_THRESHOLD:
k -= 1
checked = True
continue
sem_cls = REMAPPER[sem_cls]
# if sem_cls < 0: continue # ignore non-valid class object (only preserve CARED classes)
instance_vertices[vert_choices] = k # (0~K-1) NOTE:unannotated=-1
semantic_vertices[vert_choices] = sem_cls # (0~num_classes-1) NOTE:unannotated=-1
# error check
ins_list = np.unique(instance_vertices)
if (np.max(instance_vertices)+1) != (len(ins_list)-1):
print_log(f"[{index}/{N} Error] Please check this scene --> {id_scan}")
error_scan[id_scan] = 0
continue
# DUMP COLLECT RESULTS
if dump:
scene_path = os.path.join(collect_path, f'{id_scan}')
if not os.path.exists(scene_path):
os.mkdir(scene_path)
print("Created scene directory: {}".format(scene_path))
s2c_utils.write_scene_results(points=point_cloud, ins_points=instance_vertices, num_instances=K, bboxes=None, file_path=scene_path)
point_cloud = np.ascontiguousarray(point_cloud[:, :3] - point_cloud[:, :3].mean(0))
pcoord = point_cloud.astype(np.float64)
colors = colors.astype(np.float32)
sem_labels = semantic_vertices.astype(np.float64)
ins_labels = instance_vertices.astype(np.float64)
# ============ DUMP ============
# scene data
file_path = os.path.join(self.out_path, id_scan+'_inst.pth')
torch.save((pcoord, colors, sem_labels, ins_labels), file_path)
print(f"[{index}/{N} Saved] {id_scan} >>> {file_path}")
# error scan
with open(self.out_path+'/error_scan.txt', 'w') as f:
print_log("ERROR SCAN")
for i, sname in enumerate(error_scan.keys()):
print('{:2d}: {}'.format(i, sname))
f.write(sname)
f.write('\n')
if __name__ == "__main__":
Dataset = Scan2CADCollect(split_set='all', distr_check=True)
N = len(Dataset)
Dataset.collect(N, dump=False)
```
|
{
"source": "jeonghyunkeem/structurenet",
"score": 3
}
|
#### File: structurenet/code/data.py
```python
import sys
import os
import json
import torch
import numpy as np
from torch.utils import data
from pyquaternion import Quaternion
from sklearn.decomposition import PCA
from collections import namedtuple
from utils import one_hot
import trimesh
# store a part hierarchy of graphs for a shape
class Tree(object):
# global object category information
part_name2id = dict()
part_id2name = dict()
part_name2cids = dict()
part_non_leaf_sem_names = []
num_sem = None
root_sem = None
@ staticmethod
def load_category_info(cat):
with open(os.path.join('../stats/part_semantics/', cat+'.txt'), 'r') as fin:
for l in fin.readlines():
x, y, _ = l.rstrip().split()
x = int(x)
Tree.part_name2id[y] = x
Tree.part_id2name[x] = y
Tree.part_name2cids[y] = []
if '/' in y:
Tree.part_name2cids['/'.join(y.split('/')[:-1])].append(x)
Tree.num_sem = len(Tree.part_name2id) + 1
for k in Tree.part_name2cids:
Tree.part_name2cids[k] = np.array(Tree.part_name2cids[k], dtype=np.int32)
if len(Tree.part_name2cids[k]) > 0:
Tree.part_non_leaf_sem_names.append(k)
Tree.root_sem = Tree.part_id2name[1]
# store a part node in the tree
class Node(object):
def __init__(self, part_id=0, is_leaf=False, box=None, label=None, children=None, edges=None, full_label=None, geo=None, geo_feat=None):
self.is_leaf = is_leaf # store True if the part is a leaf node
self.part_id = part_id # part_id in result_after_merging.json of PartNet
self.box = box # box parameter for all nodes
self.geo = geo # 1 x 1000 x 3 point cloud
self.geo_feat = geo_feat # 1 x 100 geometry feature
self.label = label # node semantic label at the current level
self.full_label = full_label # node semantic label from root (separated by slash)
self.children = [] if children is None else children
# all of its children nodes; each entry is a Node instance
self.edges = [] if edges is None else edges
# all of its children relationships;
# each entry is a tuple <part_a, part_b, type, params, dist>
"""
Here defines the edges format:
part_a, part_b:
Values are the order in self.children (e.g. 0, 1, 2, 3, ...).
This is an directional edge for A->B.
If an edge is commutative, you may need to manually specify a B->A edge.
For example, an ADJ edge is only shown A->B,
there is no edge B->A in the json file.
type:
Four types considered in StructureNet: ADJ, ROT_SYM, TRANS_SYM, REF_SYM.
params:
There is no params field for ADJ edge;
For ROT_SYM edge, 0-2 pivot point, 3-5 axis unit direction, 6 radian rotation angle;
For TRANS_SYM edge, 0-2 translation vector;
For REF_SYM edge, 0-2 the middle point of the segment that connects the two box centers,
3-5 unit normal direction of the reflection plane.
dist:
For ADJ edge, it's the closest distance between two parts;
For SYM edge, it's the chamfer distance after matching part B to part A.
"""
def get_semantic_id(self):
return Tree.part_name2id[self.full_label]
def get_semantic_one_hot(self):
out = np.zeros((1, Tree.num_sem), dtype=np.float32)
out[0, Tree.part_name2id[self.full_label]] = 1
return torch.tensor(out, dtype=torch.float32).to(device=self.box.device)
def get_box_quat(self):
box = self.box.cpu().numpy().squeeze()
center = box[:3]
size = box[3:6]
xdir = box[6:9]
xdir /= np.linalg.norm(xdir)
ydir = box[9:]
ydir /= np.linalg.norm(ydir)
zdir = np.cross(xdir, ydir)
zdir /= np.linalg.norm(zdir)
rotmat = np.vstack([xdir, ydir, zdir]).T
q = Quaternion(matrix=rotmat)
quat = np.array([q.w, q.x, q.y, q.z], dtype=np.float32)
box_quat = np.hstack([center, size, quat]).astype(np.float32)
return torch.from_numpy(box_quat).view(1, -1).to(device=self.box.device)
def set_from_box_quat(self, box_quat):
box_quat = box_quat.cpu().numpy().squeeze()
center = box_quat[:3]
size = box_quat[3:6]
q = Quaternion(box_quat[6], box_quat[7], box_quat[8], box_quat[9])
rotmat = q.rotation_matrix
box = np.hstack([center, size, rotmat[:, 0].flatten(), rotmat[:, 1].flatten()]).astype(np.float32)
self.box = torch.from_numpy(box).view(1, -1)
def to(self, device):
if self.box is not None:
self.box = self.box.to(device)
for edge in self.edges:
if 'params' in edge:
edge['params'].to(device)
if self.geo is not None:
self.geo = self.geo.to(device)
for child_node in self.children:
child_node.to(device)
return self
def _to_str(self, level, pid, detailed=False):
            out_str = ' |'*(level-1) + ' ├'*(level > 0) + str(pid) + ' ' + self.label + (' [LEAF] ' if self.is_leaf else ' ') + '{' + str(self.part_id) + '}'
if detailed:
out_str += 'Box('+';'.join([str(item) for item in self.box.numpy()])+')\n'
else:
out_str += '\n'
if len(self.children) > 0:
for idx, child in enumerate(self.children):
out_str += child._to_str(level+1, idx)
if detailed and len(self.edges) > 0:
for edge in self.edges:
if 'params' in edge:
edge = edge.copy() # so the original parameters don't get changed
edge['params'] = edge['params'].cpu().numpy()
                    out_str += ' |'*(level) + ' ├' + 'Edge(' + str(edge) + ')\n'
return out_str
def __str__(self):
return self._to_str(0, 0)
def depth_first_traversal(self):
nodes = []
stack = [self]
while len(stack) > 0:
node = stack.pop()
nodes.append(node)
stack.extend(reversed(node.children))
return nodes
def child_adjacency(self, typed=False, max_children=None):
if max_children is None:
adj = torch.zeros(len(self.children), len(self.children))
else:
adj = torch.zeros(max_children, max_children)
if typed:
edge_types = ['ADJ', 'ROT_SYM', 'TRANS_SYM', 'REF_SYM']
for edge in self.edges:
if typed:
edge_type_index = edge_types.index(edge['type'])
adj[edge['part_a'], edge['part_b']] = edge_type_index
adj[edge['part_b'], edge['part_a']] = edge_type_index
else:
adj[edge['part_a'], edge['part_b']] = 1
adj[edge['part_b'], edge['part_a']] = 1
return adj
def geos(self, leafs_only=True):
nodes = list(self.depth_first_traversal())
out_geos = []; out_nodes = [];
for node in nodes:
if not leafs_only or node.is_leaf:
out_geos.append(node.geo)
out_nodes.append(node)
return out_geos, out_nodes
def boxes(self, per_node=False, leafs_only=False):
nodes = list(reversed(self.depth_first_traversal()))
node_boxesets = []
boxes_stack = []
for node in nodes:
node_boxes = []
for i in range(len(node.children)):
node_boxes = boxes_stack.pop() + node_boxes
if node.box is not None and (not leafs_only or node.is_leaf):
node_boxes.append(node.box)
if per_node:
node_boxesets.append(node_boxes)
boxes_stack.append(node_boxes)
assert len(boxes_stack) == 1
if per_node:
return node_boxesets, list(nodes)
else:
boxes = boxes_stack[0]
return boxes
def graph(self, leafs_only=False):
part_boxes = []
part_geos = []
edges = []
part_ids = []
part_sems = []
nodes = list(reversed(self.depth_first_traversal()))
box_index_offset = 0
for node in nodes:
child_count = 0
box_idx = {}
for i, child in enumerate(node.children):
if leafs_only and not child.is_leaf:
continue
part_boxes.append(child.box)
part_geos.append(child.geo)
part_ids.append(child.part_id)
part_sems.append(child.full_label)
box_idx[i] = child_count+box_index_offset
child_count += 1
for edge in node.edges:
if leafs_only and not (
node.children[edge['part_a']].is_leaf and
node.children[edge['part_b']].is_leaf):
continue
edges.append(edge.copy())
edges[-1]['part_a'] = box_idx[edges[-1]['part_a']]
edges[-1]['part_b'] = box_idx[edges[-1]['part_b']]
box_index_offset += child_count
return part_boxes, part_geos, edges, part_ids, part_sems
def edge_tensors(self, edge_types, device, type_onehot=True):
num_edges = len(self.edges)
# get directed edge indices in both directions as tensor
edge_indices = torch.tensor(
[[e['part_a'], e['part_b']] for e in self.edges] + [[e['part_b'], e['part_a']] for e in self.edges],
device=device, dtype=torch.long).view(1, num_edges*2, 2)
# get edge type as tensor
edge_type = torch.tensor([edge_types.index(edge['type']) for edge in self.edges], device=device, dtype=torch.long)
if type_onehot:
edge_type = one_hot(inp=edge_type, label_count=len(edge_types)).transpose(0, 1).view(1, num_edges, len(edge_types)).to(dtype=torch.float32)
else:
edge_type = edge_type.view(1, num_edges)
edge_type = torch.cat([edge_type, edge_type], dim=1) # add edges in other direction (symmetric adjacency)
return edge_type, edge_indices
def get_subtree_edge_count(self):
cnt = 0
if self.children is not None:
for cnode in self.children:
cnt += cnode.get_subtree_edge_count()
if self.edges is not None:
cnt += len(self.edges)
return cnt
# functions for class Tree
def __init__(self, root):
self.root = root
def to(self, device):
self.root = self.root.to(device)
return self
def __str__(self):
return str(self.root)
def depth_first_traversal(self):
return self.root.depth_first_traversal()
def boxes(self, per_node=False, leafs_only=False):
return self.root.boxes(per_node=per_node, leafs_only=leafs_only)
def graph(self, leafs_only=False):
return self.root.graph(leafs_only=leafs_only)
def free(self):
for node in self.depth_first_traversal():
del node.geo
del node.geo_feat
del node.box
del node
# extend torch.data.Dataset class for PartNet
class PartNetDataset(data.Dataset):
def __init__(self, root, object_list, data_features, load_geo=False):
self.root = root
self.data_features = data_features
self.load_geo = load_geo
if isinstance(object_list, str):
with open(os.path.join(self.root, object_list), 'r') as f:
self.object_names = [item.rstrip() for item in f.readlines()]
else:
self.object_names = object_list
def __getitem__(self, index):
if 'object' in self.data_features:
obj = self.load_object(os.path.join(self.root, self.object_names[index]+'.json'), \
load_geo=self.load_geo)
data_feats = ()
for feat in self.data_features:
if feat == 'object':
data_feats = data_feats + (obj,)
elif feat == 'name':
data_feats = data_feats + (self.object_names[index],)
else:
assert False, 'ERROR: unknow feat type %s!' % feat
return data_feats
def __len__(self):
return len(self.object_names)
def get_anno_id(self, anno_id):
obj = self.load_object(os.path.join(self.root, anno_id+'.json'), \
load_geo=self.load_geo)
return obj
@staticmethod
def load_object(fn, load_geo=False):
if load_geo:
geo_fn = fn.replace('_hier', '_geo').replace('json', 'npz')
geo_data = np.load(geo_fn)
with open(fn, 'r') as f:
root_json = json.load(f)
# create a virtual parent node of the root node and add it to the stack
StackElement = namedtuple('StackElement', ['node_json', 'parent', 'parent_child_idx'])
stack = [StackElement(node_json=root_json, parent=None, parent_child_idx=None)]
root = None
# traverse the tree, converting each node json to a Node instance
while len(stack) > 0:
stack_elm = stack.pop()
parent = stack_elm.parent
parent_child_idx = stack_elm.parent_child_idx
node_json = stack_elm.node_json
node = Tree.Node(
part_id=node_json['id'],
is_leaf=('children' not in node_json),
label=node_json['label'])
if 'geo' in node_json.keys():
node.geo = torch.tensor(np.array(node_json['geo']), dtype=torch.float32).view(1, -1, 3)
if load_geo:
node.geo = torch.tensor(geo_data['parts'][node_json['id']], dtype=torch.float32).view(1, -1, 3)
if 'box' in node_json:
node.box = torch.from_numpy(np.array(node_json['box'])).to(dtype=torch.float32)
if 'children' in node_json:
for ci, child in enumerate(node_json['children']):
stack.append(StackElement(node_json=node_json['children'][ci], parent=node, parent_child_idx=ci))
if 'edges' in node_json:
for edge in node_json['edges']:
if 'params' in edge:
edge['params'] = torch.from_numpy(np.array(edge['params'])).to(dtype=torch.float32)
node.edges.append(edge)
if parent is None:
root = node
root.full_label = root.label
else:
if len(parent.children) <= parent_child_idx:
parent.children.extend([None] * (parent_child_idx+1-len(parent.children)))
parent.children[parent_child_idx] = node
node.full_label = parent.full_label + '/' + node.label
obj = Tree(root=root)
return obj
@staticmethod
def save_object(obj, fn):
# create a virtual parent node of the root node and add it to the stack
StackElement = namedtuple('StackElement', ['node', 'parent_json', 'parent_child_idx'])
stack = [StackElement(node=obj.root, parent_json=None, parent_child_idx=None)]
obj_json = None
# traverse the tree, converting child nodes of each node to json
while len(stack) > 0:
stack_elm = stack.pop()
parent_json = stack_elm.parent_json
parent_child_idx = stack_elm.parent_child_idx
node = stack_elm.node
node_json = {
'id': node.part_id,
'label': f'{node.label if node.label is not None else ""}'}
if node.geo is not None:
node_json['geo'] = node.geo.cpu().numpy().reshape(-1).tolist()
if node.box is not None:
node_json['box'] = node.box.cpu().numpy().reshape(-1).tolist()
if len(node.children) > 0:
node_json['children'] = []
for child in node.children:
node_json['children'].append(None)
stack.append(StackElement(node=child, parent_json=node_json, parent_child_idx=len(node_json['children'])-1))
if len(node.edges) > 0:
node_json['edges'] = []
for edge in node.edges:
node_json['edges'].append(edge)
if 'params' in edge:
node_json['edges'][-1]['params'] = node_json['edges'][-1]['params'].cpu().numpy().reshape(-1).tolist()
if parent_json is None:
obj_json = node_json
else:
parent_json['children'][parent_child_idx] = node_json
with open(fn, 'w') as f:
json.dump(obj_json, f)
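# Hedged usage sketch (not in the original file; the root path and list file are
# hypothetical): load one hierarchy and print it via the Node._to_str tree dump.
#   dataset = PartNetDataset(root='partnet_hier_dir', object_list='train.txt',
#                            data_features=['object', 'name'], load_geo=False)
#   obj, name = dataset[0]
#   print(name)
#   print(obj)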
```
#### File: structurenet/partnet_edges/detect_rot_sym.py
```python
import numpy as np
from utils import get_pc_center, get_pca_axes, get_chamfer_distance
''' rotational symmetry
Output: ret: T/F, pt: pivot point xyz, nor: normal unit vector, angle: rotation radian angle
        Usage: part_B_point = Rot(part_A_point - pt, nor, angle) + pt
'''
def compute_params(pc1_center, pc2_center, pc1_v1, pc1_v2, pc2_v1, pc2_v2):
mid_v1 = (pc1_v1 + pc2_v1) / 2
nor_v1 = pc1_v1 - pc2_v1
nor_v1_len = np.linalg.norm(nor_v1)
if nor_v1_len < 1e-6:
return np.zeros((3), dtype=np.float32), np.zeros((3), dtype=np.float32), 0.0
nor_v1 /= nor_v1_len
mid_v2 = (pc1_v2 + pc2_v2) / 2
nor_v2 = pc1_v2 - pc2_v2
nor_v2_len = np.linalg.norm(nor_v2)
if nor_v2_len < 1e-6:
return np.zeros((3), dtype=np.float32), np.zeros((3), dtype=np.float32), 0.0
nor_v2 /= nor_v2_len
# compute the axis direction
nor = np.cross(nor_v1, nor_v2)
nor_len = np.linalg.norm(nor)
if nor_len < 1e-6:
return np.zeros((3), dtype=np.float32), np.zeros((3), dtype=np.float32), 0.0
nor /= nor_len
# compute one pivot point (any point along the axis is good)
A = np.array([[nor_v1[0], nor_v1[1], nor_v1[2]], \
[nor_v2[0], nor_v2[1], nor_v2[2]], \
[nor[0], nor[1], nor[2]]], dtype=np.float32)
b = np.array([np.dot(nor_v1, mid_v1), np.dot(nor_v2, mid_v2), np.dot(nor, mid_v1)])
pt = np.matmul(np.linalg.inv(A), b)
# compute rotation angle
tv1 = pc1_center - pt - nor * np.dot(pc1_center - pt, nor)
tv2 = pc2_center - pt - nor * np.dot(pc2_center - pt, nor)
c = np.dot(tv1, tv2) / (np.linalg.norm(tv1) * np.linalg.norm(tv2))
c = np.clip(c, -1.0, 1.0)
angle = np.arccos(c)
return pt, nor, angle
def compute_rot_sym(pc1, pc2):
pc1_center = get_pc_center(pc1)
pc2_center = get_pc_center(pc2)
pc1_axes = get_pca_axes(pc1)
pc2_axes = get_pca_axes(pc2)
min_error = 1e8; min_pt = None; min_nor = None; min_angle = None;
for axe_id in range(3):
pc1_axis1 = pc1_axes[axe_id]
pc1_axis2 = pc1_axes[(axe_id+1)%3]
pc2_axis1 = pc2_axes[axe_id]
pc2_axis2 = pc2_axes[(axe_id+1)%3]
pt, nor, angle = compute_params(pc1_center, pc2_center, pc1_center + pc1_axis1, pc1_center + pc1_axis2, pc2_center + pc2_axis1, pc2_center + pc2_axis2)
new_pc1 = atob_rot_sym(pc1, pt, nor, angle)
error = get_chamfer_distance(new_pc1, pc2)
if error < min_error:
min_error = error; min_pt = pt; min_nor = nor; min_angle = angle;
pt, nor, angle = compute_params(pc1_center, pc2_center, pc1_center + pc1_axis1, pc1_center + pc1_axis2, pc2_center - pc2_axis1, pc2_center + pc2_axis2)
new_pc1 = atob_rot_sym(pc1, pt, nor, angle)
error = get_chamfer_distance(new_pc1, pc2)
if error < min_error:
min_error = error; min_pt = pt; min_nor = nor; min_angle = angle;
pt, nor, angle = compute_params(pc1_center, pc2_center, pc1_center + pc1_axis1, pc1_center + pc1_axis2, pc2_center + pc2_axis1, pc2_center - pc2_axis2)
new_pc1 = atob_rot_sym(pc1, pt, nor, angle)
error = get_chamfer_distance(new_pc1, pc2)
if error < min_error:
min_error = error; min_pt = pt; min_nor = nor; min_angle = angle;
pt, nor, angle = compute_params(pc1_center, pc2_center, pc1_center + pc1_axis1, pc1_center + pc1_axis2, pc2_center - pc2_axis1, pc2_center - pc2_axis2)
new_pc1 = atob_rot_sym(pc1, pt, nor, angle)
error = get_chamfer_distance(new_pc1, pc2)
if error < min_error:
min_error = error; min_pt = pt; min_nor = nor; min_angle = angle;
return min_error, min_pt, min_nor, min_angle
def atob_rot_sym(pc, pt, nor, angle):
s = np.sin(angle); c = np.cos(angle); nx = nor[0]; ny = nor[1]; nz = nor[2];
rotmat = np.array([[c + (1 - c) * nx * nx, (1 - c) * nx * ny - s * nz, (1 - c) * nx * nz + s * ny], \
[(1 - c) * nx * ny + s * nz, c + (1 - c) * ny * ny, (1 - c) * ny * nz - s * nx], \
[(1 - c) * nx * nz - s * ny, (1 - c) * ny * nz + s * nx, c + (1 - c) * nz * nz]], dtype=np.float32)
return np.matmul(rotmat, (pc - pt).T).T + pt
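# Hedged usage sketch (illustrative, not in the original file): recover the rotational
# symmetry between two (N, 3) point clouds and map pc1 onto pc2 when the fit is good.
#   err, pt, nor, angle = compute_rot_sym(pc1, pc2)
#   if err < 0.05:  # threshold is a caller-side choice
#       pc1_mapped = atob_rot_sym(pc1, pt, nor, angle)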
```
#### File: structurenet/partnet_edges/detect_trans_sym.py
```python
import numpy as np
from utils import get_pc_center, get_chamfer_distance
''' translation symmetry
Output: ret: T/F, trans: translation vector xyz
Usage: part_B_point = part_A_point + (x, y, z)
'''
def compute_trans_sym(pc1, pc2):
pc1_center = get_pc_center(pc1)
pc2_center = get_pc_center(pc2)
trans = pc2_center - pc1_center
new_pc1 = atob_trans_sym(pc1, trans)
error = get_chamfer_distance(new_pc1, pc2)
return error, trans
def atob_trans_sym(pc, trans):
return pc + trans
```
|
{
"source": "jeonghyun-kim-jake/CoCosNet",
"score": 2
}
|
#### File: CoCosNet/data/bf_ade20k_dataset.py
```python
import os
import random
from data.pix2pix_dataset import Pix2pixDataset
from data.image_folder import make_dataset
from data.base_dataset import BaseDataset, get_params, get_transform
import torch
import torchvision.transforms as transforms
from PIL import Image
from os.path import isfile, join, abspath
class BFADE20KDataset(Pix2pixDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
parser.set_defaults(preprocess_mode='resize_and_crop')
if is_train:
parser.set_defaults(load_size=286)
else:
parser.set_defaults(load_size=256)
parser.set_defaults(crop_size=256)
parser.set_defaults(display_winsize=256)
parser.set_defaults(label_nc=150)
parser.set_defaults(contain_dontcare_label=True)
parser.set_defaults(cache_filelist_read=False)
parser.set_defaults(cache_filelist_write=False)
return parser
def get_paths(self, opt):
instance_dir = opt.instance_dir
instance_paths = make_dataset(instance_dir, recursive=False, read_cache=True)
image_dir = opt.image_dir
image_paths = make_dataset(image_dir, recursive=False, read_cache=True)
image_files = []
image_files_checks = []
instance_files = []
for p in image_paths:
file_name = os.path.basename(p)
instance_check_path = join(instance_dir, file_name)
if ( p.endswith('.jpg') or p.endswith('.png') )and isfile(instance_check_path):
image_files.append(p)
image_files_checks.append(file_name)
for p in instance_paths:
file_name = os.path.basename(p)
if ( p.endswith('.png') and not p.endswith('.png.png') ) and file_name in image_files_checks:
instance_files.append(p)
assert len(instance_files) == len(image_files), "The #images in {} and {} do not match.".format(len(instance_files),len(image_files))
return instance_files, image_files
def get_ref(self, opt):
extra = '_test' if opt.phase == 'test' else ''
with open('./data/ade20k_ref{}.txt'.format(extra)) as fd:
lines = fd.readlines()
ref_dict = {}
for i in range(len(lines)):
items = lines[i].strip().split(',')
key = items[0]
if opt.phase == 'test':
val = items[1:]
else:
val = [items[1], items[-1]]
ref_dict[key] = val
train_test_folder = ('training', 'validation')
return ref_dict, train_test_folder
def __getitem__(self, index):
# Label Image
label_path = self.label_paths[index]
label_tensor, params1 = self.get_label_tensor(label_path)
file_name = os.path.basename(label_path)
# input image (real images)
image_path = self.image_paths[index]
if not self.opt.no_pairing_check:
assert self.paths_match(label_path, image_path), \
"The label_path %s and image_path %s don't match." % \
(label_path, image_path)
# input image
image = Image.open(image_path)
image = image.convert('RGB')
transform_image = get_transform(self.opt, params1)
image_tensor = transform_image(image)
ref_tensor = 0
label_ref_tensor = 0
# input's segment
segment_path = join(self.opt.instance_dir, file_name)
path_ref = image_path
# input_image --> ref
image_ref = Image.open(path_ref).convert('RGB')
# ref label -> expansion
path_ref_label = join(self.opt.segment_dir, file_name)
label_ref_tensor, params = self.get_label_tensor(path_ref_label)
transform_image = get_transform(self.opt, params)
ref_tensor = transform_image(image_ref)
self_ref_flag = torch.zeros_like(ref_tensor)
input_dict = {'label': label_tensor,
'image': image_tensor,
'path': image_path,
'self_ref': self_ref_flag,
'ref': ref_tensor,
'label_ref': label_ref_tensor
}
print("\n\n====")
print("image_path", image_path)
print("label_path", label_path)
print("segment_path", segment_path)
print("====\n\n")
# Give subclasses a chance to modify the final output
self.postprocess(input_dict)
return input_dict
```
|
{
"source": "jeongin8885/scheduler-cli",
"score": 3
}
|
#### File: dist-packages/scheduler/valid_date.py
```python
def valid_date(year, month, day):
month31 = [1, 3, 5, 7, 8, 10, 12]
month30 = [4, 6, 9, 11]
return (month in month31 and day > 0 and day < 32) or (month in month30 and day > 0 and day < 31) or (month == 2 and day > 0 and day < 29) or (month == 2 and day == 29 and (year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)))
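# Illustrative checks (not part of the original module):
#   valid_date(2020, 2, 29) -> True   (leap year)
#   valid_date(2019, 2, 29) -> False
#   valid_date(2018, 4, 31) -> False  (April has only 30 days)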
```
#### File: scheduler-cli/scheduler/main.py
```python
from termcolor import colored
from colorama import init
from .input_command import input_command
import sys
from pathlib import Path
def main_scheduler():
init()
import sqlite3
home_dir = str(Path.home())
    # create or connect to the SQLite DB file
conn = sqlite3.connect(home_dir + "/scheduler.db")
cur = conn.cursor()
    # SQL statement that creates the todo table
    create_table = 'create table if not exists todo(year integer not null, category text not null, month integer not null, day integer not null, what text not null, done integer)'
    # create the table on first run if it does not exist yet
cur.execute(create_table)
    # help message strings
    # full help
title_string = "%-35s|%-36s|%-40s\n"%("function", "command", "example")
add_help_string = ("-" * 35 + '+') + ("-" * 36 + '+') + ("-" * 30 + '+') + '\n'
add_help_string += "%-35s|%-45s|%-40s\n"%("add schedule with category", colored("add {due} {content} in {category}", 'yellow'), colored("add 2018/3/2 go school in school", 'cyan'))
add_help_string += "%-35s|%-45s|%-40s\n"%("add schedule without category", colored("add {due} {content}", 'yellow'), colored("add 2018/3/2 go school", 'cyan'))
delete_help_string = ("-" * 35 + '+') + ("-" * 36 + '+') + ("-" * 30 + '+') + '\n'
delete_help_string += "%-35s|%-45s|%-40s\n"%("delete all schedule", colored("delete all", 'yellow'), colored("delete all", 'cyan'))
delete_help_string += "%-35s|%-45s|%-40s\n"%("delete schedule with category", colored("delete in {category}", 'yellow'), colored("delete in hoesung", 'cyan'))
delete_help_string += "%-35s|%-45s|%-40s\n"%("delete schedule with content", colored("delete {content}", 'yellow'), colored("delete hit hoesung", 'cyan'))
update_help_string = ("-" * 35 + '+') + ("-" * 36 + '+') + ("-" * 30 + '+') + '\n'
update_help_string += "%-35s|%-45s|%-40s\n"%("update state with content", colored("update {content} {done/undone}", 'yellow'), colored("update hit hoesung done", 'cyan'))
update_help_string += "%-35s|%-45s|%-40s\n"%("update due date with content", colored("update {content} at {due}", 'yellow'), colored("update hit hoesung at 2018/7/1", 'cyan'))
update_help_string += ("-" * 35 + '+') + ("-" * 36 + '+') + ("-" * 30 + '+') + '\n'
update_help_string += "%-35s|%-45s|%-40s\n"%("update state with category", colored("update in {category} {done/undone}", 'yellow'), colored("update in school done", 'cyan'))
update_help_string += "%-35s|%-45s|%-40s\n"%("update due date with category", colored("update in {category} at {due}", 'yellow'), colored("update in school at 2018/7/1", 'cyan'))
show_help_string = ("-" * 35 + '+') + ("-" * 36 + '+') + ("-" * 30 + '+') + '\n'
show_help_string += "%-35s|%-45s|%-40s\n"%("get all schedule", colored("show all", 'yellow'), colored("show all", 'cyan'))
show_help_string += "%-35s|%-45s|%-40s\n"%("get schedule with content", colored("show {content}", 'yellow'), colored("show hit hoesung", 'cyan'))
show_help_string += "%-35s|%-45s|%-40s\n"%("get all schedule in category", colored("show in {category}", 'yellow'), colored("show in school", 'cyan'))
show_help_string += "%-35s|%-45s|%-40s\n"%("get all calender at specific month", colored("show cal {year/month}", 'yellow'), colored("show cal 2018/03", 'cyan'))
full_help_string = title_string + add_help_string + delete_help_string + update_help_string + show_help_string
add_help_string = title_string + add_help_string
show_help_string = title_string + show_help_string
update_help_string = title_string + update_help_string
delete_help_string = title_string + delete_help_string
    # keep reading commands until 'exit' is entered
if len(sys.argv) == 1:
        # print the initial help message once
print(full_help_string)
while True:
            # read the command as a single sentence
command = input('Input a command: ')
if input_command(command) == 1:
break
else:
input_command(sys.argv[1:])
cur.close()
conn.close()
if __name__ == "__main__":
main_scheduler()
```
#### File: scheduler-cli/scheduler/make_string.py
```python
from termcolor import colored
from colorama import init
from datetime import datetime
# ๊ฒ์ ํ ๊ฒฐ๊ณผ ๋ฌธ์์ด ๋ง๋๋ ํจ์
def make_string(schedule_list):
init()
string = 'due\t\t |content\t\t |category\t |done\t |D-day\n'
string += '-' * 88 + '\n'
done = ['undone', 'done', 'overdue']
color = ['yellow', 'green', 'red']
if not schedule_list:
string += 'No result found. To show all schedule, enter \"show all\"'
for x in schedule_list:
overdue = datetime(x[0], x[2], x[3]) - datetime.now()
if overdue.days < 0:
if x[5] == 0:
a = 2
else:
a = x[5]
dday = 'D+' + str((-1) * overdue.days)
else:
a = x[5]
dday = 'D' + str((-1) * overdue.days)
if x[3] > 9:
day = str(x[3])
else:
day = '0' + str(x[3])
if x[2] > 9:
month = str(x[2])
else:
month = '0' + str(x[2])
date = datetime(x[0], x[2], x[3]).weekday()
weekdays = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
string += '%-16s |%-22s |%-14s |%-20s|%-10s\n'%(str(x[0]) +'/' + month + '/' + day + '(' + weekdays[date] + ')' , x[4], x[1], colored(done[a], color[a]), dday)
return string.strip()
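# Illustrative input (not in the original module): each row follows the todo table
# column order (year, category, month, day, what, done), e.g.
#   print(make_string([(2018, 'school', 3, 2, 'go school', 0)]))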
```
#### File: scheduler-cli/slackbot/input_command.py
```python
import sqlite3, os
from pathlib import Path
from .valid_date import valid_date
from .make_string import make_string
from slacker import Slacker
import websocket
home_dir = str(Path.home())
def handler(command, user):
conn = sqlite3.connect(home_dir + '/server.db')
cur = conn.cursor()
create_db = 'create table if not exists server(user text not null, year integer not null, category text not null, month integer not null, day integer not null, what text not null, done integer)'
cur.execute(create_db)
conn.close()
command_list = command.split(' ')
if command_list[0] == '!scheduler':
string = input_command(command_list[1:], user)
return string
def input_command(command, user):
if command[0] == 'add':
if '/' in command[1] and len(command[1].split('/')) == 3:
year, month, day = command[1].split('/')
if int(year) > 9999:
print("Please input year under 10000")
return 1
if not valid_date(int(year), int(month), int(day)):
print("Date is not valid")
return 1
        # the 'in' keyword selects which category the entry goes into
if 'in' in command:
category_split = command.index('in')
category_list = command[category_split + 1:]
content_list = command[2:category_split]
        # without the 'in' keyword, treat it as 'No category'
else:
category = 'No category'
content_list = command[2:]
        # join the content / category tokens back into space-separated strings
content = ''
for x in content_list:
content += x + ' '
if len(content) > 22:
print("plz enter content less than 20 letters")
return 1
category = ''
if 'in' in command:
for x in category_list:
category += x + ' '
else:
category = "No category"
category = category.strip()
content = content.strip()
add_cal(category, int(year), int(month), int(day), content, 0, user)
string = 'add ok'
elif command[0] == 'show':
if len(command) > 2 and command[1] == 'in':
cat = ''
for x in command[2:]:
cat += x + ' '
result = return_cal_cat(user, cat.strip())
string = make_string(result)
elif len(command) == 2 and command[1] == 'all':
result = return_cal(user)
string = make_string(result)
elif command[0] == 'delete':
if len(command) > 2 and command[1] == 'in':
cat = ''
for x in command[2:]:
cat += x + ' '
delete_cal_cat(user, cat.strip())
result = return_cal(user)
string = make_string(result)
        elif len(command) == 2 and command[1] == 'all':
            delete_cal_all(user)
            result = return_cal(user)
            string = make_string(result)
elif len(command) > 1:
content = ''
for x in command[1:]:
content += x + ' '
delete_cal(user, content.strip())
result = return_cal(user)
string = make_string(result)
else:
string = '!scheduler add {due} {content} in {category}\n!scheduler show all\n!scheduler show in {category}'
return string
def return_cal_cat(user, category):
conn = sqlite3.connect(home_dir + '/server.db')
cur = conn.cursor()
select_data = 'select * from server where user=? and category=?'
cur.execute(select_data, (user,category,))
result = cur.fetchall()
conn.close()
return result
def return_cal(user):
conn = sqlite3.connect(home_dir + '/server.db')
cur = conn.cursor()
select_data = 'select * from server where user=?'
cur.execute(select_data, (user,))
result = cur.fetchall()
conn.close()
return result
def delete_cal_cat(user, category):
conn = sqlite3.connect(home_dir + '/server.db')
cur = conn.cursor()
select_data = 'delete from server where user=? and category=?'
cur.execute(select_data, (user,category,))
conn.commit()
conn.close()
def delete_cal(user, content):
    conn = sqlite3.connect(home_dir + '/server.db')
    cur = conn.cursor()
    delete_data = 'delete from server where user=? and what=?'
    cur.execute(delete_data, (user, content,))
    conn.commit()
    conn.close()
def delete_cal_all(user):
    conn = sqlite3.connect(home_dir + '/server.db')
    cur = conn.cursor()
    delete_data = 'delete from server where user=?'
    cur.execute(delete_data, (user,))
    conn.commit()
    conn.close()
def add_cal(category, year, month, day, content, finished, user):
conn = sqlite3.connect(home_dir + '/server.db')
cur = conn.cursor()
insert_db = 'insert into server (year, user, category, month, day, what, done) values (?,?,?,?,?,?,?)'
cur.execute(insert_db,(year, user, category, month, day, content, finished,))
conn.commit()
conn.close()
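# Hedged usage sketch (not in the original file; the user id is hypothetical): a Slack
# message is routed through handler(), which strips the '!scheduler' prefix first.
#   reply = handler('!scheduler add 2018/3/2 go school in school', 'U12345')
#   reply = handler('!scheduler show all', 'U12345')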
```
|
{
"source": "jeonginlee/groove_scheduler",
"score": 2
}
|
#### File: groove_scheduler/controllers/album.py
```python
from flask import *
album = Blueprint('album', __name__, template_folder='templates')
@album.route('/album/edit')
def album_edit_route():
options = {
"edit": True
}
return render_template("album.html", **options)
@album.route('/album')
def album_route():
options = {
"edit": False
}
return render_template("album.html", **options)
```
#### File: groove_scheduler/controllers/albums.py
```python
from flask import *
albums = Blueprint('albums', __name__, template_folder='templates')
@albums.route('/albums/edit')
def albums_edit_route():
options = {
"edit": True
}
return render_template("albums.html", **options)
@albums.route('/albums')
def albums_route():
options = {
"edit": False
}
return render_template("albums.html", **options)
```
|
{
"source": "Jeongisu94/KDT_HWrepo",
"score": 3
}
|
#### File: KDT_HWrepo/KDT_HWrepo_Week3/kdt_hw_week3.py
```python
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import smtplib
import re #regular expression
SMTP_SERVER = 'smtp.gmail.com'
SMTP_PORT = 465
SMTP_USER = ''
SMTP_PASSWORD = ''
def send_mail(name, addr, subject, contents, attachment=None):
if not re.match('(^[a-zA-Z0-9_.-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)', addr):
print('Wrong email')
return
msg = MIMEMultipart('alternative')
if attachment:
msg = MIMEMultipart('mixed')
msg['From'] = SMTP_USER
msg['To'] = addr
msg['Subject'] = name + '๋, ' + subject
text = MIMEText(contents, _charset='utf-8')
msg.attach(text)
if attachment:
from email.mime.base import MIMEBase
from email import encoders
file_data = MIMEBase('application', 'octect-stream')
file_data.set_payload(open(attachment, 'rb').read())
encoders.encode_base64(file_data)
import os
filename = os.path.basename(attachment)
file_data.add_header('Content-Disposition', 'attachment; filename="' + filename + '"')
msg.attach(file_data)
smtp = smtplib.SMTP_SSL(SMTP_SERVER, SMTP_PORT)
smtp.login(SMTP_USER, SMTP_PASSWORD)
smtp.sendmail(SMTP_USER, addr, msg.as_string())
smtp.close()
#### Fill in the recipients' information in the email_list.xlsx file in the project folder.
#### Import a module that can read data from an Excel file.
#### Read email_list.xlsx and send the collected news Excel file to those recipients with the send_mail function.
from openpyxl import load_workbook
wb = load_workbook('email_list.xlsx', read_only=True)
data = wb.active
receivers = []
# read each name and email address from email_list.xlsx and store them in a list
for names, emails in zip (data.iter_rows(min_row=3,min_col=2, max_col=2),data.iter_rows(min_row=3,min_col=3,max_col=3)):
for name, email in zip(names, emails):
tempReceive = [name.value,email.value]
receivers.append(tempReceive)
#from NaverNewsCrawler import NaverNewsCrawler
#crawler = NaverNewsCrawler("ํจ์คํธ ์บ ํผ์ค") # crawl news data from Naver News
#crawler.get_news("data.xlsx")
crawledWorkbook = load_workbook('data.xlsx', read_only=True)
crawledData = crawledWorkbook.active
contents = ''
for row in crawledData.iter_rows():
for cell in row:
contents = contents + " "+ str(cell.value)
contents += '\n'
for name , mail in receivers:
send_mail(name,mail,'Naver News : ',contents)
```
|
{
"source": "jeongj/AlbumView",
"score": 3
}
|
#### File: jeongj/AlbumView/AlbumViewer.py
```python
import sys
import os
import screeninfo
from PIL import Image, ImageTk # Pillow module
import zipfile
if sys.version_info[0] == 2: # not tested yet
import Tkinter as tk # Tkinter -> tkinter in Python3, Tkinter in python2
from BytesIO import BytesIO # import StringIO #Python2
import tkFileDialog as tkFD
else:
import tkinter as tk # Tkinter -> tkinter in Python3, Tkinter in python2
from io import BytesIO
import tkinter.filedialog as tkFD
from tkinter import messagebox as mbox
class App(tk.Frame):
def __init__(self):
self.root = tk.Tk()
self.SetWigets()
self.SetCanvas()
self.SetHotKey() # bind
self.color = '#000000'
self.nCurrnetImage = 0
self.filename = ""
self.filemod = ""
self.foldername = ""
self.bFullScreen, self.bInformation = False, False
self.lImageExts = ['.jpg', '.jpeg', '.png', '.gif', '.webp']
self.nMode = 0 # 0:one view,1:two view(odd left),2:two view(even left)
self.lImages = []
#self.foldername = os.getcwd()
#print(self.foldername)
self.lImages.append("HotKeys.png")
#self.ShowImage()
def Quit(self,event=None):
self.root.quit()
def SetWigets(self):
self.btnframe = tk.Frame(self.root)
self.btnframe.pack({"side": "top"})
self.btnQuit = tk.Button(self.btnframe,text="QUIT",command=self.Quit)
self.btnQuit["fg"] = "red"
self.btnQuit.pack({"side": "left"})
self.btnZip = tk.Button(self.btnframe,text="Load Zip File",command=self.LoadZip)
self.btnZip.pack({"side": "left"})
self.btnLoadFolder = tk.Button(self.btnframe,text="Choose a Folder",command=self.LoadFolder)
self.btnLoadFolder.pack({"side": "left"})
self.btnLoadFile = tk.Button(self.btnframe,text="Load A File",command=self.LoadFile)
self.btnLoadFile.pack({"side": "left"})
self.btnPrev = tk.Button(self.btnframe,text="Previous",command=self.PreviousImage)
self.btnPrev.pack({"side": "left"})
self.btnNext = tk.Button(self.btnframe,text="Next",command=self.NextImage)
self.btnNext.pack({"side": "left"})
def SetCanvas(self,W=640,H=480):
self.canvas = tk.Canvas(self.root, width=W, height=H)
self.canvas.pack(expand=True)
#self.canvas.create_text() # for initial help for shortcut
def SetHotKey(self):
self.root.bind('<a>', self.PreviousImage)
self.root.bind('<d>', self.NextImage)
self.root.bind('<f>', self.ToggleFullScreen)
self.root.bind('<e>', self.LoadFile)
self.root.bind('<q>', self.Quit)
self.root.bind('<r>', self.LoadFolder)
self.root.bind('<o>', self.LoadFile)
self.root.bind('<z>', self.LoadZip)
self.root.bind('<t>', self.ToggleMode)
self.root.bind('<BackSpace>', self.Delete)
self.root.bind('<i>', self.ToggleInformation)
def ToggleInformation(self,event=None):
self.bInformation = not self.bInformation
def Delete(self,event=None):
if self.bFullScreen: # tkinter lose focus after askokcancel when fullscreen
self.ToggleFullScreen() # full to normal
WasFullScreen = True
else :
WasFullScreen = False
if mbox.askokcancel(title="Delete", message="Sure?", icon="warning"):
os.remove(self.foldername + '/' + self.lImages[self.nCurrnetImage])
self.lImages.remove(self.lImages[self.nCurrnetImage])
self.ShowImage()
#if WasFullScreen:
# self.ToggleFullScreen()
def ToggleMode(self,event=None):
_,self.nMode=divmod(self.nMode+1,3)
def LoadZip(self, event=None): # deal with zip file
self.filename = tkFD.askopenfilename()
if len(self.filename) < 1: # when cancel chosen from the dialog
return
if not zipfile.is_zipfile(self.filename):
self.canvas.create_text("Loaded file is not a zipfile.")
return
self.filemod = 'Zipped'
self.zf = zipfile.ZipFile(self.filename,"r") # open zipfile
self.lImages = self.zf.namelist() # get filelist from zipfile
for filename in self.lImages:
if self.CheckExt(filename) == False:
self.lImages.remove(filename)
self.lImages.sort() # sort the filelist
self.nCurrnetImage = 0 # number for current image # image indexes
self.ShowImage() # call show image for first image
print('%d image(s) found'%len(self.lImages))
for filename in self.lImages:
print(filename)
def LoadFolder(self, event=None): # deal with zip file
self.foldername = tkFD.askdirectory()
if self.foldername :
self.ProcessFolder()
self.filemod = 'Folder'
self.nCurrnetImage = 0 # number for current image # image indexes
self.ShowImage() # call show image for first image
def ProcessFolder(self): # list a directory, remove non-readables then sort
self.lImages = os.listdir(self.foldername)
for filename in self.lImages:
if self.CheckExt(filename) == False:
self.lImages.remove(filename)
self.lImages.sort() # sort the filelist
def LoadFile(self): # just load an image file with PIL and Tkinter
filename = tkFD.askopenfilename()
if len(filename) < 1: # when cancel chosen from the dialog
return
if self.CheckExt(filename) == False:
print('%s:Not a supported file'%filename)
return
self.filemod = 'File'
self.foldername = os.path.split(filename)[0]
self.filename = os.path.split(filename)[1]
self.ProcessFolder()
self.nCurrnetImage = self.lImages.index(self.filename)
self.ShowImage()
def ShowImage(self):
w, h = self.root.winfo_width(),self.root.winfo_height()
wc,hc = self.canvas.winfo_width(),self.canvas.winfo_height()
self.canvas.delete("all") # clear canvas
if self.bFullScreen == True:
self.canvas.config(width=w-6,height=h-6)
else :
self.canvas.config(width=w-6,height=h-28) # if not smaller it frame expands on next image
#self.canvas.create_rectangle(3,3,w-4,h-28) # for measuring canvas size
if self.filemod == 'Zipped':
imagedata = self.zf.read(self.lImages[self.nCurrnetImage])
obj = BytesIO(imagedata) # image data convert to BytesIO
img = Image.open(obj) # open pil image
if self.nMode != 0:
if self.nMode == 1:
tFilename = self.lImages[self.nCurrnetImage-1]
else:# self.nMode == 2:
tFilename = self.lImages[self.nCurrnetImage+1]
imagedata2 = self.zf.read(tFilename)
obj2 = BytesIO(imagedata2)
img2 = Image.open(obj2)
else:# self.filemod == 'Folder' or 'File':
            fullpathfilename = self.foldername + '/' if len(self.foldername) > 0 else ""
            fullpathfilename += self.lImages[self.nCurrnetImage]
            try:
print(fullpathfilename)
img = Image.open(fullpathfilename)
except:
self.lImages.remove(self.lImages[self.nCurrnetImage])
return
if self.nMode != 0:
if self.nMode == 1:
img2 = Image.open(self.foldername + '/' + self.lImages[self.nCurrnetImage-1])
elif self.nMode == 2:
img2 = Image.open(self.foldername + '/' + self.lImages[self.nCurrnetImage+1])
if self.nMode == 0 : # one view mode
self.photo = self.RatioResize(img,wc,hc)
self.canvas.create_image(w/2, h/2, image=self.photo, anchor=tk.CENTER)
else :
self.photo = self.RatioResize(img,wc/2,hc)
self.photo2 = self.RatioResize(img2,wc/2,hc)
if self.nMode == 1 : # two view mode, img2 on left
self.canvas.create_image(wc/2,0,image=self.photo, anchor=tk.NW)
self.canvas.create_image(0,0,image=self.photo2, anchor=tk.NW)
else : # two view mode, img2 on right
self.canvas.create_image(0, 0, image=self.photo, anchor=tk.NW)
self.canvas.create_image(wc / 2, 0, image=self.photo2, anchor=tk.NW)
if self.bInformation:
self.canvas.create_text(5, 5, anchor="nw", font=("Purisa", 12), text="%dx%d" % (img.size[0], img.size[1]))
self.canvas.create_text(5, 15, anchor="nw", font=("Purisa", 12), text=self.lImages[self.nCurrnetImage])
self.root.title("[%d/%d]"%(self.nCurrnetImage,len(self.lImages)))
def RatioResize(self,img,wc,hc):
ratiow, ratioh = float(wc) / img.size[0], float(hc) / img.size[1]
ratio = ratioh if ratiow > ratioh else ratiow
img_resized = img.resize((int(img.size[0] * ratio), int(img.size[1] * ratio)), Image.ANTIALIAS)
return ImageTk.PhotoImage(img_resized)
def PreviousImage(self, event=None):
if self.filemod == "Zipped" and self.filename == "":
return
elif self.filemod == "" :
return
self.nCurrnetImage= self.nCurrnetImage-1 if self.nMode==0 else self.nCurrnetImage-2
if self.nCurrnetImage < 0: # bounded at first image
self.nCurrnetImage = 0
self.ShowImage()
def NextImage(self, event=None):
if self.filemod == "Zipped" and self.filename == "":
return
elif self.filemod == "" :
return
self.nCurrnetImage= self.nCurrnetImage+1 if self.nMode==0 else self.nCurrnetImage+2
if self.nCurrnetImage >= len(self.lImages): # bounded at last image
self.nCurrnetImage = len(self.lImages)-1 # list ends with len-1
self.ShowImage()
def ToggleFullScreen(self, event=None):
if self.bFullScreen : # full to normal
self.canvas.pack_forget()
self.btnframe.pack()
self.canvas.pack()
else: # normal to full
self.btnframe.pack_forget() # hide button frame
# self.root.state('zoomed') # Windows and Mac only, Not X11
self.root.overrideredirect(True) # for mac, cover the dock area
self.root.geometry("{0}x{1}+0+0".format(self.root.winfo_screenwidth(), self.root.winfo_screenheight()))
#self.root.wm_attributes("-zoomed", False) # once effected, top menu bar stays
self.root.update_idletasks()
self.bFullScreen = not self.bFullScreen
self.root.wm_attributes("-fullscreen", self.bFullScreen)
self.ShowImage()
def CheckExt(self,filename):
for ext in self.lImageExts:
if filename.lower().endswith(ext):
return True
return False
app = App()
app.root.mainloop()
```
|
{
"source": "JEONG-JIWOO/PinoBot",
"score": 3
}
|
#### File: modules/Hardware/pino_uart.py
```python
import serial
import ast , time
class Pino_UART:
"""
Description:
- Uart Module for use in pinobot
Summary of Class
1. write(self, str_msg="", pino_response = None):
2. read(self):
"""
def __init__(self, port="/dev/serial0", baud_rate=115200):
# 0. Argument
self.port = port # Rpi basic : /dev/serial0
self.baud = baud_rate # default : 115200
self.last_exception = "" # save exception message for logging
self.parsed_data = {}
# 3. Objects
self.pino_serial = serial.Serial(
self.port, self.baud, timeout=0, write_timeout=0.05
)
# [B.1] write "string" or "DialogFlow result" to serial port
def write(self, str_msg="", pino_response = None):
"""
Description
-----------
send string message or pino_response
Parameters
----------
str_msg: message to send {str, optional}
pino_response { pino_dialogflow.py | PinoResponse , optional}:
pino_response.stt_result : stt text (str)
pino_response.tts_result : audio .wav format (binary file)
pino_response.intent_name : intent name (str)
pino_response.intent_response : intent response text (str)
pino_response.intent_parameter = {} : intent parameter (dict)
pino_response.action_cmd = [] : pinobot command (list)
Notes
-----
[Serial protocol send Rpi to Arduino]
$stt|$intent_name|$intent_response|$num_action_parameter|$action_parameter_1_key:$action_parameter_2_value..
        ex1 (0 parameters):
            ์๋
ํ์ธ์|Welcome|๋ฐ๊ฐ์์|0
            STT result                 : ์๋
ํ์ธ์
            DialogFlow intent name     : Welcome
            DialogFlow response text   : ๋ฐ๊ฐ์์
            DialogFlow parameter count : 0
        ex2 (1 parameter):
            ์๋
ํ์ธ์|Welcome|๋ฐ๊ฐ์์|1|play_sound:1.wav|
            DialogFlow parameter count : 1
            parameter 1 name  : play_sound
            parameter 1 value : 1.wav
        ex3 (2 parameters):
            ์๋
ํ์ธ์|Welcome|๋ฐ๊ฐ์์|2|play_sound:1.wav|temp:30
            DialogFlow parameter count : 2
            parameter 1 name  : play_sound
            parameter 1 value : 1.wav
            parameter 2 name  : temp
            parameter 2 value : 30
Return
------
result : fail -1
success 0
Example
-------
uart = Pino_UART()
uart.write("ready")
Gbot.start_stream()
result2 = Gbot.get_stream_response()
uart.write(pino_response=result2)
"""
try:
# 1.1 send DialogFlow result
if pino_response is not None :
new_msg = "%s|%s|%s|%d"%(pino_response.stt_result,
pino_response.intent_name,
pino_response.intent_response,
len(pino_response.intent_parameter))
action_dict = pino_response.intent_parameter
for action_parameter in action_dict.keys():
new_msg += "|%s:%s"%(action_parameter, action_dict[action_parameter])
self.pino_serial.write(new_msg.encode())
# 1.2 or send just string message
elif str_msg != "":
self.pino_serial.write(str_msg.encode())
# 2. if Fail to send data
except Exception as E:
self.last_exception = "PINO_UART.write()" + repr(E) # save Error message to class for log
return -1
# 3. Success to send data
else:
return 0
# [B.2] read msgs form serial port , if exist.
def read(self):
"""
Description
-----------
read serial port without block
and parse and return
Notes
-----
[Serial protocol receive form Arduino]
event_name:$name|para1Name:$para1Value|para2Name:$para2Value|para3Name:$para3Value|para4Name:$para1Value
after decode
{ event_name : $name ,
para1Name : $para1Value,
para2Name : $para2Value,
para3Name : $para3Value,
...
paraNName : $paraNValue,
}
ex)
event_name:humidity_event|humidity:50|temp:12.3|cds:457
Return
------
parsed_data : {dict}
ex)
{
event_name : humidity_event,
humidity : 50,
temp : 12.3,
cds : 457
}
"""
# 1. read serial data
received_msg = ""
try:
data_left = self.pino_serial.inWaiting()
time.sleep(0.01) # wait for get all data
if data_left > 0:
time.sleep(0.2)
data_left = self.pino_serial.inWaiting()
received_msg = str(self.pino_serial.read(data_left).decode())
self.pino_serial.flushInput()
else :
return None
except Exception as E:
self.last_exception = "PINO_UART.read()" + repr(E) # save Error message to class for log
return None
# 2. if there are empty message , clear buffer and return None
        if received_msg == "":
self.pino_serial.flushInput()
return None
blocks = received_msg.split("|")
# 3. split each word
parsed_data = {}
for block in blocks:
b2 = block.split(":")
if len(b2) == 2:
parameter_name = b2[0]
parsed_data[parameter_name] = b2[1]
# 4. try to convert string to float or int
for k in parsed_data.keys():
try:
new_value = ast.literal_eval(parsed_data[k])
except ValueError :
pass
else :
parsed_data.update({k:new_value})
# 5. check parse response in valid and return
if 'event_name' not in parsed_data or len(parsed_data) < 1 :
return None
else :
self.parsed_data = parsed_data
return parsed_data
```
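The write()/read() protocol documented above is easiest to see end to end in a small polling loop. The sketch below is illustrative only: the import path, port, and timing values are assumptions rather than code taken from the project.

```python
# Minimal polling sketch for Pino_UART (assumed import path and timing values).
import time
from pino_uart import Pino_UART  # hypothetical module path

uart = Pino_UART(port="/dev/serial0", baud_rate=115200)
uart.write("ready")                    # plain string message to the Arduino

while True:
    event = uart.read()                # dict like {'event_name': ..., ...} or None
    if event is not None:
        print("received:", event["event_name"], event)
    time.sleep(0.1)                    # avoid busy-waiting on the serial port
```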
#### File: PinoBot/modules/pinobot.py
```python
from modules.pino_boot_loader import PinoBootLoader
import time , threading, logging
from logging.handlers import RotatingFileHandler
from enum import Enum
from google.api_core.exceptions import Unknown
class PinoState(Enum):
IDLE = 0
SENSOR_ON =1
STILL_ON = 2
SENSOR_OFF = 3
LISTEN_SUCCESS = 11
LISTEN_FAIL = 12
LISTEN_CLOUD_ERROR = 13
UART_ON = 20
DO_SOMETHING = 30
#GO_SLEEP = 4
#WAKEP_UP = 5
class PinoBot:
"""
Description:
pinobot main module
-
Summary of Class
1. setup(self):
set logging and boot pinobot
2. update(self):
read hardware and update PinoBot's State
3. listen(self):
start audio recording and streaming and return response
if google cloud error occurs, display to OLED
4. say(self,response):
pinobot say tts response
5. act(self, response):
pinobot do action by response.action_cmd
6. call_uart_event(self):
if pinobot's uart got some message,
use this to call dialogflow event
    7. call_intent(self, text="", event_name="", event_parameter=None):
call dialogflow manually by dict or text, and get responses
"""
def __init__(self,base_path ="/home/pi/Desktop/PinoBot/"):
# 0. Argument
# 1. Static Variables
# 2. variables
self.cur_volume = 0 # current speaker volume rate [ 0 ~ 10 ]
self.detect = {
"pre_state": 0, # last sensor state, 1: in , 0: out
"distance": 30, # cm # sonic sensor threshold to between 1 to 0
"first_time": time.time(),
} # sec # first time sonic sensor detect object
self.base_path = base_path
self.state = PinoState.IDLE
# 3. Objects
self.hardware = None
self.cloud = None
self.config = None
self.log = None
self.response = None
self.uart_cmd = None
# threads
self.say_thread = None
self.act_thread = None
# 4. Run setup
self.setup()
def setup(self):
"""
Description
-----------
set logging and boot pinobot
"""
# 1. init log
path = self.base_path + "/main.log"
self.log = logging.getLogger("Main")
self.log.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"[%(levelname)s] (%(asctime)s : %(filename)s:%(lineno)d) > %(message)s"
)
log_file = RotatingFileHandler(
filename=path, maxBytes=5 * 1024 * 1024, mode="w", encoding="utf-8"
)
log_file.setFormatter(formatter)
self.log.addHandler(log_file)
log_console = logging.StreamHandler()
log_console.setFormatter(formatter)
self.log.addHandler(log_console)
self.log.info("[PinoBot] Start Boot")
boot = PinoBootLoader(self.base_path,self.log)
# 2. run boot sequence
self.hardware, self.cloud, self.config = boot.boot()
del boot
self.log.info("[PinoBot] Boot Done..")
def update(self):
"""
Description
-----------
read hardware and update PinoBot's State
Notes
-----
State : PinoState
priority [1] : Serial command
priority [2] : ultrasonic sensor state
If the ultrasonic sensor and uart command come in at the same time,
the uart command is given priority.
"""
# 2. read hardware signals
cur_sensor_state = 0
distance,uart_cmd = self.hardware.read()
if self.detect["distance"] > distance > 4:
cur_sensor_state = 1
# 3. uart command on
if uart_cmd is not None:
self.uart_cmd = uart_cmd
self.state = PinoState.UART_ON
print("uart : ",self.uart_cmd)
return self.state
# 4. set state by sensor
if self.detect["pre_state"] == 0 and cur_sensor_state == 1:
# 4.1 object [ 0 -> 1 ] , new object, add talk task
self.detect["first_time"] = time.time()
self.state = PinoState.SENSOR_ON
elif self.detect["pre_state"] == 1 and cur_sensor_state == 1:
# 4.2 object [ 1 -> 1 ] , object still in
self.state = PinoState.STILL_ON
elif self.detect["pre_state"] == 1 and cur_sensor_state == 0:
# 4.3 object [ 1 -> 0 ] , object gone
self.state = PinoState.SENSOR_OFF
self.detect["pre_state"] = cur_sensor_state # update sensor state
return self.state
def listen(self):
"""
Description
-----------
start audio recording and streaming and return response
if google cloud error occurs, display to OLED
Return
------
response : { Parsed DialogFlow response , PinoResponse object}
"""
self.log.info("listen")
self.hardware.write(text="๋ฃ๋์ค")
# 2.1. streaming voice
if self.cloud.start_stream() == -1:
            self.hardware.write(text="๋
น์ ์คํจ\n ใ
ใ
")
return None
self.hardware.write(text="๋ฃ๋์ค..")
try:
response = self.cloud.get_stream_response()
if response.stt_result == "[Fail]":
self.state = PinoState.LISTEN_FAIL
return None
# 2.E0. Gcloud Error
if self.cloud.gcloud_state < 0:
self.state = PinoState.LISTEN_CLOUD_ERROR
if self.cloud.gcloud_state == -1: # Internet Error
                    self.hardware.write(text="์ธํฐ๋ท ๋ฌธ์ \n ๊ฐ ์์ด์ ใ
ใ
")
                elif self.cloud.gcloud_state == -2:  # google server error
                    self.hardware.write(text="์ธํฐ๋ท ๋ฌธ์ \n ๊ฐ ์์ด์ ใ
ใ
")
                elif self.cloud.gcloud_state == -3:
                    self.hardware.write(text="์ค๋์ ํ ๋น๋์ \n ๋ค ์ฌ์ฉํ๋ค์ ใ
ใ
")
                elif self.cloud.gcloud_state < -3:
                    self.hardware.write(text="๋ฌด์ธ๊ฐ ๋ฌธ์ ๊ฐ ์์ด์ \n ใ
ใ
")
self.log.warning("[PinoBot] cloud Error type : %d" % self.cloud.gcloud_state)
except Exception as E:
self.log.error("[PinoBot] listen Error : %s" % repr(E))
return None
else:
return response
def start_say(self, response):
"""
Description
-----------
pinobot say tts response
Parameters
----------
response : { Parsed DialogFlow response , PinoResponse object}
PinoResponse.tts_result is audio binary file .wav format
[TODO] add local wave file play feature
"""
if response is None:
self.log.warning("say.. nothing")
return 0
try:
self.say_thread = threading.Thread(
target=self.cloud.play_audio_response, args=(response,)
)
self.say_thread.start()
except Exception as E:
self.log.error("[PinoBot] say Error : %s" % repr(E))
return -1
def start_act(self, response):
"""
Description
-----------
pinobot do action by response.action_cmd
Notes
-----
action could take few seconds, therefor add threading
Parameters
----------
response : { Parsed DialogFlow response , PinoResponse object}
PinoResponse.action_cmd is list of comaands
Return
------
result : 0 Success
1 Fail
"""
if response is None:
self.log.warning('act.. nothing')
return 0
try:
self.act_thread = threading.Thread(
target=self.hardware.run_pinobot_cmd, args=(response,)
)
self.act_thread.start()
except Exception as E:
self.log.error("[PinoBot] act Error : %s" % repr(E))
return -1
else :
return 0
def wait_say_and_act(self,timeout = 30):
"""
Description
-----------
wait until say and act finish
Parameters
----------
timeout : seconds {int or float}
Return
------
result : 0 Success
1 Fail
"""
self.state = PinoState.DO_SOMETHING
if self.act_thread and self.say_thread :
for i in range (int(timeout*1000)):
if self.act_thread.is_alive() and self.say_thread.is_alive():
time.sleep(0.01)
else :
return 0
# [TODO] add flag to all thread and make ways to force stop.
return -1
def call_uart_event(self):
"""
Description
-----------
if pinobot's uart got some message,
use this to call dialogflow event
Notes
-----
uart_cmd : { dict }
{ event_name : $name ,
para1Name : $para1Value,
...
paraNName : $paraNValue,
}
Return
------
response : { Parsed DialogFlow response , PinoResponse object}
"""
try:
if self.uart_cmd is not None:
self.hardware.write(text="๋ฉ์ธ์ง ํ์ธ์ค..")
response = self.cloud.send_event(self.uart_cmd ['event_name'], self.uart_cmd )
self.uart_cmd = None # reset
self.state = PinoState.IDLE
self.hardware.write(serial_msg = "ready")
return response
else:
self.hardware.write(serial_msg = "ready")
return self.cloud.parsing_response(None,None,None)
except Exception as E:
self.log.error("[PinoBot] call_uart_event Error : %s" % repr(E))
return None
def call_intent(self,text = "" ,event_name="", event_parameter=None):
"""
Description
-----------
call dialogflow manually by dict or text, and get responses
Parameters
----------
text : natural word message to call dialogflow { str, optional }
event_name : dialogflow intent event name { str, optional }
event_parameter : dialogflow intent parameter { dict, optional }
event_parameter without event_name is useless
Return
------
response : { Parsed DialogFlow response , PinoResponse object}
Example
-------
        r = bot.call_intent(text='์๋
ํ์ธ์')
        print(r.intent_response)
        >> ๋ฐ๊ฐ์ต๋๋ค ์ ๋ ํผ๋
ธ๋ด์
๋๋ค
        r = bot.call_intent(event_name="weather", event_parameter={'humidity': '50', 'temp': '20'})
        print(r.intent_response)
        >> ์ง๊ธ ๋ฐฉ์ ์ํ๋ฅผ ์๋ ค๋๋ฆฝ๋๋ค, ์ต๋๋ 50ํ๋ก ์จ๋๋ 20๋์
๋๋ค.
"""
try :
            if event_name != "":
self.cloud.send_event(event_name, event_parameter)
return self.cloud.parsing_response()
except Exception as E:
self.log.error("[PinoBot] call_intent Error : %s" % repr(E))
return None
def return_idle(self):
"""
Description
-----------
display idle message and
return state to idle
"""
self.hardware.write(text="๋๊ธฐ์ค..")
self.state = PinoState.IDLE
```
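Taken together, a caller is expected to poll update(), branch on the returned PinoState, and then run speech and motion in parallel before returning to idle. The loop below is a sketch under those assumptions; the import path and the exact branching are not taken from the project's own entry script.

```python
# Hypothetical main loop wiring PinoBot's states together (illustration only).
from modules.pinobot import PinoBot, PinoState

bot = PinoBot(base_path="/home/pi/Desktop/PinoBot/")
while True:
    state = bot.update()                     # poll ultrasonic sensor + uart
    if state == PinoState.UART_ON:
        response = bot.call_uart_event()     # Arduino-triggered DialogFlow event
    elif state == PinoState.SENSOR_ON:
        response = bot.listen()              # voice interaction via DialogFlow
    else:
        continue
    bot.start_say(response)                  # TTS playback (runs in a thread)
    bot.start_act(response)                  # hardware actions (runs in a thread)
    bot.wait_say_and_act(timeout=30)
    bot.return_idle()
```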
|
{
"source": "JeongJuhyeon/my-talon-scripts",
"score": 3
}
|
#### File: my-talon-scripts/misc/repeater.py
```python
from talon.voice import Context, Rep, talon
from inspect import signature
from math import floor
from ..utils import optional_numerals, text_to_number
ctx = Context("repeater")
ordinals = {}
def ordinal(n):
"""
Convert an integer into its ordinal representation::
ordinal(0) => '0th'
ordinal(3) => '3rd'
ordinal(122) => '122nd'
ordinal(213) => '213th'
"""
n = int(n)
suffix = ["th", "st", "nd", "rd", "th"][min(n % 10, 4)]
if 11 <= (n % 100) <= 13:
suffix = "th"
return str(n) + suffix
ordinal_words = {}
ordinal_ones = ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth', 'ninth']
ordinal_teens = ['tenth', 'eleventh', 'twelfth', 'thirteenth', 'fourteenth', 'fifteenth', 'sixteenth', 'seventeenth', 'eighteenth', 'nineteenth']
ordinal_tens = ['twentieth', 'thirtieth', 'fortieth', 'fiftieth', 'sixtieth', 'seventieth', 'eightieth', 'ninetieth']
ordinal_tenty = ['twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety']
def ordinal_word(n):
n = int(n)
result = ""
if n > 19:
if n % 10 == 0:
result += ordinal_tens[floor((n / 10)) - 2]
else:
result += ordinal_tenty[floor(n / 10) - 2]
result += ordinal_ones[(n % 10) - 1]
elif n > 9:
        result += ordinal_teens[n - 10]
else:
result += ordinal_ones[n - 1]
return result
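# A few illustrative values produced by ordinal_word (from the tables above):
#   ordinal_word(5)  -> 'fifth'
#   ordinal_word(21) -> 'twentyfirst'   ('twenty' + 'first', no hyphen)
#   ordinal_word(40) -> 'fortieth'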
for n in range(2, 100):
ordinals[ordinal(n)] = n - 1
ordinal_words[ordinal_word(n)] = n - 1
ctx.set_list("ordinals", ordinals.keys())
ctx.set_list("ordinal_words", ordinal_words.keys())
def repeat_ordinal(m):
o = m["repeater.ordinals"][0]
repeater = Rep(int(ordinals[o]))
repeater.ctx = talon
return repeater(None)
def repeat_ordinal_word(m):
o = m["repeater.ordinal_words"][0]
repeater = Rep(int(ordinal_words[o]))
repeater.ctx = talon
return repeater(None)
ctx.keymap({
"{repeater.ordinals}": repeat_ordinal,
"{repeater.ordinal_words}": repeat_ordinal_word,
})
```
|
{
"source": "jeongjuns/Control_yolact",
"score": 2
}
|
#### File: jeongjuns/Control_yolact/W_conv.py
```python
import math
import warnings
import torch
from torch import Tensor
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.modules.module import Module
from torch.nn.modules.utils import _single, _pair, _triple, _reverse_repeat_tuple
from torch.autograd import Variable
from torch.nn.common_types import _size_1_t, _size_2_t, _size_3_t
from typing import Optional, List, Tuple
#from torch.nn.modules.conv import _ConvNd
flcnt1=0
flcnt2=0
flcnt3=0
avgcnt1=0
avgcnt2=0
avgcnt3=0
#fpnlatlayercnt=0
flfpnlatlayercnt=0
bboxcnt=0
flbboxcnt=0
confcnt=0
flconfcnt=0
maskcnt=0
flmaskcnt=0
makenetcnt=0
flmakenetcnt=0
segcnt=0
flsegcnt=0
# Modified version of torch.nn.Conv2d
class W_ConvNd(Module):
__constants__ = ['stride', 'padding', 'dilation', 'groups',
'padding_mode', 'output_padding', 'in_channels',
'out_channels', 'kernel_size']
__annotations__ = {'bias': Optional[torch.Tensor]}
_in_channels: int
out_channels: int
kernel_size: Tuple[int, ...]
stride: Tuple[int, ...]
padding: Tuple[int, ...]
dilation: Tuple[int, ...]
transposed: bool
output_padding: Tuple[int, ...]
groups: int
padding_mode: str
weight: Tensor
bias: Optional[Tensor]
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t,
padding: _size_1_t,
dilation: _size_1_t,
transposed: bool,
output_padding: _size_1_t,
groups: int,
bias: Optional[Tensor],
padding_mode: str) -> None:
super(W_ConvNd, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
if padding_mode not in valid_padding_modes:
raise ValueError("padding_mode must be one of {}, but got padding_mode='{}'".format(
valid_padding_modes, padding_mode))
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
self.padding_mode = padding_mode
# `_reversed_padding_repeated_twice` is the padding to be passed to
# `F.pad` if needed (e.g., for non-zero padding types that are
# implemented as two ops: padding + conv). `F.pad` accepts paddings in
# reverse order than the dimension.
self._reversed_padding_repeated_twice = _reverse_repeat_tuple(self.padding, 2)
if transposed:
self.weight = Parameter(torch.Tensor(
in_channels, out_channels // groups, *kernel_size))
else:
self.weight = Parameter(torch.Tensor(
out_channels, in_channels // groups, *kernel_size))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0,) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
if self.padding_mode != 'zeros':
s += ', padding_mode={padding_mode}'
return s.format(**self.__dict__)
def __setstate__(self, state):
        super(W_ConvNd, self).__setstate__(state)
if not hasattr(self, 'padding_mode'):
self.padding_mode = 'zeros'
class W_Conv2d1(W_ConvNd):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros' # TODO: refine this type
):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(W_Conv2d1, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias, padding_mode)
################################################# jj add
self.W1 = Parameter(make_mw(out_channels, in_channels, kernel_size[0]), requires_grad=True)
W_Conv2d1.fl = {}
W_Conv2d1.Wweight={}
################################################# jj end
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def forward(self, input: Tensor) -> Tensor:
################################################# jj add
global flcnt1
global avgcnt1
if avgcnt1 == 34:
avgcnt1 = 1
if flcnt1 == 33:
avgcnt1 += 1
for i in range(33,66):
if flcnt1 == i:
W_Conv2d1.fl['{0}'.format(i-33)] = self.weight.clone().detach()
if flcnt1 > 32:
for i in range(1,34):
if avgcnt1 == i:
W_Conv2d1.Wweight['{0}'.format(i)] = mod_compute(W_Conv2d1.fl['{0}'.format(i-1)], self.W1)
if flcnt1 < 66:
flcnt1+=1
if 0 < avgcnt1 < 34:
avgcnt1+=1
if flcnt1 < 34:
return self._conv_forward(input, self.weight)
else :
return self._conv_forward(input, mod_compute(W_Conv2d1.fl['{0}'.format(avgcnt1-2)], self.W1))
def mod_compute(fl, w):
# seungil modification
if fl.size(3) == 1:
fl = fl.squeeze(-1).squeeze(-1)
fla_tensor = w@fl
fla_tensor = fla_tensor.unsqueeze(-1).unsqueeze(-1)
elif fl.size(3) == 3:
fla_tensor = torch.zeros(fl.size(0), fl.size(1), 3, 3)
for i in range(3):
for j in range(3):
temp = fl[:,:,i,j].squeeze(-1).squeeze(-1)
temp = w@temp
fla_tensor[:,:,i,j] = temp
return fla_tensor
def make_mw(o_size, i_size, k_size):
# seungil modification
mw = torch.eye(o_size)
return mw
################################################# jj end
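# Shape sketch for mod_compute (illustrative, not part of the original code):
# given a frozen conv weight fl of shape (out_ch, in_ch, k, k) and a mixing
# matrix w of shape (out_ch, out_ch), every spatial slice fl[:, :, i, j] is an
# (out_ch, in_ch) matrix that gets left-multiplied by w, so the result keeps
# the (out_ch, in_ch, k, k) shape. Because make_mw returns an identity matrix,
# the first modulated weights equal the frozen weights themselves, e.g.:
#
#     fl = torch.randn(256, 256, 3, 3)
#     w = torch.eye(256)
#     assert torch.allclose(mod_compute(fl, w), fl)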
class W_Conv2d2(W_ConvNd):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros' # TODO: refine this type
):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(W_Conv2d2, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias, padding_mode)
################################################# jj add
self.W2 = Parameter(make_mw(out_channels, in_channels, kernel_size[0]), requires_grad=True)
W_Conv2d2.fl = {}
W_Conv2d2.Wweight={}
################################################# jj end
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def forward(self, input: Tensor) -> Tensor:
################################################# jj add
global flcnt2
global avgcnt2
if avgcnt2 == 34:
avgcnt2 = 1
if flcnt2 == 33:
avgcnt2 += 1
for i in range(33,66):
if flcnt2 == i:
W_Conv2d2.fl['{0}'.format(i-33)] = self.weight.clone().detach()
if flcnt2 > 32:
for i in range(1,34):
if avgcnt2 == i:
W_Conv2d2.Wweight['{0}'.format(i)] = mod_compute(W_Conv2d2.fl['{0}'.format(i-1)], self.W2)
if flcnt2 < 66:
flcnt2+=1
if 0 < avgcnt2 < 34:
avgcnt2+=1
#if flcnt2 == 66:
# print(W_Conv2d2.fl['{0}'.format(32)][0][0])
if flcnt2 < 34:
return self._conv_forward(input, self.weight)
else :
return self._conv_forward(input, mod_compute(W_Conv2d2.fl['{0}'.format(avgcnt2-2)], self.W2))
################################################# jj end
class W_Conv2d3(W_ConvNd):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros' # TODO: refine this type
):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(W_Conv2d3, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias, padding_mode)
################################################# jj add
self.W3 = Parameter(make_mw(out_channels, in_channels, kernel_size[0]), requires_grad=True)
W_Conv2d3.fl = {}
W_Conv2d3.Wweight={}
################################################# jj end
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def forward(self, input: Tensor) -> Tensor:
################################################# jj add
global flcnt3
global avgcnt3
if avgcnt3 == 34:
avgcnt3 = 1
if flcnt3 == 33:
avgcnt3 += 1
for i in range(33,66):
if flcnt3 == i:
W_Conv2d3.fl['{0}'.format(i-33)] = self.weight.clone().detach()
if flcnt3 > 32:
for i in range(1,34):
if avgcnt3 == i:
W_Conv2d3.Wweight['{0}'.format(i)] = mod_compute(W_Conv2d3.fl['{0}'.format(i-1)], self.W3)
if flcnt3 < 66:
flcnt3+=1
if 0 < avgcnt3 < 34:
avgcnt3+=1
if flcnt3 < 34:
return self._conv_forward(input, self.weight)
else :
return self._conv_forward(input, mod_compute(W_Conv2d3.fl['{0}'.format(avgcnt3-2)], self.W3))
################################################# jj end
class bbox_Conv2d(W_ConvNd):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros' # TODO: refine this type
):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(bbox_Conv2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias, padding_mode)
self.mw = Parameter(make_mw(out_channels,in_channels,kernel_size[0]), requires_grad=True)
bbox_Conv2d.fl={}
bbox_Conv2d.Wweight={}
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def forward(self, input: Tensor) -> Tensor:
global flbboxcnt
global bboxcnt
if bboxcnt == 6:
bboxcnt = 1
if flbboxcnt == 5:
bboxcnt += 1
for i in range(5,10):
if flbboxcnt == i:
bbox_Conv2d.fl['{0}'.format(i-5)] = self.weight.clone().detach()
if flbboxcnt > 4:
for i in range(1,6):
if bboxcnt == i:
bbox_Conv2d.Wweight['{0}'.format(i)] = mod_compute(bbox_Conv2d.fl['{0}'.format(i-1)], self.mw)
if flbboxcnt < 10:
flbboxcnt+=1
if 0 < bboxcnt < 6:
bboxcnt+=1
#if flbboxcnt == 10:
# print(bbox_Conv2d.fl['{0}'.format(0)][0][0])
# print(bbox_Conv2d.Wweight['{0}'.format(1)][0][0])
if flbboxcnt < 6:
return self._conv_forward(input, self.weight)
else :
return self._conv_forward(input, mod_compute(bbox_Conv2d.fl['{0}'.format(bboxcnt-2)], self.mw))
class conf_Conv2d(W_ConvNd):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros' # TODO: refine this type
):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(conf_Conv2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias, padding_mode)
self.mw = Parameter(make_mw(out_channels,in_channels,kernel_size[0]), requires_grad=True)
conf_Conv2d.fl={}
conf_Conv2d.Wweight={}
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def forward(self, input: Tensor) -> Tensor:
global flconfcnt
global confcnt
if confcnt == 6:
confcnt = 1
if flconfcnt == 5:
confcnt += 1
for i in range(5,10):
if flconfcnt == i:
conf_Conv2d.fl['{0}'.format(i-5)] = self.weight.clone().detach()
if flconfcnt > 4:
for i in range(1,6):
if confcnt == i:
conf_Conv2d.Wweight['{0}'.format(i)] = mod_compute(conf_Conv2d.fl['{0}'.format(i-1)], self.mw)
if flconfcnt < 10:
flconfcnt+=1
if 0 < confcnt < 6:
confcnt+=1
if flconfcnt < 6:
return self._conv_forward(input, self.weight)
else :
return self._conv_forward(input, mod_compute(conf_Conv2d.fl['{0}'.format(confcnt-2)], self.mw))
class mask_Conv2d(W_ConvNd):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros' # TODO: refine this type
):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(mask_Conv2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias, padding_mode)
self.mw = Parameter(make_mw(out_channels,in_channels,kernel_size[0]), requires_grad=True)
mask_Conv2d.fl={}
mask_Conv2d.Wweight={}
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def forward(self, input: Tensor) -> Tensor:
global flmaskcnt
global maskcnt
if maskcnt == 6:
maskcnt = 1
if flmaskcnt == 5:
maskcnt += 1
for i in range(5,10):
if flmaskcnt == i:
mask_Conv2d.fl['{0}'.format(i-5)] = self.weight.clone().detach()
if flmaskcnt > 4:
for i in range(1,6):
if maskcnt == i:
mask_Conv2d.Wweight['{0}'.format(i)] = mod_compute(mask_Conv2d.fl['{0}'.format(i-1)], self.mw)
if flmaskcnt < 10:
flmaskcnt+=1
if 0 < maskcnt < 6:
maskcnt+=1
if flmaskcnt < 6:
return self._conv_forward(input, self.weight)
else :
return self._conv_forward(input, mod_compute(mask_Conv2d.fl['{0}'.format(maskcnt-2)], self.mw))
class makenet_Conv2d(W_ConvNd):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros' # TODO: refine this type
):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(makenet_Conv2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias, padding_mode)
self.mw = Parameter(make_mw(out_channels,in_channels,kernel_size[0]), requires_grad=True)
makenet_Conv2d.fl={}
makenet_Conv2d.Wweight={}
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def forward(self, input: Tensor) -> Tensor:
global flmakenetcnt
global makenetcnt
if makenetcnt == 11:
makenetcnt = 1
if flmakenetcnt == 10:
makenetcnt += 1
for i in range(10,20):
if flmakenetcnt == i:
makenet_Conv2d.fl['{0}'.format(i-10)] = self.weight.clone().detach()
if flmakenetcnt > 9:
for i in range(1,11):
if makenetcnt == i:
makenet_Conv2d.Wweight['{0}'.format(i)] = mod_compute(makenet_Conv2d.fl['{0}'.format(i-1)], self.mw)
if flmakenetcnt < 20:
flmakenetcnt+=1
if 0 < makenetcnt < 11:
makenetcnt+=1
if flmakenetcnt < 11:
return self._conv_forward(input, self.weight)
else :
return self._conv_forward(input, mod_compute(makenet_Conv2d.fl['{0}'.format(makenetcnt-2)], self.mw))
class seg_Conv2d(W_ConvNd):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros' # TODO: refine this type
):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(seg_Conv2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias, padding_mode)
self.mw = Parameter(make_mw(out_channels,in_channels,kernel_size[0]), requires_grad=True)
seg_Conv2d.fl={}
seg_Conv2d.Wweight={}
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def forward(self, input: Tensor) -> Tensor:
global flsegcnt
global segcnt
if segcnt == 2:
segcnt = 1
if flsegcnt == 1:
segcnt += 1
for i in range(1,2):
if flsegcnt == i:
seg_Conv2d.fl['{0}'.format(i-1)] = self.weight.clone().detach()
if flsegcnt > 0:
for i in range(1,2):
if segcnt == i:
seg_Conv2d.Wweight['{0}'.format(i)] = mod_compute(seg_Conv2d.fl['{0}'.format(i-1)], self.mw)
if flsegcnt < 2:
flsegcnt+=1
if 0 < segcnt < 2:
segcnt+=1
if flsegcnt < 2:
return self._conv_forward(input, self.weight)
else :
return self._conv_forward(input, mod_compute(seg_Conv2d.fl['{0}'.format(segcnt-2)], self.mw))
class fpn_lat_layers_Conv2d(W_ConvNd):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros' # TODO: refine this type
):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(fpn_lat_layers_Conv2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias, padding_mode)
self.mw = Parameter(make_mw(out_channels,in_channels,kernel_size[0]), requires_grad=True)
self.fl_2048=torch.ones(256,2048,1,1)
self.fl_1024=torch.ones(256,1024,1,1)
self.fl_512=torch.ones(256,512,1,1)
self.fla_2048=torch.ones(256,2048,1,1)
self.fla_1024=torch.ones(256,1024,1,1)
self.fla_512=torch.ones(256,512,1,1)
self.in_channels = in_channels
self.cnt = 0
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def forward(self, input: Tensor) -> Tensor:
if self.cnt < 2:
self.cnt += 1
if self.cnt == 2:
if self.in_channels == 2048:
self.fl_2048 = self.weight.clone().detach()
elif self.in_channels == 1024:
self.fl_1024 = self.weight.clone().detach()
elif self.in_channels == 512:
self.fl_512 = self.weight.clone().detach()
self.cnt += 1
if self.cnt > 2:
if self.in_channels == 2048:
self.fla_2048 = self.fl_2048.squeeze(-1).squeeze(-1)
self.fla_2048 = [email protected]_2048
self.fla_2048 = self.fla_2048.unsqueeze(-1).unsqueeze(-1)
elif self.in_channels == 1024:
self.fla_1024 = self.fl_1024.squeeze(-1).squeeze(-1)
self.fla_1024 = [email protected]_1024
self.fla_1024 = self.fla_1024.unsqueeze(-1).unsqueeze(-1)
elif self.in_channels == 512:
self.fla_512 = self.fl_512.squeeze(-1).squeeze(-1)
self.fla_512 = [email protected]_512
self.fla_512 = self.fla_512.unsqueeze(-1).unsqueeze(-1)
#print(self.fl_512[0][0])
if self.cnt < 2:
return self._conv_forward(input, self.weight)
elif self.in_channels == 2048:
return self._conv_forward(input, self.fla_2048)
elif self.in_channels == 1024:
return self._conv_forward(input, self.fla_1024)
elif self.in_channels == 512:
return self._conv_forward(input, self.fla_512)
return self._conv_forward(input, self.weight)
class fpn_pred_layers_Conv2d(W_ConvNd):
def __init__(
self,
in_channels: int,
out_channels: int,
x_cnt : int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros' # TODO: refine this type
):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(fpn_pred_layers_Conv2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias, padding_mode)
self.mw = Parameter(make_mw(out_channels,in_channels,kernel_size[0]), requires_grad=True)
self.fl_512=torch.ones(256,256,3,3)
self.fl_1024=torch.ones(256,256,3,3)
self.fl_2048=torch.ones(256,256,3,3)
self.fla_512=torch.ones(256,256,3,3)
self.fla_1024=torch.ones(256,256,3,3)
self.fla_2048=torch.ones(256,256,3,3)
self.cnt = 0
self.x_cnt = x_cnt
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def forward(self, input: Tensor) -> Tensor:
if self.cnt < 2:
self.cnt += 1
if self.cnt == 2:
if self.x_cnt == 512:
self.fl_512 = self.weight.clone().detach()
elif self.x_cnt == 1024:
self.fl_1024 = self.weight.clone().detach()
elif self.x_cnt == 2048:
self.fl_2048 = self.weight.clone().detach()
self.cnt += 1
if self.cnt > 2:
if self.x_cnt == 512:
self.fla_512 = torch.zeros(self.fl_512.size(0), self.fl_512.size(1),3,3).cuda()
for i in range(3):
for j in range(3):
temp = self.fl_512[:,:,i,j].squeeze(-1).squeeze(-1)
temp = self.mw@temp
self.fla_512[:,:,i,j] = temp
if self.x_cnt == 1024:
self.fla_1024 = torch.zeros(self.fl_1024.size(0), self.fl_1024.size(1),3,3).cuda()
for i in range(3):
for j in range(3):
temp = self.fl_1024[:,:,i,j].squeeze(-1).squeeze(-1)
temp = self.mw@temp
self.fla_1024[:,:,i,j] = temp
if self.x_cnt == 2048:
self.fla_2048 = torch.zeros(self.fl_2048.size(0), self.fl_2048.size(1),3,3).cuda()
for i in range(3):
for j in range(3):
temp = self.fl_2048[:,:,i,j].squeeze(-1).squeeze(-1)
temp = self.mw@temp
self.fla_2048[:,:,i,j] = temp
if self.cnt < 2:
return self._conv_forward(input, self.weight)
elif self.x_cnt == 512:
return self._conv_forward(input, self.fla_512)
elif self.x_cnt == 1024:
return self._conv_forward(input, self.fla_1024)
elif self.x_cnt == 2048:
return self._conv_forward(input, self.fla_2048)
return self._conv_forward(input, self.weight)
class fpn_down_layers_Conv2d(W_ConvNd):
def __init__(
self,
in_channels: int,
out_channels: int,
x_cnt : int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros' # TODO: refine this type
):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(fpn_down_layers_Conv2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias, padding_mode)
self.mw = Parameter(make_mw(out_channels,in_channels,kernel_size[0]), requires_grad=True)
self.fl_0=torch.ones(2,2,2,2)
self.fl_1=torch.ones(2,2,2,2)
self.fla_0=torch.ones(2,2,2,2)
self.fla_1=torch.ones(2,2,2,2)
self.cnt = 0
self.x_cnt = x_cnt
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def forward(self, input: Tensor) -> Tensor:
if self.cnt < 2:
self.cnt += 1
if self.cnt == 2:
if self.x_cnt == 0:
self.fl_0 = self.weight.clone().detach()
elif self.x_cnt == 1:
self.fl_1 = self.weight.clone().detach()
self.cnt += 1
if self.cnt > 2:
if self.x_cnt == 0:
self.fla_0 = torch.zeros(self.fl_0.size(0), self.fl_0.size(1),3,3).cuda()
for i in range(3):
for j in range(3):
temp = self.fl_0[:,:,i,j].squeeze(-1).squeeze(-1)
temp = self.mw@temp
self.fla_0[:,:,i,j] = temp
if self.x_cnt == 1:
self.fla_1 = torch.zeros(self.fl_1.size(0), self.fl_1.size(1),3,3).cuda()
for i in range(3):
for j in range(3):
temp = self.fl_1[:,:,i,j].squeeze(-1).squeeze(-1)
temp = self.mw@temp
self.fla_1[:,:,i,j] = temp
if self.cnt < 2:
return self._conv_forward(input, self.weight)
elif self.x_cnt == 0:
return self._conv_forward(input, self.fla_0)
elif self.x_cnt == 1:
return self._conv_forward(input, self.fla_1)
return self._conv_forward(input, self.weight)
```
|
{
"source": "jeongjuns/yolactpose",
"score": 3
}
|
#### File: jeongjuns/yolactpose/opt.py
```python
import argparse
import torch
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser(description='PyTorch AlphaPose Training')
"----------------------------- General options -----------------------------"
parser.add_argument('--expID', default='default', type=str,
help='Experiment ID')
parser.add_argument('--datasetA', default='coco', type=str,
help='Dataset choice: mpii | coco')
parser.add_argument('--nThreads', default=30, type=int,
help='Number of data loading threads')
parser.add_argument('--debug', default=False, type=bool,
help='Print the debug information')
parser.add_argument('--snapshot', default=1, type=int,
help='How often to take a snapshot of the model (0 = never)')
"----------------------------- AlphaPose options -----------------------------"
parser.add_argument('--addDPG', default=False, type=bool,
help='Train with data augmentation')
parser.add_argument('--sp', default=True, action='store_true',
help='Use single process for pytorch')
parser.add_argument('--profile', default=False, action='store_true',
help='add speed profiling at screen output')
"----------------------------- Model options -----------------------------"
parser.add_argument('--netType', default='hgPRM', type=str,
help='Options: hgPRM | resnext')
parser.add_argument('--loadModel', default=None, type=str,
help='Provide full path to a previously trained model')
parser.add_argument('--Continue', default=False, type=bool,
help='Pick up where an experiment left off')
parser.add_argument('--nFeats', default=256, type=int,
help='Number of features in the hourglass')
parser.add_argument('--nClasses', default=33, type=int,
help='Number of output channel')
parser.add_argument('--nStack', default=4, type=int,
help='Number of hourglasses to stack')
"----------------------------- Hyperparameter options -----------------------------"
parser.add_argument('--fast_inference', default=True, type=bool,
help='Fast inference')
parser.add_argument('--use_pyranet', default=True, type=bool,
help='use pyranet')
"----------------------------- Hyperparameter options -----------------------------"
parser.add_argument('--LR', default=2.5e-4, type=float,
help='Learning rate')
parser.add_argument('--momentum', default=0, type=float,
help='Momentum')
parser.add_argument('--weightDecay', default=0, type=float,
help='Weight decay')
parser.add_argument('--crit', default='MSE', type=str,
help='Criterion type')
parser.add_argument('--optMethod', default='rmsprop', type=str,
help='Optimization method: rmsprop | sgd | nag | adadelta')
"----------------------------- Training options -----------------------------"
parser.add_argument('--nEpochs', default=50, type=int,
help='Number of hourglasses to stack')
parser.add_argument('--epoch', default=0, type=int,
help='Current epoch')
parser.add_argument('--trainBatch', default=40, type=int,
help='Train-batch size')
parser.add_argument('--validBatch', default=20, type=int,
help='Valid-batch size')
parser.add_argument('--trainIters', default=0, type=int,
help='Total train iters')
parser.add_argument('--valIters', default=0, type=int,
help='Total valid iters')
parser.add_argument('--init', default=None, type=str,
help='Initialization')
"----------------------------- Data options -----------------------------"
parser.add_argument('--inputResH', default=320, type=int,
help='Input image height')
parser.add_argument('--inputResW', default=256, type=int,
help='Input image width')
parser.add_argument('--outputResH', default=80, type=int,
help='Output heatmap height')
parser.add_argument('--outputResW', default=64, type=int,
help='Output heatmap width')
parser.add_argument('--scale', default=0.25, type=float,
help='Degree of scale augmentation')
parser.add_argument('--rotate', default=30, type=float,
help='Degree of rotation augmentation')
parser.add_argument('--hmGauss', default=1, type=int,
help='Heatmap gaussian size')
"----------------------------- PyraNet options -----------------------------"
parser.add_argument('--baseWidth', default=9, type=int,
help='Heatmap gaussian size')
parser.add_argument('--cardinality', default=5, type=int,
help='Heatmap gaussian size')
parser.add_argument('--nResidual', default=1, type=int,
help='Number of residual modules at each location in the pyranet')
"----------------------------- Distribution options -----------------------------"
parser.add_argument('--dist', dest='dist', type=int, default=1,
help='distributed training or not')
parser.add_argument('--backend', dest='backend', type=str, default='gloo',
help='backend for distributed training')
parser.add_argument('--port', dest='port',
help='port of server')
"----------------------------- Detection options -----------------------------"
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16 res101]',
default='res152')
#parser.add_argument('--indir', dest='inputpath',
# help='image-directory', default="")
parser.add_argument('--list', dest='inputlist',
help='image-list', default="")
parser.add_argument('--mode', dest='mode',
help='detection mode, fast/normal/accurate', default="normal")
parser.add_argument('--outdir', dest='outputpath',
help='output-directory', default="examples/res/")
parser.add_argument('--inp_dim', dest='inp_dim', type=str, default='608',
help='inpdim')
parser.add_argument('--conf', dest='confidence', type=float, default=0.05,
help='bounding box confidence threshold')
parser.add_argument('--nms', dest='nms_thesh', type=float, default=0.6,
help='bounding box nms threshold')
parser.add_argument('--save_img', default=False, action='store_true',
help='save result as image')
parser.add_argument('--vis', default=False, action='store_true',
help='visualize image')
parser.add_argument('--matching', default=False, action='store_true',
help='use best matching')
parser.add_argument('--format', type=str,
help='save in the format of cmu or coco or openpose, option: coco/cmu/open')
parser.add_argument('--detbatch', type=int, default=1,
help='detection batch size')
parser.add_argument('--posebatch', type=int, default=80,
help='pose estimation maximum batch size')
"----------------------------- Video options -----------------------------"
parser.add_argument('--videoA', dest='video',
help='video-name', default="")
parser.add_argument('--webcam', dest='webcam', type=str,
help='webcam number', default='0')
parser.add_argument('--save_video', dest='save_video',
help='whether to save rendered video', default=False, action='store_true')
parser.add_argument('--vis_fast', dest='vis_fast',
help='use fast rendering', action='store_true', default=False)
parser.add_argument('--trained_model',
default='weights/ssd300_mAP_77.43_v2.pth', type=str,
help='Trained state_dict file path to open. If "interrupt", this will open the interrupt file.')
parser.add_argument('--color', type=str,
help='color you want detect')
parser.add_argument('--top_k', default=5, type=int,
help='Further restrict the number of predictions to parse')
parser.add_argument('--cuda', default=True, type=str2bool,
                    help='Use cuda to evaluate model')
parser.add_argument('--fast_nms', default=True, type=str2bool,
help='Whether to use a faster, but not entirely correct version of NMS.')
parser.add_argument('--cross_class_nms', default=False, type=str2bool,
help='Whether compute NMS cross-class or per-class.')
parser.add_argument('--display_masks', default=True, type=str2bool,
help='Whether or not to display masks over bounding boxes')
parser.add_argument('--display_bboxes', default=True, type=str2bool,
help='Whether or not to display bboxes around masks')
parser.add_argument('--display_text', default=True, type=str2bool,
help='Whether or not to display text (class [score])')
parser.add_argument('--display_scores', default=True, type=str2bool,
help='Whether or not to display scores in addition to classes')
parser.add_argument('--display', dest='display', action='store_true',
help='Display qualitative results instead of quantitative ones.')
parser.add_argument('--shuffle', dest='shuffle', action='store_true',
help='Shuffles the images when displaying them. Doesn\'t have much of an effect when display is off though.')
parser.add_argument('--ap_data_file', default='results/ap_data.pkl', type=str,
help='In quantitative mode, the file to save detections before calculating mAP.')
parser.add_argument('--resume', dest='resume', action='store_true',
help='If display not set, this resumes mAP calculations from the ap_data_file.')
parser.add_argument('--max_images', default=-1, type=int,
help='The maximum number of images from the dataset to consider. Use -1 for all.')
parser.add_argument('--output_coco_json', dest='output_coco_json', action='store_true',
help='If display is not set, instead of processing IoU values, this just dumps detections into the coco json file.')
parser.add_argument('--bbox_det_file', default='results/bbox_detections.json', type=str,
help='The output file for coco bbox results if --coco_results is set.')
parser.add_argument('--mask_det_file', default='results/mask_detections.json', type=str,
help='The output file for coco mask results if --coco_results is set.')
parser.add_argument('--config', default=None,
help='The config object to use.')
parser.add_argument('--output_web_json', dest='output_web_json', action='store_true',
help='If display is not set, instead of processing IoU values, this dumps detections for usage with the detections viewer web thingy.')
parser.add_argument('--web_det_path', default='web/dets/', type=str,
help='If output_web_json is set, this is the path to dump detections into.')
parser.add_argument('--no_bar', dest='no_bar', action='store_true',
help='Do not output the status bar. This is useful for when piping to a file.')
parser.add_argument('--display_lincomb', default=False, type=str2bool,
help='If the config uses lincomb masks, output a visualization of how those masks are created.')
parser.add_argument('--benchmark', default=False, dest='benchmark', action='store_true',
help='Equivalent to running display mode but without displaying an image.')
parser.add_argument('--no_sort', default=False, dest='no_sort', action='store_true',
help='Do not sort images by hashed image ID.')
parser.add_argument('--seed', default=None, type=int,
help='The seed to pass into random.seed. Note: this is only really for the shuffle and does not (I think) affect cuda stuff.')
parser.add_argument('--mask_proto_debug', default=False, dest='mask_proto_debug', action='store_true',
help='Outputs stuff for scripts/compute_mask.py.')
parser.add_argument('--no_crop', default=False, dest='crop', action='store_false',
help='Do not crop output masks with the predicted bounding box.')
parser.add_argument('--image', default=None, type=str,
help='A path to an image to use for display.')
parser.add_argument('--images', default=None, type=str,
help='An input folder of images and output folder to save detected images. Should be in the format input->output.')
parser.add_argument('--video', default=None, type=str,
help='A path to a video to evaluate on. Passing in a number will use that index webcam.')
parser.add_argument('--video_multiframe', default=1, type=int,
help='The number of frames to evaluate in parallel to make videos play at higher fps.')
parser.add_argument('--score_threshold', default=0, type=float,
help='Detections with a score under this threshold will not be considered. This currently only works in display mode.')
parser.add_argument('--dataset', default=None, type=str,
help='If specified, override the dataset specified in the config with this one (example: coco2017_dataset).')
parser.add_argument('--detect', default=False, dest='detect', action='store_true',
                    help='Don\'t evaluate the mask branch at all and only do object detection. This only works for --display and --benchmark.')
parser.add_argument('--display_fps', default=False, dest='display_fps', action='store_true',
help='When displaying / saving video, draw the FPS on the frame')
parser.add_argument('--emulate_playback', default=False, dest='emulate_playback', action='store_true',
help='When saving a video, emulate the framerate that you\'d get running in real-time mode.')
parser.set_defaults(no_bar=False, display=False, resume=False, output_coco_json=False, output_web_json=False, shuffle=False,
benchmark=False, no_sort=False, no_hash=False, mask_proto_debug=False, crop=True, detect=False, display_fps=False,
emulate_playback=False)
opt = parser.parse_args()
opt.num_classes = 80
```
|
{
"source": "jeongjy0317/Arcalive-Emoji-Downloader",
"score": 3
}
|
#### File: jeongjy0317/Arcalive-Emoji-Downloader/main.py
```python
import os
import requests
from bs4 import BeautifulSoup
defaultURL = "https://arca.live/"
placeHolder = {
"emoji": "body > div > div.content-wrapper.clearfix > article > div > div.article-wrapper > div.article-body",
"info": "body > div > div.content-wrapper.clearfix > article > div > div.article-wrapper > div.article-head"
}
downloadDirectory = "./downloads"
counter = 0
returnFormat = {
"name": "",
"author": "",
"sold": "",
"submitted": "",
"emojis": []
}
def requestData(startsWith, numericOnly=False):
    data = input(f"\n์
๋ ฅ ์์ : %se/1234\n๋ค์ด๋ก๋๋ฐ์ ์ด๋ชจํฐ์ฝ์ URL์ ์
๋ ฅํด์ฃผ์ธ์ > " % defaultURL)
    if not data.startswith(startsWith):
        print("โ ์๋ชป๋ ์
๋ ฅ์
๋๋ค.")
return False
elif numericOnly and data.isdigit():
print("โ ํ์์ด ์๋ชป๋์์ต๋๋ค.")
return False
else:
return data.replace(startsWith, "")
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print('Error: Creating directory. ' + directory)
print("[์์นด๋ผ์ด๋ธ ์ด๋ชจํฐ์ฝ ๋ค์ด๋ก๋]\n์์ฅํ๊ณ ์ถ์ ์ด๋ชจํฐ์ฝ์ ๋ค์ด๋ฐ์ ์ ์ฅํ์ธ์.\nโ๋ณธ ํ๋ก๊ทธ๋จ์ ์ฌ์ฉ์ผ๋ก ์ธํด ๋ฐ์ํ๋ ๋ชจ๋ ๋ฌธ์ ์ ๋ํด ์ ์์๋ ์๋ฌด๋ฐ ์ฑ
์์ ์ง์ง ์์ต๋๋ค.", end="\n\n")
# Create download folder if not exists
if os.path.isdir(downloadDirectory):
print(f"โ
๋ค์ด๋ก๋๋ฐ์ ๋๋ ํ ๋ฆฌ(%s) ํ์ธ ์๋ฃ" % downloadDirectory)
else:
print(f"โ ๋ค์ด๋ก๋๋ฐ์ ๋๋ ํ ๋ฆฌ(%s)๊ฐ ์กด์ฌํ์ง ์์ ์์ฑํ์์ต๋๋ค." % downloadDirectory)
createFolder(downloadDirectory)
# Get emoji ID
while True:
emojiID = requestData(defaultURL + "e/")
    if isinstance(emojiID, str) and len(emojiID) >= 1:
print(f"โ
๋์ ์ด๋ชจํฐ์ฝ ID: %s" % emojiID)
break
GETdata = requests.get(f"%se/%s" % (defaultURL, emojiID))
if GETdata.status_code == 200:
EmojiShopHTML = GETdata.text
soup = BeautifulSoup(EmojiShopHTML, 'html.parser')
EmojiData = soup.select_one(placeHolder['emoji'])
EmojiInfo = soup.select_one(placeHolder['info'])
else:
print(f"โ HTTP ERROR %s\n์๋ฒ์ ์ฐ๊ฒฐํ ์ ์๊ฑฐ๋ ์ ๊ทผํ ์ ์๋ ์ด๋ชจํฐ์ฝ์
๋๋ค." % GETdata.status_code)
quit()
# Data processing
try:
returnFormat["name"] = EmojiInfo.findAll("div")[1].text.replace("\n", "")
returnFormat["author"] = EmojiInfo.findAll("div")[3].text.replace("\n", "")
returnFormat["sold"] = EmojiInfo.findAll("div")[4].findAll("span")[1].text.replace("\n", "")
returnFormat["submitted"] = EmojiInfo.findAll("div")[4].findAll("span")[2].findAll("span")[2].text.replace("\n", "")
for piece in EmojiData.findAll('img'):
returnFormat["emojis"].append(piece["src"].replace("//", "https://"))
print("\nโ
์ด๋ชจํฐ์ฝ ์ ๋ณด๋ฅผ ์ฑ๊ณต์ ์ผ๋ก ๋ถ๋ฌ์์ต๋๋ค.")
print(f" - ์ด๋ฆ : %s" % returnFormat["name"])
print(f" - ๊ฐฏ์ : %s๊ฐ" % len(returnFormat["emojis"]))
print(f" - ์ ์์ : %s" % returnFormat["author"])
print(f" - ํ๋งค์ : %sํ" % returnFormat["sold"])
print(f" - ๋ฑ๋ก์ผ : %s" % returnFormat["submitted"])
except:
print("โ ๋ด๋ ค๋ฐ์ ์ ์๋ ์ด๋ชจํฐ์ฝ์
๋๋ค.")
quit()
# Download
try:
saveDirectory = f"%s/%s-%s-%s/" % (downloadDirectory, emojiID, returnFormat["name"], returnFormat["author"])
if os.path.isdir(saveDirectory):
print(f"\nโ ์ต์ข
๋ค์ด๋ก๋ ๋๋ ํ ๋ฆฌ๊ฐ ์ด๋ฏธ ์กด์ฌํฉ๋๋ค.\n๋ค์ ๋๋ ํ ๋ฆฌ๋ฅผ ์ญ์ ํ ํ ๋ค์ ์๋ํด์ฃผ์ธ์ : %s" % saveDirectory)
quit()
else:
createFolder(saveDirectory)
print("\nโ
๋ค์ด๋ก๋ ๋ฐ์ ํด๋๋ฅผ ์์ฑํ์์ต๋๋ค.")
for imageURL in returnFormat["emojis"]:
link = imageURL.split("/")
currentFile = {
"name": link[len(link)-1].split(".")[0],
"ext": link[len(link)-1].split(".")[1]
}
print(f" > %d ๊ฐ์ค %d ๋ฒ์งธ ํญ๋ชฉ์ ๋ด๋ ค๋ฐ๊ณ ์์ต๋๋ค." % (len(returnFormat["emojis"]), counter + 1))
fileFromURL = requests.get(imageURL)
openedFile = open(f"%s%d.%s" % (saveDirectory, counter, currentFile["ext"]), "wb")
openedFile.write(fileFromURL.content)
openedFile.close()
counter = counter + 1
print("\nโ
๋ค์ด๋ก๋๊ฐ ์๋ฃ๋์์ต๋๋ค.")
except:
print("\n์ค๋ฅ๊ฐ ๋ฐ์ํ์ต๋๋ค.")
```
|
{
"source": "Jeongkiwon/nomadgram",
"score": 2
}
|
#### File: nomadgram/notifications/views.py
```python
from rest_framework.views import APIView
from rest_framework.response import Response
from . import models, serializers
# Create your views here.
class Notifications(APIView):
def get(self,request, format=None):
user=request.user
notifications=models.Notifications.objects.filter(to=user)
serializer=serializers.NotificationSerializer(notifications, many=True)
return Response(data=serializer.data, status=200)
def create_notification(creator, to, notification_type, image=None, comment=None):
notification=models.Notifications.objects.create(
creator=creator,
to=to,
notification_type=notification_type,
image=image,
comment=comment,
)
    notification.save()
```
|
{
"source": "JeongminB/pp",
"score": 2
}
|
#### File: models/networks/encoder.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.networks.base_network import BaseNetwork
import torch.nn.utils.spectral_norm as spectral_norm
class ImageEncoder(BaseNetwork):
def __init__(self, params):
super().__init__()
self.act = nn.LeakyReLU(0.2)
kw = 3
pw = int(np.ceil((kw-1.)/2))
nef = params.num_encoder_filters
self.layer1 = self.norm_layer(nn.Conv2d(3, nef, kw, 2, pw))
self.layer2 = self.norm_layer(nn.Conv2d(1 * nef, 2 * nef, kw, 2, pw))
self.layer3 = self.norm_layer(nn.Conv2d(2 * nef, 4 * nef, kw, 2, pw))
self.layer4 = self.norm_layer(nn.Conv2d(4 * nef, 8 * nef, kw, 2, pw))
self.layer5 = self.norm_layer(nn.Conv2d(8 * nef, 8 * nef, kw, 2, pw))
        if params.crop_size >= 256:
            self.layer6 = self.norm_layer(nn.Conv2d(8 * nef, 8 * nef, kw, 2, pw))
self.s0 = s0 = 4
self.fc_mu = nn.Linear(nef * 8 * s0 * s0, 256)
self.fc_var = nn.Linear(nef * 8 * s0 * s0, 256)
self.params = params
def norm_layer(self, layer):
if self.params.use_spec_norm_e:
layer = spectral_norm(layer)
num_ch = getattr(layer, 'out_channels')
norm_layer = nn.InstanceNorm2d(num_ch, affine=False)
return nn.Sequential(layer, norm_layer)
def forward(self, style_image):
x = style_image
        if x.size(2) != 256 or x.size(3) != 256:
            # Resize so that the last conv output has shape (N, 8*nef, s0, s0)
            x = F.interpolate(x, size=(256, 256), mode='bilinear')
x = self.act(self.layer1(x))
x = self.act(self.layer2(x))
x = self.act(self.layer3(x))
x = self.act(self.layer4(x))
x = self.act(self.layer5(x))
if self.params.crop_size >= 256:
x = self.act(self.layer6(x))
x = x.view(x.size(0), -1)
mu = self.fc_mu(x)
logvar = self.fc_var(x)
return mu, logvar
```
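Downstream, the `(mu, logvar)` pair returned by `ImageEncoder.forward` is usually turned into a sampled style code with the standard VAE reparameterization trick. The helper below is a minimal sketch of that step and is not part of the file above.

```python
import torch

def reparameterize(mu, logvar):
    # z = mu + sigma * eps with eps ~ N(0, I); sampling stays differentiable w.r.t. mu and logvar.
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std
```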
|
{
"source": "JEONGSEJIN/OneDayOneCoding",
"score": 4
}
|
#### File: 2021.11./16/solution_sjin.py
```python
n = int(input())
def Factorial(n):
if n == 0:
return 1
else:
return Factorial(n - 1) * n
print(Factorial(n))
```
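Because the recursive version hits Python's default recursion limit (around 1000 frames) for large `n`, an iterative form, or the standard library's `math.factorial`, is the usual alternative. A minimal sketch, with `factorial_iterative` being a name introduced here for illustration:

```python
import math

def factorial_iterative(n):
    # Multiply 1 * 2 * ... * n without recursion.
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result

assert factorial_iterative(10) == math.factorial(10) == 3628800
```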
|
{
"source": "Jeongseo21/Algorithm-1",
"score": 3
}
|
#### File: leetcode/easy/953. Verifying an Alien Dictionary.py
```python
class Solution:
def isAlienSorted(self, words: List[str], order: str) -> bool:
        # orderList maps each alien letter to the ordinary letter at the same rank,
        # so the remapped words can be compared with normal lexicographic sorting.
        orderList = ['a'] * 26
        for ind in range(len(order)):
            orderList[ord(order[ind]) - ord('a')] = chr(ind + ord('a'))
        for ind in range(len(words)):
            newWord = ''
            for e in words[ind]:
                newWord += orderList[ord(e) - ord('a')]
            words[ind] = newWord
        return sorted(words) == words
```
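A quick check against the problem's first example, assuming the `Solution` class above is in scope together with `from typing import List`:

```python
# 'h' precedes 'l' in the given alien order, so the list is sorted.
words = ["hello", "leetcode"]
order = "hlabcdefgijkmnopqrstuvwxyz"
print(Solution().isAlienSorted(words, order))  # True
```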
#### File: leetcode/hard/145. Binary Tree Postorder Traversal.py
```python
class Solution:
def postorderTraversal(self, root: TreeNode) -> List[int]:
        # Iterative postorder using a (node, visit_count) stack: a node is emitted
        # only on its second pop, after both of its subtrees have been processed.
        ans = []
        s = [(root, 0)]
        while s:
            node, appearCnt = s.pop()
            if node is None:
                continue
            if appearCnt == 0:
                s.append((node, 1))
                s.append((node.right, 0))
                s.append((node.left, 0))
            else:
                ans.append(node.val)
        return ans
```
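To run the traversal outside LeetCode, a minimal `TreeNode` matching the usual LeetCode definition can be supplied (this also assumes the `Solution` class above is in scope with `from typing import List`). For the tree `1` with right child `2` whose left child is `3`, the expected postorder is `[3, 2, 1]`:

```python
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

root = TreeNode(1, None, TreeNode(2, TreeNode(3), None))
print(Solution().postorderTraversal(root))  # [3, 2, 1]
```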
#### File: leetcode/medium/1286. Iterator for Combination.py
```python
class CombinationIterator:
def __init__(self, characters: str, combinationLength: int):
self.ind = 0
self.ans = []
N = len(characters)
def dfs(index, tempStr):
if len(tempStr) == combinationLength:
self.ans.append(tempStr)
return
if index >= N:
return
dfs(index+1, tempStr + characters[index])
dfs(index+1, tempStr)
dfs(0, "")
def next(self) -> str:
if self.ind >= len(self.ans):
return None
ans = self.ans[self.ind]
        self.ind += 1
return ans
def hasNext(self) -> bool:
return self.ind < len(self.ans)
# Your CombinationIterator object will be instantiated and called as such:
# obj = CombinationIterator(characters, combinationLength)
# param_1 = obj.next()
# param_2 = obj.hasNext()
```
#### File: leetcode/medium/467. Unique Substrings in Wraparound String.py
```python
class Solution:
def findSubstringInWraproundString(self, p: str) -> int:
        # lengths[c] = longest consecutive (wraparound-order) run ending at letter c;
        # each distinct substring ending at c is counted exactly once by that run.
        lengths = [0] * 26
        currentLength = 0
        for i in range(len(p)):
            now = ord(p[i]) - ord('a')
            prev = ord(p[i - 1]) - ord('a')
            if i > 0 and (now - prev) % 26 == 1:
                currentLength += 1
            else:
                currentLength = 1
            lengths[now] = max(lengths[now], currentLength)
        return sum(lengths)
```
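For example, `"cac"` contributes only `"c"` and `"a"`, while `"zab"` contributes `z`, `a`, `b`, `za`, `ab`, and `zab`, all present in the wraparound string:

```python
print(Solution().findSubstringInWraproundString("cac"))  # 2
print(Solution().findSubstringInWraproundString("zab"))  # 6
```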
#### File: leetcode/medium/518. Coin Change 2.py
```python
class Solution:
    def change(self, amount: int, coins: List[int]) -> int:
        # dp[i][j] = number of ways to form amount j using only the first i coins.
        # Rows are built independently (repeating one nested list with * would alias them).
        dp = [[0] * (amount + 1) for _ in range(len(coins) + 1)]
        dp[0][0] = 1
        for coinInd in range(len(coins)):
            coin = coins[coinInd]
            prev = dp[coinInd]
            now = dp[coinInd + 1]
            for ind in range(amount + 1):
                now[ind] = prev[ind]
                if ind >= coin:
                    now[ind] += now[ind - coin]
        return dp[len(coins)][amount]
```
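Checked against the problem's example: with `amount = 5` and `coins = [1, 2, 5]` there are four combinations (5, 2+2+1, 2+1+1+1, 1+1+1+1+1), again assuming the class above is in scope with `from typing import List`:

```python
print(Solution().change(5, [1, 2, 5]))  # 4
```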
|