Dataset schema (column name, type, and value range or number of distinct values):

Column | Type | Range / distinct values
---|---|---
blob_id | string | length 40
directory_id | string | length 40
path | string | length 3–616
content_id | string | length 40
detected_licenses | sequence | 0–112 entries
license_type | string | 2 classes
repo_name | string | length 5–115
snapshot_id | string | length 40
revision_id | string | length 40
branch_name | string | 777 classes
visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38
revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00
committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06
github_id | int64 | 4.92k – 681M (contains nulls ⌀)
star_events_count | int64 | 0 – 209k
fork_events_count | int64 | 0 – 110k
gha_license_id | string | 22 classes
gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (contains nulls ⌀)
gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (contains nulls ⌀)
gha_language | string | 149 classes
src_encoding | string | 26 classes
language | string | 1 class
is_vendor | bool | 2 classes
is_generated | bool | 2 classes
length_bytes | int64 | 3 – 10.2M
extension | string | 188 classes
content | string | length 3 – 10.2M
authors | sequence | 1 entry
author_id | string | length 1–132
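Each record below is rendered as its pipe-delimited metadata fields followed by the raw `content` of the file. As a minimal sketch of how a dataset with this schema could be inspected programmatically (the dataset id is a placeholder assumption, and the `datasets` streaming API is assumed to be available):

```python
# Hypothetical usage sketch; replace "org/dataset-name" with the real dataset id.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)
for record in ds.take(3):
    # Provenance metadata and the raw source text travel together in each record.
    print(record["repo_name"], record["path"], record["license_type"], record["length_bytes"])
    print(record["content"][:200])  # first 200 characters of the file
```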
5152ff9fa4d4291d942ccc6fd3f6511795a4a637 | 1b862f34c125ce200244dd79e4fda4b5b605ce2e | /.history/ML_T2_Validation_20210612130332.py | 169861bd3d148603548c014dfb83caa726baef74 | [] | no_license | edwino26/CoreImages | 26085a49cf1cb79442ae563a88354b2fdceace87 | 6bf6e68cac8ab36c87b1e6ea702bfe6882b0f40e | refs/heads/master | 2023-06-22T12:53:37.344895 | 2021-07-21T04:31:44 | 2021-07-21T04:31:44 | 309,553,247 | 0 | 4 | null | 2021-04-29T23:23:15 | 2020-11-03T02:45:07 | Lasso | UTF-8 | Python | false | false | 9,233 | py | #T2 TEST DATA
# %%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
from scipy import interpolate
from scipy.integrate import simps
from numpy import trapz
# %%
#Load Stack
UVStack = pd.read_excel('./ML_Results/T2_test/ImgStack.xls')
ImgStackk = UVStack.copy().to_numpy()
# %%
def integrate(y_vals, h):
i = 1
total = y_vals[0] + y_vals[-1]
for y in y_vals[1:-1]:
if i % 2 == 0:
total += 2 * y
else:
total += 4 * y
i += 1
return total * (h / 3.0)
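# The function above is the composite Simpson's rule: interior samples alternate weights 4 and 2,
# so it expects a uniform spacing h and an even number of intervals (odd number of samples).
# Quick check with y = x**2 sampled at x = 0, 1, 2, 3, 4 and h = 1:
# integrate([0, 1, 4, 9, 16], 1.0) = (0 + 16 + 4*1 + 2*4 + 4*9) * (1/3) = 64/3 ≈ 21.33,
# which matches the exact integral of x**2 over [0, 4].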
# %% Load and resample "results" (res) file
sub = pd.read_excel('./ML_Results/T2_test/sub.xls')
res = pd.read_excel('./ML_Results/T2_test/Results.xls')
res = res[res.Well == 'T2']
res = res.sort_values(by=['DEPT']) # reassign: sort_values returns a sorted copy
res.drop(['Unnamed: 0', 'Set'], axis=1, inplace=True)
res.reset_index(inplace=True, drop=True)
dep = np.arange(min(res.DEPT), max(res.DEPT),0.5) #res is not at 0.5 thanks to balancing
res_rs = pd.DataFrame(columns=[res.columns])
res_rs.DEPT = dep
for i in range(len(res.columns)):
if i != 8:
f = interpolate.interp1d(res.DEPT, res.iloc[:,i])
res_rs.iloc[:,i] =f(dep)
else:
res_rs.iloc[:,i] = res.Well[0]
#T2_rs.dropna(inplace=True)
res = res_rs.copy()
difference = res.DEPT.diff()
difference.describe()
# %%
TT = pd.read_excel('./ML_Results/Train_Test_Results.xls')
istr = 0
iend = 42344
dplot_o = 3671
dplot_n = 3750
shading = 'bone'
# %% Load Log Calculations
T2_x = pd.read_excel('./Excel_Files/T2.xls',sheet_name='T2_data')
T2_x = T2_x[['DEPTH','GR_EDTC','RHOZ','AT90','NPHI','Vsh','Vclay','grain_density','porosity',
'RW2','Sw_a','Sw_a1','Sw_p','Sw_p1','SwWS','Swsim','Swsim1','PAY_archie',
'PAY_poupon','PAY_waxman','PAY_simandoux']]
# %%
T2_rs = pd.DataFrame(columns=[T2_x.columns])
T2_rs.iloc[:,0] = dep
for i in range(len(T2_x.columns)):
f = interpolate.interp1d(T2_x.DEPTH, T2_x.iloc[:,i])
T2_rs.iloc[:,i] =f(dep)
#T2_rs.dropna(inplace=True)
T2_x = T2_rs.copy()
difference_T2 = T2_x.DEPTH.diff()
difference_T2.describe()
# %%
plt.figure()
plt.subplot2grid((1, 10), (0, 0), colspan=3)
plt.plot(sub['GRAY'], sub['DEPTH'], 'mediumseagreen', linewidth=0.5);
plt.axis([50, 250, dplot_o, dplot_n]);
plt.gca().invert_yaxis();
plt.fill_between(sub['GRAY'], 0, sub['DEPTH'], facecolor='green', alpha=0.5)
plt.xlabel('Gray Scale RGB')
plt.subplot2grid((1, 10), (0, 3), colspan=7)
plt.imshow(ImgStackk[istr:iend,80:120], aspect='auto', origin='upper', extent=[0,1,dplot_n,dplot_o], cmap=shading);
plt.axis([0, 1, dplot_o, dplot_n]);
plt.gca().invert_yaxis()
plt.xlabel('Processed Image')
plt.colorbar()
p_50 = np.percentile(sub['DEPTH'], 50)
plt.yticks([]); plt.xticks([])
plt.subplots_adjust(wspace = 20, left = 0.1, right = 0.9, bottom = 0.1, top = 0.9)
plt.show()
# %%
CORE =pd.read_excel('./CORE/CORE.xlsx',sheet_name='XRD')
mask = CORE.Well.isin(['T2'])
T2_Core = CORE[mask]
prof=T2_Core['Depth']
clays=T2_Core['Clays']
xls1 = pd.read_excel ('./CORE/CORE.xlsx', sheet_name='Saturation')
mask = xls1.Well.isin(['T2'])
T2_sat = xls1[mask]
long=T2_sat ['Depth']
poro=T2_sat ['PHIT']
grain=T2_sat ['RHOG']
sw_core=T2_sat ['Sw']
klinkenberg = T2_sat ['K']
minimo=grain.min()
maximo=grain.max()
c=2.65
d=2.75
norm=(((grain-minimo)*(d-c)/(maximo-minimo))+c)
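# The line above is a min-max rescale: grain densities are mapped linearly onto [c, d] = [2.65, 2.75] g/cc set just above.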
xls2 = pd.read_excel ('./CORE/CORE.xlsx', sheet_name='Gamma')
mask = xls2.Well.isin(['T2'])
T2_GR = xls2[mask]
h=T2_GR['Depth']
cg1=T2_GR['GR_Scaled']
# %%
# ~~~~~~~~~~~~~~~~~~ Plot Results ~~~~~~~~~~~~~~~~~~~~~~
ct = 0
top= dplot_o
bottom= dplot_n
no_plots = 9
ct+=1
plt.figure(figsize=(10,9))
plt.subplot(1,no_plots,ct)
plt.plot (T2_x.GR_EDTC,T2_x.DEPTH,'g', lw=3)
#plt.fill_between(T2_x.GR_EDTC.values.reshape(-1), T2_x.DEPTH.values.reshape(-1), y2=0,color='g', alpha=0.8)
plt.title('$Gamma Ray$',fontsize=8)
plt.axis([40,130,top,bottom])
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
plt.xlabel('Gamma Ray ',fontsize=6)
plt.gca().invert_yaxis()
plt.grid(True)
plt.hlines(y=3665.65, xmin=0, xmax=130)
plt.hlines(y=3889.5, xmin=0, xmax=130)
ct+=1
plt.subplot(1,no_plots,ct)
plt.plot (T2_x.PAY_poupon,T2_x.DEPTH,'r',lw=0.5)
h_P = integrate(T2_x.PAY_poupon.values, 0.5)
plt.title('$PAY Poupon$',fontsize=8)
plt.fill_between(T2_x.PAY_poupon.values.reshape(-1),T2_x.DEPTH.values.reshape(-1), color='r', alpha=0.8)
plt.axis([0.01,0.0101,top,bottom])
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.hlines(y=3665.65, xmin=0, xmax=130)
plt.hlines(y=3889.5, xmin=0, xmax=130)
#Waxman-Smits
ct+=1
plt.subplot(1,no_plots,ct)
plt.plot (T2_x.PAY_waxman,T2_x.DEPTH,'g',lw=0.5)
h_WS = integrate(T2_x.PAY_waxman.values, 0.5)
plt.title('$PAY Waxman$',fontsize=8)
plt.fill_between(T2_x.PAY_waxman.values.reshape(-1),T2_x.DEPTH.values.reshape(-1), color='g', alpha=0.8)
plt.axis([0.01,0.0101,top,bottom])
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.hlines(y=3665.65, xmin=0, xmax=130)
plt.hlines(y=3889.5, xmin=0, xmax=130)
#Simandoux
ct+=1
plt.subplot(1,no_plots,ct)
plt.plot (T2_x.PAY_simandoux,T2_x.DEPTH,'y',lw=0.5)
h_S = integrate(T2_x.PAY_simandoux.values, 0.5)
plt.title('$PAY Simandoux$',fontsize=8)
plt.fill_between(T2_x.PAY_simandoux.values.reshape(-1),T2_x.DEPTH.values.reshape(-1), color='y', alpha=0.8)
plt.axis([0.01,0.0101,top,bottom])
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.hlines(y=3665.65, xmin=0, xmax=130)
plt.hlines(y=3889.5, xmin=0, xmax=130)
ct+=1 #RGB Gray from Image
plt.subplot(1,no_plots,ct)
plt.plot(sub['GRAY'], sub['DEPTH'], 'mediumseagreen', linewidth=0.5);
plt.axis([50, 250, dplot_o, dplot_n]);
plt.xticks(fontsize=8)
#plt.title('$Core Img$',fontsize=8)
plt.gca().invert_yaxis();
plt.gca().yaxis.set_visible(False)
plt.fill_between(sub['GRAY'], 0, sub['DEPTH'], facecolor='green', alpha=0.5)
plt.xlabel('Gray Scale RGB', fontsize=7)
ct+=1 # True UV from Image
plt.subplot(1,no_plots,ct, facecolor='#302f43')
corte= 170
PAY_Gray_scale = res['GRAY'].copy()
PAY_Gray_scale.GRAY[PAY_Gray_scale.GRAY<corte] = 0
PAY_Gray_scale.GRAY[PAY_Gray_scale.GRAY>=corte] = 1
h_TRUE_UV = integrate(PAY_Gray_scale.values, 0.5)
plt.plot (PAY_Gray_scale,res.DEPT,'#7d8d9c',lw=0.5)
plt.title('$OBJETIVO (suavizado-a-2.5ft)$',fontsize=10)
plt.fill_between(PAY_Gray_scale.values.reshape(-1),res.DEPT.values.reshape(-1), color='#7d8d9c', alpha=0.8)
plt.axis([0.01,0.0101,top,bottom])
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
ct+=1
plt.subplot(1,no_plots,ct)
plt.imshow(ImgStackk[istr:iend,80:120], aspect='auto', origin='upper', extent=[0,1,dplot_n,dplot_o], cmap=shading);
plt.axis([0, 1, dplot_o, dplot_n]);
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.xlabel('Stacked UV Photos', fontsize=7)
plt.colorbar()
p_50 = np.percentile(sub['DEPTH'], 50)
plt.yticks([]); plt.xticks([])
ct+=1
plt.subplot(1,no_plots,ct)
plt.plot (res['RandomForest'],res.DEPT,'r',lw=1)
plt.plot (res.GRAY,res.DEPT,'k',lw=0.5)
plt.title('ML: GRIS',fontsize=12)
plt.axis([0,2,top,bottom])
plt.xticks(fontsize=8)
plt.xlabel('RandomForest',fontsize=7)
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.xlim(0, 255)
plt.hlines(y=3665.65, xmin=0, xmax=130)
plt.hlines(y=3889.5, xmin=0, xmax=130)
ct+=1
plt.subplot(1,no_plots,ct, facecolor='#302f43')
PAY_Gray_scale2 = res['RandomForest'].copy().rename(columns={'RandomForest':'GRAY'})
PAY_Gray_scale2.GRAY[PAY_Gray_scale2.GRAY<corte] = 0
PAY_Gray_scale2.GRAY[PAY_Gray_scale2.GRAY>=corte] = 1
h_ML = integrate(PAY_Gray_scale2.values, 0.5)
plt.plot (PAY_Gray_scale2, res.DEPT,'#7d8d9c',lw=0.5)
plt.title('$RESULTADO: TEST Set$',fontsize=8)
plt.fill_between(PAY_Gray_scale2.values.reshape(-1),res.DEPT.values.reshape(-1), color='#7d8d9c', alpha=0.8)
plt.axis([0.01,0.0101,top,bottom])
plt.xticks(fontsize=8)
plt.gca().invert_yaxis()
plt.gca().xaxis.set_visible(False)
plt.gca().yaxis.set_visible(False)
plt.grid(True)
plt.suptitle('Pozo T2: Comparación Final')
plt.show()
# %%
# %%
plt.figure(figsize=(10,9))
plt.subplot(1,1,1)
plt.plot(res.GRAY, res['RandomForest'], 'ko')
plt.plot(res.GRAY, res.GRAY, 'r')
plt.xlim(0, 255)
plt.ylim(0, 255)
plt.xlabel('Valor en Escala de Gris Suavizado a res. de Registros',fontsize=17)
plt.ylabel('Predicción de Escala de Gris usando Random Forest',fontsize=17)
plt.show()
# %% Error Calculation
# T2_x.PAY_poupon,T2_x.DEPTH
# T2_x.PAY_waxman
# T2_x.PAY_simandoux
def integrate(y_vals, h):
i = 1
total = y_vals[0] + y_vals[-1]
for y in y_vals[1:-1]:
if i % 2 == 0:
total += 2 * y
else:
total += 4 * y
i += 1
return total * (h / 3.0)
# %%
pay = pd.DataFrame(index=[0], columns=['Poupon', 'Waxman_Smits', 'Simandoux', 'Machine_L', 'True_UV']) # one placeholder row so the scalar assignments below actually store values
pay.Poupon = h_P
pay.Waxman_Smits = h_WS
pay.Simandoux = h_S
pay.Machine_L = h_ML
pay.True_UV = h_TRUE_UV
pay.head()
#rmse['Poupon'] = mean_squared_error(y_test, y_pred_test, squared=False)
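# A short sketch of the comparison this cell seems to aim at (the metric below is an
# assumption by analogy with the commented-out line above, not part of the original):
# for name, h in [('Poupon', h_P), ('Waxman_Smits', h_WS), ('Simandoux', h_S), ('Machine_L', h_ML)]:
#     print(name, 'absolute pay-thickness difference vs True_UV:', abs(h - h_TRUE_UV))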
# %%
| [ "[email protected]" ] | |
b057388d4b0072d6e373997a4e954586075298a2 | 423244213eca7573f36157d08c94f95ce22968a5 | /forest_puller/viz/inc_aggregate.py | 97bf8368ce4af36fe442436e5b64e598d9a5c17c | [
"MIT"
] | permissive | xapple/forest_puller | f9745f0c2c9aa047715a6cc12f03fa1f84dcdef3 | 804e17947bc98139154c708d958a5dcfebff76a0 | refs/heads/master | 2022-06-03T15:26:06.818739 | 2022-05-19T14:13:40 | 2022-05-19T14:13:40 | 234,941,204 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,671 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair and Paul Rougieux.
JRC Biomass Project.
Unit D1 Bioeconomy.
Typically you can use this class like this:
>>> from forest_puller.viz.inc_aggregate import inc_agg_ipcc
>>> print(inc_agg_ipcc.df)
"""
# Built-in modules #
# Internal modules #
from forest_puller import cache_dir
# First party modules #
from plumbing.graphs import Graph
from plumbing.cache import property_cached
# Third party modules #
import pandas, matplotlib
from matplotlib import pyplot
###############################################################################
class IncAggregate(Graph):
# Basic params #
height = 7
width = 10
y_grid = True
x_label = 'Year'
def add_main_legend(self, axes):
items = self.name_to_color.items()
patches = [matplotlib.patches.Patch(color=v, label=k) for k,v in items]
axes.legend(handles = patches,
borderpad = 1,
prop = {'size': 12},
frameon = True,
shadow = True,
loc = 'center left',
bbox_to_anchor = (1.03, 0.5))
def draw(self, axes):
self.line_plot(axes)
def plot(self, **kw):
# Plot #
fig = pyplot.figure()
axes = fig.add_subplot(111)
# Plot the line #
self.draw(axes)
# Force integer ticks on the x axis (no half years) #
locator = matplotlib.ticker.MaxNLocator(integer=True)
pyplot.gca().xaxis.set_major_locator(locator)
# Leave space for the legend #
fig.subplots_adjust(left=0.1, right=0.75, top=0.95)
# Add legend #
self.add_main_legend(axes)
# Save #
self.save_plot(**kw)
# Return for display in notebooks for instance #
return fig
###############################################################################
class IncAggregateIPCC(IncAggregate):
"""
This graph will show the combined increments (loss, gain, net) of all
countries together into one graph for the IPCC data source.
"""
# Name #
short_name = 'inc_aggregate_ipcc'
# Colors #
name_to_color = {'Net (Gain+Loss)': 'black'}
@property
def y_label(self):
from forest_puller.viz.increments import GainsLossNetGraph
return GainsLossNetGraph.source_to_y_label['ipcc']
@property_cached
def df(self):
# Import #
import forest_puller.ipcc.concat
# Load #
df = forest_puller.ipcc.concat.df.copy()
# Common years #
from forest_puller.ipcc.agg import source
df = df.query("year in @source.common_years")
# Filter #
df = df.query("land_use == 'total_forest'").copy()
# Columns #
cols = ['country', 'year', 'biomass_net_change', 'area']
# Filter columns #
df = df[cols]
# Assert there are no NaNs #
assert not df.isna().any().any()
# Sum the countries and keep the years #
df = df.groupby(['year']).agg({'area': 'sum',
'biomass_net_change': 'sum'})
# Compute per hectare values #
df['net_per_ha'] = df['biomass_net_change'] / df['area']
# Reset index #
df = df.reset_index()
# Return #
return df
def line_plot(self, axes, x='year', y='net_per_ha', **kw):
axes.plot(self.df[x], self.df[y],
marker = ".",
markersize = 10.0,
color = 'black',
**kw)
###############################################################################
class IncAggregateSOEF(IncAggregate):
"""
This graph will show the combined increments (loss, gain, net) of all
countries together into one graph for the SOEF data source.
"""
# Name #
short_name = 'inc_aggregate_soef'
# Mapping of lines to colors #
col_to_color = {'gain_per_ha': 'green',
'loss_per_ha': 'red',
'net_per_ha': 'black'}
name_to_color = {'Gains': 'green',
'Losses': 'red',
'Net (Gain+Loss)': 'black'}
@property
def y_label(self):
from forest_puller.viz.increments import GainsLossNetGraph
return GainsLossNetGraph.source_to_y_label['soef']
@property_cached
def df(self):
# Import #
import forest_puller.soef.concat
# Load #
area = forest_puller.soef.concat.tables['forest_area'].copy()
fell = forest_puller.soef.concat.tables['fellings'].copy()
# Keep only the columns we want #
info_cols = ['gross_increment', 'natural_losses', 'fellings_total']
fell = fell[['country', 'year'] + info_cols]
# Get the area that matches the right category #
area = area.query("category == 'forest_avail_for_supply'")
area = area.drop(columns=['category'])
# Add the area #
df = area.left_join(fell, on=['country', 'year'])
# Drop lines with missing values #
df = df.dropna()
# Pick countries #
codes = ['AT', 'BE', 'HR', 'CY', 'DK', 'FI',
'HU', 'IT', 'NL', 'RO', 'SI']
df = df.query("country in @codes").copy()
# Columns #
cols = ['year', 'area', 'gross_increment',
'natural_losses', 'fellings_total']
# Filter columns #
df = df[cols]
# Aggregate #
df = df.groupby(['year'])
df = df.agg(pandas.DataFrame.sum, skipna=False)
# Compute per hectare values #
df['gain_per_ha'] = df['gross_increment'] / df['area']
df['loss_per_ha'] = (df['natural_losses'] + df['fellings_total']) / df['area']
# By convention, losses should be negative values #
df['loss_per_ha'] = - df['loss_per_ha']
# The net #
df['net_per_ha'] = df['gain_per_ha'] + df['loss_per_ha']
# Reset index #
df = df.reset_index()
# Return #
return df
def draw(self, axes):
self.line_plot(axes, y='gain_per_ha')
self.line_plot(axes, y='loss_per_ha')
self.line_plot(axes, y='net_per_ha')
def line_plot(self, axes, x='year', y=None, **kw):
axes.plot(self.df[x], self.df[y],
marker = ".",
markersize = 10.0,
color = self.col_to_color[y],
**kw)
###############################################################################
class IncAggregateFAOSTAT(IncAggregate):
"""
This graph will show the losses of all
countries together into one graph for the FAOSTAT data source.
"""
# Name #
short_name = 'inc_aggregate_faostat'
# Colors #
name_to_color = {'Losses': 'red'}
@property
def y_label(self):
from forest_puller.viz.increments import GainsLossNetGraph
return GainsLossNetGraph.source_to_y_label['faostat']
@property_cached
def df(self):
# Import #
import forest_puller.faostat.forestry.concat
import forest_puller.faostat.land.concat
# Load #
fell = forest_puller.faostat.forestry.concat.df.copy()
area = forest_puller.faostat.land.concat.df.copy()
# Filter fell #
fell = fell.query("element == 'Production'")
fell = fell.query("unit == 'm3'")
# Group fell #
fell = (fell.groupby(['country', 'year'])
.agg({'value': sum})
.reset_index())
# Filter area #
area = area.query('element == "Area"')
area = area.query('item == "Forest land"')
area = area.query('flag == "A"')
# Keep columns #
area = area[['country', 'year', 'value']]
# Rename columns #
fell = fell.rename(columns = {'value': 'loss'})
area = area.rename(columns = {'value': 'area'})
# Add the area #
df = fell.inner_join(area, on=['country', 'year'])
# Assert there are no NaNs #
assert not df.isna().any().any()
# Sort the result #
df = df.sort_values(['country', 'year'])
# Compute common years #
common_years = df.groupby('country').apply(lambda x: set(x.year))
common_years = set.intersection(*common_years.values)
# Filter by common years #
df = df.query("year in @common_years")
# Columns #
cols = ['year', 'area', 'loss']
# Filter columns #
df = df[cols]
# Aggregate #
df = df.groupby(['year'])
df = df.agg(pandas.DataFrame.sum, skipna=False)
# Compute per hectare values #
df['loss_per_ha'] = df['loss'] / df['area']
# By convention, losses should be negative values #
df['loss_per_ha'] = - df['loss_per_ha']
# Reset index #
df = df.reset_index()
# Return #
return df
def line_plot(self, axes, x='year', y='loss_per_ha', **kw):
# Plot #
axes.plot(self.df[x], self.df[y],
marker = ".",
markersize = 10.0,
color = 'red',
**kw)
###############################################################################
# Create the graphs #
export_dir = cache_dir + 'graphs/eu_tot/'
inc_agg_ipcc = IncAggregateIPCC(base_dir = export_dir)
inc_agg_soef = IncAggregateSOEF(base_dir = export_dir)
inc_agg_faostat = IncAggregateFAOSTAT(base_dir = export_dir)
| [ "[email protected]" ] | |
ae7367178e0e60131384ac607edca90ef9c8223b | 3a01d6f6e9f7db7428ae5dc286d6bc267c4ca13e | /unittests/pytests/meshio/TestDataWriterVTK.py | c334c27a8c2de840f1b352498638889d757e5a00 | [
"MIT"
] | permissive | youngsolar/pylith | 1ee9f03c2b01560706b44b4ccae99c3fb6b9fdf4 | 62c07b91fa7581641c7b2a0f658bde288fa003de | refs/heads/master | 2020-12-26T04:04:21.884785 | 2014-10-06T21:42:42 | 2014-10-06T21:42:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | #!/usr/bin/env python
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2014 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
## @file unittests/pytests/meshio/TestDataWriterVTK.py
## @brief Unit testing of Python DataWriterVTK object.
import unittest
from pylith.meshio.DataWriterVTK import DataWriterVTK
# ----------------------------------------------------------------------
class TestDataWriterVTK(unittest.TestCase):
"""
Unit testing of Python DataWriterVTK object.
"""
def test_constructor(self):
"""
Test constructor.
"""
filter = DataWriterVTK()
filter._configure()
return
def test_initialize(self):
"""
Test initialize.
"""
filter = DataWriterVTK()
filter._configure()
from spatialdata.units.Nondimensional import Nondimensional
normalizer = Nondimensional()
filter.initialize(normalizer)
return
def test_factory(self):
"""
Test factory method.
"""
from pylith.meshio.DataWriterVTK import data_writer
filter = data_writer()
return
# End of file
| [ "[email protected]" ] | |
f66d8b1db0ed02a43f7fd494ec762de4b5aa8153 | 4f4f2b808729b4820a4cf11d0dff23951e2c9b71 | /plugins/m/__init__.py | 035a9c98dd10a81960ac8d14852bcfa8ef3efcd3 | [
"MIT"
] | permissive | MikePopoloski/m.css | b3c32b1c4bf40881462933966b987d4147c63b49 | a93186270f4707e61b77e54361111e126aa54187 | refs/heads/master | 2023-06-29T10:56:29.764245 | 2023-06-11T14:51:55 | 2023-06-11T14:51:55 | 235,468,011 | 0 | 0 | MIT | 2020-01-22T00:23:06 | 2020-01-22T00:23:06 | null | UTF-8 | Python | false | false | 1,313 | py | #
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019, 2020, 2021, 2022
# Vladimír Vondruš <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# This file is here only to make python unittest work, it's not needed
# otherwise
| [ "[email protected]" ] | |
f0055bfba813d68295b97211e807c73f3abaaf55 | 4271919fbb7775e3ac8a092c2387f5cc3a31c783 | /managePage/apps.py | b011de83319ef2de1019d6f365b29897fa2d1ef2 | [] | no_license | redcliver/rodao1 | baede64009ec2048aafa1b408baebfc43fb7b639 | 5d085fd04e126ace4f4f8a4b7260e24d4182e42c | refs/heads/master | 2023-05-04T15:23:07.547013 | 2019-12-11T18:32:57 | 2019-12-11T18:32:57 | 134,269,354 | 0 | 0 | null | 2023-04-21T20:42:11 | 2018-05-21T12:57:35 | CSS | UTF-8 | Python | false | false | 95 | py | from django.apps import AppConfig
class ManagepageConfig(AppConfig):
name = 'managePage'
| [ "[email protected]" ] | |
b360d090334b535299fa0d74c81c35df3d4ae0ed | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2583/60643/284273.py | 485f61abe4ae07954935acfa01760795366b68b6 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,791 | py | #最大公因数
def GCD(a,b):
while b>0:
temp=a%b
a=b
b=temp
return a#note: return a here, not b
#Least common multiple
def MCM(a,b):
return a*b/GCD(a,b)
#Binary search
def binSearch(low,high,n,a,b,c):
if low>=high:
return low
else:
middle=(low+high)>>1#midpoint of [low, high]: shift right by one, i.e. integer division by 2
#The count of distinct ugly numbers up to the current value is: current//a + current//b + current//c, minus the counts for the pairwise least common multiples of a, b and c (to remove double counting), plus the count for the least common multiple of all three (inclusion-exclusion). This gives the number of ugly-number multiples between the low boundary and the current value.
temp=int(middle//a+middle//b+middle//c-middle//MCM(a,b)-middle//MCM(b,c)-middle//MCM(a,c)+middle//MCM(MCM(a,b),c))#temp = that count up to middle
if temp==n:
return middle
elif temp<n:
return binSearch(middle+1,high,n,a,b,c)#middle+1!!
else:
return binSearch(low,middle-1,n,a,b,c)#middle-1!!!
def nthUglyNum(n:int,a:int,b:int,c:int):
low=min(a,b,c)
high=low*n
roughRange=binSearch(low,high,n,a,b,c)
res=roughRange-min(roughRange%a,roughRange%b,roughRange%c)
return res
#For example, if the n-th ugly number is X, then every value in the half-open interval [X, X + min(a,b,c)) contains the same n ugly-number factors,
# and the provisional answer returned by the binary search can land anywhere in that interval. We only need the left end of the interval.
# The fix is simple: if the provisional answer is K (K in [X, X + min(a,b,c))), then K - min(K%a, K%b, K%c) = X.
# In other words, just subtract from the provisional answer the smallest of its remainders modulo a, b and c.
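# Worked check (hypothetical input, not from the original file): n=3, a=2, b=3, c=5.
# Multiples of 2, 3 or 5 in order are 2, 3, 4, 5, 6, ..., so the 3rd is 4.
# binSearch(2, 6, 3, 2, 3, 5) returns 4 (count at 4: 4//2 + 4//3 + 4//5 - 0 - 0 - 0 + 0 = 3),
# and 4 - min(4%2, 4%3, 4%5) = 4 - 0 = 4, as expected.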
if __name__=="__main__":
n=int(input())
a=int(input())
b=int(input())
c=int(input())
ans=nthUglyNum(n,a,b,c)
print(ans) | [ "[email protected]" ] | |
2fb82d7e82ce5617dbb8ec0a3ad74c12882b234f | 37fef592f365194c28579f95abd222cc4e1243ae | /streamlit/venv/venv/lib/python3.7/site-packages/plotly/graph_objs/scattersmith/_line.py | 911809421c69d7bec53edefb3704a0de02d2c159 | [] | no_license | edimaudo/Python-projects | be61e0d3fff63fb7bd00513dbf1401e2c1822cfb | 85d54badf82a0b653587a02e99daf389df62e012 | refs/heads/master | 2023-04-07T03:26:23.259959 | 2023-03-24T12:03:03 | 2023-03-24T12:03:03 | 72,611,253 | 4 | 3 | null | 2022-10-31T18:10:41 | 2016-11-02T06:37:17 | null | UTF-8 | Python | false | false | 9,231 | py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattersmith"
_path_str = "scattersmith.line"
_valid_props = {"color", "dash", "shape", "smoothing", "width"}
# color
# -----
@property
def color(self):
"""
Sets the line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# dash
# ----
@property
def dash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is a string and must be specified as:
- One of the following strings:
['solid', 'dot', 'dash', 'longdash', 'dashdot',
'longdashdot']
- A number that will be converted to a string
Returns
-------
str
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
# shape
# -----
@property
def shape(self):
"""
Determines the line shape. With "spline" the lines are drawn
using spline interpolation. The other available values
correspond to step-wise line shapes.
The 'shape' property is an enumeration that may be specified as:
- One of the following enumeration values:
['linear', 'spline']
Returns
-------
Any
"""
return self["shape"]
@shape.setter
def shape(self, val):
self["shape"] = val
# smoothing
# ---------
@property
def smoothing(self):
"""
Has an effect only if `shape` is set to "spline" Sets the
amount of smoothing. 0 corresponds to no smoothing (equivalent
to a "linear" shape).
The 'smoothing' property is a number and may be specified as:
- An int or float in the interval [0, 1.3]
Returns
-------
int|float
"""
return self["smoothing"]
@smoothing.setter
def smoothing(self, val):
self["smoothing"] = val
# width
# -----
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
shape
Determines the line shape. With "spline" the lines are
drawn using spline interpolation. The other available
values correspond to step-wise line shapes.
smoothing
Has an effect only if `shape` is set to "spline" Sets
the amount of smoothing. 0 corresponds to no smoothing
(equivalent to a "linear" shape).
width
Sets the line width (in px).
"""
def __init__(
self,
arg=None,
color=None,
dash=None,
shape=None,
smoothing=None,
width=None,
**kwargs,
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattersmith.Line`
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
shape
Determines the line shape. With "spline" the lines are
drawn using spline interpolation. The other available
values correspond to step-wise line shapes.
smoothing
Has an effect only if `shape` is set to "spline" Sets
the amount of smoothing. 0 corresponds to no smoothing
(equivalent to a "linear" shape).
width
Sets the line width (in px).
Returns
-------
Line
"""
super(Line, self).__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattersmith.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattersmith.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("dash", None)
_v = dash if dash is not None else _v
if _v is not None:
self["dash"] = _v
_v = arg.pop("shape", None)
_v = shape if shape is not None else _v
if _v is not None:
self["shape"] = _v
_v = arg.pop("smoothing", None)
_v = smoothing if smoothing is not None else _v
if _v is not None:
self["smoothing"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| [ "[email protected]" ] | |
d93b9fba81f15e2d48d1303a7d79ec25511d27a7 | 1e76baee819a897eb45a50d907575723c2329fda | /math/0x00-linear_algebra/10-ill_use_my_scale.py | 43774c9ccc049db8379fb0921fa8d846b6703d57 | [] | no_license | paisap/holbertonschool-machine_learning | ce1942d3c5e8753104a40cfc4b9fc0953628a3dc | bdb84f37f44e52a073887a8fee306092165c3329 | refs/heads/main | 2023-08-31T17:32:46.337737 | 2021-10-04T03:30:53 | 2021-10-04T03:30:53 | 317,323,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | #!/usr/bin/env python3
""" hat calculates the shape of a numpy.ndarray """
def np_shape(matrix):
""" hat calculates the shape of a numpy.ndarray"""
return matrix.shape
| [ "[email protected]" ] | |
3f1ae58368ea03f06b92ab49cd208c15071bf614 | db415f2470905729ff6301ed9dfd4a21b916f616 | /setup.py | 3b91f8ba50005fb166b43c935a1a7ecea9b1c8e0 | [] | no_license | biobakery/anpan-legacy | 9786aae0cf114909882508c21193fe28bd0fbf37 | e9171321bd34dc43820ebce729399098368418f3 | refs/heads/master | 2022-08-11T08:35:29.323448 | 2015-03-03T15:49:26 | 2015-03-03T15:49:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py |
from setuptools import setup, find_packages
setup(
name='anpan',
version='0.0.1',
description='AnADAMA Put on A Network',
packages=find_packages(exclude=['ez_setup', 'tests', 'tests.*']),
zip_safe=False,
install_requires=[
'nose>=1.3.0',
'python-dateutil>=2.2',
'bottle>=0.10',
# doit, six, networkx, etc should come with anadama
'anadama',
'anadama_workflows',
],
dependency_links=[
'git+https://bitbucket.org/biobakery/anadama.git@master#egg=anadama-0.0.1',
'git+https://bitbucket.org/biobakery/anadama_workflows.git@master#egg=anadama_workflows-0.0.1',
],
classifiers=[
"Development Status :: 2 - Pre-Alpha"
],
entry_points= {
'console_scripts': [
'anpan-email-validate = anpan.email.cli:main',
'anpan = anpan.automated.cli:main',
],
}
)
| [
"[email protected]"
] | |
c1f3ffa56f2c34ff83428defc65f75f91513c543 | 999879f8d18e041d7fa313132408b252aded47f8 | /01-codes/scikit-learn-master/sklearn/utils/testing.py | 484a04a9c6c5d8e87cc423935555449c5ee9e90e | [
"MIT",
"BSD-3-Clause"
] | permissive | QPanProjects/Surrogate-Model | ebcaf05728e82dcbcd924c2edca1b490ab085173 | 848c7128201218b0819c9665e2cec72e3b1d29ac | refs/heads/master | 2022-10-11T19:03:55.224257 | 2020-06-09T14:37:35 | 2020-06-09T14:37:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,128 | py | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# Giorgio Patrini
# License: BSD 3 clause
import inspect
import os
import pkgutil
import platform
import re
import struct
import sys
import warnings
from functools import wraps
from operator import itemgetter
import scipy as sp
import scipy.io
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
from numpy.testing import assert_approx_equal
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
from sklearn.cluster import DBSCAN
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal",
"assert_approx_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Calable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
Calable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
Name of the estimator
func : callable
Calable object to raise error
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering).
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV",
"SelectFromModel"]
# some trange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Wether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actuall type of the class.
"""
def is_abstract(c):
if not (hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if (".tests." in modname):
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
# itemgetter is used to ensure the sort does not extend to the 2nd item of
# the tuple
return sorted(set(estimators), key=itemgetter(0))
def set_random_state(estimator, random_state=0):
"""Set random state of an estimator if it has the `random_state` param.
Classes for whom random_state is deprecated are ignored. Currently DBSCAN
is one such class.
"""
if isinstance(estimator, DBSCAN):
return
if "random_state" in estimator.get_params():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def skip_if_32bit(func):
"""Test decorator that skips tests on 32bit platforms."""
@wraps(func)
def run_test(*args, **kwargs):
bits = 8 * struct.calcsize("P")
if bits == 32:
raise SkipTest('Test skipped on 32bit platforms.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
" in 0.19: use the safer and more generic"
" if_safe_multiprocessing_with_blas instead",
DeprecationWarning)
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing
Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction with
some implementation of BLAS (or other libraries that manage an internal
posix thread pool) can cause a crash or a freeze of the Python process.
In practice all known packaged distributions (from Linux distros or
Anaconda) of BLAS under Linux seems to be safe. So we this problem seems to
only impact OSX users.
This wrapper makes it possible to skip tests that can possibly cause
this crash under OS X with.
Under Python 3.4+ it is possible to use the `forkserver` start method
for multiprocessing to avoid this issue. However it can cause pickling
errors on interactively defined functions. It therefore not enabled by
default.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin':
raise SkipTest(
"Possible multi-process bug with some BLAS")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independence)"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| [
"[email protected]"
] | |
56c08c5ab688bc3f57fc936aec023213cd7ce516 | c208954de92470c0144fad2e07a92ed1822edd59 | /GhostFuckerCms/GhostFucker_deob.py | e39a30a81586406992322237a01a57599ed713bb | [
"MIT"
] | permissive | rendy026/reverse-enginnering | 4217f3b723569fb792bac0f22a56a305199db1dc | f04cec0bf518a2617fc4fd7155f755fafc2af799 | refs/heads/master | 2023-01-07T15:49:15.791052 | 2020-10-13T09:22:02 | 2020-10-13T09:22:02 | 303,575,571 | 0 | 0 | MIT | 2020-10-13T09:41:59 | 2020-10-13T03:17:42 | Python | UTF-8 | Python | false | false | 8,268 | py | # Source Generated With Python 2.7
# Decompile At : Mon Apr 6 14:45:52 WIB 2020
# CODED BY : JHON
# CODENAME : E-XPLOIT1337 & MR-X666X
# TEAM WORK: BLACK CODERS ANONYMOUS & SEVEN GHOST TEAM
# TOOLS NM : PRIV8 SUPER FAST CMS DETECTORS
# SPECIAL THANKS TO MY BIG FAMILY :
# MAURITANIA ATTACKER, ANON GHOST TEAM, GHOST SQUAD HACKER
# ./K1TSUN3-6H057, SEA-GHOST, BROSE666, ./K4IZ3N-6H05T
# Z3R0H1D3N, K4TSUY4-GH05T, L4ZYXPL0I7, TAMPANSKY-ID
# HOW TO USE ON TERMUX?
# python2 1337.py
# THREADS POOL CAN BE CHANGED AGAIN
# THREADS POOL IS ON THE LINE 187
# INSTALL MODULE ON TERMUX :
# pip2 install -r requirements.txt
# !/usr/bin/python2.x
# -*- coding: utf-8 -*-
import datetime
import requests, threading
from multiprocessing.dummy import Pool
import os, sys, time
if os.name == "nt":
os.system("cls")
else:
os.system("clear")
def banner_logo():
print ("""\033[1;95m
_____ _ _ ______ _
/ ____| | \033[1;97mBCA \033[1;95m/ \033[1;97m7GT \033[1;95m| | | ____| | |
| | __| |__ ___ ___| |_ | |__ _ _ ___| | _____ _ __
| | |_ | '_ \ / _ \/ __| __| | __| | | |/ __| |/ / _ \ '__|
| |__| | | | | (_) \__ \ |_ | | | |_| | (__| < __/ |
\_____|_| |_|\___/|___/\__| |_| \__,_|\___|_|\_\___|_|
\033[1;97m
GHOST FUCKER VERY FAST CMS DETECTOR CODERS BY JHON
\033[1;95m-- \033[1;97mPRIVATE7 CODE AND PRIVATE7 BOT \033[1;95m--
\033[1;97m """)
banner_logo()
now = datetime.datetime.now()
print(" \033[1;95mSTARTED AT: " + str(now))
def scan(site):
try:
if "http" in site:
url = site
else:
url = "http://" + site
r = requests.get(url,timeout=20)
# 1. CMS WORDPRESS
if "/wp-content/" in r.text or "/wp-login.php" in r.text or "/wp-admin/" in r.text or "/license.txt" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mWORDPRESS \033[1;95m...............\033[1;97m " + url)
with open("cms_result/wordpress.txt","a") as f:
f.write(url + "\n")
# 2. CMS JOOMLA
elif "/Joomla!" in r.text or "/index.php?option=com_" in r.text or "/administrator/index.php" in r.text or "/administrator/" in r.text or "/administrator/manifests/files/joomla.xml" in r.text or "/<version>(.*?)<\/version>" in r.text or "/language/en-GB/en-GB.xml" in r.text or "<version>(.*?)<\/version>" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mJOOMLA \033[1;95m..................\033[1;97m " + url)
with open("cms_result/joomla.txt","a") as f:
f.write(url + "\n")
# 3. CMS OPENCART
elif "/index.php?route=common/home" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mOPENCART \033[1;95m................\033[1;97m " + url)
with open("cms_result/opencart.txt","a") as f:
f.write(url + "\n")
# 4. CMS DRUPAL
elif "/sites/default" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mDRUPAL \033[1;95m..................\033[1;97m " + url)
with open("cms_result/drupal.txt","a") as f:
f.write(url + "\n")
# 5. CMS PRESTASHOP
elif "/prestashop" in r.text or "/PrestaShop" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mPRESTASHOP \033[1;95m..............\033[1;97m " + url)
with open("cms_result/prestashop.txt","a") as f:
f.write(url + "\n")
# 6. CMS OSCOMMERCE
elif "/osCommerce" in r.text or "/admin/login.php" in r.text or "/admin/images/cal_date_over.gif" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mOSCOMMERCE \033[1;95m..............\033[1;97m " + url)
with open("cms_result/oscommerce.txt","a") as f:
f.write(url + "\n")
        # 7. CMS VBULLETIN
        # NOTE: this fingerprint is identical to the osCommerce check above, so this
        # branch is unreachable as written; vBulletin sites fall through to osCommerce.
        elif "/osCommerce" in r.text or "/admin/login.php" in r.text or "/admin/images/cal_date_over.gif" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mVBULLETIN \033[1;95m...............\033[1;97m " + url)
with open("cms_result/vbulletin.txt","a") as f:
f.write(url + "\n")
# 8. CMS MAGENTO
elif "/Mage.Cookies" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mMAGENTO \033[1;95m.................\033[1;97m " + url)
with open("cms_result/magento.txt","a") as f:
f.write(url + "\n")
# 9. CMS ZENCART
elif "/application/configs/application.ini" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mZENCART \033[1;95m.................\033[1;97m " + url)
with open("cms_result/zencart.txt","a") as f:
f.write(url + "\n")
# 10. CMS SHOPIFY
elif "/collections/all/Powered by Shopify/cdn.shopify.com/" in r.text or "/all/" in r.text or "/collections/all" in r.text or "/Powered by Shopify/" in r.text or "/cdn.shopify.com" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mSHOPIFY \033[1;95m.................\033[1;97m " + url)
with open("cms_result/shopify.txt","a") as f:
f.write(url + "\n")
# 11. CMS LARAVEL PHP UNIT
elif "/vendor/phpunit/phpunit/src/Util/PHP/eval-stdin.php" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mLARAVEL PHPUNIT \033[1;95m.........\033[1;97m " + url)
with open("cms_result/laravel_phpunit.txt","a") as f:
f.write(url + "\n")
# 12. CMS SITEFINITY
elif "/Sitefinity" in r.text or "/sitefinity/UserControls/Dialogs/DocumentEditorDialog.aspx" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mSITEFINITY \033[1;95m..............\033[1;97m " + url)
with open("cms_result/sitefinity.txt","a") as f:
f.write(url + "\n")
# 13. CMS MYBB
elif "/jscripts/general.js?ver=" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mMYBB \033[1;95m....................\033[1;97m " + url)
with open("cms_result/mybb.txt","a") as f:
f.write(url + "\n")
# 14. CMS UBERCART
elif "/uc_cart" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mUBERCART \033[1;95m................\033[1;97m " + url)
with open("cms_result/ubercart.txt","a") as f:
f.write(url + "\n")
# 15. CMS PROTOTYPE
elif "/sites/default" in r.text or "/prototype.js" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mPROTOTYPE \033[1;95m...............\033[1;97m " + url)
with open("cms_result/prototype.txt","a") as f:
f.write(url + "\n")
# 16. CMS JQUERY FILE UPLOAD
elif "/assets/global/plugins/jquery-file-upload/server/php/" in r.text or "/jQuery/server/php" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mJQUERY FILE UPLOAD \033[1;95m......\033[1;97m " + url)
with open("cms_result/jquery_file_upload.txt","a") as f:
f.write(url + "\n")
# 17. CMS JALIOS JCMS
elif "/Jalios JCMS/" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mJALIOS JCMS \033[1;95m.............\033[1;97m " + url)
with open("cms_result/jalios_jcms.txt","a") as f:
f.write(url + "\n")
# 18. CMS SHAREPOINT
elif "/SharePoint/" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mSHAREPOINT \033[1;95m..............\033[1;97m " + url)
with open("cms_result/sharepoint.txt","a") as f:
f.write(url + "\n")
# 19. CMS BIGACE
elif "/BIGACE/" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mBIGACE \033[1;95m..................\033[1;97m " + url)
with open("cms_result/bigace.txt","a") as f:
f.write(url + "\n")
# 20. CMS ZENPHOTO
elif "/zp-core/js/" in r.text:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mZENPHOTO \033[1;95m................\033[1;97m " + url)
with open("cms_result/zenphoto.txt","a") as f:
f.write(url + "\n")
# 00. CMS NOT FOUND / NOT WORKING
else:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mNOT FOUND \033[1;95m...............\033[1;97m " + url)
with open("cms_result/othercms.txt","a") as f:
f.write(url + "\n")
except:
print(" \033[1;95m[\033[1;92m+\033[1;95m] \033[1;97mNOT WORKING \033[1;95m.............\033[1;97m " + site)
sitelist = raw_input("\n \033[1;97mSITE LIST SEND TO HELL \033[1;95m> \033[1;97m")
print("")
try:
sites = open(sitelist,"r").read().splitlines()
pp = Pool(100)
pr = pp.map(scan, sites)
except:
print(" \033[1;95mWEBSITE LIST FILE NOT FOUND !!!\033[1;97m")
sys.exit()
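# Editor's note (illustrative, not part of the original script): the file given at
# the prompt above is read with read().splitlines(), i.e. one target per line,
# with or without a scheme. A hypothetical list.txt could look like:
#
#   example.com
#   http://testphp.vulnweb.com
#   https://shop.example.org
#
# Results are appended per-CMS under the cms_result/ directory, so that folder
# must already exist for hits to be written (a missing folder is swallowed by the
# try/except in scan() and reported as NOT WORKING).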
| [
"[email protected]"
] | |
e7a1d1ac8906987075dbea0b976e57dd7b9d6898 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2714/60705/295697.py | 72590979ac543842bbbb9cc5637ef64f4f8e8d60 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | def judge(word1, word2):
if len(word2) != len(word1) + 1:
return False
dic1 = {}
dic2 = {}
for char in word1:
dic1.setdefault(char, 0)
dic1[char] += 1
for char in word2:
dic2.setdefault(char, 0)
dic2[char] += 1
key1 = list(dic1.keys())
key2 = list(dic2.keys())
for k in key1:
if k not in key2:
return False
if dic1[k] > dic2[k]:
return False
return True
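# Editor's worked example (illustrative): judge() answers "can word2 be built by
# inserting exactly one letter into word1?", e.g.
#   judge("ac", "abc")  -> True   (len 3 == 2 + 1, and the 'a'/'c' counts fit inside "abc")
#   judge("ab", "bcc")  -> False  ('a' does not occur in "bcc")
#   judge("ab", "abcd") -> False  (length differs by more than one)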
if __name__ == '__main__':
words = []
while True:
try:
words.append(input())
except EOFError:
break
words.sort(key=lambda k: len(k))
print(words)
ans = []
for i in range(0, len(words)):
temp_ans = [words[i]]
j = i+1
while j < len(words):
if judge(temp_ans[-1], words[j]):
temp_ans.append(words[j])
if len(temp_ans) > len(ans):
                    ans = temp_ans[:]  # copy; the remove() below would otherwise shrink ans as well
temp_ans.remove(words[j])
j += 1
print(len(ans))
for a in ans:
print(a)
| [
"[email protected]"
] | |
f5cc3606a67a533e7c20254c5a9b9fe5cc417556 | 408099135939ccdb7fc52f110792ce651fb6b00a | /test/unit/tools/test_yacc.py | 2f91ecacf66b15203e3b2c0950bf6f4db372f818 | [
"BSD-3-Clause"
] | permissive | thomasrockhu/bfg9000 | 757271db484ddcd06e8b391c3b8818882857f66e | 1cd1226eab9bed2fc2ec6acccf7864fdcf2ed31a | refs/heads/master | 2022-11-29T00:07:15.914649 | 2020-07-24T21:12:38 | 2020-07-24T21:12:38 | 282,257,111 | 0 | 0 | BSD-3-Clause | 2020-07-24T15:37:41 | 2020-07-24T15:37:40 | null | UTF-8 | Python | false | false | 3,616 | py | from .. import *
from bfg9000 import options as opts
from bfg9000.file_types import *
from bfg9000.languages import Languages
from bfg9000.path import Path, Root
from bfg9000.tools.yacc import YaccBuilder
known_langs = Languages()
with known_langs.make('yacc') as x:
x.vars(compiler='YACC', flags='YFLAGS')
class TestYaccBuilder(CrossPlatformTestCase):
def __init__(self, *args, **kwargs):
super().__init__(clear_variables=True, *args, **kwargs)
def setUp(self):
self.yacc = YaccBuilder(self.env, known_langs['yacc'], ['yacc'],
'version')
self.compiler = self.yacc.transpiler
def test_properties(self):
self.assertEqual(self.compiler.num_outputs, 1)
self.assertEqual(self.compiler.deps_flavor, None)
def test_call(self):
self.assertEqual(self.compiler('in', 'out'),
[self.compiler, 'in', '-o', 'out'])
self.assertEqual(self.compiler('in', 'out', ['flags']),
[self.compiler, 'flags', 'in', '-o', 'out'])
def test_default_name(self):
src = SourceFile(Path('file.l', Root.srcdir), 'yacc')
self.assertEqual(self.compiler.default_name(src, None),
['file.tab.c', 'file.tab.h'])
self.assertEqual(self.compiler.default_name(src, AttrDict(
user_options=opts.option_list(opts.lang('c++'))
)), ['file.tab.cpp', 'file.tab.hpp'])
with self.assertRaises(ValueError):
self.compiler.default_name(src, AttrDict(
user_options=opts.option_list(opts.lang('java'))
))
def test_output_file(self):
src = SourceFile(Path('file.tab.c'), 'c')
hdr = HeaderFile(Path('file.tab.h'), 'c')
self.assertEqual(self.compiler.output_file('file.tab.c', None), src)
self.assertEqual(self.compiler.output_file(
['file.tab.c', 'file.tab.h'], None
), [src, hdr])
src = SourceFile(Path('file.tab.cpp'), 'c++')
hdr = HeaderFile(Path('file.tab.hpp'), 'c++')
context = AttrDict(user_options=opts.option_list(opts.lang('c++')))
self.assertEqual(self.compiler.output_file('file.tab.cpp', context),
src)
self.assertEqual(self.compiler.output_file(
['file.tab.cpp', 'file.tab.hpp'], context
), [src, hdr])
with self.assertRaises(ValueError):
self.compiler.output_file(['file.tab.c', 'file.tab.h', 'extra'],
None)
def test_flags_empty(self):
self.assertEqual(self.compiler.flags(opts.option_list()), [])
def test_flags_define(self):
self.assertEqual(self.compiler.flags(opts.option_list(
opts.define('NAME')
)), ['-DNAME'])
self.assertEqual(self.compiler.flags(opts.option_list(
opts.define('NAME', 'value')
)), ['-DNAME=value'])
def test_flags_warning(self):
self.assertEqual(self.compiler.flags(opts.option_list(
opts.warning('disable')
)), ['-w'])
with self.assertRaises(ValueError):
self.compiler.flags(opts.option_list(opts.warning('all')))
def test_flags_lang(self):
self.assertEqual(self.compiler.flags(opts.option_list(
opts.lang('c++')
)), ['--language=c++'])
def test_flags_string(self):
self.assertEqual(self.compiler.flags(opts.option_list('-i')), ['-i'])
def test_flags_invalid(self):
with self.assertRaises(TypeError):
self.compiler.flags(opts.option_list(123))
| [
"[email protected]"
] | |
3ae926ab8843eec0a48e5311cb84ca5ea56307a6 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa2/sample/class_def_methods-90.py | 3dcbb59597615ba1a047cbb30d4e3c1e003f222d | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | class A(object):
x:int = 1
def get_A(self: "A") -> int:
return self.x
class B(A):
def __init__(self: "B"):
pass
class C(B):
z:bool = True
def set_A(self: "C", val: int) -> object:
$Statement
a:A = None
b:B = None
c:C = None
a = A()
b = B()
c = C()
b.x = a.get_A()
a.x = b.get_A()
c.set_A(0)
| [
"[email protected]"
] | |
fbdbec2ed50db76f9752144a6a1158b6cdf6b24d | 9184e230f8b212e8f686a466c84ecc89abe375d1 | /histogrammode/tests/reduction/histCompat/generateTestInstrument.py | 653265a59f8187ce84e4fb2669cbee2008cb6252 | [] | no_license | danse-inelastic/DrChops | 75b793d806e6351dde847f1d92ab6eebb1ef24d2 | 7ba4ce07a5a4645942192b4b81f7afcae505db90 | refs/heads/master | 2022-04-26T17:37:41.666851 | 2015-05-02T23:21:13 | 2015-05-02T23:21:13 | 34,094,584 | 0 | 1 | null | 2020-09-10T01:50:10 | 2015-04-17T03:30:52 | Python | UTF-8 | Python | false | false | 2,597 | py | #!/usr/bin/env python
# Timothy M. Kelley Copyright (c) 2005 All rights reserved
# Jiao Lin Copyright (c) 2007 All rights reserved
def generate():
"""generate an instrument graph appropriate for testing"""
    # import the element submodules under the names actually used below
    from instrument.elements import Instrument, DetectorArray, DetectorPack, \
        LPSD, LPSDPixel, Moderator, Monitor
    from instrument.geometers import ARCSGeometer
test = Instrument.Instrument("Test")
geometer = ARCSGeometer.Geometer()
geometer.register( test, [0,0,0], [0,0,0])
detArrayID = test.getUniqueID()
detArray = DetectorArray.DetectorArray( detArrayID, test.guid())
test.addDetectorArray( detArray)
# make a detector pack
dpackGuid = test.getUniqueID()
dpack = DetectorPack.DetectorPack( dpackGuid, test.guid())
detArray.addElement( dpack)
geometer.register( dpack, [1.,1.,1.], [1.,1.,1.])
dpack.setAttribute('name', 'detPack1')
# put an LPSD in the pack
lpsd1id = test.getUniqueID()
detectorID = detArray.getLongDetectorID()
lpsd1 = LPSD.LPSD( lpsd1id, dpackGuid, detectorID)
dpack.addElement( lpsd1)
geometer.register( lpsd1, [2.,90.0,2.0], [2.,2.,2.])
lpsd1.setAttribute('name', 'LPSD1')
# add some pixels to the lpsd
for i in range(5):
pixid = test.getUniqueID()
pixel = LPSDPixel.Pixel( pixid, detectorID, i, 0.01, 200.0, 12.7)
lpsd1.addElement( pixel)
geometer.register( pixel, [i+3.0,i+3.0,i+3.0], [i+3.0,i+3.0,i+3.0])
pixel.setAttribute( 'name', 'pixel%s' % i)
# add a monitor
monid = test.getUniqueID()
monitor = Monitor.Monitor( monid, test.guid(), 'nifty', 20.0, 100.0, 100.0,
'testMonitor')
geometer.register( monitor, [8.,8.,8.], [8.,8.,8.])
test.addElement( monitor)
# add a moderator
modid = test.getUniqueID()
moderator = Moderator.Moderator( modid, test.guid(), 100.0, 100.0, 100.0,
'testModerator')
# position in spherical coords (x=-14.0, y=0.0, z = 0.0)
modPosition = [14000.0, 90.0, 180.0]
modOrientation = [0.0, 0.0, 0.0]
geometer.register( moderator, modPosition, modOrientation)
test.addModerator( moderator)
return test, geometer
if __name__ == '__main__':
import journal
journal.debug("instrument.elements").activate()
instrument, geometer = generate()
from InstrumentPrinter import Printer
printer = Printer()
printer.render( instrument, geometer)
# version
__id__ = "$Id: generateTestInstrument.py 1431 2007-11-03 20:36:41Z linjiao $"
# End of file
| [
"[email protected]"
] | |
48ff8544d059e0a034a3c07fe27cb062dff8c1a8 | c10ef416832b3e99e58fb93c85f414d94bbdbc2e | /py3canvas/tests/result.py | 3aeaf5a7cd1dfa532943ac1a6d246a344ffa8b73 | [
"MIT"
] | permissive | tylerclair/py3canvas | 83bab26d1624a11acffaeb0392c6a9a38f995f16 | 7485d458606b65200f0ffa5bbe597a9d0bee189f | refs/heads/master | 2021-10-26T03:27:48.418437 | 2021-10-23T15:07:26 | 2021-10-23T15:07:26 | 92,841,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | """Result API Tests for Version 1.0.
This is a testing template for the generated ResultAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.result import ResultAPI
from py3canvas.apis.result import Result
class TestResultAPI(unittest.TestCase):
"""Tests for the ResultAPI."""
def setUp(self):
self.client = ResultAPI(secrets.instance_address, secrets.access_token)
def test_show_collection_of_results(self):
"""Integration test for the ResultAPI.show_collection_of_results method."""
course_id = None # Change me!!
line_item_id = None # Change me!!
r = self.client.show_collection_of_results(course_id, line_item_id)
def test_show_result(self):
"""Integration test for the ResultAPI.show_result method."""
course_id = None # Change me!!
line_item_id = None # Change me!!
id = None # Change me!!
r = self.client.show_result(course_id, id, line_item_id)
| [
"[email protected]"
] | |
8ad089f44de9540d84c38dda8d42ea64d4a44194 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Flask/Book_evaluator/venv/Lib/site-packages/cryptography/hazmat/primitives/keywrap.py | a3220495a10c5372ad9237bae667cf23b76a957c | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:2f23dbacdbaddb11439413b821d3b9344d7857d70b3ee31b511de764f4cdc224
size 5453
| [
"[email protected]"
] | |
ade99175de46f946bb72da1575a7f965f4589768 | c6d0bc814c3c7b621f2a07e293d3c4e36521b1f0 | /validators.py | 262fe543b2902a2506348f07794667f5dc811474 | [] | no_license | captain204/Quiz-app | 9ead59d74df6ad35fa25eb209762f57e3b64e780 | 72136fc27dc18254c5de8149ff631deb6f6eaccc | refs/heads/master | 2020-08-25T04:43:39.415027 | 2019-10-28T01:18:10 | 2019-10-28T01:18:10 | 216,962,636 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,651 | py | from config import *
class Post(Form):
title = StringField(u'Title',validators=[validators.input_required(),
validators.Length(min=10,max=250)])
body = TextAreaField(u'Body',validators=[validators.input_required(),
validators.Length(min=10,max=2500)])
class User(Form):
username = StringField(u'Username',validators=[validators.input_required(),
validators.Length(min=3, max=250)])
email = StringField(u'email',validators=[validators.input_required(),
validators.Length(min=3,max=50)])
password = PasswordField('Password',[validators.DataRequired(),
validators.EqualTo('confirm',message='Passwords do not match')])
confirm = PasswordField('Confirm Password')
stack = SelectField('Select Stack', choices=[('python', 'python'),('php', 'php'),('javascript', 'javascript'),])
class Add(Form):
number = StringField(u'Question Number',validators=[validators.input_required(),
validators.Length(max=250)])
question = TextAreaField(u'Question',validators=[validators.input_required(),
validators.Length(min=10,max=2500)])
option_a = StringField(u'Option A',validators=[validators.input_required(),
validators.Length(max=250)])
option_b = StringField(u'Option B',validators=[validators.input_required(),
validators.Length(max=250)])
option_c = StringField(u'Option C',validators=[validators.input_required(),
validators.Length(max=250)])
option_d = StringField(u'Option D',validators=[validators.input_required(),
validators.Length(max=250)])
correct = StringField(u'Correct Answer',validators=[validators.input_required(),
validators.Length(max=250)]) | [
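# Editor's sketch (illustrative, not part of the original app): these WTForms
# classes are meant to be bound to request data in a Flask view. Assuming the
# `config` module exposes the usual Flask/WTForms names used above, a
# hypothetical signup view could look like:
#
#     @app.route('/signup', methods=['GET', 'POST'])
#     def signup():
#         form = User(request.form)
#         if request.method == 'POST' and form.validate():
#             ...  # create the user from form.username.data / form.password.data
#         return render_template('signup.html', form=form)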
"[email protected]"
] | |
4655d48747e83026599fd27c8933c9c4f593b3b4 | 2a24dba82767419cf7d2269875bf0a297f41580c | /vispy/scene/widgets/widget.py | bfe4e89710bddb6814a7c0ec13ffb1df9c71637c | [
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] | permissive | shjoshi/vispy | 58b300d23486b7478b786977b3548dd7225de847 | 2f3d169aa60c738467e766c59096f51570483d6f | refs/heads/master | 2020-12-25T12:40:36.545768 | 2014-08-06T22:59:35 | 2014-08-06T22:59:35 | 22,704,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,178 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from ..visuals.visual import Visual
from ..visuals.line import Line
from ..transforms import STTransform
from ...util.event import Event
from ...util.geometry import Rect
from ...color import Color
class Widget(Visual):
""" A widget takes up a rectangular space, intended for use in
a 2D pixel coordinate frame.
The widget is positioned using the transform attribute (as any
entity), and its extend (size) is kept as a separate property.
"""
def __init__(self, *args, **kwargs):
self._border = kwargs.pop('border', (0.2, 0.2, 0.2, 0.5))
# for drawing border
self._visual = Line(color=self._border)
# whether this widget should clip its children
self._clip = kwargs.pop('clip', False)
# reserved space inside border
self._padding = kwargs.pop('padding', 0)
# reserved space outside border
self._margin = kwargs.pop('margin', 0)
pos = kwargs.pop('pos', (0, 0))
size = kwargs.pop('size', (10, 10))
Visual.__init__(self, *args, **kwargs)
self.events.add(resize=Event)
self._size = 16, 16
self.transform = STTransform()
# todo: TTransform (translate only for widgets)
self._widgets = []
self.pos = pos
self.size = size
@property
def pos(self):
return tuple(self.transform.translate[:2])
@pos.setter
def pos(self, p):
assert isinstance(p, tuple)
assert len(p) == 2
self.transform.translate = p[0], p[1], 0, 0
self._update_line()
self.events.resize()
@property
def size(self):
# Note that we cannot let the size be reflected in the transform.
# Consider a widget of 40x40 in a pixel grid, a child widget therin
# with size 20x20 would get a scale of 800x800!
return self._size
@size.setter
def size(self, s):
assert isinstance(s, tuple)
assert len(s) == 2
self._size = s
self._update_line()
self.events.resize()
self._update_child_widgets()
@property
def rect(self):
return Rect((0, 0), self.size)
@rect.setter
def rect(self, r):
with self.events.resize.blocker():
self.pos = r.pos
self.size = r.size
self.update()
self.events.resize()
@property
def border(self):
return self._border
@border.setter
def border(self, b):
self._border = b
self._visual.set_data(color=b)
self.update()
@property
def background(self):
""" The background color of the Widget.
"""
return self._background
@background.setter
def background(self, value):
self._background = Color(value)
self.update()
@property
def margin(self):
return self._margin
@margin.setter
def margin(self, m):
self._margin = m
self._update_line()
@property
def padding(self):
return self._padding
@padding.setter
def padding(self, p):
self._padding = p
        self._update_child_widgets()
def _update_line(self):
""" Update border line to match new shape """
m = self.margin
r = self.size[0] - m
t = self.size[1] - m
pos = np.array([
[m, m],
[r, m],
[r, t],
[m, t],
[m, m]]).astype(np.float32)
self._visual.set_data(pos=pos)
def draw(self, event):
self._visual.draw(event)
def on_resize(self, ev):
self._update_child_widgets()
def _update_child_widgets(self):
# Set the position and size of child boxes (only those added
# using add_widget)
for ch in self._widgets:
ch.rect = self.rect.padded(self.padding + self.margin)
def add_widget(self, widget):
"""
Add a Widget as a managed child of this Widget. The child will be
automatically positioned and sized to fill the entire space inside
this Widget (unless _update_child_widgets is redefined).
"""
self._widgets.append(widget)
widget.parent = self
self._update_child_widgets()
return widget
def add_grid(self, *args, **kwds):
"""
Create a new Grid and add it as a child widget.
All arguments are given to add_widget().
"""
from .grid import Grid
grid = Grid()
return self.add_widget(grid, *args, **kwds)
def add_view(self, *args, **kwds):
"""
Create a new ViewBox and add it as a child widget.
All arguments are given to add_widget().
"""
from .viewbox import ViewBox
view = ViewBox()
return self.add_widget(view, *args, **kwds)
def remove_widget(self, widget):
self._widgets.remove(widget)
widget.remove_parent(self)
self._update_child_widgets()
| [
"[email protected]"
] | |
2d8099c9724448fa929eb0d7a1a81d01928c712e | 969f28be98f607767d5564a6bbbc08fdfb778633 | /pypenrose/net_tests.py | cb25db1cb6d83cf7c21b683d3aa35454be138c07 | [] | no_license | meawoppl/penrose-play | 66dde0e2ec1c6997bc06f53852af2c4240dff79c | c44d40893a70176efb1ee29b395db973e27f730f | refs/heads/master | 2020-01-23T21:56:22.765852 | 2017-01-16T06:36:23 | 2017-01-16T06:36:23 | 74,727,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,736 | py | import math
from mock import MagicMock
import nose.tools
from pypenrose.line import Line
import pypenrose.net
import pypenrose.net_testlib
from pypenrose.net_testlib import assert_graph_props
import pypenrose.space
def test_net_graphgen_degenerate():
# No lines to intersect
g = pypenrose.net.gridlines_to_gridgraph([])
assert_graph_props(g, nodes=0, edges=0)
# Too few lines to intersect
g = pypenrose.net.gridlines_to_gridgraph([Line(0, 1, 1)])
assert_graph_props(g, nodes=0, edges=0)
# All parallel lines
g = pypenrose.net.gridlines_to_gridgraph([
Line(0, 1, 1),
Line(0, 1, 2),
Line(0, 1, 3),
])
assert_graph_props(g, nodes=0, edges=0)
def test_net_graphgen_1():
# One intersection, no connected
g = pypenrose.net.gridlines_to_gridgraph([
Line(0, 1, 1),
Line(1, 0, 1),
])
assert_graph_props(g, nodes=1, edges=0)
def test_net_graphgen_2():
# Two intersection, one connection
g = pypenrose.net.gridlines_to_gridgraph([
Line(0, 1, 1),
Line(1, 0, 1),
Line(1, 0, 2),
])
assert_graph_props(g, nodes=2, edges=1)
def test_net_graphgen_3():
# Triangle, 3 intersects, 3 edges
g = pypenrose.net.gridlines_to_gridgraph([
Line(1, 1, 0),
Line(1, 0, 0),
Line(0, 1, 1),
])
assert_graph_props(g, nodes=3, edges=3)
def test_net_graphgen_5d():
for line_count in range(1, 7):
lol_of_lines = pypenrose.space.get_nd_grid_p1(line_count)
all_lines = sum(lol_of_lines, [])
g = pypenrose.net.gridlines_to_gridgraph(all_lines)
expected_nodecount = 10 * line_count**2
assert_graph_props(g, nodes=expected_nodecount)
def test_determine_winding():
net = pypenrose.net_testlib.get_simple_net()
center, edge_node = pypenrose.net_testlib.get_center_edge(net.g)
winding = net.determine_winding(center, edge_node)
nose.tools.assert_equal(len(winding), 4)
nose.tools.assert_equal(
winding[0],
edge_node
)
for node in winding:
nose.tools.assert_in(node, net.g)
def test_compute_angles():
net = pypenrose.net_testlib.get_simple_net()
center, edge_node = pypenrose.net_testlib.get_center_edge(net.g)
# For the square mesh, all angles should be 90
for angle in net.compute_angles(center, edge_node):
nose.tools.assert_equal(angle, math.pi / 2)
def test_get_primary_spoke():
net = pypenrose.net_testlib.get_simple_net()
center, edge_node = pypenrose.net_testlib.get_center_edge(net.g)
# Graph directions should point up in x and y
# Y is CCW from X, so X sorts first
spoke_node = net.get_primary_spoke(center)
nose.tools.assert_equal(
net.g.node[spoke_node]["intersection"],
(1.0, 0.0)
)
def test_get_node_on_line():
# Pull out a node to draw from and the center
net = pypenrose.net_testlib.get_simple_net()
for line in net.lines:
net.get_node_on_line(line)
def test_get_line_root():
# Pull out a node to draw from and the center
net = pypenrose.net_testlib.get_simple_net()
root_nodes = set()
for line in net.lines:
root_node = net.get_line_root(line)
root_nodes.add(root_node)
nose.tools.assert_equal(len(root_nodes), 5)
def _assert_displacement(mock_call, displacement):
x_sum, y_sum = 0, 0
for (dx, dy), _ in mock_call.call_args_list:
x_sum += dx
y_sum += dy
try:
nose.tools.assert_almost_equal(x_sum, displacement[0])
nose.tools.assert_almost_equal(y_sum, displacement[1])
except AssertionError:
print("\n_assert_displacement failure.")
print("Call dump follows:")
for (dx, dy), _ in mock_call.call_args_list:
print("call(", dx, ",", dy, ")")
raise
def test_draw_tile():
# Pull out a node to draw from and the center
net = pypenrose.net_testlib.get_simple_net()
center, edge_node = pypenrose.net_testlib.get_center_edge(net.g)
ctx_mock = MagicMock()
line_to_mock = ctx_mock.rel_line_to
net.draw_tile(ctx_mock, edge_node, center)
# Should make 4 relative line calls
nose.tools.assert_equal(line_to_mock.call_count, 4)
# Line calls should close the graphing loop
_assert_displacement(line_to_mock, (0, 0))
def test_draw_ribbon():
net = pypenrose.net_testlib.get_simple_net(shape=(3, 5))
line = net.lines[1]
ctx_mock = MagicMock()
move_to_mock = ctx_mock.move_to
line_to_mock = ctx_mock.rel_line_to
net.draw_ribbon(ctx_mock, line)
# These should all be closed loops
nose.tools.assert_equal(line_to_mock.call_count, 12)
_assert_displacement(line_to_mock, (0, 0))
| [
"[email protected]"
] | |
395ca378d374ed81267bce67ad399409aefe0bd2 | c1350fbcb269cdab0f36a12a27f694697e08ce7f | /libs/db/db_handler.py | 53add1030fb2b880c28b40c507df2732190f2892 | [] | no_license | deepmicrosystems/object-detection-server | 86dd4178c9dbfe8e1e802062a1a44c4f758ed758 | 97893dfcef89219a11a87bb34a663912b273fce3 | refs/heads/master | 2022-12-18T00:44:58.906967 | 2019-07-29T18:50:25 | 2019-07-29T18:50:25 | 147,842,577 | 0 | 0 | null | 2022-11-22T01:57:57 | 2018-09-07T15:33:49 | Python | UTF-8 | Python | false | false | 2,323 | py | import sqlite3
import time
import datetime
class DataBaseManager:
def __init__(self, db_name = None):
self.conn = None
self.cursor = None
if db_name:
self.open(db_name)
def open(self, db_name):
try:
self.conn = sqlite3.connect(db_name)
self.cursor = self.conn.cursor()
self.create_table()
except Exception as e:
print(f'Cannot connect to db or {e}')
def create_table(self, case):
if (case == "detections"):
self.cursor.execute("CREATE TABLE IF NOT EXISTS \
detections( item_id REAL, w REAL, h REAL, x REAL, y REAL, prob REAL,\
datestamp TEXT, class TEXT, imgPath TEXT)")
elif (case == "plates"):
self.cursor.execute("CREATE TABLE IF NOT EXISTS \
plates( item_id REAL, w REAL, h REAL, x REAL, y REAL, prob REAL,\
datestamp TEXT, imgPath TEXT)")
def close(self):
self.cursor.close()
self.conn.close()
def __enter__(self):
return self
# def __exit__(self,exc_type,exc_value,traceback):
# self.close()
def dynamic_data_entry(self, item_id, image_path, detection, prob, obj_class, date):
x = detection["xmin"]
y = detection["ymin"]
h = detection["xmax"]
w = detection["ymax"]
self.cursor.execute("INSERT INTO detections \
(item_id, w, h, x, y, prob, datestamp, class, imgPath) \
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
(item_id, w, h, x, y, prob, date, obj_class, image_path))
self.conn.commit()
#self.close()
def dynamic_data_entry_plates(self,image_path_crop, detection, plate, prob , date, item_id):
x = detection["xmin"]
y = detection["ymin"]
h = detection["xmax"]
w = detection["ymax"]
self.cursor.execute("INSERT INTO plates \
(item_id, w, h, x, y, prob, datestamp, imgPath) \
VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
(item_id, w, h, x, y, prob, date, image_path_crop))
self.conn.commit()
#self.close()
| [
"[email protected]"
] | |
9e2292ed6aad43b70783af1b63396c473b5483e2 | 2e08552e1eb86ffa4870d9208d83b903f0f847d0 | /paperswithcode/models/method.py | 68db3851e56de877c738be0007eb111575f3a402 | [
"Apache-2.0",
"CC-BY-SA-4.0"
] | permissive | paperswithcode/paperswithcode-client | c1fffc0a17066f0eb7128ea726bf4f0485f47c5d | 70bd7ee5157fcaeff39145d2e03cc9ed5bb7421b | refs/heads/develop | 2022-12-12T21:20:17.493109 | 2021-12-01T17:44:04 | 2021-12-01T17:44:04 | 283,026,422 | 113 | 17 | Apache-2.0 | 2022-12-01T22:24:42 | 2020-07-27T21:58:46 | Python | UTF-8 | Python | false | false | 909 | py | from typing import List, Optional
from tea_client.models import TeaClientModel
from paperswithcode.models.page import Page
class Method(TeaClientModel):
"""Method object.
Attributes:
id (str): Method ID.
name (str): Method short name.
full_name (str): Method full name.
description (str): Method description.
paper (str, optional): ID of the paper that describes the method.
"""
id: str
name: str
full_name: str
description: str
paper: Optional[str]
class Methods(Page):
"""Object representing a paginated page of methods.
Attributes:
count (int): Number of elements matching the query.
next_page (int, optional): Number of the next page.
previous_page (int, optional): Number of the previous page.
results (List[Method]): List of methods on this page.
"""
results: List[Method]
| [
"[email protected]"
] | |
59139ee9c6c59c9e6407b92820cac368678556b0 | 79df1e2fde419883fba5f3a79eddfd6c3b7875cc | /udacity/cs253/Lesson02a_Templates/Page.py | 4ce93600c8582ba5e8b020de17473b34e04ae73b | [] | no_license | jJayyyyyyy/network | 284a2845c4431e802a48ea331135a3e2035663cd | 86794dd9d828fe66b7ada28233fbbd5c66ecc50d | refs/heads/master | 2022-03-01T23:51:10.619772 | 2019-08-11T12:27:35 | 2019-08-11T12:27:35 | 116,240,231 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | from html import escape
from flask import render_template # flask will help us auto escape html
import os
import ROT13, FizzBuzz
def fill_template(name='index', **kw):
filename = '%s.html' % name
return render_template(filename, **kw)
def get_default_signup_args():
return {'username': '',
'username_error': '',
'password_error': '',
'verify_error': '',
'email': '',
'email_error': ''}
def render_fizzbuzz(n):
fizzbuzz = FizzBuzz.get(n)
page = fill_template('fizzbuzz', FizzBuzz=fizzbuzz)
return page
def render_index():
page = fill_template('index')
return page
def render_rot13(text=''):
text = ROT13.encode(text)
args = {'text': text}
return fill_template('rot13', **args)
def render_signup(form={}):
if form:
args = form
else:
args = get_default_signup_args()
print(args)
return fill_template('signup', **args)
def render_welcome(username=''):
if username:
args = {'username': username, 'a': 'a'}
return fill_template('welcome', **args)
else:
return 'Invalid username<br><br><a href="/">Back</a>'
| [
"[email protected]"
] | |
4571754551752803df00023b5b17c08759238d50 | e0980f704a573894350e285f66f4cf390837238e | /.history/streams/blocks_20201022115527.py | 3674f140da693d1c6dea4759795d6a2e1dd4f2fe | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,747 | py | from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
class TitleBlock(blocks.StructBlock):
    text = blocks.CharBlock(
        required = True,
        help_text='Text to display',
    )
    class Meta:
        template = 'streams/title_block.html'
        icon = 'edit'
        label = 'Title'
        help_text = 'Centered text to display on the page.'
class LinkValue(blocks.StructValue):
"""Dodatkowao logika dla lików"""
def url(self):
internal_page = self.get('internal_page')
external_link = self.get('external_link')
if internal_page:
return internal_page.url
elif external_link:
return external_link
class Link(blocks.StructBlock):
link_text = blocks.CharBlock(
max_length=50,
        default='More details'
)
    internal_page = blocks.PageChooserBlock(
required=False
)
external_link = blocks.URLBlock(
required=False
)
class Meta:
value_class = LinkValue
class Card(blocks.StructBlock):
title = blocks.CharBlock(
max_length=100,
        help_text = 'Bold title for this card. Maximum 100 characters.'
)
text = blocks.TextBlock(
max_length=255,
        help_text='Optional text for this card. Maximum 255 characters.'
)
image = ImageChooserBlock(
        help_text = 'The image will be automatically cropped to 570 by 370 pixels'
)
    link = Link(help_text = 'Choose a link')
class CardsBlock(blocks.StructBlock):
cards = blocks.ListBlock(
Card()
)
class Meta:
template = 'streams/card_block.html'
icon = 'image'
        label = 'Standard cards'
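# Editor's sketch (illustrative, not part of the original file): these blocks are
# intended to be wired into a page's StreamField, e.g. in a hypothetical models.py:
#
#     from wagtail.core.models import Page
#     from wagtail.core.fields import StreamField
#     from streams import blocks as custom_blocks
#
#     class HomePage(Page):
#         body = StreamField([
#             ('title', custom_blocks.TitleBlock()),
#             ('cards', custom_blocks.CardsBlock()),
#         ], blank=True)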
| [
"[email protected]"
] | |
26761f1dbd577ed8967580930935c94e28db222a | 9b854d21fea95f24f7bde41e7a172e8a8a6327f9 | /tensorflow/python/ops/nn_grad.py | f0fa6d5401c8e18b5209d9dc988bbefed73de112 | [
"Apache-2.0"
] | permissive | devsangwoo/tensor | 84345bb05d969c732f70a8a64f2d070bf71d1f9b | 066592c9f9cdf4acdd1b9b104766271133e9088e | refs/heads/master | 2022-12-09T00:33:43.272931 | 2015-11-07T00:27:58 | 2020-01-10T07:33:06 | 232,987,148 | 1 | 0 | NOASSERTION | 2022-10-04T23:56:16 | 2020-01-10T07:06:05 | C++ | UTF-8 | Python | false | false | 44,457 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropInputGrad(op, grad):
"""The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter
"""
# We call the gen_nn_ops backprop functions instead of nn_ops backprop
# functions for performance reasons in Eager mode. See _Conv2DGrad.
return [
None,
gen_nn_ops.conv2d_backprop_filter(
grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format").decode()),
gen_nn_ops.conv2d(
grad,
op.inputs[1],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format").decode())
]
@ops.RegisterGradient("Conv2DBackpropFilter")
def _Conv2DBackpropFilterGrad(op, grad):
# We call the gen_nn_ops backprop functions instead of nn_ops backprop
# functions for performance reasons in Eager mode. See _Conv2DGrad.
return [
gen_nn_ops.conv2d_backprop_input(
array_ops.shape(op.inputs[0]),
grad,
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format").decode()), None,
gen_nn_ops.conv2d(
op.inputs[0],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format").decode())
]
@ops.RegisterGradient("DepthwiseConv2dNativeBackpropInput")
def _DepthwiseConv2dNativeBackpropInputGrad(op, grad):
"""The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter
"""
return [
None,
nn_ops.depthwise_conv2d_native_backprop_filter(
grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")),
nn_ops.depthwise_conv2d_native(
grad,
op.inputs[1],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format"))
]
@ops.RegisterGradient("DepthwiseConv2dNativeBackpropFilter")
def _DepthwiseConv2dNativeBackpropFilterGrad(op, grad):
return [
nn_ops.depthwise_conv2d_native_backprop_input(
array_ops.shape(op.inputs[0]),
grad,
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")), None,
nn_ops.depthwise_conv2d_native(
op.inputs[0],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format"))
]
@ops.RegisterGradient("Conv3D")
def _Conv3DGrad(op, grad):
data_format = op.get_attr("data_format").decode()
return [
nn_ops.conv3d_backprop_input_v2(
array_ops.shape(op.inputs[0]),
op.inputs[1],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format),
nn_ops.conv3d_backprop_filter_v2(
op.inputs[0],
array_ops.shape(op.inputs[1]),
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)
]
@ops.RegisterGradient("Conv3DBackpropInputV2")
def _Conv3DBackpropInputGrad(op, grad):
data_format = op.get_attr("data_format").decode()
return [
None,
nn_ops.conv3d_backprop_filter_v2(
grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format),
nn_ops.conv3d(
grad,
op.inputs[1],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)
]
@ops.RegisterGradient("Conv3DBackpropFilterV2")
def _Conv3DBackpropFilterGrad(op, grad):
data_format = op.get_attr("data_format").decode()
return [
nn_ops.conv3d_backprop_input_v2(
array_ops.shape(op.inputs[0]),
grad,
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format), None,
nn_ops.conv3d(
op.inputs[0],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)
]
@ops.RegisterGradient("AvgPool3D")
def _AvgPool3DGrad(op, grad):
return gen_nn_ops.avg_pool3d_grad(
array_ops.shape(op.inputs[0]),
grad,
ksize=op.get_attr("ksize"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format").decode())
@ops.RegisterGradient("AvgPool3DGrad")
def _AvgPool3DGradGrad(op, grad):
return (array_ops.stop_gradient(op.inputs[0]),
gen_nn_ops.avg_pool3d(
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format").decode()))
@ops.RegisterGradient("MaxPool3D")
def _MaxPool3DGrad(op, grad):
return gen_nn_ops.max_pool3d_grad(
op.inputs[0],
op.outputs[0],
grad,
ksize=op.get_attr("ksize"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format").decode())
@ops.RegisterGradient("MaxPool3DGrad")
def _MaxPool3DGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool3d_grad_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format").decode()))
@ops.RegisterGradient("MaxPool3DGradGrad")
def _MaxPool3DGradGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool3d_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format").decode()))
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
"""The derivative of the softmax nonlinearity.
We assume that probs is of shape [batch_size * dim]
The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax').
This matrix is diagonal minus a rank one matrix, so it is easy to implement
as follows:
grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
Args:
op: the Softmax op.
grad_softmax: the tensor representing the gradient w.r.t. the softmax
output.
Returns:
gradient w.r.t the input to the softmax
"""
softmax = op.outputs[0]
sum_channels = math_ops.reduce_sum(grad_softmax * softmax, -1, keepdims=True)
return (grad_softmax - sum_channels) * softmax
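# Editor's note (illustrative, not part of TensorFlow): the docstring formula can
# be sanity-checked with plain NumPy, mirroring (grad_softmax - sum_channels) * softmax:
#
#   import numpy as np
#   x, g = np.array([1., 2., 3.]), np.array([.1, .2, .3])
#   s = np.exp(x) / np.exp(x).sum()
#   grad_x = (g - (g * s).sum()) * s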
@ops.RegisterGradient("LogSoftmax")
def _LogSoftmaxGrad(op, grad):
"""The gradient for log_softmax.
log_softmax = input - log(sum(exp(input))
dlog_softmax/dinput = diag - softmax(input)
Args:
op: The log softmax op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
"""
softmax = math_ops.exp(op.outputs[0])
return grad - math_ops.reduce_sum(grad, -1, keepdims=True) * softmax
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
The first input of unused_bias_op is the tensor t, and its gradient is
just the gradient the unused_bias_op received.
The second input of unused_bias_op is the bias vector which has one fewer
dimension than "received_grad" (the batch dimension.) Its gradient is the
received gradient Summed on the batch dimension, which is the first dimension.
Args:
op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
return (received_grad,
gen_nn_ops.bias_add_grad(
out_backprop=received_grad, data_format=data_format))
@ops.RegisterGradient("BiasAddGrad")
def _BiasAddGradGrad(op, received_grad):
"""Gradient for the BiasAddGrad op.
Args:
op: BiasAddGrad op for which we are calculating gradients.
received_grad: The gradients passed to the BiasAddGrad op.
Returns:
A single gradient Tensor for the input to BiasAddGrad (which
is the gradient of the bias term in BiasAdd)
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
shape = array_ops.shape(op.inputs[0])
bias_shape = array_ops.shape(received_grad)
if data_format == b"NCHW":
expanded_shape = array_ops.concat([
array_ops.ones_like(shape[:1]), bias_shape,
array_ops.ones_like(shape[2:])
], 0)
tile_mults = array_ops.concat([shape[:1], [1], shape[2:]], 0)
else:
expanded_shape = array_ops.concat(
[array_ops.ones_like(shape[:-1]), bias_shape], 0)
tile_mults = array_ops.concat([shape[:-1], [1]], 0)
expanded_grad = array_ops.reshape(received_grad, expanded_shape)
return array_ops.tile(expanded_grad, tile_mults)
@ops.RegisterGradient("BiasAddV1")
def _BiasAddGradV1(unused_bias_op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
The first input of unused_bias_op is the tensor t, and its gradient is
just the gradient the unused_bias_op received.
The second input of unused_bias_op is the bias vector which has one fewer
dimension than "received_grad" (the batch dimension.) Its gradient is the
received gradient Summed on the batch dimension, which is the first dimension.
Args:
unused_bias_op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)
return (received_grad, math_ops.reduce_sum(received_grad,
reduction_dim_tensor))
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
return gen_nn_ops.relu_grad(grad, op.outputs[0])
@ops.RegisterGradient("EluGrad")
def _EluGradGrad(op, grad):
elu_x = op.inputs[1]
return (gen_nn_ops.elu_grad(grad, elu_x),
array_ops.where(
elu_x < 0, grad * op.inputs[0], array_ops.zeros_like(elu_x)))
@ops.RegisterGradient("SeluGrad")
def _SeluGradGrad(op, grad):
selu_x = op.inputs[1]
return (gen_nn_ops.selu_grad(grad, selu_x),
array_ops.where(
selu_x < 0., grad * op.inputs[0], array_ops.zeros_like(selu_x)))
@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
return gen_nn_ops.relu6_grad(grad, op.outputs[0])
@ops.RegisterGradient("Relu6Grad")
def _Relu6GradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops.relu6_grad(grad, x),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
@ops.RegisterGradient("LeakyRelu")
def _LeakyReluGrad(op, grad):
x = op.inputs[0]
alpha = op.get_attr("alpha")
return gen_nn_ops.leaky_relu_grad(grad, x, alpha=alpha)
@ops.RegisterGradient("LeakyReluGrad")
def _LeakyReluGradGrad(op, grad):
x = op.inputs[1]
alpha = op.get_attr("alpha")
return (gen_nn_ops.leaky_relu_grad(grad, x, alpha=alpha),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
return gen_nn_ops.elu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Selu")
def _SeluGrad(op, grad):
return gen_nn_ops.selu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
return grad * math_ops.sigmoid(op.inputs[0])
@ops.RegisterGradient("SoftplusGrad")
def _SoftplusGradGrad(op, grad):
# Let:
# y = tf.nn.softplus(x)
# dx = gen_nn_ops.softplus_grad(dy, x) = dy / (1 + exp(-x))
# This op computes (ddy, d2x) from op.inputs == [dy, x] and grad == ddx.
dy, x = op.inputs
with ops.control_dependencies([grad]):
ddy = gen_nn_ops.softplus_grad(grad, x)
d2x = grad * dy / (math_ops.exp(-x) + 2.0 + math_ops.exp(x))
return (ddy, d2x)
@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
return gen_nn_ops.softsign_grad(grad, op.inputs[0])
@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops.relu_grad(grad, x),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
"""Multiply after broadcasting vec to match dimensions of mat.
Args:
vec: A 1-D tensor of dimension [D0]
mat: A 2-D tensor of dimension [D0, D1]
Returns:
A tensor of dimension [D0, D1], the result of vec * mat
"""
# Reshape vec to [D0, 1]
vec = array_ops.expand_dims(vec, -1)
return vec * mat
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_loss, grad_grad):
"""Gradient function for SoftmaxCrossEntropyWithLogits."""
# grad_loss is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# grad_grad is the backprop for softmax gradient.
#
# Second derivative is just softmax derivative w.r.t. logits.
softmax_grad = op.outputs[1]
grad = _BroadcastMul(grad_loss, softmax_grad)
def IsZero(g):
# Some introspection to check if the gradient is feeding zeros
if context.executing_eagerly():
# TODO(apassos) add an efficient way to detect eager zeros here.
return False
if g.op.type in ("ZerosLike", "Zeros"):
return True
const_fill_value = tensor_util.constant_value(g)
return const_fill_value is not None and (const_fill_value == 0).all()
logits = op.inputs[0]
if grad_grad is not None and not IsZero(grad_grad):
softmax = nn_ops.softmax(logits)
grad += ((grad_grad - array_ops.squeeze(
math_ops.matmul(
array_ops.expand_dims(grad_grad, 1),
array_ops.expand_dims(softmax, 2)),
axis=1)) * softmax)
return grad, _BroadcastMul(grad_loss, -nn_ops.log_softmax(logits))
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
"""Gradient function for SparseSoftmaxCrossEntropyWithLogits."""
# grad_0 is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# There is no gradient for the labels
#
# Currently there is no way to take the second derivative of this op
# due to the fused implementation's interaction with tf.gradients(),
# so we make sure we prevent silently incorrect results by raising
# an error if the second derivative is requested via prevent_gradient.
sparse_softmax_grad_without_gradient = array_ops.prevent_gradient(
op.outputs[1],
message="Currently there is no way to take the second "
"derivative of sparse_softmax_cross_entropy_with_logits due to the fused "
"implementation's interaction with tf.gradients()")
return _BroadcastMul(grad_0, sparse_softmax_grad_without_gradient), None
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
"""Gradient function for Conv2D."""
dilations = op.get_attr("dilations")
strides = op.get_attr("strides")
padding = op.get_attr("padding")
explicit_paddings = op.get_attr("explicit_paddings")
use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu")
data_format = op.get_attr("data_format")
shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])
# We call the gen_nn_ops backprop functions instead of nn_ops backprop
# functions for performance reasons in Eager mode. gen_nn_ops functions take a
# `explicit_paddings` parameter, but nn_ops functions do not. So if were were
# to use the nn_ops functions, we would have to convert `padding` and
# `explicit_paddings` into a single `padding` parameter, increasing overhead
# in Eager mode.
return [
gen_nn_ops.conv2d_backprop_input(
shape_0,
op.inputs[1],
grad,
dilations=dilations,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format),
gen_nn_ops.conv2d_backprop_filter(
op.inputs[0],
shape_1,
grad,
dilations=dilations,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format)
]
@ops.RegisterGradient("DepthwiseConv2dNative")
def _DepthwiseConv2dNativeGrad(op, grad):
return [
nn_ops.depthwise_conv2d_native_backprop_input(
array_ops.shape(op.inputs[0]),
op.inputs[1],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")),
nn_ops.depthwise_conv2d_native_backprop_filter(
op.inputs[0],
array_ops.shape(op.inputs[1]),
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format"))
]
@ops.RegisterGradient("Dilation2D")
def _Dilation2DGrad(op, grad):
return [
nn_ops.dilation2d_backprop_input(op.inputs[0], op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding")),
nn_ops.dilation2d_backprop_filter(op.inputs[0], op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding"))
]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
depth_radius = op.get_attr("depth_radius")
bias = op.get_attr("bias")
alpha = op.get_attr("alpha")
beta = op.get_attr("beta")
return [
gen_nn_ops.lrn_grad(grad, op.inputs[0], op.outputs[0], depth_radius, bias,
alpha, beta)
]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
return gen_nn_ops.avg_pool_grad(
array_ops.shape(op.inputs[0]),
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("AvgPoolGrad")
def _AvgPoolGradGrad(op, grad):
return (array_ops.stop_gradient(op.inputs[0]),
gen_nn_ops.avg_pool(
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
return gen_nn_ops.max_pool_grad(
op.inputs[0],
op.outputs[0],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("MaxPoolV2")
def _MaxPoolGradV2(op, grad):
ksize = op.inputs[1]
strides = op.inputs[2]
return gen_nn_ops.max_pool_grad_v2(
op.inputs[0],
op.outputs[0],
grad,
ksize,
strides,
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")), None, None
@ops.RegisterGradient("MaxPoolWithArgmax")
def _MaxPoolGradWithArgmax(op, grad, unused_argmax_grad):
del unused_argmax_grad
return gen_nn_ops.max_pool_grad_with_argmax(
op.inputs[0],
grad,
op.outputs[1],
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
include_batch_in_index=op.get_attr("include_batch_in_index"))
@ops.RegisterGradient("MaxPoolGrad")
def _MaxPoolGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool_grad_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPoolGradV2")
def _MaxPoolGradGradV2(op, grad):
ksize = op.inputs[3]
strides = op.inputs[4]
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool_grad_grad_v2(
op.inputs[0],
op.inputs[1],
grad,
ksize,
strides,
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")), None, None)
@ops.RegisterGradient("MaxPoolGradGrad")
def _MaxPoolGradGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("FractionalMaxPool")
def _FractionalMaxPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
"""Returns gradient for FractionalMaxPool.
Since FractionalMaxPool has three outputs, there are three gradients passed in
for each of the outputs. Only the first one is useful, the other two gradients
are empty.
Args:
op: The FractionalMaxPoolOp.
grad_0: Gradient with respect to op.outputs[0]
unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.
Returns:
Input backprop for FractionalMaxPool op.
"""
return gen_nn_ops.fractional_max_pool_grad(
op.inputs[0], op.outputs[0], grad_0, op.outputs[1], op.outputs[2],
op.get_attr("overlapping"))
@ops.RegisterGradient("FractionalAvgPool")
def _FractionalAvgPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
"""Returns gradient for FractionalAvgPool.
Since FractionalAvgPool has three outputs, there are three gradients passed in
for each of the outputs. Only the first one is useful, the other two gradients
are empty.
Args:
op: The FractionalAvgPoolOp.
grad_0: Gradient with respect to op.outputs[0]
unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.
Returns:
Input backprop for FractionalAvgPool op.
"""
return gen_nn_ops.fractional_avg_pool_grad(op.inputs[0].get_shape(), grad_0,
op.outputs[1], op.outputs[2],
op.get_attr("overlapping"))
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
"""Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.
We do not backprop anything for the mean and var intentionally as they are
not being trained with backprop in the operation.
Args:
op: The BatchNormOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the BatchNormOp.
Returns:
dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
dm: Backprop for mean, which is
sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
dv: Backprop for variance, which is
sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
db: Backprop for beta, which is grad reduced in all except the
last dimension.
dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
"""
dx, dm, dv, db, dg = gen_nn_ops.batch_norm_with_global_normalization_grad(
op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
return dx, dm, dv, db, dg
def _BaseFusedBatchNormGrad(op, version, *grad):
"""Return the gradients for the 3 inputs of BatchNorm.
Args:
op: The BatchNormOp for which we need to compute gradients.
version: Integer indicating which version to use of the fused batch
norm gradient.
*grad: An argument list for tensors of gradients wrt the outputs
with grad[0] as grad_y.
Returns:
grad_x: gradient for x, which is scale * rsqrt(variance + epsilon) *
[grad_y - mean(grad_y) - (x - mean(x)) *
mean(grad_y * (x - mean(x))) / (variance + epsilon)]
in training mode; grad_y * scale * rsqrt(pop_variance + epsilon)
in freeze mode.
grad_scale: gradient for scale, which is sum(grad_y * (x - mean(x)) *
rsqrt(variance + epsilon)) in training mode;
sum(grad_y * (x - pop_mean) * rsqrt(pop_variance + epsilon))
in freeze mode.
grad_offset: gradient for offset, which is sum(grad_y) in training mode;
sum(grad_y) in freeze mode.
"""
x = op.inputs[0]
grad_y = grad[0]
scale = op.inputs[1]
epsilon = op.get_attr("epsilon")
data_format = op.get_attr("data_format")
is_training = op.get_attr("is_training")
if version == 2:
grad_fun = gen_nn_ops.fused_batch_norm_grad_v3
elif version == 1:
grad_fun = gen_nn_ops.fused_batch_norm_grad_v2
else:
grad_fun = gen_nn_ops.fused_batch_norm_grad
if is_training:
args = {
"y_backprop": grad_y,
"x": x,
"scale": scale,
"reserve_space_1": op.outputs[3],
"reserve_space_2": op.outputs[4],
"epsilon": epsilon,
"data_format": data_format,
"is_training": is_training
}
if version == 2:
args["reserve_space_3"] = op.outputs[5]
return grad_fun(**args)
else:
pop_mean = op.inputs[3]
pop_var = op.inputs[4]
if data_format == b"NCHW":
x = array_ops.transpose(x, [0, 2, 3, 1])
grad_y = array_ops.transpose(grad_y, [0, 2, 3, 1])
args = {
"y_backprop": grad_y,
"x": x,
"scale": scale,
"reserve_space_1": pop_mean,
"reserve_space_2": pop_var,
"epsilon": epsilon,
"data_format": "NHWC",
"is_training": is_training
}
if version == 2:
args["reserve_space_3"] = op.outputs[5]
dx, dscale, doffset, _, _ = grad_fun(**args)
if data_format == b"NCHW":
dx = array_ops.transpose(dx, [0, 3, 1, 2])
return dx, dscale, doffset, None, None
@ops.RegisterGradient("FusedBatchNorm")
def _FusedBatchNormGrad(op, *grad):
return _BaseFusedBatchNormGrad(op, 0, *grad)
@ops.RegisterGradient("FusedBatchNormV2")
def _FusedBatchNormV2Grad(op, *grad):
return _BaseFusedBatchNormGrad(op, 1, *grad)
@ops.RegisterGradient("FusedBatchNormV3")
def _FusedBatchNormV3Grad(op, *grad):
return _BaseFusedBatchNormGrad(op, 2, *grad)
def _BatchNormGrad(grad_y,
x,
scale,
pop_mean,
pop_var,
epsilon,
data_format,
is_training=True):
"""Returns the gradients for the 3 inputs of BatchNorm.
Args:
grad_y: A `Tensor` of 4 dimensions for gradient for y.
x: A `Tensor` of 4 dimensions for x.
scale: A `Tensor` of 1 dimension for scaling.
pop_mean: A `Tensor` of 1 dimension for the population mean. Only used when
is_training=False.
pop_var: A `Tensor` of 1 dimension for the population variance. Only used
when is_training=False.
epsilon: A small float number added to the variance of x.
data_format: The data format for input. Either b"NHWC" or b"NCHW".
is_training: A bool value to indicate the operation is for training
(default) or inference.
Returns:
A tuple (grad_x, grad_scale, grad_offset), where grad_x is the gradient
for x, grad_scale the gradient for scale, and grad_offset the gradient
for offset.
"""
x_dtype = x.dtype.base_dtype
if x_dtype == dtypes.float16:
# float16 math is too imprecise, so we do the batch norm gradient
# computations in float32.
x = math_ops.cast(x, dtypes.float32)
grad_y = math_ops.cast(grad_y, dtypes.float32)
if is_training:
if data_format == b"NHWC":
keepdims = False
reduce_axis = [0, 1, 2]
else:
keepdims = True
reduce_axis = [0, 2, 3]
shape = [1, array_ops.size(scale), 1, 1]
scale = array_ops.reshape(scale, shape)
mean_grad_y = math_ops.reduce_mean(grad_y, reduce_axis, keepdims=keepdims)
mean_x = math_ops.reduce_mean(x, reduce_axis, keepdims=keepdims)
var_x = math_ops.reduce_mean(
math_ops.squared_difference(x, array_ops.stop_gradient(mean_x)),
reduce_axis,
keepdims=keepdims)
grad_y_offset = grad_y - mean_grad_y
x_offset = x - mean_x
mean = math_ops.reduce_mean(
grad_y * x_offset, axis=reduce_axis, keepdims=keepdims)
grad_x = scale * math_ops.rsqrt(var_x + epsilon) * (
grad_y_offset - math_ops.reciprocal(var_x + epsilon) * mean * x_offset)
grad_scale = math_ops.rsqrt(var_x + epsilon) * math_ops.reduce_sum(
grad_y * x_offset, axis=reduce_axis, keepdims=keepdims)
if data_format == b"NCHW":
grad_scale = array_ops.squeeze(grad_scale)
grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
return math_ops.cast(grad_x, x_dtype), grad_scale, grad_offset
else:
if data_format == b"NHWC":
reduce_axis = [0, 1, 2]
else:
reduce_axis = [0, 2, 3]
shape = [1, array_ops.size(pop_mean), 1, 1]
pop_mean = array_ops.reshape(pop_mean, shape)
pop_var = array_ops.reshape(pop_var, shape)
scale = array_ops.reshape(scale, shape)
grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
var_rsqrt = math_ops.rsqrt(pop_var + epsilon)
grad_scale = math_ops.reduce_sum(
grad_y * (x - pop_mean) * var_rsqrt, axis=reduce_axis)
grad_x = grad_y * scale * var_rsqrt
return math_ops.cast(grad_x, x_dtype), grad_scale, grad_offset
@ops.RegisterGradient("FusedBatchNormGrad")
def _FusedBatchNormGradGrad(op, *grad):
"""Returns the gradients for the 3 inputs of FusedBatchNormGrad.
Args:
op: The FusedBatchNormGradOp for which we need to compute gradients.
*grad: An argument list for tensors of gradients wrt the outputs with
grad[0] as grad_grad_x, grad[1] as grad_grad_scale, grad[2] as
grad_grad_offset.
Returns:
A tuple (grad_grad_y, grad_x, grad_scale, None, None), where grad_grad_y
is the gradient for grad_y, grad_x the gradient for x, grad_scale the
gradient for scale.
"""
data_format = op.get_attr("data_format")
epsilon = op.get_attr("epsilon")
is_training = op.get_attr("is_training")
grad_y = op.inputs[0]
x = op.inputs[1]
scale = op.inputs[2]
pop_mean = op.inputs[3]
pop_var = op.inputs[4]
grad_grad_x = grad[0]
grad_grad_scale = grad[1]
grad_grad_offset = grad[2]
with backprop.GradientTape() as tape:
tape.watch(grad_y)
tape.watch(x)
tape.watch(scale)
grad_x, grad_scale, grad_offset = _BatchNormGrad(
grad_y, x, scale, pop_mean, pop_var, epsilon, data_format, is_training)
grad_initial = [grad_grad_x, grad_grad_scale, grad_grad_offset]
grad_grad_y, grad_x, grad_scale = tape.gradient(
[grad_x, grad_scale, grad_offset], [grad_y, x, scale], grad_initial)
return grad_grad_y, grad_x, grad_scale, None, None
@ops.RegisterGradient("FusedBatchNormGradV2")
def _FusedBatchNormGradGradV2(op, *grad):
return _FusedBatchNormGradGrad(op, *grad)
@ops.RegisterGradient("FusedBatchNormGradV3")
def _FusedBatchNormGradGradV3(op, *grad):
grad_grad_y, grad_x, grad_scale, _, _ = _FusedBatchNormGradGrad(op, *grad)
return grad_grad_y, grad_x, grad_scale, None, None, None
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
"""Return the gradients for L2Loss.
Args:
op: The L2LossOp for which we need to generate gradients.
grad: Tensor containing a single number.
Returns:
The gradient, which is (x * grad).
"""
return op.inputs[0] * grad
@ops.RegisterGradient("TopK")
@ops.RegisterGradient("TopKV2")
def _TopKGrad(op, grad, _):
"""Return the gradients for TopK.
Args:
op: The TopKOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the TopKOp.
Returns:
A list of two tensors, the first being the gradient w.r.t to the input and
TopK, and the second being the gradient w.r.t. to the indices (all zero).
"""
in_shape = array_ops.shape(op.inputs[0])
ind_shape = array_ops.shape(op.outputs[1])
# int32 is not supported on GPU hence up-casting
ind_lastdim = array_ops.gather(
math_ops.cast(ind_shape, dtypes.int64),
array_ops.size(ind_shape) - 1)
# Flatten indices to 2D.
ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack([-1, ind_lastdim]))
in_lastdim = array_ops.gather(
math_ops.cast(in_shape, dtypes.int64),
array_ops.size(in_shape) - 1)
outerdim = array_ops.shape(ind_2d)[0]
# Compute linear indices (flattened to 1D).
ind = array_ops.reshape(
ind_2d + math_ops.cast(
array_ops.expand_dims(
math_ops.range(0,
math_ops.cast(outerdim, dtypes.int64) * in_lastdim,
in_lastdim), -1), dtypes.int32), [-1])
# Substitute grad to appropriate locations and fill the rest with zeros,
# finally reshaping it to the original input shape.
return [
array_ops.reshape(
array_ops.scatter_nd(
array_ops.expand_dims(ind, -1), array_ops.reshape(grad, [-1]),
[math_ops.reduce_prod(in_shape)]), in_shape),
array_ops.zeros([], dtype=dtypes.int32)
]
@ops.RegisterGradient("NthElement")
def _NthElementGrad(op, grad):
"""Return the gradients for NthElement.
Args:
op: The NthElementOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the NthElementOp
Returns:
A list of two tensors, the first being the gradient w.r.t. the input,
the second being the gradient w.r.t. the N (None).
"""
input = op.inputs[0] # pylint: disable=redefined-builtin
output = op.outputs[0]
# Compute the number of elements which equal to output in each reduction
# dimension. If there are multiple elements then the gradient will be
# divided between them.
indicators = math_ops.cast(
math_ops.equal(array_ops.expand_dims(output, -1), input), grad.dtype)
grad = array_ops.expand_dims(grad, -1)
num_selected = array_ops.expand_dims(math_ops.reduce_sum(indicators, -1), -1)
return [math_ops.divide(indicators, num_selected) * grad, None]
| [
"[email protected]"
] | |
ca7a71cd0e1dece862f0b5ed7066061699c5661c | c7ec556bfdc2ec5eaf234c679ffb1fbcc58d651a | /cftda/wsgi.py | 061f4071370b887c2c842852ef9259770162346a | [] | no_license | dmitryduev/cftda | ba2a7c4b7295b42c36e3ad4cd89d2b2bbfa9a76c | 4cc1456f2794f52529ba354d94bfff2bb8405434 | refs/heads/master | 2020-03-10T03:40:25.090449 | 2018-04-17T08:36:02 | 2018-04-17T08:36:02 | 129,171,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | """
WSGI config for cftda project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cftda.settings.dev")
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cftda.settings.production")
application = get_wsgi_application()
| [
"[email protected]"
] | |
e9537acd041786dc77ea7aa000b3f43627ecdea4 | d97e90a0ff598249a3b28fe7a4bb8e4c5b39d210 | /tests/test_service_tax_category.py | 2d165f52cfa2f155fabc08e56b9591eeeb7f6b46 | [
"MIT"
] | permissive | jeroenubbink/commercetools-python-sdk | 024ef08d04d8d8e8609bd265d1eaac5a067a47b0 | ee27768d6fdde3e12618059891d1d4f75dd61390 | refs/heads/master | 2022-12-01T11:24:26.953904 | 2020-08-05T09:12:50 | 2020-08-05T15:22:06 | 287,386,564 | 0 | 0 | MIT | 2020-08-13T21:50:57 | 2020-08-13T21:50:57 | null | UTF-8 | Python | false | false | 1,025 | py | from commercetools import types
def test_tax_category_create(client):
tax_category = client.tax_categories.create(types.TaxCategoryDraft(name="Hoog"))
assert tax_category.id
assert tax_category.name == "Hoog"
def test_tax_category_get_by_id(client):
tax_category = client.tax_categories.create(types.TaxCategoryDraft(name="Hoog"))
assert tax_category.id
assert tax_category.name == "Hoog"
tax_category = client.tax_categories.get_by_id(tax_category.id)
assert tax_category.id
assert tax_category.name == "Hoog"
def test_tax_category_update_by_id(client):
tax_category = client.tax_categories.create(types.TaxCategoryDraft(name="Hoog"))
assert tax_category.id
assert tax_category.name == "Hoog"
tax_category = client.tax_categories.update_by_id(
tax_category.id,
version=tax_category.version,
actions=[types.TaxCategorySetDescriptionAction(description="Some text")],
)
assert tax_category.id
assert tax_category.name == "Hoog"
| [
"[email protected]"
] | |
2b6e279d904c9d7ac089522c567cdbe2a1cc707d | 2eb7cac33991ecf95e6ed0af0b7a440c319d0913 | /viz/migrations/0003_auto_20170924_0138.py | fb66dfa6c948e109b23860f4e9adcf7e612b13a1 | [] | no_license | felipinbombin/osirisWebPlatform | f1fb4cda7be2776dc6dfa21029fd1b8d68b48efd | 20005616be153015d71a9853f789db427b9e753b | refs/heads/master | 2021-03-27T13:24:29.141698 | 2018-07-30T04:53:21 | 2018-07-30T04:53:21 | 87,222,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-24 04:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('viz', '0002_auto_20170924_0014'),
]
operations = [
migrations.AlterField(
model_name='modelanswer',
name='direction',
field=models.CharField(max_length=1),
),
]
| [
"[email protected]"
] | |
9b8cfc58c987bce1af4a5e147b49af7f7b096e33 | 2c872fedcdc12c89742d10c2f1c821eed0470726 | /pbase/day04/code/while_qiantao1.py | c12a856d18d1b1539bf2ad72d88c61dbf3017ff7 | [] | no_license | zuigehulu/AID1811 | 581c3c7a37df9fa928bc632e4891fc9bafe69201 | 10cab0869875290646a9e5d815ff159d0116990e | refs/heads/master | 2020-04-19T16:33:04.174841 | 2019-01-30T07:58:24 | 2019-01-30T07:58:24 | 168,307,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | num = int(input("请输入正方形的宽度:"))
i = 1
while i <= num:
j = 1
while j <= num:
print(j,end = " ")
j += 1
else:
print()
i += 1 | [
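# Example (illustrative): entering 3 prints the line "1 2 3" three times.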
"[email protected]"
] | |
e4247d34a3e43a9024946c14f468f1b5215719b4 | 8263e388d00e1beaaac587df6d2e5b20c0ba2981 | /4.py | ae5fc1397d85132af7f4db047819d498c4e41a0e | [] | no_license | shivamdattapurkayastha99/natrural-language-processing | 3064c0a2fb2b830579565f8157d7d3208cf1a884 | b47ab27eb468469cc9e5ec07c6580d2c8a959124 | refs/heads/master | 2023-07-04T12:50:00.282883 | 2021-08-13T18:04:10 | 2021-08-13T18:04:10 | 395,746,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | import nltk
from nltk.corpus import wordnet
# syn=wordnet.synsets('computer')
# print(syn[0].definition())
synonyms=[]
for syn in wordnet.synsets('computer'):
for lemma in syn.lemmas():
synonyms.append(lemma.name())
print(synonyms)
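# Expected behavior (depends on the installed WordNet corpus): every lemma name
# of every synset of 'computer' is collected, so entries such as 'computer' and
# 'computing_machine' appear, possibly with duplicates across synsets.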
| [
"[email protected]"
] | |
2e9e94d73080d69d4190c219e5d655c8823d94d1 | 935b9efca392b124d571319568c08ba45446d2a0 | /lino_book/projects/lydia/tests/dumps/18.8.0/cal_guestrole.py | 1b937b29d05f60388ba0afed921493063edf56be | [
"BSD-2-Clause"
] | permissive | wallento/book | 6efba2baa1e42bb99514a937342000271dfe798b | 8c5a68f30f9ab65479a988608bda66ea6209afd8 | refs/heads/master | 2020-04-06T10:58:01.629671 | 2018-11-07T09:41:54 | 2018-11-07T09:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | # -*- coding: UTF-8 -*-
logger.info("Loading 2 objects to table cal_guestrole...")
# fields: id, ref, name
loader.save(create_cal_guestrole(1,None,['Attendee', 'Teilnehmer', 'Attendee']))
loader.save(create_cal_guestrole(2,None,['Colleague', 'Colleague', 'Colleague']))
loader.flush_deferred_objects()
| [
"[email protected]"
] | |
54f1a14fc79a4ff7d2e664ba0f98eb3bdba4474f | af717d07cb2bb9c6d9fcc7ba4290cf02a0890815 | /homework-05-09/homework_07_import.py | b2d4827ccdbdf606454ddeed0ed0b45b9b0b560f | [] | no_license | liuluyang/homework | 0070d39640de8777f0656f0346adc1a5a6cfa1ab | e65db68e96bbfe1d987b2809321e59e303ab5ee8 | refs/heads/master | 2020-09-25T18:44:45.368491 | 2019-12-05T09:38:17 | 2019-12-05T09:38:17 | 226,066,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py |
from homework_07 import *
def func_08():
"""
    Put the functions above into a module, then write a separate source file and call the module's functions from it
:return:
"""
print(func_01())
print(func_02(100, 80))
print(func_03())
print(func_04('hello', 'e'))
print(func_05(100))
print(func_06(5))
print(func_07([1, 2, 3, 4, 5], 2))
if __name__ == '__main__':
# func_08()
pass
| [
"[email protected]"
] | |
f660b301415bf623ca90a9372bf5662b4783352b | 8a497f9e412eb74d9eca41488fae3f1947bdc87c | /{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/middleware.py | 56cdd1fe1420c4de35e14e90fe745cd8bb7f9138 | [] | no_license | bwarren2/cookiecutter-simple-django | 012a788512d969f1a5f34cc0e51d26d65ad9ff18 | 08ca73e55e133ad097e3551b0af258b29cbb3ab9 | refs/heads/master | 2020-05-29T12:19:34.691916 | 2015-07-25T17:32:59 | 2015-07-25T17:32:59 | 39,597,383 | 0 | 0 | null | 2015-07-23T22:39:45 | 2015-07-23T22:39:45 | Python | UTF-8 | Python | false | false | 510 | py | from django.http import HttpResponseRedirect
class ForceHttps(object):
def process_request(self, request):
secure_request = (
# settings.DEBUG,
request.is_secure(),
request.META.get("HTTP_X_FORWARDED_PROTO", "").lower() == "https",
)
if not any(secure_request):
url = request.build_absolute_uri(request.get_full_path())
secure_url = url.replace("http://", "https://")
return HttpResponseRedirect(secure_url)
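# Added note: process_request redirects any request that is neither already
# secure nor flagged as https via the X-Forwarded-Proto header to the same URL
# over https. Activating it (an assumption, not shown in this file) means
# listing this class's dotted path in the project's Django middleware setting.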
| [
"[email protected]"
] | |
b72b5b0739c393c93728b3588b7836d89dfab62a | bc6f0f731f7ad72fd11c6a332f47b972d1d14cb3 | /codewars-challenges/6 kyu/does-points-form-square.py | c2b92005bc0eb951e130b19fa6bdcd1d7d5d29a0 | [] | no_license | greatertomi/problem-solving | ee8d35f5f8bf76d9942adec79479c36585f7b90b | a2507fa8fa649ba70f8994d8bc36b07d28512861 | refs/heads/master | 2023-09-03T20:49:22.759696 | 2021-11-11T22:41:39 | 2021-11-11T22:41:39 | 283,445,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | # Problem Link: https://www.codewars.com/kata/618688793385370019f494ae
import math
def distanceBetweenTwoPoints(pointA, pointB):
valueA = (pointB[1] - pointA[1]) ** 2
valueB = (pointB[0] - pointA[0]) ** 2
return math.sqrt(valueA + valueB)
def isSquare(points):
if len(points) < 4:
return False
result1 = distanceBetweenTwoPoints(points[0], points[1])
result2 = distanceBetweenTwoPoints(points[1], points[2])
result3 = distanceBetweenTwoPoints(points[2], points[3])
result4 = distanceBetweenTwoPoints(points[3], points[0])
print(result1, result2, result3, result4)
if result1 == result2 == result3 == result4 == 0:
return False
return result1 == result2 == result3 == result4
value1 = ((1, 1), (3, 3), (1, 3), (3, 1))
value2 = ((0, 0), (0, 2), (2, 0), (2, 1))
value3 = ((0, 2), (0, -2), (1, 0), (-1, 0))
value4 = ((2, 6), (5, 1), (0, -2), (-3, 3))
value5 = ((0, 0), (0, 0), (0, 0), (0, 0))
value6 = ((1, 1), (3, 3), (1, 3), (3, 1))
value7 = [(0, 0), (0, 0), (2, 0), (2, 0)]
# print(isSquare(value1))
# print(isSquare(value2))
# print(isSquare(value3))
print(isSquare(value7))
| [
"[email protected]"
] | |
95f8d3d0b927460d31612a2870b9c41833aee495 | dd3b8bd6c9f6f1d9f207678b101eff93b032b0f0 | /basis/AbletonLive10.1_MIDIRemoteScripts/AxiomPro/TransportViewModeSelector.py | cac299bfde53b7b762969e7cce8ddb19dea81a2c | [] | no_license | jhlax/les | 62955f57c33299ebfc4fca8d0482b30ee97adfe7 | d865478bf02778e509e61370174a450104d20a28 | refs/heads/master | 2023-08-17T17:24:44.297302 | 2019-12-15T08:13:29 | 2019-12-15T08:13:29 | 228,120,861 | 3 | 0 | null | 2023-08-03T16:40:44 | 2019-12-15T03:02:27 | Python | UTF-8 | Python | false | false | 2,827 | py | # uncompyle6 version 3.4.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.16 (v2.7.16:413a49145e, Mar 2 2019, 14:32:10)
# [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)]
# Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/AxiomPro/TransportViewModeSelector.py
# Compiled at: 2019-04-09 19:23:44
from __future__ import absolute_import, print_function, unicode_literals
from _Framework.ModeSelectorComponent import ModeSelectorComponent
from _Framework.ButtonElement import ButtonElement
from _Framework.TransportComponent import TransportComponent
from _Framework.SessionComponent import SessionComponent
class TransportViewModeSelector(ModeSelectorComponent):
u""" Class that reassigns specific buttons based on the views visible in Live """
def __init__(self, transport, session, ffwd_button, rwd_button, loop_button):
assert isinstance(transport, TransportComponent)
assert isinstance(session, SessionComponent)
assert isinstance(ffwd_button, ButtonElement)
assert isinstance(rwd_button, ButtonElement)
assert isinstance(loop_button, ButtonElement)
ModeSelectorComponent.__init__(self)
self._transport = transport
self._session = session
self._ffwd_button = ffwd_button
self._rwd_button = rwd_button
self._loop_button = loop_button
self.application().view.add_is_view_visible_listener('Session', self._on_view_changed)
self.update()
def disconnect(self):
ModeSelectorComponent.disconnect(self)
self._transport = None
self._session = None
self._ffwd_button = None
self._rwd_button = None
self._loop_button = None
self.application().view.remove_is_view_visible_listener('Session', self._on_view_changed)
return
def update(self):
super(TransportViewModeSelector, self).update()
if self.is_enabled():
if self._mode_index == 0:
self._transport.set_seek_buttons(self._ffwd_button, self._rwd_button)
self._transport.set_loop_button(self._loop_button)
self._session.set_select_buttons(None, None)
self._session.selected_scene().set_launch_button(None)
else:
self._transport.set_seek_buttons(None, None)
self._transport.set_loop_button(None)
self._session.set_select_buttons(self._ffwd_button, self._rwd_button)
self._session.selected_scene().set_launch_button(self._loop_button)
return
def _on_view_changed(self):
if self.application().view.is_view_visible('Session'):
self._mode_index = 1
else:
self._mode_index = 0
self.update() | [
"[email protected]"
] | |
2132f8724d3ff2cfd875cfaad4696b09f7eea6dd | a04c9e34c8abb6eb5857cb6e35fbbed0743ea8d4 | /Week3/BackspaceStringCompare.py | edeac380910ae744782c39a51d5bbe99f7102a5a | [] | no_license | SrikanthAmudala/PythonWorkShopConcordia | a2fd0a3103524733913c00767907bafecd1c6ad6 | d2e383a89bc995d96313fd0723c064a0a45db6f9 | refs/heads/master | 2021-05-19T13:02:42.173832 | 2020-05-27T21:48:34 | 2020-05-27T21:48:34 | 251,713,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | S = "###c#ab###fs#j"
T = "ad#c"
def stringComp(S):
temp = []
for i in S:
if i=="#" and len(temp)!=0:
temp.pop(-1)
elif i!="#":
temp.append(i)
return "".join(temp)
print(stringComp(S)) | [
"[email protected]"
] | |
ab98f101089e6c3bcd638d092b91b731d7669ba7 | 2da6b95fe4237cc00014f80c45d268ab62fc90cd | /backbones/cifar/lenet2.py | bafe3e227b2332d56de8d4a8c6585b7880269478 | [] | no_license | lvzongyao/Open-Set-Recognition-1 | 7e26cd1d97f67b6c075f4e64296ce7a82d479168 | 26a8a1cca199f4e23df98abca6893e3eef3307da | refs/heads/master | 2023-08-19T09:15:16.119377 | 2021-09-13T04:21:18 | 2021-09-13T04:21:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,673 | py | '''LeNetPlus in PyTorch.
Specifically, designed for MNIST dataset.
Reference:
[1] Wen, Yandong, et al. "A discriminative feature learning approach for deep face recognition."
European conference on computer vision. Springer, Cham, 2016.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['LeNetHiera']
class LeNetHiera(nn.Module):
def __init__(self, num_classes=10, backbone_fc=True):
super(LeNetHiera, self).__init__()
self.conv1_1 = nn.Conv2d(1, 32, 5, stride=1, padding=2)
self.prelu1_1 = nn.PReLU()
self.conv1_2 = nn.Conv2d(32, 32, 5, stride=1, padding=2)
self.prelu1_2 = nn.PReLU()
self.conv2_1 = nn.Conv2d(32, 64, 5, stride=1, padding=2)
self.prelu2_1 = nn.PReLU()
self.conv2_2 = nn.Conv2d(64, 64, 5, stride=1, padding=2)
self.prelu2_2 = nn.PReLU()
self.conv3_1 = nn.Conv2d(64, 128, 5, stride=1, padding=2)
self.prelu3_1 = nn.PReLU()
self.conv3_2 = nn.Conv2d(128, 128, 5, stride=1, padding=2)
self.prelu3_2 = nn.PReLU()
self.gap_prelu = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.PReLU()
)
self.extractor1 = nn.Conv2d(32, 128 * 3 * 3, 1)
self.extractor2 = nn.Conv2d(64, 128 * 3 * 3, 1)
self.fuse = nn.Parameter(torch.Tensor([[[[0.], [0.], [1.]]]]))
if backbone_fc:
self.linear = nn.Sequential(
nn.Linear(128 * 3 * 3, 2),
nn.PReLU(),
nn.Linear(2, num_classes)
)
def forward(self, x):
x = self.prelu1_1(self.conv1_1(x))
x = self.prelu1_2(self.conv1_2(x))
extractor1 = self.extractor1(self.gap_prelu(x))
x = F.max_pool2d(x, 2)
x = self.prelu2_1(self.conv2_1(x))
x = self.prelu2_2(self.conv2_2(x))
extractor2 = self.extractor2(self.gap_prelu(x))
x = F.max_pool2d(x, 2)
x = self.prelu3_1(self.conv3_1(x))
x = self.prelu3_2(self.conv3_2(x))
x = F.max_pool2d(x, 2)
x = x.view(-1, 128 * 3 * 3)
# for unified style for DFPNet
out = x.unsqueeze(dim=-1).unsqueeze(dim=-1)
out = torch.cat([extractor1,extractor2, out],dim=2)
out = (out*self.fuse).sum(dim=2,keepdim=True)
# return the original feature map if no FC layers.
if hasattr(self, 'linear'):
out = F.adaptive_avg_pool2d(out, 1)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def demo():
net = LeNetHiera(num_classes=10, backbone_fc=False)
y = net(torch.randn(2, 1, 28, 28))
print(y.size())
# demo()
| [
"[email protected]"
] | |
c24f833353ca7bf80df320a57b2d8f201a33bb6a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02900/s596241684.py | d8b236ea95aa163221e6926aa26888658463f2cb | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | from math import*
def factrization_prime(number):
factor = {}
div = 2
s = sqrt(number)
while div < s:
div_cnt = 0
while number % div == 0:
div_cnt += 1
number //= div
if div_cnt != 0:
factor[div] = div_cnt
div += 1
if number > 1:
factor[number] = 1
return factor
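# len(f) below is the number of distinct prime factors of gcd(A, B); the +1
# accounts for the divisor 1 in the printed answer.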
A, B = map(int, input().split())
f = factrization_prime(gcd(A,B))
print(len(f)+1)
| [
"[email protected]"
] | |
560839c298d5303d6e8119983b822f06ccf876e7 | 13b46582bb6bbfe08a2e24127198ded24e6c0ad3 | /server/lighthouse/admin.py | 83201ba793fb89885f899a2319619cbac73b3426 | [] | no_license | dmetrosoft/seo-audits-toolkit | 9b12735d8345ef5075e87e6ea09440e01e32746f | c3e95fc4bf51d72e61c0507c14bd384d2368f475 | refs/heads/master | 2023-08-25T06:11:54.055464 | 2021-04-08T15:51:23 | 2021-04-08T15:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | from django.contrib import admin
from .models import Lighthouse, Lighthouse_Result
# Register your models here.
admin.site.register(Lighthouse)
admin.site.register(Lighthouse_Result)
# Allows the Model to be administered via the /admin interface
# Highly recommendeded for easier debug
| [
"[email protected]"
] | |
af66740e6d1849e019c0ffdd525e58709a401252 | 50dd2a43daa8316fc11e0c176b5872738fcc5dde | /Learning/130_Fluent_Python/fp2-utf8/blocinteractive/example 16-9.py | 3234ee9c3d215d29019fad27ad046ceb874110fa | [] | no_license | FrenchBear/Python | 58204d368e3e72071eef298ff00d06ff51bd7914 | b41ab4b6a59ee9e145ef2cd887a5fe306973962b | refs/heads/master | 2023-08-31T18:43:37.792427 | 2023-08-26T15:53:20 | 2023-08-26T15:53:20 | 124,466,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | # Example 16-9. Vector.__add__ method needs an iterable with numeric items
>>> v1 + 'ABC'
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "vector_v6.py", line 329, in __add__
return Vector(a + b for a, b in pairs)
File "vector_v6.py", line 243, in __init__
self._components = array(self.typecode, components)
File "vector_v6.py", line 329, in <genexpr>
return Vector(a + b for a, b in pairs)
TypeError: unsupported operand type(s) for +: 'float' and 'str'
| [
"[email protected]"
] | |
54154c6ac7f3eb42bb9745b4c24314cd78ab21af | da85d4caf3e5e1c9df8839fafd51f960f02daadd | /develop/io/async.py | 28958d34db443cb715939f72ced88e07280fcfd0 | [
"Apache-2.0"
] | permissive | shuaih7/FabricUI | 6efe58f3dbefebbd49607094a28bf2d7bc9314ca | 6501e8e6370d1f90174002f5768b5ef63e8412bc | refs/heads/main | 2023-04-13T10:07:42.090043 | 2021-04-13T02:55:12 | 2021-04-13T02:55:12 | 314,152,777 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py |
import asyncio
import time
from threading import Thread
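# Pattern shown below: run an asyncio event loop on a background thread and
# submit coroutines to it from the main thread with run_coroutine_threadsafe.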
def start_loop(loop):
asyncio.set_event_loop(loop)
print("start loop", time.time())
loop.run_forever()
async def do_some_work(x):
print('start {}'.format(x))
await asyncio.sleep(x)
print('Done after {}s'.format(x))
new_loop = asyncio.new_event_loop()
t = Thread(target=start_loop, args=(new_loop,))
t.start()
asyncio.run_coroutine_threadsafe(do_some_work(6), new_loop)
asyncio.run_coroutine_threadsafe(do_some_work(4), new_loop) | [
"[email protected]"
] | |
230c7c268af9b219156f9bd55fbfdead55bdde13 | a5698f82064aade6af0f1da21f504a9ef8c9ac6e | /huaweicloud-sdk-projectman/huaweicloudsdkprojectman/v4/model/list_projects_v4_response.py | 5b0873986a953ab7fb205e092a7cdb760a8bd5b4 | [
"Apache-2.0"
] | permissive | qizhidong/huaweicloud-sdk-python-v3 | 82a2046fbb7d62810984399abb2ca72b3b47fac6 | 6cdcf1da8b098427e58fc3335a387c14df7776d0 | refs/heads/master | 2023-04-06T02:58:15.175373 | 2021-03-30T10:47:29 | 2021-03-30T10:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,634 | py | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListProjectsV4Response(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'projects': 'list[ListProjectsV4ResponseBodyProjects]',
'total': 'int'
}
attribute_map = {
'projects': 'projects',
'total': 'total'
}
def __init__(self, projects=None, total=None):
"""ListProjectsV4Response - a model defined in huaweicloud sdk"""
super().__init__()
self._projects = None
self._total = None
self.discriminator = None
if projects is not None:
self.projects = projects
if total is not None:
self.total = total
@property
def projects(self):
"""Gets the projects of this ListProjectsV4Response.
        List of project information
:return: The projects of this ListProjectsV4Response.
:rtype: list[ListProjectsV4ResponseBodyProjects]
"""
return self._projects
@projects.setter
def projects(self, projects):
"""Sets the projects of this ListProjectsV4Response.
        List of project information
:param projects: The projects of this ListProjectsV4Response.
:type: list[ListProjectsV4ResponseBodyProjects]
"""
self._projects = projects
@property
def total(self):
"""Gets the total of this ListProjectsV4Response.
        Total number of projects
:return: The total of this ListProjectsV4Response.
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this ListProjectsV4Response.
        Total number of projects
:param total: The total of this ListProjectsV4Response.
:type: int
"""
self._total = total
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListProjectsV4Response):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
229153641d7110152bf260eee9651fafce2aa55c | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_2/209.py | ed64df919f88a28c7ad0f8764bf7151e0b04cfe0 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,054 | py |
import time
input = open("B-large.in", "r")
output = open("B-large.out", "w")
i = int(input.readline().strip())
for case in range(i):
turnaroundTime = int(input.readline().strip())
(AtoBNumber, BtoANumber) = map(int, input.readline().strip().split(" "))
timeList = []
for j in range(AtoBNumber):
(departureString, arrivalString) = input.readline().strip().split(" ")
departure = time.strptime(departureString + " 1971", "%H:%M %Y")
#print str(departure)
departure = time.localtime(time.mktime(departure))
arrival = time.strptime(arrivalString + " 1971", "%H:%M %Y")
arrival = time.localtime(time.mktime(arrival) + 60*turnaroundTime)
timeList.append((departure, "A", "departure"))
timeList.append((arrival, "B", "arrival"))
for j in range(BtoANumber):
(departureString, arrivalString) = input.readline().strip().split(" ")
departure = time.strptime(departureString + " 1971", "%H:%M %Y")
departure = time.localtime(time.mktime(departure))
arrival = time.strptime(arrivalString + " 1971", "%H:%M %Y")
arrival = time.localtime(time.mktime(arrival) + 60*turnaroundTime)
timeList.append((departure, "B", "departure"))
timeList.append((arrival, "A", "arrival"))
timeList.sort();
tmpAtoB = 0
tmpBtoA = 0
AtoB = 0
BtoA = 0
for timeTable in timeList:
if timeTable[2] == "arrival":
if timeTable[1] == "A":
tmpAtoB += 1
else:
tmpBtoA += 1
else:
if timeTable[1] == "A":
if tmpAtoB > 0:
tmpAtoB -= 1
else:
AtoB +=1
else:
if tmpBtoA > 0:
tmpBtoA -=1
else:
BtoA += 1
output.write("Case #%d: %d %d\n" %(case+1, AtoB, BtoA))
| [
"[email protected]"
] | |
69cf7fe229d5d1e49c81cb3e5ff63cc464e78512 | ff7c392e46baa2774b305a4999d7dbbcf8a3c0b3 | /ask-sdk-model/ask_sdk_model/interfaces/alexa/presentation/apl/rotate_transform_property.py | 7723824c757f66b7e1818dc1f7fa549b10be7f77 | [
"Apache-2.0"
] | permissive | rivamarco/alexa-apis-for-python | 83d035ba5beb5838ae977777191fa41cbe4ea112 | 62e3a9057a26003e836fa09aa12a2e1c8b62d6e0 | refs/heads/master | 2021-01-03T20:44:12.977804 | 2020-02-13T10:27:27 | 2020-02-13T10:29:24 | 240,229,385 | 2 | 0 | Apache-2.0 | 2020-02-13T10:05:45 | 2020-02-13T10:05:45 | null | UTF-8 | Python | false | false | 3,428 | py | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.interfaces.alexa.presentation.apl.transform_property import TransformProperty
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
class RotateTransformProperty(TransformProperty):
"""
:param rotate: Rotation angle, in degrees. Positive angles rotate in the clockwise direction.
:type rotate: float
"""
deserialized_types = {
'rotate': 'float'
} # type: Dict
attribute_map = {
'rotate': 'rotate'
} # type: Dict
supports_multiple_types = False
def __init__(self, rotate=0.0):
# type: (Union[float, str, None]) -> None
"""
:param rotate: Rotation angle, in degrees. Positive angles rotate in the clockwise direction.
:type rotate: float
"""
self.__discriminator_value = None # type: str
super(RotateTransformProperty, self).__init__()
self.rotate = rotate
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, RotateTransformProperty):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
cf6792f25eabe0aa2d1f7706d4894f2eb5107393 | 21aff79a45a410c69a17f6b6aa6623b0001559f3 | /apps/mapserver/apps.py | 611581e596df8ffa82e1ca00fb9b561109d3e675 | [] | no_license | jqchang/TamagotchiServer | 1e7c721894c0e38da9583f8887b032cec192e6a1 | 31667e7997f71f3aec38b25ced1359bab67f780a | refs/heads/master | 2021-01-23T01:07:01.638012 | 2017-03-23T02:58:07 | 2017-03-23T02:58:07 | 85,871,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class MapserverConfig(AppConfig):
name = 'mapserver'
| [
"[email protected]"
] | |
15a31b23ac174a1fa304a180f2caa0648df99bf6 | 3dab01e032134fd5cde6d29a127a8e8d93d48796 | /accounts/migrations/0003_auto_20210205_1107.py | b9490de61d22c0e9ea5dc7f6764010c42648a609 | [] | no_license | mariachacko93/FoacloidBankProject | 41baa9433814087f2e45d458bcfccc3c3bc44d6e | fbbb424ef4534c549ea74bfbe0d7d367a70802a1 | refs/heads/master | 2023-03-01T18:58:50.768867 | 2021-02-08T08:50:34 | 2021-02-08T08:50:34 | 337,012,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # Generated by Django 3.1.6 on 2021-02-05 07:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20210205_1107'),
]
operations = [
migrations.AlterField(
model_name='createaccount',
name='accno',
field=models.IntegerField(unique=True),
),
]
| [
"[email protected]"
] | |
b2e112dc5c3e47d5622da839002d37aa9d979b26 | 7f2689163e4bb97252bec1d974163b64b7d376fd | /object_detection/core/target_assigner.py | 21897c08d8cb7f3fdbe4afadd9040a20321b26a2 | [
"MIT"
] | permissive | JahJajaka/afternoon_cleaner | c8e2299a7e0f9f02c28852d67beb09f8b604d192 | 590bdf58a216cbc6cfc47ef8f49d7af3df3703b7 | refs/heads/master | 2022-12-13T03:34:32.594707 | 2019-12-08T07:32:05 | 2019-12-08T07:32:05 | 218,489,388 | 15 | 7 | MIT | 2022-12-08T06:55:12 | 2019-10-30T09:23:55 | Python | UTF-8 | Python | false | false | 32,866 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
provided RegionSimilarity Calculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
from six.moves import zip
import tensorflow as tf
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_coder as bcoder
from object_detection.core import box_list
from object_detection.core import matcher as mat
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.core import standard_fields as fields
from object_detection.matchers import argmax_matcher
from object_detection.matchers import bipartite_matcher
from object_detection.utils import shape_utils
class TargetAssigner(object):
"""Target assigner to compute classification and regression targets."""
def __init__(self,
similarity_calc,
matcher,
box_coder,
negative_class_weight=1.0):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: an object_detection.core.Matcher used to match groundtruth to
anchors.
box_coder: an object_detection.core.BoxCoder used to encode matching
groundtruth boxes with respect to anchors.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0). The weight must be in [0., 1.].
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder
"""
if not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator):
raise ValueError('similarity_calc must be a RegionSimilarityCalculator')
if not isinstance(matcher, mat.Matcher):
raise ValueError('matcher must be a Matcher')
if not isinstance(box_coder, bcoder.BoxCoder):
raise ValueError('box_coder must be a BoxCoder')
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder
self._negative_class_weight = negative_class_weight
@property
def box_coder(self):
return self._box_coder
# TODO(rathodv): move labels, scores, and weights to groundtruth_boxes fields.
def assign(self,
anchors,
groundtruth_boxes,
groundtruth_labels=None,
unmatched_class_label=None,
groundtruth_weights=None):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
If set to None, unmatched_cls_target is set to be [0] for each anchor.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1. Generally no
groundtruth boxes with zero weight match to any anchors as matchers are
aware of groundtruth weights. Additionally, `cls_weights` and
`reg_weights` are calculated using groundtruth weights as an added
safety.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
representing weights for each element in cls_targets.
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: an int32 tensor of shape [num_anchors] containing result of anchor
groundtruth matching. Each position in the tensor indicates an anchor
and holds the following meaning:
(1) if match[i] >= 0, anchor i is matched with groundtruth match[i].
(2) if match[i]=-1, anchor i is marked to be background .
(3) if match[i]=-2, anchor i is ignored since it is not background and
does not have sufficient overlap to call it a foreground.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
raise ValueError('anchors must be an BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
raise ValueError('groundtruth_boxes must be an BoxList')
if unmatched_class_label is None:
unmatched_class_label = tf.constant([0], tf.float32)
if groundtruth_labels is None:
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),
0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
unmatched_shape_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],
shape_utils.combined_static_and_dynamic_shape(unmatched_class_label))
labels_and_box_shapes_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(
groundtruth_labels)[:1],
shape_utils.combined_static_and_dynamic_shape(
groundtruth_boxes.get())[:1])
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
# set scores on the gt boxes
scores = 1 - groundtruth_labels[:, 0]
groundtruth_boxes.add_field(fields.BoxListFields.scores, scores)
with tf.control_dependencies(
[unmatched_shape_assert, labels_and_box_shapes_assert]):
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,
anchors)
match = self._matcher.match(match_quality_matrix,
valid_rows=tf.greater(groundtruth_weights, 0))
reg_targets = self._create_regression_targets(anchors,
groundtruth_boxes,
match)
cls_targets = self._create_classification_targets(groundtruth_labels,
unmatched_class_label,
match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match,
groundtruth_weights)
# convert cls_weights from per-anchor to per-class.
class_label_shape = tf.shape(cls_targets)[1:]
weights_shape = tf.shape(cls_weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), class_label_shape],
axis=0)
for _ in range(len(cls_targets.get_shape()[1:])):
cls_weights = tf.expand_dims(cls_weights, -1)
cls_weights = tf.tile(cls_weights, weights_multiple)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return (cls_targets, cls_weights, reg_targets, reg_weights,
match.match_results)
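# Rough usage sketch (hypothetical values, not taken from this module): for a
# 3-class problem with an explicit background column, assign() is typically
# called with one-hot labels and a background one-hot as the unmatched target,
# e.g. assuming `assigner` was built by create_target_assigner() further below:
#
#   anchors = box_list.BoxList(tf.constant([[0., 0., 1., 1.]]))
#   gt_boxes = box_list.BoxList(tf.constant([[0.1, 0.1, 0.9, 0.9]]))
#   gt_labels = tf.constant([[0., 1., 0., 0.]])  # one-hot incl. background
#   (cls_targets, cls_weights, reg_targets, reg_weights,
#    match_results) = assigner.assign(
#        anchors, gt_boxes, gt_labels,
#        unmatched_class_label=tf.constant([1., 0., 0., 0.], tf.float32))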
def _reset_target_shape(self, target, num_anchors):
"""Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target's
first dimension.
Returns:
A tensor with the shape info filled in.
"""
target_shape = target.get_shape().as_list()
target_shape[0] = num_anchors
target.set_shape(target_shape)
return target
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(fields.BoxListFields.keypoints):
groundtruth_keypoints = groundtruth_boxes.get_field(
fields.BoxListFields.keypoints)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))
matched_gt_boxlist.add_field(fields.BoxListFields.keypoints,
matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(
match.match_results)
# Zero out the unmatched and ignored regression targets.
unmatched_ignored_reg_targets = tf.tile(
self._default_regression_target(), [match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
reg_targets = tf.where(matched_anchors_mask,
matched_reg_targets,
unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
return tf.constant([self._box_coder.code_size*[0]], tf.float32)
def _create_classification_targets(self, groundtruth_labels,
unmatched_class_label, match):
"""Create classification targets for each anchor.
Assign a classification target for each anchor to the matching groundtruth
label provided by match. Anchors that are not matched to anything are given
the target unmatched_class_label.
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
return match.gather_based_on_match(
groundtruth_labels,
unmatched_value=unmatched_class_label,
ignored_value=unmatched_class_label)
def _create_regression_weights(self, match, groundtruth_weights):
"""Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
return match.gather_based_on_match(
groundtruth_weights, ignored_value=0., unmatched_value=0.)
def _create_classification_weights(self,
match,
groundtruth_weights):
"""Create classification weights for each anchor.
Positive (matched) anchors are associated with a weight of
positive_class_weight and negative (unmatched) anchors are associated with
a weight of negative_class_weight. When anchors are ignored, weights are set
to zero. By default, both positive/negative weights are set to 1.0,
but they can be adjusted to handle class imbalance (which is almost always
the case in object detection).
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification
weights.
"""
return match.gather_based_on_match(
groundtruth_weights,
ignored_value=0.,
unmatched_value=self._negative_class_weight)
def get_box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
# TODO(rathodv): This method pulls in all the implementation dependencies into
# core. Therefore its best to have this factory method outside of core.
def create_target_assigner(reference, stage=None,
negative_class_weight=1.0, use_matmul_gather=False):
"""Factory function for creating standard target assigners.
Args:
reference: string referencing the type of TargetAssigner.
stage: string denoting stage: {proposal, detection}.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0)
use_matmul_gather: whether to use matrix multiplication based gather which
are better suited for TPUs.
Returns:
TargetAssigner: desired target assigner.
Raises:
ValueError: if combination reference+stage is invalid.
"""
if reference == 'Multibox' and stage == 'proposal':
similarity_calc = sim_calc.NegSqDistSimilarity()
matcher = bipartite_matcher.GreedyBipartiteMatcher()
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
elif reference == 'FasterRCNN' and stage == 'proposal':
similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.7,
unmatched_threshold=0.3,
force_match_for_each_row=True,
use_matmul_gather=use_matmul_gather)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=[10.0, 10.0, 5.0, 5.0])
elif reference == 'FasterRCNN' and stage == 'detection':
similarity_calc = sim_calc.IouSimilarity()
# Uses all proposals with IOU < 0.5 as candidate negatives.
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
negatives_lower_than_unmatched=True,
use_matmul_gather=use_matmul_gather)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=[10.0, 10.0, 5.0, 5.0])
elif reference == 'FastRCNN':
similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.1,
force_match_for_each_row=False,
negatives_lower_than_unmatched=False,
use_matmul_gather=use_matmul_gather)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
else:
raise ValueError('No valid combination of reference and stage.')
return TargetAssigner(similarity_calc, matcher, box_coder,
negative_class_weight=negative_class_weight)
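# Usage sketch for the factory above (argument values are just examples):
#
#   proposal_assigner = create_target_assigner('FasterRCNN', stage='proposal')
#   detection_assigner = create_target_assigner('FasterRCNN', stage='detection',
#                                               negative_class_weight=1.0)
#
# Any other reference/stage combination raises the ValueError above.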
def batch_assign(target_assigner,
anchors_batch,
gt_box_batch,
gt_class_targets_batch,
unmatched_class_label=None,
gt_weights_batch=None):
"""Batched assignment of classification and regression targets.
Args:
target_assigner: a target assigner.
anchors_batch: BoxList representing N box anchors or list of BoxList objects
with length batch_size representing anchor sets.
gt_box_batch: a list of BoxList objects with length batch_size
representing groundtruth boxes for each image in the batch
gt_class_targets_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, classification_target_size] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match: an int32 tensor of shape [batch_size, num_anchors] containing result
of anchor groundtruth matching. Each position in the tensor indicates an
anchor and holds the following meaning:
(1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i].
(2) if match[x, i]=-1, anchor i is marked to be background.
(3) if match[x, i]=-2, anchor i is ignored since it is not background and
does not have sufficient overlap to call it a foreground.
Raises:
ValueError: if input list lengths are inconsistent, i.e.,
batch_size == len(gt_box_batch) == len(gt_class_targets_batch)
and batch_size == len(anchors_batch) unless anchors_batch is a single
BoxList.
"""
if not isinstance(anchors_batch, list):
anchors_batch = len(gt_box_batch) * [anchors_batch]
if not all(
isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
if not (len(anchors_batch)
== len(gt_box_batch)
== len(gt_class_targets_batch)):
raise ValueError('batch size incompatible with lengths of anchors_batch, '
'gt_box_batch and gt_class_targets_batch.')
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
match_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_targets_batch)
for anchors, gt_boxes, gt_class_targets, gt_weights in zip(
anchors_batch, gt_box_batch, gt_class_targets_batch, gt_weights_batch):
(cls_targets, cls_weights,
reg_targets, reg_weights, match) = target_assigner.assign(
anchors, gt_boxes, gt_class_targets, unmatched_class_label, gt_weights)
cls_targets_list.append(cls_targets)
cls_weights_list.append(cls_weights)
reg_targets_list.append(reg_targets)
reg_weights_list.append(reg_weights)
match_list.append(match)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
batch_match = tf.stack(match_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, batch_match)
# Assign an alias to avoid large refactor of existing users.
batch_assign_targets = batch_assign
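# Batched usage sketch (hypothetical tensors; `assigner` and `anchors` are
# assumed to exist, with groundtruth boxes as BoxLists and labels as one-hot
# tensors that include the background column):
#
#   (batch_cls_targets, batch_cls_weights, batch_reg_targets,
#    batch_reg_weights, batch_match) = batch_assign_targets(
#        assigner, anchors,
#        gt_box_batch=[gt_boxes_image0, gt_boxes_image1],
#        gt_class_targets_batch=[gt_labels_image0, gt_labels_image1],
#        unmatched_class_label=tf.constant([1., 0., 0., 0.], tf.float32))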
def batch_get_targets(batch_match, groundtruth_tensor_list,
groundtruth_weights_list, unmatched_value,
unmatched_weight):
"""Returns targets based on anchor-groundtruth box matching results.
Args:
batch_match: An int32 tensor of shape [batch, num_anchors] containing the
result of target assignment returned by TargetAssigner.assign(..).
groundtruth_tensor_list: A list of groundtruth tensors of shape
[num_groundtruth, d_1, d_2, ..., d_k]. The tensors can be of any type.
groundtruth_weights_list: A list of weights, one per groundtruth tensor, of
shape [num_groundtruth].
unmatched_value: A tensor of shape [d_1, d_2, ..., d_k] of the same type as
groundtruth tensor containing target value for anchors that remain
unmatched.
unmatched_weight: Scalar weight to assign to anchors that remain unmatched.
Returns:
targets: A tensor of shape [batch, num_anchors, d_1, d_2, ..., d_k]
containing targets for anchors.
weights: A float tensor of shape [batch, num_anchors] containing the weights
to assign to each target.
"""
match_list = tf.unstack(batch_match)
targets_list = []
weights_list = []
for match_tensor, groundtruth_tensor, groundtruth_weight in zip(
match_list, groundtruth_tensor_list, groundtruth_weights_list):
match_object = mat.Match(match_tensor)
targets = match_object.gather_based_on_match(
groundtruth_tensor,
unmatched_value=unmatched_value,
ignored_value=unmatched_value)
targets_list.append(targets)
weights = match_object.gather_based_on_match(
groundtruth_weight,
unmatched_value=unmatched_weight,
ignored_value=tf.zeros_like(unmatched_weight))
weights_list.append(weights)
return tf.stack(targets_list), tf.stack(weights_list)
def batch_assign_confidences(target_assigner,
anchors_batch,
gt_box_batch,
gt_class_confidences_batch,
gt_weights_batch=None,
unmatched_class_label=None,
include_background_class=True,
implicit_class_weight=1.0):
"""Batched assignment of classification and regression targets.
The differences between batch_assign_confidences and batch_assign_targets:
- 'batch_assign_targets' supports scalar (agnostic), vector (multiclass) and
tensor (high-dimensional) targets. 'batch_assign_confidences' only supports
scalar (agnostic) and vector (multiclass) targets.
- 'batch_assign_targets' assumes the input class tensor uses a binary
one/K-hot encoding. 'batch_assign_confidences' takes class confidence
scores as the input, where 1 means positive classes, 0 means implicit
negative classes, and -1 means explicit negative classes.
- 'batch_assign_confidences' assigns targets in a similar way to
'batch_assign_targets', except that it gives different weights to implicit
and explicit classes. This allows the user to control how negative gradients
are pushed for implicit and explicit examples during training.
Args:
target_assigner: a target assigner.
anchors_batch: BoxList representing N box anchors or list of BoxList objects
with length batch_size representing anchor sets.
gt_box_batch: a list of BoxList objects with length batch_size
representing groundtruth boxes for each image in the batch
gt_class_confidences_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, classification_target_size] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch. Note that in this tensor, 1 means explicit positive class,
-1 means explicit negative class, and 0 means implicit negative class.
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_gt_boxes_i] containing weights for groundtruth boxes.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
include_background_class: whether or not gt_class_confidences_batch includes
the background class.
implicit_class_weight: the weight assigned to implicit examples.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match: an int32 tensor of shape [batch_size, num_anchors] containing result
of anchor groundtruth matching. Each position in the tensor indicates an
anchor and holds the following meaning:
(1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i].
(2) if match[x, i]=-1, anchor i is marked to be background.
(3) if match[x, i]=-2, anchor i is ignored since it is not background and
does not have sufficient overlap to call it a foreground.
Raises:
ValueError: if input list lengths are inconsistent, i.e.,
batch_size == len(gt_box_batch) == len(gt_class_targets_batch)
and batch_size == len(anchors_batch) unless anchors_batch is a single
BoxList, or if any element in gt_class_confidences_batch has rank > 2.
"""
if not isinstance(anchors_batch, list):
anchors_batch = len(gt_box_batch) * [anchors_batch]
if not all(
isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
if not (len(anchors_batch)
== len(gt_box_batch)
== len(gt_class_confidences_batch)):
raise ValueError('batch size incompatible with lengths of anchors_batch, '
'gt_box_batch and gt_class_confidences_batch.')
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
match_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_confidences_batch)
for anchors, gt_boxes, gt_class_confidences, gt_weights in zip(
anchors_batch, gt_box_batch, gt_class_confidences_batch,
gt_weights_batch):
if (gt_class_confidences is not None and
len(gt_class_confidences.get_shape().as_list()) > 2):
raise ValueError('The shape of the class target is not supported. ',
gt_class_confidences.get_shape())
cls_targets, _, reg_targets, _, match = target_assigner.assign(
anchors, gt_boxes, gt_class_confidences, unmatched_class_label,
groundtruth_weights=gt_weights)
if include_background_class:
cls_targets_without_background = tf.slice(
cls_targets, [0, 1], [-1, -1])
else:
cls_targets_without_background = cls_targets
positive_mask = tf.greater(cls_targets_without_background, 0.0)
negative_mask = tf.less(cls_targets_without_background, 0.0)
explicit_example_mask = tf.logical_or(positive_mask, negative_mask)
positive_anchors = tf.reduce_any(positive_mask, axis=-1)
regression_weights = tf.cast(positive_anchors, dtype=tf.float32)
regression_targets = (
reg_targets * tf.expand_dims(regression_weights, axis=-1))
regression_weights_expanded = tf.expand_dims(regression_weights, axis=-1)
cls_targets_without_background = (
cls_targets_without_background *
(1 - tf.cast(negative_mask, dtype=tf.float32)))
cls_weights_without_background = ((1 - implicit_class_weight) * tf.cast(
explicit_example_mask, dtype=tf.float32) + implicit_class_weight)
if include_background_class:
cls_weights_background = (
(1 - implicit_class_weight) * regression_weights_expanded
+ implicit_class_weight)
classification_weights = tf.concat(
[cls_weights_background, cls_weights_without_background], axis=-1)
cls_targets_background = 1 - regression_weights_expanded
classification_targets = tf.concat(
[cls_targets_background, cls_targets_without_background], axis=-1)
else:
classification_targets = cls_targets_without_background
classification_weights = cls_weights_without_background
cls_targets_list.append(classification_targets)
cls_weights_list.append(classification_weights)
reg_targets_list.append(regression_targets)
reg_weights_list.append(regression_weights)
match_list.append(match)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
batch_match = tf.stack(match_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, batch_match)
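# Worked example of the confidence encoding handled above (values are
# illustrative): with an explicit background column and three foreground
# classes, a confidence row of [0, 1, 0, -1] marks class 1 as an explicit
# positive, class 3 as an explicit negative and class 2 as an implicit
# negative. Explicit entries keep a classification weight of 1.0, implicit
# entries get `implicit_class_weight`, and the -1 target itself is zeroed
# before being returned in batch_cls_targets.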
| [
"[email protected]"
] | |
cc2c5970cfe0af932efe8fe15b1f9437d823c6b5 | d2845579ea6aa51a2e150f0ffe6ccfda85d035ce | /kernel/examples/handler/component/horz_pearson.py | 88694a0dc34a7576b3327fff3c49a1214411c858 | [
"Apache-2.0"
] | permissive | as23187/WeFe | d8de9ff626f9f3e5d98e0850b0b717a80fd73e72 | ba92871d4b1d2eef6c606c34795f4575e84703bd | refs/heads/main | 2023-08-22T12:01:06.718246 | 2021-10-28T01:54:05 | 2021-10-28T01:54:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,052 | py | # Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.python.utils import log_utils
from kernel.components.correlation.horzpearson.param import HorzPearsonParam
from kernel.examples.handler.component.component_base import Component
from kernel.examples.handler.interface import Input
from kernel.examples.handler.interface import Output
LOGGER = log_utils.get_logger()
class HorzPearson(Component, HorzPearsonParam):
def __init__(self, **kwargs):
Component.__init__(self, **kwargs)
# print (self.name)
LOGGER.debug(f"{self.name} component created")
new_kwargs = self.erase_component_base_param(**kwargs)
HorzPearsonParam.__init__(self, **new_kwargs)
self.input = Input(self.name, data_type="multi")
self.output = Output(self.name)
self._module_name = "HorzPearson"
self._param_name = "HorzPearsonParam"
| [
"[email protected]"
] | |
5c59092aef917fb302c3a4ee9334f5b75ac50252 | dbf81359567f718f43e513891a4e761a2d8c7c5a | /users/forms.py | 2e13681d309d10c36e01e3bdd25217cd480c0bcd | [] | no_license | peterbe/w91011 | 675e5cbed1847010ab5ddb27c1fe56b935b1a26f | b816c8dacb246db730db3e678248c32cf021fc36 | refs/heads/master | 2021-01-01T17:31:27.595419 | 2011-05-20T22:10:51 | 2011-05-20T22:10:51 | 1,371,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | # python
import re
# django
from django import forms
from django.contrib.auth.models import User
from models import UserProfile
# app
| [
"[email protected]"
] | |
6afcc4bb8f1dec65bb86982e1b24b08f1b51732f | 047c0a87013a9e5cfbcfc1c812496df70f69053d | /api/utils.py | d9a113ad1d27ae4526afd8074cc8d51dac2123b2 | [] | no_license | tperrier/infx_598c | 13e471e26f8efd1728861b9d76bc1d783e70da6c | 96d51feaa174c68d30dfe531594a9c4cc4f5dc18 | refs/heads/master | 2021-01-19T03:13:01.315031 | 2015-03-14T02:30:22 | 2015-03-14T02:30:22 | 31,053,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,012 | py | import re,math,collections
from private import cookie
"""
Helper functions and data to drive the API
"""
HEADERS = {
'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/39.0.2171.65 Chrome/39.0.2171.65 Safari/537.36',
'Cookie':cookie,
}
FETCH_URL = 'http://www.google.com/trends/fetchComponent'
TRENDS_URL = 'http://www.google.com/trends/trendsReport'
REGEX_DATE = re.compile('new Date\((\d{4}),[ ]?(\d{1,2})(,[ ]?\d{1,2}){1,3}\)')
def sub_date(s):
#Search for Javascript Dates "new Date(yyyy,mm,dd,hh,mm,ss)" and replace with yyyy-mm
return REGEX_DATE.sub('"\g<1>-\g<2>"',s)
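# Worked example for the substitution above (input string is made up):
#   sub_date('x = new Date(2015, 3, 14);') -> 'x = "2015-3";'
# i.e. the day (and any time components) are dropped and the year/month pair
# is wrapped in double quotes, presumably so the payload can be parsed as JSON.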
def make_list(l):
if isinstance(l,str):
return [l]
return l
def base_five_split(n):
log = int(math.floor(math.log(n,5)))
power = 5**log
if power==n: #perfect power of 5
power = 5**(log-1)
times = int(math.floor(n/power))
remainder = n-times*power
s = [power for i in range(times)]
if remainder > 0: #perfect power of 5 no remainder
s += [remainder]
return s
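# Worked examples for base_five_split (computed from the logic above):
#   base_five_split(30) -> [25, 5]              # 5**2 plus remainder
#   base_five_split(25) -> [5, 5, 5, 5, 5]      # perfect power: drop one level
#   base_five_split(7)  -> [5, 2]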
def find_local(s,p=0,*args):
#similar to css selectors but for raw text
p = int(p)
if len(args) == 0:
return p
p = s.index(args[0],p)
return find_local(s,p,*args[1:])
def fetch_years():
return ['20%02i'%y for y in range(4,16)]
def fetch_dates():
dates = []
for y in range(4,16):
for m in range(1,13):
dates.append('%02i-%02i'%(y,m))
return dates + ['16-1','16-2']
def list_round(l):
return [round(i,2) if isinstance(i,(int,float,long)) else i for i in l]
def mean(data):
n = len(data)
if n<1:
raise ValueError('mean requires at least one data point')
return sum(data)/float(n)
def sd(data):
m = mean(data)
n = len(data)
if n<2:
raise ValueError('variance requires at least two data points')
ss = sum((i-m)**2 for i in data)
return (ss/n)**0.5
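# Quick sanity check for mean/sd (population statistics, no Bessel correction):
#   mean([2, 4, 4, 4, 5, 5, 7, 9]) -> 5.0
#   sd([2, 4, 4, 4, 5, 5, 7, 9])   -> 2.0   (sum of squared deviations 32 / 8 = 4)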
COUNTRY_CODES = collections.OrderedDict([
('AU','Australia'),('BD','Bangladesh'),('BW','Botswana'),
('CM','Cameroon '),('CA','Canada '),('DK','Denmark'),
('EG','Egypt'),('EE','Estonia'),('FJ','Fiji'),
('DE','Germany'),('GH','Ghana'),('GR','Greece'),
('IN','India'),('IQ','Iraq'),('IE','Ireland'),
('JM','Jamaica'),('JO','Jordan'),('KE','Kenya'),
('LS','Lesotho'),('LR','Liberia'),('MY','Malaysia'),
('MW','Malawi'),('NA','Namibia'),('NL','Netherlands'),
('NZ','New Zealand'),('NG','Nigeria'),('PK','Pakistan'),
('PH','Philippines'),('PR','Puerto Rico'),('RW','Rwanda'),
('SL','Sierra Leone'),('SG','Singapore'),('ZA','South Africa'),
('SR','Suriname'),('SZ','Swaziland'),('TZ','Tanzania'),
('TH','Thailand'),('UG','Uganda'),('GB','United Kingdom'),
('US','United States'),('VU','Vanuatu'),('ZM','Zambia'),
('ZW','Zimbabwe')
])
COUNTRY_ALL = collections.OrderedDict([('AF','Afghanistan'),('AL','Albania'),('DZ','Algeria'),('AS','American Samoa'),('AD','Andorra'),('AO','Angola'),('AI','Anguilla'),('AG','Antigua and Barbuda'),('AR','Argentina'),('AM','Armenia'),('AW','Aruba'),('AU','Australia'),('AT','Austria'),('AZ','Azerbaijan'),('BS','Bahamas'),('BH','Bahrain'),('BB','Barbados'),('BD','Bangladesh'),('BY','Belarus'),('BE','Belgium'),('BZ','Belize'),('BJ','Benin'),('BM','Bermuda'),('BT','Bhutan'),('BW','Botswana'),('BO','Bolivia'),('BA','Bosnia and Herzegovina'),('BR','Brazil'),('BG','Bulgaria'),('BF','Burkina Faso'),('BI','Burundi'),('KH','Cambodia'),('CM','Cameroon'),('CA','Canada'),('CF','Central African Republic'),('TD','Chad'),('CL','Chile'),('CN','China'),('CO','Colombia'),('KM','Comoros'),('CG','Congo'),('CD','Congo Democratic Republic'),('CR','Costa Rica'),('CI','Cote D\'Ivoire'),('HR','Croatia '),('CU','Cuba'),('CY','Cyprus'),('CZ','Czech Republic'),('CS','Czechoslovakia '),('DK','Denmark'),('DJ','Djibouti'),('DO','Dominican Republic'),('TP','East Timor'),('EC','Ecuador'),('EG','Egypt'),('SV','El Salvador'),('GQ','Equatorial Guinea'),('ER','Eritrea'),('EE','Estonia'),('ET','Ethiopia'),('FJ','Fiji'),('FI','Finland'),('FR','France'),('GF','French Guiana'),('PF','French Polynesia'),('GA','Gabon'),('GM','Gambia'),('GE','Georgia'),('DE','Germany'),('GH','Ghana'),('GB','Great Britain '),('GR','Greece'),('GL','Greenland'),('GT','Guatemala'),('GN','Guinea'),('GW','Guinea-Bissau'),('GY','Guyana'),('HT','Haiti'),('IS','Iceland'),('IN','India'),('ID','Indonesia'),('IR','Iran'),('IQ','Iraq'),('IE','Ireland'),('IL','Israel'),('IT','Italy'),('JM','Jamaica'),('JP','Japan'),('JO','Jordan'),('KZ','Kazakhstan'),('KE','Kenya'),('KP','North Korea'),('KR','South Korea'),('KW','Kuwait'),('KG','Kyrgyzstan'),('LA','Laos'),('LV','Latvia'),('LB','Lebanon'),('LR','Liberia'),('LY','Libya'),('LS','Lesotho'),('LT','Lithuania'),('MG','Madagascar'),('MW','Malawi'),('MY','Malaysia'),('ML','Mali'),('MR','Mauritania'),('MX','Mexico'),('MN','Mongolia'),('MA','Morocco'),('MZ','Mozambique'),('MM','Myanmar'),('NA','Namibia'),('NP','Nepal'),('NL','Netherlands'),('NZ','New Zealand '),('NI','Nicaragua'),('NE','Niger'),('NG','Nigeria'),('NO','Norway'),('OM','Oman'),('PK','Pakistan'),('PA','Panama'),('PG','Papua New Guinea'),('PY','Paraguay'),('PE','Peru'),('PH','Philippines'),('PL','Poland'),('PT','Portugal'),('PR','Puerto Rico'),('QA','Qatar'),('RO','Romania'),('RU','Russian Federation'),('RW','Rwanda'),('SA','Saudi Arabia'),('SN','Senegal'),('RS','Serbia'),('SL','Sierra Leone'),('SG','Singapore'),('SI','Slovenia'),('SK','Slovak Republic'),('SO','Somalia'),('ZA','South Africa'),('ES','Spain'),('LK','Sri Lanka'),('SD','Sudan'),('SR','Suriname'),('SZ','Swaziland'),('SE','Sweden'),('CH','Switzerland'),('SY','Syria'),('TW','Taiwan'),('TJ','Tajikistan'),('TZ','Tanzania'),('TH','Thailand'),('TG','Togo'),('TO','Tonga'),('TT','Trinidad and Tobago'),('TR','Turkey'),('TM','Turkmenistan'),('UG','Uganda'),('UA','Ukraine'),('AE','United Arab Emirates'),('UK','United Kingdom'),('US','United States'),('UY','Uruguay'),('UZ','Uzbekistan'),('VU','Vanuatu'),('VE','Venezuela'),('VN','Viet Nam'),('YE','Yemen'),('ZM','Zambia'),('ZW','Zimbabwe')])
# /m/01b_21, /m/0c58k, /m/07jwr, /m/0d19y2, /m/0cjf0
TERMS = {
'diabetes':'/m/0c58k',
'tb':'/m/07jwr',
'hiv':'/m/0d19y2',
'fever':'/m/0cjf0',
'cough':'/m/01b_21',
} | [
"[email protected]"
] | |
8661a95e0c4ade5cddd4f3acd778ce88a3e17a6d | 9d6218ca6c75a0e1ec1674fe410100d93d6852cb | /app/notifier/virtualenvs/notifier/bin/dynamodb_load | f7ed5a0cb31e695fab50e6b5e6b33df3a0797d40 | [] | no_license | bopopescu/uceo-2015 | 164694268969dd884904f51b00bd3dc034695be8 | 5abcbfc4ff32bca6ca237d71cbb68fab4b9f9f91 | refs/heads/master | 2021-05-28T21:12:05.120484 | 2015-08-05T06:46:36 | 2015-08-05T06:46:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,503 | #!/edx/app/notifier/virtualenvs/notifier/bin/python
import argparse
import os
import boto
from boto.compat import json
from boto.dynamodb.schema import Schema
DESCRIPTION = """Load data into one or more DynamoDB tables.
For each table, data is read from two files:
- {table_name}.metadata for the table's name, schema and provisioned
throughput (only required if creating the table).
- {table_name}.data for the table's actual contents.
Both files are searched for in the current directory. To read them from
somewhere else, use the --in-dir parameter.
This program does not wipe the tables prior to loading data. However, any
items present in the data files will overwrite the table's contents.
"""
def _json_iterload(fd):
"""Lazily load newline-separated JSON objects from a file-like object."""
buffer = ""
eof = False
while not eof:
try:
# Add a line to the buffer
buffer += fd.next()
except StopIteration:
# We can't let that exception bubble up, otherwise the last
# object in the file will never be decoded.
eof = True
try:
# Try to decode a JSON object.
json_object = json.loads(buffer.strip())
# Success: clear the buffer (everything was decoded).
buffer = ""
except ValueError:
if eof and buffer.strip():
# No more lines to load and the buffer contains something other
# than whitespace: the file is, in fact, malformed.
raise
# We couldn't decode a complete JSON object: load more lines.
continue
yield json_object
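# Behaviour sketch for _json_iterload (file contents are made up): objects may
# span several lines, so for a file-like object containing
#   {"table": "users",
#    "count": 2}
#   {"table": "sessions", "count": 5}
# the generator yields the two decoded dicts one at a time.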
def create_table(metadata_fd):
"""Create a table from a metadata file-like object."""
def load_table(table, in_fd):
"""Load items into a table from a file-like object."""
for i in _json_iterload(in_fd):
# Convert lists back to sets.
data = {}
for k, v in i.iteritems():
if isinstance(v, list):
data[k] = set(v)
else:
data[k] = v
table.new_item(attrs=data).put()
def dynamodb_load(tables, in_dir, create_tables):
conn = boto.connect_dynamodb()
for t in tables:
metadata_file = os.path.join(in_dir, "%s.metadata" % t)
data_file = os.path.join(in_dir, "%s.data" % t)
if create_tables:
with open(metadata_file) as meta_fd:
metadata = json.load(meta_fd)
table = conn.create_table(
name=t,
schema=Schema(metadata["schema"]),
read_units=metadata["read_units"],
write_units=metadata["write_units"],
)
table.refresh(wait_for_active=True)
else:
table = conn.get_table(t)
with open(data_file) as in_fd:
load_table(table, in_fd)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog="dynamodb_load",
description=DESCRIPTION
)
parser.add_argument(
"--create-tables",
action="store_true",
help="Create the tables if they don't exist already (without this flag, attempts to load data into non-existing tables fail)."
)
parser.add_argument("--in-dir", default=".")
parser.add_argument("tables", metavar="TABLES", nargs="+")
namespace = parser.parse_args()
dynamodb_load(namespace.tables, namespace.in_dir, namespace.create_tables)
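# Example invocation (table names are hypothetical):
#   dynamodb_load --create-tables --in-dir ./backup users sessions
# which expects ./backup/users.metadata, users.data, sessions.metadata and
# sessions.data to exist, per the DESCRIPTION above.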
| [
"[email protected]"
] | ||
39ecf8a6bf6320ef772e70a2165129e85befec7b | 26d6c34df00a229dc85ad7326de6cb5672be7acc | /msgraph-cli-extensions/v1_0/devicescorpmgt_v1_0/azext_devicescorpmgt_v1_0/vendored_sdks/devicescorpmgt/operations/_device_app_management_device_app_management_operations.py | a2163d104f46288f513f0a9b6c9009c40e5b4402 | [
"MIT"
] | permissive | BrianTJackett/msgraph-cli | 87f92471f68f85e44872939d876b9ff5f0ae6b2c | 78a4b1c73a23b85c070fed2fbca93758733f620e | refs/heads/main | 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 | NOASSERTION | 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null | UTF-8 | Python | false | false | 7,111 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DeviceAppManagementDeviceAppManagementOperations(object):
"""DeviceAppManagementDeviceAppManagementOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~devices_corporate_management.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get_device_app_management(
self,
select=None, # type: Optional[List[Union[str, "models.Get0ItemsItem"]]]
expand=None, # type: Optional[List[Union[str, "models.Get1ItemsItem"]]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphDeviceAppManagement"
"""Get deviceAppManagement.
Get deviceAppManagement.
:param select: Select properties to be returned.
:type select: list[str or ~devices_corporate_management.models.Get0ItemsItem]
:param expand: Expand related entities.
:type expand: list[str or ~devices_corporate_management.models.Get1ItemsItem]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphDeviceAppManagement, or the result of cls(response)
:rtype: ~devices_corporate_management.models.MicrosoftGraphDeviceAppManagement
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphDeviceAppManagement"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_device_app_management.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphDeviceAppManagement', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_device_app_management.metadata = {'url': '/deviceAppManagement'} # type: ignore
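# Usage sketch (assumes `ops` is an instance of this operations class obtained
# from the generated client; the property/relationship names are examples only):
#
#   device_app_mgmt = ops.get_device_app_management(
#       select=["microsoftStoreForBusinessLanguage"],
#       expand=["managedAppPolicies"])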
def update_device_app_management(
self,
body, # type: "models.MicrosoftGraphDeviceAppManagement"
**kwargs # type: Any
):
# type: (...) -> None
"""Update deviceAppManagement.
Update deviceAppManagement.
:param body: New property values.
:type body: ~devices_corporate_management.models.MicrosoftGraphDeviceAppManagement
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_device_app_management.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphDeviceAppManagement')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_device_app_management.metadata = {'url': '/deviceAppManagement'} # type: ignore
| [
"[email protected]"
] | |
3d35d8b74a1d0dc166e48617b352c76eeb91b235 | 3b28e825ce1ad8bb33d86891e33482f73d96719f | /vspk/v4_0/nuperformancemonitor.py | bcffcd5207f6748e22af24d98336f8c9216d362a | [
"BSD-3-Clause"
] | permissive | jsenjali/vspk-python | 8a4cd98ee3151cf66f835f688d04da86b26bc148 | 3aeff2343d91b81eaf60329ae2170c255723c02c | refs/heads/master | 2021-01-11T00:31:54.670370 | 2016-10-10T18:27:46 | 2016-10-10T18:27:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,323 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUApplicationperformancemanagementsFetcher
from bambou import NURESTObject
class NUPerformanceMonitor(NURESTObject):
""" Represents a PerformanceMonitor in the VSD
Notes:
None
"""
__rest_name__ = "performancemonitor"
__resource_name__ = "performancemonitors"
## Constants
CONST_SERVICE_CLASS_H = "H"
CONST_SERVICE_CLASS_A = "A"
CONST_SERVICE_CLASS_B = "B"
CONST_SERVICE_CLASS_C = "C"
CONST_SERVICE_CLASS_D = "D"
CONST_SERVICE_CLASS_E = "E"
CONST_SERVICE_CLASS_F = "F"
CONST_SERVICE_CLASS_G = "G"
def __init__(self, **kwargs):
""" Initializes a PerformanceMonitor instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> performancemonitor = NUPerformanceMonitor(id=u'xxxx-xxx-xxx-xxx', name=u'PerformanceMonitor')
>>> performancemonitor = NUPerformanceMonitor(data=my_dict)
"""
super(NUPerformanceMonitor, self).__init__()
# Read/Write Attributes
self._name = None
self._payload_size = None
self._read_only = None
self._service_class = None
self._description = None
self._interval = None
self._number_of_packets = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="payload_size", remote_name="payloadSize", attribute_type=int, is_required=True, is_unique=False)
self.expose_attribute(local_name="read_only", remote_name="readOnly", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="service_class", remote_name="serviceClass", attribute_type=str, is_required=False, is_unique=False, choices=[u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H'])
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="interval", remote_name="interval", attribute_type=int, is_required=True, is_unique=False)
self.expose_attribute(local_name="number_of_packets", remote_name="numberOfPackets", attribute_type=int, is_required=True, is_unique=False)
# Fetchers
self.applicationperformancemanagements = NUApplicationperformancemanagementsFetcher.fetcher_with_object(parent_object=self, relationship="member")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
Name of the application group probe
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the application group probe
"""
self._name = value
@property
def payload_size(self):
""" Get payload_size value.
Notes:
Payload size
This attribute is named `payloadSize` in VSD API.
"""
return self._payload_size
@payload_size.setter
def payload_size(self, value):
""" Set payload_size value.
Notes:
Payload size
This attribute is named `payloadSize` in VSD API.
"""
self._payload_size = value
@property
def read_only(self):
""" Get read_only value.
Notes:
Determines whether this entity is read only. Read only objects cannot be modified or deleted.
This attribute is named `readOnly` in VSD API.
"""
return self._read_only
@read_only.setter
def read_only(self, value):
""" Set read_only value.
Notes:
Determines whether this entity is read only. Read only objects cannot be modified or deleted.
This attribute is named `readOnly` in VSD API.
"""
self._read_only = value
@property
def service_class(self):
""" Get service_class value.
Notes:
Class of service to be used. Service classes in order of priority are A, B, C, D, E, F, G, and H.
This attribute is named `serviceClass` in VSD API.
"""
return self._service_class
@service_class.setter
def service_class(self, value):
""" Set service_class value.
Notes:
Class of service to be used. Service classes in order of priority are A, B, C, D, E, F, G, and H.
This attribute is named `serviceClass` in VSD API.
"""
self._service_class = value
@property
def description(self):
""" Get description value.
Notes:
Description of application group probe
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
Description of application group probe
"""
self._description = value
@property
def interval(self):
""" Get interval value.
Notes:
interval in seconds
"""
return self._interval
@interval.setter
def interval(self, value):
""" Set interval value.
Notes:
interval in seconds
"""
self._interval = value
@property
def number_of_packets(self):
""" Get number_of_packets value.
Notes:
number of packets
This attribute is named `numberOfPackets` in VSD API.
"""
return self._number_of_packets
@number_of_packets.setter
def number_of_packets(self, value):
""" Set number_of_packets value.
Notes:
number of packets
This attribute is named `numberOfPackets` in VSD API.
"""
self._number_of_packets = value
| [
"[email protected]"
] | |
25bc7cbddc8d89f7cc525f2f18b273c5df20a624 | f9a2e67dd2f40b37d8ff81bf6cdce47c38d2dee4 | /.c9/metadata/environment/ib_miniprojects_backend/project_management_portal/tests/interactors/test_get_projects_interactor.py | cbc68ce3255d82001401ee8438a32c39e3eb7cb5 | [] | no_license | mohan277/backend_repo | 4eae065cf0fffa29866a2b549028cb8df4c97643 | 25dbb4d0f1c174b6da95f4c73737e49db9978429 | refs/heads/master | 2022-11-13T00:08:37.600743 | 2020-07-09T04:36:44 | 2020-07-09T04:36:44 | 278,259,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,589 | py | {"filter":false,"title":"test_get_projects_interactor.py","tooltip":"/ib_miniprojects_backend/project_management_portal/tests/interactors/test_get_projects_interactor.py","undoManager":{"mark":100,"position":100,"stack":[[{"start":{"row":12,"column":0},"end":{"row":12,"column":1},"action":"insert","lines":["4"],"id":574}],[{"start":{"row":12,"column":0},"end":{"row":12,"column":1},"action":"remove","lines":["4"],"id":575}],[{"start":{"row":65,"column":41},"end":{"row":65,"column":42},"action":"insert","lines":[","],"id":576}],[{"start":{"row":65,"column":42},"end":{"row":65,"column":43},"action":"insert","lines":[" "],"id":577}],[{"start":{"row":65,"column":43},"end":{"row":65,"column":71},"action":"insert","lines":["list_of_project_details_dtos"],"id":578}],[{"start":{"row":79,"column":29},"end":{"row":79,"column":48},"action":"remove","lines":["project_details_dto"],"id":579},{"start":{"row":79,"column":29},"end":{"row":79,"column":57},"action":"insert","lines":["list_of_project_details_dtos"]}],[{"start":{"row":71,"column":30},"end":{"row":71,"column":31},"action":"remove","lines":["1"],"id":580}],[{"start":{"row":71,"column":30},"end":{"row":71,"column":31},"action":"insert","lines":["2"],"id":581}],[{"start":{"row":65,"column":4},"end":{"row":65,"column":28},"action":"remove","lines":["get_is_admin_valid_dto, "],"id":582}],[{"start":{"row":65,"column":19},"end":{"row":65,"column":43},"action":"insert","lines":["get_is_admin_valid_dto, "],"id":583}],[{"start":{"row":65,"column":71},"end":{"row":65,"column":72},"action":"insert","lines":[","],"id":584}],[{"start":{"row":65,"column":72},"end":{"row":65,"column":73},"action":"insert","lines":[" "],"id":585},{"start":{"row":65,"column":73},"end":{"row":66,"column":0},"action":"insert","lines":["",""]},{"start":{"row":66,"column":0},"end":{"row":66,"column":4},"action":"insert","lines":[" 
"]}],[{"start":{"row":65,"column":73},"end":{"row":65,"column":74},"action":"insert","lines":["\\"],"id":586}],[{"start":{"row":66,"column":4},"end":{"row":66,"column":5},"action":"insert","lines":["u"],"id":587},{"start":{"row":66,"column":5},"end":{"row":66,"column":6},"action":"insert","lines":["s"]},{"start":{"row":66,"column":6},"end":{"row":66,"column":7},"action":"insert","lines":["e"]},{"start":{"row":66,"column":7},"end":{"row":66,"column":8},"action":"insert","lines":["r"]},{"start":{"row":66,"column":8},"end":{"row":66,"column":9},"action":"insert","lines":["_"]},{"start":{"row":66,"column":9},"end":{"row":66,"column":10},"action":"insert","lines":["d"]}],[{"start":{"row":66,"column":10},"end":{"row":66,"column":11},"action":"insert","lines":["e"],"id":588},{"start":{"row":66,"column":11},"end":{"row":66,"column":12},"action":"insert","lines":["t"]},{"start":{"row":66,"column":12},"end":{"row":66,"column":13},"action":"insert","lines":["a"]},{"start":{"row":66,"column":13},"end":{"row":66,"column":14},"action":"insert","lines":["i"]},{"start":{"row":66,"column":14},"end":{"row":66,"column":15},"action":"insert","lines":["l"]},{"start":{"row":66,"column":15},"end":{"row":66,"column":16},"action":"insert","lines":["s"]},{"start":{"row":66,"column":16},"end":{"row":66,"column":17},"action":"insert","lines":["_"]}],[{"start":{"row":66,"column":17},"end":{"row":66,"column":18},"action":"insert","lines":["d"],"id":589},{"start":{"row":66,"column":18},"end":{"row":66,"column":19},"action":"insert","lines":["t"]},{"start":{"row":66,"column":19},"end":{"row":66,"column":20},"action":"insert","lines":["o"]},{"start":{"row":66,"column":20},"end":{"row":66,"column":21},"action":"insert","lines":["s"]}],[{"start":{"row":66,"column":4},"end":{"row":66,"column":8},"action":"insert","lines":[" "],"id":590}],[{"start":{"row":65,"column":0},"end":{"row":65,"column":4},"action":"remove","lines":[" "],"id":591},{"start":{"row":64,"column":53},"end":{"row":65,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":64,"column":53},"end":{"row":65,"column":0},"action":"insert","lines":["",""],"id":592},{"start":{"row":65,"column":0},"end":{"row":65,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":65,"column":19},"end":{"row":66,"column":0},"action":"insert","lines":["",""],"id":593},{"start":{"row":66,"column":0},"end":{"row":66,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":66,"column":28},"end":{"row":67,"column":0},"action":"insert","lines":["",""],"id":594},{"start":{"row":67,"column":0},"end":{"row":67,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":67,"column":34},"end":{"row":67,"column":35},"action":"remove","lines":["\\"],"id":595},{"start":{"row":67,"column":33},"end":{"row":67,"column":34},"action":"remove","lines":[" "]}],[{"start":{"row":68,"column":4},"end":{"row":68,"column":8},"action":"remove","lines":[" "],"id":596}],[{"start":{"row":65,"column":18},"end":{"row":65,"column":19},"action":"remove","lines":[" "],"id":597},{"start":{"row":66,"column":27},"end":{"row":66,"column":28},"action":"remove","lines":[" "]}],[{"start":{"row":68,"column":21},"end":{"row":68,"column":22},"action":"insert","lines":[","],"id":598}],[{"start":{"row":68,"column":22},"end":{"row":69,"column":0},"action":"insert","lines":["",""],"id":599},{"start":{"row":69,"column":0},"end":{"row":69,"column":4},"action":"insert","lines":[" 
"]},{"start":{"row":69,"column":4},"end":{"row":69,"column":5},"action":"insert","lines":["g"]},{"start":{"row":69,"column":5},"end":{"row":69,"column":6},"action":"insert","lines":["e"]},{"start":{"row":69,"column":6},"end":{"row":69,"column":7},"action":"insert","lines":["t"]}],[{"start":{"row":69,"column":7},"end":{"row":69,"column":8},"action":"insert","lines":["_"],"id":600},{"start":{"row":69,"column":8},"end":{"row":69,"column":9},"action":"insert","lines":["p"]},{"start":{"row":69,"column":9},"end":{"row":69,"column":10},"action":"insert","lines":["r"]},{"start":{"row":69,"column":10},"end":{"row":69,"column":11},"action":"insert","lines":["o"]},{"start":{"row":69,"column":11},"end":{"row":69,"column":12},"action":"insert","lines":["j"]},{"start":{"row":69,"column":12},"end":{"row":69,"column":13},"action":"insert","lines":["e"]},{"start":{"row":69,"column":13},"end":{"row":69,"column":14},"action":"insert","lines":["c"]},{"start":{"row":69,"column":14},"end":{"row":69,"column":15},"action":"insert","lines":["t"]},{"start":{"row":69,"column":15},"end":{"row":69,"column":16},"action":"insert","lines":["s"]}],[{"start":{"row":69,"column":16},"end":{"row":69,"column":17},"action":"insert","lines":["_"],"id":601},{"start":{"row":69,"column":17},"end":{"row":69,"column":18},"action":"insert","lines":["e"]},{"start":{"row":69,"column":18},"end":{"row":69,"column":19},"action":"insert","lines":["c"]}],[{"start":{"row":69,"column":19},"end":{"row":69,"column":20},"action":"insert","lines":["x"],"id":602}],[{"start":{"row":69,"column":19},"end":{"row":69,"column":20},"action":"remove","lines":["x"],"id":603},{"start":{"row":69,"column":18},"end":{"row":69,"column":19},"action":"remove","lines":["c"]}],[{"start":{"row":69,"column":18},"end":{"row":69,"column":19},"action":"insert","lines":["e"],"id":604},{"start":{"row":69,"column":19},"end":{"row":69,"column":20},"action":"insert","lines":["x"]},{"start":{"row":69,"column":20},"end":{"row":69,"column":21},"action":"insert","lines":["c"]}],[{"start":{"row":69,"column":20},"end":{"row":69,"column":21},"action":"remove","lines":["c"],"id":605},{"start":{"row":69,"column":19},"end":{"row":69,"column":20},"action":"remove","lines":["x"]},{"start":{"row":69,"column":18},"end":{"row":69,"column":19},"action":"remove","lines":["e"]}],[{"start":{"row":69,"column":18},"end":{"row":69,"column":19},"action":"insert","lines":["x"],"id":606},{"start":{"row":69,"column":19},"end":{"row":69,"column":20},"action":"insert","lines":["p"]},{"start":{"row":69,"column":20},"end":{"row":69,"column":21},"action":"insert","lines":["e"]},{"start":{"row":69,"column":21},"end":{"row":69,"column":22},"action":"insert","lines":["c"]},{"start":{"row":69,"column":22},"end":{"row":69,"column":23},"action":"insert","lines":["t"]},{"start":{"row":69,"column":23},"end":{"row":69,"column":24},"action":"insert","lines":["e"]},{"start":{"row":69,"column":24},"end":{"row":69,"column":25},"action":"insert","lines":["d"]}],[{"start":{"row":69,"column":25},"end":{"row":69,"column":26},"action":"insert","lines":["_"],"id":607},{"start":{"row":69,"column":26},"end":{"row":69,"column":27},"action":"insert","lines":["o"]},{"start":{"row":69,"column":27},"end":{"row":69,"column":28},"action":"insert","lines":["u"]}],[{"start":{"row":69,"column":28},"end":{"row":69,"column":29},"action":"insert","lines":["t"],"id":608},{"start":{"row":69,"column":29},"end":{"row":69,"column":30},"action":"insert","lines":["p"]},{"start":{"row":69,"column":30},"end":{"row":69,"column":31},"action":"insert",
"lines":["u"]}],[{"start":{"row":69,"column":31},"end":{"row":69,"column":32},"action":"insert","lines":["t"],"id":609}],[{"start":{"row":94,"column":46},"end":{"row":94,"column":47},"action":"insert","lines":["_"],"id":610},{"start":{"row":94,"column":47},"end":{"row":94,"column":48},"action":"insert","lines":["w"]},{"start":{"row":94,"column":48},"end":{"row":94,"column":49},"action":"insert","lines":["r"]},{"start":{"row":94,"column":49},"end":{"row":94,"column":50},"action":"insert","lines":["a"]},{"start":{"row":94,"column":50},"end":{"row":94,"column":51},"action":"insert","lines":["p"]},{"start":{"row":94,"column":51},"end":{"row":94,"column":52},"action":"insert","lines":["p"]},{"start":{"row":94,"column":52},"end":{"row":94,"column":53},"action":"insert","lines":["e"]}],[{"start":{"row":94,"column":53},"end":{"row":94,"column":54},"action":"insert","lines":["r"],"id":611}],[{"start":{"row":95,"column":4},"end":{"row":95,"column":8},"action":"remove","lines":[" "],"id":612},{"start":{"row":95,"column":0},"end":{"row":95,"column":4},"action":"remove","lines":[" "]},{"start":{"row":94,"column":55},"end":{"row":95,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":94,"column":68},"end":{"row":95,"column":0},"action":"insert","lines":["",""],"id":613},{"start":{"row":95,"column":0},"end":{"row":95,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":95,"column":4},"end":{"row":95,"column":8},"action":"insert","lines":[" "],"id":614}],[{"start":{"row":95,"column":8},"end":{"row":95,"column":12},"action":"insert","lines":[" "],"id":615}],[{"start":{"row":95,"column":12},"end":{"row":95,"column":16},"action":"insert","lines":[" "],"id":616}],[{"start":{"row":95,"column":16},"end":{"row":95,"column":20},"action":"insert","lines":[" "],"id":617}],[{"start":{"row":95,"column":20},"end":{"row":95,"column":24},"action":"insert","lines":[" "],"id":618}],[{"start":{"row":95,"column":24},"end":{"row":95,"column":28},"action":"insert","lines":[" "],"id":619}],[{"start":{"row":95,"column":28},"end":{"row":95,"column":32},"action":"insert","lines":[" "],"id":620}],[{"start":{"row":95,"column":32},"end":{"row":95,"column":36},"action":"insert","lines":[" "],"id":621}],[{"start":{"row":95,"column":36},"end":{"row":95,"column":40},"action":"insert","lines":[" "],"id":622}],[{"start":{"row":95,"column":40},"end":{"row":95,"column":44},"action":"insert","lines":[" "],"id":623}],[{"start":{"row":95,"column":44},"end":{"row":95,"column":48},"action":"insert","lines":[" "],"id":624}],[{"start":{"row":95,"column":48},"end":{"row":95,"column":52},"action":"insert","lines":[" "],"id":625}],[{"start":{"row":95,"column":52},"end":{"row":95,"column":56},"action":"insert","lines":[" "],"id":626}],[{"start":{"row":95,"column":52},"end":{"row":95,"column":56},"action":"remove","lines":[" "],"id":627}],[{"start":{"row":95,"column":52},"end":{"row":95,"column":53},"action":"insert","lines":[" "],"id":628},{"start":{"row":95,"column":53},"end":{"row":95,"column":54},"action":"insert","lines":[" "]},{"start":{"row":95,"column":54},"end":{"row":95,"column":55},"action":"insert","lines":[" "]}],[{"start":{"row":95,"column":70},"end":{"row":96,"column":0},"action":"insert","lines":["",""],"id":629},{"start":{"row":96,"column":0},"end":{"row":96,"column":55},"action":"insert","lines":[" 
"]}],[{"start":{"row":96,"column":70},"end":{"row":96,"column":71},"action":"insert","lines":[","],"id":630}],[{"start":{"row":96,"column":71},"end":{"row":97,"column":0},"action":"insert","lines":["",""],"id":631},{"start":{"row":97,"column":0},"end":{"row":97,"column":55},"action":"insert","lines":[" "]},{"start":{"row":97,"column":55},"end":{"row":97,"column":56},"action":"insert","lines":["p"]},{"start":{"row":97,"column":56},"end":{"row":97,"column":57},"action":"insert","lines":["e"]},{"start":{"row":97,"column":57},"end":{"row":97,"column":58},"action":"insert","lines":["r"]}],[{"start":{"row":97,"column":57},"end":{"row":97,"column":58},"action":"remove","lines":["r"],"id":632},{"start":{"row":97,"column":56},"end":{"row":97,"column":57},"action":"remove","lines":["e"]}],[{"start":{"row":97,"column":56},"end":{"row":97,"column":57},"action":"insert","lines":["r"],"id":633},{"start":{"row":97,"column":57},"end":{"row":97,"column":58},"action":"insert","lines":["e"]},{"start":{"row":97,"column":58},"end":{"row":97,"column":59},"action":"insert","lines":["s"]},{"start":{"row":97,"column":59},"end":{"row":97,"column":60},"action":"insert","lines":["e"]},{"start":{"row":97,"column":60},"end":{"row":97,"column":61},"action":"insert","lines":["n"]},{"start":{"row":97,"column":61},"end":{"row":97,"column":62},"action":"insert","lines":["t"]},{"start":{"row":97,"column":62},"end":{"row":97,"column":63},"action":"insert","lines":["e"]}],[{"start":{"row":97,"column":63},"end":{"row":97,"column":64},"action":"insert","lines":["r"],"id":634},{"start":{"row":97,"column":64},"end":{"row":97,"column":65},"action":"insert","lines":["="]},{"start":{"row":97,"column":65},"end":{"row":97,"column":66},"action":"insert","lines":["p"]}],[{"start":{"row":97,"column":66},"end":{"row":97,"column":67},"action":"insert","lines":["e"],"id":635}],[{"start":{"row":97,"column":66},"end":{"row":97,"column":67},"action":"remove","lines":["e"],"id":636}],[{"start":{"row":97,"column":66},"end":{"row":97,"column":67},"action":"insert","lines":["r"],"id":637},{"start":{"row":97,"column":67},"end":{"row":97,"column":68},"action":"insert","lines":["e"]},{"start":{"row":97,"column":68},"end":{"row":97,"column":69},"action":"insert","lines":["s"]},{"start":{"row":97,"column":69},"end":{"row":97,"column":70},"action":"insert","lines":["e"]},{"start":{"row":97,"column":70},"end":{"row":97,"column":71},"action":"insert","lines":["n"]},{"start":{"row":97,"column":71},"end":{"row":97,"column":72},"action":"insert","lines":["t"]},{"start":{"row":97,"column":72},"end":{"row":97,"column":73},"action":"insert","lines":["e"]},{"start":{"row":97,"column":73},"end":{"row":97,"column":74},"action":"insert","lines":["r"]}],[{"start":{"row":94,"column":67},"end":{"row":94,"column":68},"action":"remove","lines":[" "],"id":638},{"start":{"row":95,"column":69},"end":{"row":95,"column":70},"action":"remove","lines":[" "]}],[{"start":{"row":107,"column":4},"end":{"row":107,"column":6},"action":"insert","lines":["# 
"],"id":639}],[{"start":{"row":87,"column":42},"end":{"row":87,"column":43},"action":"insert","lines":["#"],"id":640}],[{"start":{"row":87,"column":42},"end":{"row":87,"column":43},"action":"insert","lines":["\""],"id":641},{"start":{"row":87,"column":43},"end":{"row":87,"column":44},"action":"insert","lines":["\""]}],[{"start":{"row":87,"column":43},"end":{"row":87,"column":44},"action":"insert","lines":["m"],"id":642},{"start":{"row":87,"column":44},"end":{"row":87,"column":45},"action":"insert","lines":["o"]},{"start":{"row":87,"column":45},"end":{"row":87,"column":46},"action":"insert","lines":["h"]},{"start":{"row":87,"column":46},"end":{"row":87,"column":47},"action":"insert","lines":["a"]},{"start":{"row":87,"column":47},"end":{"row":87,"column":48},"action":"insert","lines":["n"]}],[{"start":{"row":87,"column":49},"end":{"row":87,"column":50},"action":"remove","lines":["#"],"id":643},{"start":{"row":87,"column":48},"end":{"row":87,"column":49},"action":"remove","lines":["\""]},{"start":{"row":87,"column":47},"end":{"row":87,"column":48},"action":"remove","lines":["n"]},{"start":{"row":87,"column":46},"end":{"row":87,"column":47},"action":"remove","lines":["a"]},{"start":{"row":87,"column":45},"end":{"row":87,"column":46},"action":"remove","lines":["h"]},{"start":{"row":87,"column":44},"end":{"row":87,"column":45},"action":"remove","lines":["o"]},{"start":{"row":87,"column":43},"end":{"row":87,"column":44},"action":"remove","lines":["m"]},{"start":{"row":87,"column":42},"end":{"row":87,"column":43},"action":"remove","lines":["\""]}],[{"start":{"row":89,"column":54},"end":{"row":89,"column":55},"action":"insert","lines":["\""],"id":644},{"start":{"row":89,"column":55},"end":{"row":89,"column":56},"action":"insert","lines":["\""]}],[{"start":{"row":89,"column":56},"end":{"row":89,"column":57},"action":"insert","lines":["#"],"id":645}],[{"start":{"row":89,"column":56},"end":{"row":89,"column":57},"action":"remove","lines":["#"],"id":646},{"start":{"row":89,"column":55},"end":{"row":89,"column":56},"action":"remove","lines":["\""]},{"start":{"row":89,"column":54},"end":{"row":89,"column":55},"action":"remove","lines":["\""]}],[{"start":{"row":87,"column":4},"end":{"row":88,"column":0},"action":"insert","lines":["",""],"id":648},{"start":{"row":88,"column":0},"end":{"row":88,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":87,"column":4},"end":{"row":88,"column":50},"action":"insert","lines":["get_is_admin_valid_dto.return_value = is_admin_valid_dto"," get_user_dtos.return_value = user_details_dtos"],"id":649}],[{"start":{"row":89,"column":4},"end":{"row":90,"column":50},"action":"remove","lines":["get_is_admin_valid_dto.return_value = is_admin_valid_dto"," get_user_dtos.return_value = user_details_dtos"],"id":650},{"start":{"row":89,"column":0},"end":{"row":89,"column":4},"action":"remove","lines":[" "]},{"start":{"row":88,"column":50},"end":{"row":89,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":88,"column":4},"end":{"row":88,"column":6},"action":"insert","lines":["# "],"id":652}],[{"start":{"row":65,"column":0},"end":{"row":66,"column":0},"action":"remove","lines":[" get_user_dtos,",""],"id":653},{"start":{"row":66,"column":0},"end":{"row":67,"column":0},"action":"insert","lines":[" get_user_dtos,",""]}],[{"start":{"row":88,"column":4},"end":{"row":88,"column":6},"action":"remove","lines":["# 
"],"id":654}],[{"start":{"row":85,"column":5},"end":{"row":86,"column":0},"action":"insert","lines":["",""],"id":655},{"start":{"row":86,"column":0},"end":{"row":86,"column":4},"action":"insert","lines":[" "]},{"start":{"row":86,"column":4},"end":{"row":87,"column":0},"action":"insert","lines":["",""]},{"start":{"row":87,"column":0},"end":{"row":87,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":87,"column":4},"end":{"row":89,"column":81},"action":"insert","lines":["final_list_of_projects_details_dto = ListOfProjectsDto("," list_of_project_dtos=final_list_of_project_details_dtos,"," total_count_of_projects=list_of_projects_dto.total_count_of_projects)"],"id":657}],[{"start":{"row":86,"column":0},"end":{"row":86,"column":4},"action":"remove","lines":[" "],"id":658}],[{"start":{"row":86,"column":0},"end":{"row":86,"column":4},"action":"insert","lines":[" "],"id":659}],[{"start":{"row":86,"column":4},"end":{"row":87,"column":0},"action":"insert","lines":["",""],"id":660},{"start":{"row":87,"column":0},"end":{"row":87,"column":4},"action":"insert","lines":[" "]},{"start":{"row":87,"column":4},"end":{"row":88,"column":0},"action":"insert","lines":["",""]},{"start":{"row":88,"column":0},"end":{"row":88,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":87,"column":4},"end":{"row":90,"column":13},"action":"insert","lines":["final_project_details_dto = FinalProjectDTO("," user_details_dtos=user_details_dtos,"," project_details_dto=project_dto"," )"],"id":661}],[{"start":{"row":11,"column":33},"end":{"row":11,"column":34},"action":"insert","lines":[","],"id":662}],[{"start":{"row":11,"column":34},"end":{"row":11,"column":35},"action":"insert","lines":[" "],"id":663},{"start":{"row":11,"column":35},"end":{"row":11,"column":36},"action":"insert","lines":["F"]}],[{"start":{"row":11,"column":35},"end":{"row":11,"column":36},"action":"remove","lines":["F"],"id":664},{"start":{"row":11,"column":35},"end":{"row":11,"column":50},"action":"insert","lines":["FinalProjectDTO"]}],[{"start":{"row":86,"column":0},"end":{"row":86,"column":4},"action":"remove","lines":[" "],"id":665},{"start":{"row":91,"column":0},"end":{"row":91,"column":4},"action":"remove","lines":[" "]}],[{"start":{"row":69,"column":32},"end":{"row":69,"column":33},"action":"insert","lines":[","],"id":666}],[{"start":{"row":69,"column":33},"end":{"row":70,"column":0},"action":"insert","lines":["",""],"id":667},{"start":{"row":70,"column":0},"end":{"row":70,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":70,"column":4},"end":{"row":70,"column":29},"action":"insert","lines":["final_project_details_dto"],"id":668}],[{"start":{"row":88,"column":4},"end":{"row":91,"column":13},"action":"remove","lines":["final_project_details_dto = FinalProjectDTO("," user_details_dtos=user_details_dtos,"," project_details_dto=project_dto"," )"],"id":669},{"start":{"row":88,"column":0},"end":{"row":88,"column":4},"action":"remove","lines":[" 
"]},{"start":{"row":87,"column":0},"end":{"row":88,"column":0},"action":"remove","lines":["",""]},{"start":{"row":86,"column":5},"end":{"row":87,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":70,"column":10},"end":{"row":70,"column":11},"action":"insert","lines":["l"],"id":670},{"start":{"row":70,"column":11},"end":{"row":70,"column":12},"action":"insert","lines":["i"]},{"start":{"row":70,"column":12},"end":{"row":70,"column":13},"action":"insert","lines":["s"]},{"start":{"row":70,"column":13},"end":{"row":70,"column":14},"action":"insert","lines":["t"]}],[{"start":{"row":70,"column":14},"end":{"row":70,"column":15},"action":"insert","lines":["_"],"id":671}],[{"start":{"row":70,"column":4},"end":{"row":70,"column":15},"action":"remove","lines":["final_list_"],"id":672},{"start":{"row":70,"column":4},"end":{"row":70,"column":38},"action":"insert","lines":["final_list_of_projects_details_dto"]}],[{"start":{"row":70,"column":38},"end":{"row":70,"column":57},"action":"remove","lines":["project_details_dto"],"id":673}],[{"start":{"row":67,"column":4},"end":{"row":67,"column":32},"action":"remove","lines":["list_of_project_details_dtos"],"id":674},{"start":{"row":67,"column":4},"end":{"row":67,"column":32},"action":"insert","lines":["list_of_project_details_dtos"]}],[{"start":{"row":94,"column":54},"end":{"row":94,"column":74},"action":"remove","lines":["list_of_project_dtos"],"id":675},{"start":{"row":94,"column":54},"end":{"row":94,"column":82},"action":"insert","lines":["list_of_project_details_dtos"]}],[{"start":{"row":82,"column":0},"end":{"row":90,"column":81},"action":"remove","lines":[""," list_of_project_dtos = ListOfProjectsDto("," list_of_project_dtos=list_of_project_details_dtos,"," total_count_of_projects=total_count_of_projects"," )",""," final_list_of_projects_details_dto = ListOfProjectsDto("," list_of_project_dtos=final_list_of_project_details_dtos,"," total_count_of_projects=list_of_projects_dto.total_count_of_projects)"],"id":676},{"start":{"row":81,"column":55},"end":{"row":82,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":100,"column":29},"end":{"row":100,"column":49},"action":"remove","lines":["list_of_project_dtos"],"id":677},{"start":{"row":100,"column":29},"end":{"row":100,"column":57},"action":"insert","lines":["list_of_project_details_dtos"]}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":137,"column":14},"end":{"row":137,"column":14},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1593660223429,"hash":"787cfe152e9e945ec40648219c5bbcd1bb558589"} | [
"[email protected]"
] | |
a2cc0d8b9ee7768ddb30a445139a75e96b07c107 | 7a42d40a351824464a3c78dc0c3e78bbd8e0a92f | /RestProject/API/models.py | d5633eab44e77e7fc0bc778eae43c34180e41df2 | [] | no_license | AhMay/DerekBlogLearn | 6595063eafbc237b932e187b5cb3ad8ff32637fc | fdd5ea2fc5732cdc82ad006f7be0a2a1f30d0ba9 | refs/heads/master | 2020-07-09T05:20:33.283672 | 2019-09-29T10:10:23 | 2019-09-29T10:10:23 | 203,891,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | from django.db import models
# Create your models here.
class UserInfo(models.Model):
USER_TYPE = (
(1,'普通用户'),
(2,'VIP'),
(3,'SVIP')
)
user_type = models.IntegerField(choices=USER_TYPE)
username = models.CharField(max_length=32)
password = models.CharField(max_length=64)
group = models.ForeignKey('UserGroup', on_delete=models.CASCADE, null=True, blank=True)
roles = models.ManyToManyField('Role')
class UserToken(models.Model):
user = models.OneToOneField(UserInfo,on_delete=models.CASCADE)
token = models.CharField(max_length=64)
class UserGroup(models.Model):
title = models.CharField(max_length=32)
class Role(models.Model):
title = models.CharField(max_length=32) | [
"[email protected]"
] | |
dfbba17d8c8e485a34a89c32f8fe71b59d124f0a | 0f504dab15e85d95695999eb7ad6fb5d0fedf627 | /backend/course/api/v1/urls.py | ac318e9affa6f0ceb6d946ccd239fb5f8e476bdb | [] | no_license | crowdbotics-apps/quiz2-21690 | e4ac495291069051c2750b4d520a7e783c422525 | b9839d7cc02f631877ae7625f7ec32355d0bc90e | refs/heads/master | 2022-12-30T18:40:06.017656 | 2020-10-18T23:49:48 | 2020-10-18T23:49:48 | 305,218,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import (
RecordingViewSet,
EventViewSet,
SubscriptionViewSet,
CourseViewSet,
GroupViewSet,
ModuleViewSet,
PaymentMethodViewSet,
SubscriptionTypeViewSet,
EnrollmentViewSet,
LessonViewSet,
CategoryViewSet,
)
router = DefaultRouter()
router.register("category", CategoryViewSet)
router.register("paymentmethod", PaymentMethodViewSet)
router.register("subscriptiontype", SubscriptionTypeViewSet)
router.register("subscription", SubscriptionViewSet)
router.register("course", CourseViewSet)
router.register("recording", RecordingViewSet)
router.register("event", EventViewSet)
router.register("module", ModuleViewSet)
router.register("enrollment", EnrollmentViewSet)
router.register("lesson", LessonViewSet)
router.register("group", GroupViewSet)
urlpatterns = [
path("", include(router.urls)),
]
| [
"[email protected]"
] | |
67dbc7b5ac7b949db359a0aecf1cf5b6cae00c61 | 2df82b931c89ac70d49b0716d642d8e355926d50 | /product/migrations/0001_initial.py | 3716702e72959b27f723bc7d805418f6cdc9c2ad | [] | no_license | khanansha/producthunt | 1a638104e83803b9afc4a51ff3ead438ae47cab6 | 03b8d45091c88a2ff142f0a3082910ac1fa0ba41 | refs/heads/master | 2021-05-26T03:21:35.246011 | 2020-04-08T08:41:17 | 2020-04-08T08:41:17 | 254,031,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | # Generated by Django 2.2.10 on 2020-04-02 15:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('pub_date', models.DateTimeField()),
('body', models.TextField()),
('url', models.TextField()),
('image', models.ImageField(upload_to='images/')),
('icon', models.ImageField(upload_to='images/')),
('votes_total', models.IntegerField(default=1)),
('hunter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
649ccaa41cf34423f7cb4ea871756bc46167d0e9 | 34b7e1ec50f0ebf3c0389baebd0940801cb49016 | /get_update_gaps.py | 4491613af1df28cd30e2e76e230ea6f17586d52b | [] | no_license | chenc10/TensorFlow-RRSP-INFOCOM19 | c5827378dfd3fe4cbe6b85c4f1d6aee4615b6e4e | 131eb664f91111646bccf5b7490f5d1e9562ebeb | refs/heads/master | 2020-04-29T22:18:40.729132 | 2019-03-19T06:52:51 | 2019-03-19T06:52:51 | 176,443,203 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | import sys
num = 16
update_times = []
for i in range(num):
    f = open(sys.argv[1] + '/slave' + str(i) + '.log', 'r')
    d = f.readlines()
    f.close()
    # collect the update timestamps logged on the "loss =" lines of each slave log
    for line in d:
        if 'loss =' in line:
            tmp = line.split('time:')[1].split(';')[0]
            update_times.append(float(tmp))
update_times.sort()
gaps = []
for i in range(len(update_times) - 1):
    gaps.append(update_times[i + 1] - update_times[i])
gaps.sort()
print(gaps)
| [
"[email protected]"
] | |
35948d5e5ca5054abf5106ee59c00b4eefdda3da | c404b4da78b1ceed2f8dfa50425a04ad68f8d34e | /2_route/request.py | 7b9aca4b9c7a386fe85c96c83cd46b3bc3b43f34 | [] | no_license | ydPro-G/Flask | 9bca2db3d19193f07c86cd628cbebaade65451dd | d34a9577901dabf4018ba1050263709f2d69e6a8 | refs/heads/master | 2022-12-15T22:11:39.391134 | 2020-09-17T07:43:55 | 2020-09-17T07:43:55 | 285,768,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | # HTTP请求方法设置,Get,Post,Put,Delete
from flask import request
from flask import Flask
from flask import url_for
# Create the web application instance
app = Flask(__name__)
# Bind the route to specific HTTP request methods; different methods can return different data
@app.route('/login',methods=['GET','POST'])
# Route (view) function
def login():
    # Return data that matches the request method
    if request.method == 'POST':
        return 'This is a POST request'
    else:
        return 'This is a GET request'
# Start the web server
if __name__ == '__main__':
    app.run()
# 2. URL building
# Flask provides the url_for() helper to look up and build URLs quickly. Its first argument is the name of a
# view function (one decorated with @app.route); the remaining keyword arguments fill in the URL variables.
# Note: these calls are illustrative and need an application/request context to run.
url_for('login') # returns /login
url_for('login',id='1') # id becomes a URL query parameter, returns /login?id=1
url_for('hello',name='man') # fills the name parameter of the hello view, returns /hello/man
url_for('static') # static file directory
url_for('static',filename='style.css') # static file path, returns /static/style.css
#3. Static file location
# A web app's static files (JS, CSS, images, etc.) live in the static/ subdirectory by default.
# Use url_for('static') to build URLs for them.
# To change the static directory:
app = Flask(__name__, static_folder='files') | [
"[email protected]"
] | |
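The url_for() notes in the request.py snippet above only resolve inside an application or request context; called at module level they raise a RuntimeError. Below is a minimal runnable sketch of the same idea — the hello view, the style.css filename, and the use of test_request_context() are illustrative assumptions, not part of the original file.

from flask import Flask, request, url_for

app = Flask(__name__)

@app.route('/login', methods=['GET', 'POST'])
def login():
    # Echo back which HTTP method was used
    return 'This is a %s request' % request.method

@app.route('/hello/<name>')
def hello(name):
    # The URL variable <name> is filled in by url_for(..., name=...)
    return 'Hello, %s' % name

# test_request_context() provides the context url_for() needs outside a real request.
with app.test_request_context():
    print(url_for('login'))                         # /login
    print(url_for('login', id='1'))                 # /login?id=1  (id becomes a query parameter)
    print(url_for('hello', name='man'))             # /hello/man
    print(url_for('static', filename='style.css'))  # /static/style.css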
51d687d3a65ca402d94f83473d89873f6bc053ea | c4f01eec090833762b884c2078161df087d09b0d | /Calculation methods/CalcMethods_Lab_3_V15_Task_5_3_1/venv/Scripts/pip-script.py | 2a4c7ff61e1d675d875a14a4d4569cdd48595e73 | [] | no_license | areyykarthik/Zhukouski_Pavel_BSU_Projects | 47a30144c5614b10af521a78fba538a0e9184efa | 3540979e680732d38e25a6b39f09338985de6743 | refs/heads/master | 2023-08-07T02:49:34.736155 | 2021-10-05T21:57:03 | 2021-10-05T21:57:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | #!C:\Users\user\PycharmProjects\CalcMethods_Lab_3_V15_Task_5_3_1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"[email protected]"
] | |
696900b90a3dc9d880cb3d1bb40e987430548ae4 | 8d2a124753905fb0455f624b7c76792c32fac070 | /pytnon-month01/month01-class notes/day17-fb/demo02.py | f3a3aa8c1778164bc8a83ee97e32c7c518ca0ef4 | [] | no_license | Jeremy277/exercise | f38e4f19aae074c804d265f6a1c49709fd2cae15 | a72dd82eb2424e4ae18e2f3e9cc66fc4762ec8fa | refs/heads/master | 2020-07-27T09:14:00.286145 | 2019-09-17T11:31:44 | 2019-09-17T11:31:44 | 209,041,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,193 | py | """
Iterator --> yield
    Exercise: exercise04-装饰器.py (decorators)
    Goal: let objects created from a custom class be used in a for loop.
    Value of __iter__: the object can be iterated over by for.
    Value of __next__: return the next piece of data / raise an exception when exhausted.
    class CustomClassIterator:
        def __next__(self):
            pass
    class CustomClass:
        def __iter__(self):
            pass
    for item in CustomClass():
        pass
"""
# class SkillIterator:
# def __init__(self,data):
# self.__target = data
# self.__index = -1
#
# def __next__(self):
#         # Raise an exception (StopIteration) when there is no more data
#         if self.__index >= len(self.__target)-1:
#             raise StopIteration
#         # Return the next piece of data
# self.__index += 1
# return self.__target[self.__index]
class SkillManager:
"""
        Skill manager -- an iterable object.
"""
def __init__(self):
self.__skills = []
def add_skill(self,str_skill):
self.__skills.append(str_skill)
def __iter__(self):
# return SkillIterator(self.__skills)
        # Execution flow:
        # 1. Calling __iter__() does not run this body.
        # 2. The body only runs once __next__() is called.
        # 3. Execution pauses when it reaches a yield statement.
        # 4. Calling __next__() again resumes from that point.
        # ....
        # Effect of yield: it marks this function so the code below is automatically turned into iterator code.
        # Rough transformation:
        # 1. The code before the yield keyword goes into the __next__ method.
        # 2. The value after the yield keyword becomes the return value of __next__.
        # print("Preparing data:")
        # yield "降龙十八掌"
        #
        # print("Preparing data:")
        # yield "黑虎掏心"
        #
        # print("Preparing data:")
        # yield "六脉神剑"
for item in self.__skills:
yield item
manager = SkillManager()
manager.add_skill("降龙十八掌")
manager.add_skill("黑虎掏心")
manager.add_skill("六脉神剑")
# Error: for this loop to work, manager must be an iterable object (one that defines __iter__()),
# for item in manager:
# print(item)
iterator = manager.__iter__()
while True:
try:
item = iterator.__next__()
print(item)
except StopIteration:
break
| [
"[email protected]"
] | |
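The comments in the demo02.py lesson above describe how a yield inside __iter__ turns the method into iterator code. The sketch below restates that equivalence with hypothetical names (ManualIterator and Skills are not from the lesson file): a hand-written iterator class and a generator-based __iter__ behave the same way under a for loop.

class ManualIterator:
    """Hand-written iterator: __next__ returns the next item or raises StopIteration."""
    def __init__(self, data):
        self._data = data
        self._index = -1

    def __iter__(self):
        return self

    def __next__(self):
        if self._index >= len(self._data) - 1:
            raise StopIteration
        self._index += 1
        return self._data[self._index]


class Skills:
    """Iterable whose __iter__ is a generator: each yield pauses until the next __next__() call."""
    def __init__(self, skills):
        self._skills = skills

    def __iter__(self):
        for item in self._skills:
            yield item


# Both behave identically in a for loop: for calls __iter__() once, then __next__() repeatedly.
for skill in ManualIterator(["skill-a", "skill-b"]):
    print(skill)
for skill in Skills(["skill-a", "skill-b"]):
    print(skill)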
6dd671548eb54a66d884c51832342a6f633a2883 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /9cuQrhEMwiESfKznk_22.py | 4beda351a6e7e00fbc28e2b58d5e33dbf762429c | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py |
def eng2nums(s):
ans=''
nums=[('zero'),('one'),('two','twenty'),('three','thirty'),('four','forty'),\
('five','fifty'),('six','sixty'),('seven','seventy'),('eight','eighty'),\
('nine','ninety'),('ten'),('eleven'),('twelve'),('thirteen'),('fourteen'),\
('fifteen'),('sixteen'),('seventeen'),('eighteen'),('nineteen')]
sl=s.split()
for i in sl:
for j in range(len(nums)):
if i in nums[j]:
ans+=str(j)
break
if s[-2:]=='ty':
ans+='0'
elif 'hundred' in s:
ans=ans[0]+'0'*abs(len(ans)-3)+ans[1:]
return int(ans)
| [
"[email protected]"
] | |
bb54248f5b5ab49021f8e14e557a7e1a0f7253cb | d190750d6cb34e9d86ae96724cf4b56a2f57a74a | /tests/r/test_wine.py | fde49d1570f4c69b88eb7454a338809439bfce4b | [
"Apache-2.0"
] | permissive | ROAD2018/observations | a119f61a48213d791de0620804adb8d21c2ad9fb | 2c8b1ac31025938cb17762e540f2f592e302d5de | refs/heads/master | 2021-09-24T04:28:02.725245 | 2018-09-16T23:06:30 | 2018-09-16T23:06:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.wine import wine
def test_wine():
"""Test module wine.py by downloading
wine.csv and testing shape of
extracted data has 21 rows and 5 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = wine(test_path)
try:
assert x_train.shape == (21, 5)
except:
shutil.rmtree(test_path)
    raise  # re-raise the original exception after cleaning up
| [
"[email protected]"
] | |
8a78ad47f05a803e2d09a235e1e6dd69439e958c | 8787b2fbb5017b61dcf6075a5261071b403847bf | /Programmers/피보나치 수.py | fa2b3172cfb8dd924d6d630734bc34a370b3b1fd | [] | no_license | khw5123/Algorithm | a6fe0009e33289813959553c2366d77c93d7b4b9 | 323a829f17a10276ab6f1aec719c496a3e76b974 | refs/heads/master | 2023-01-02T00:12:21.848924 | 2020-10-23T06:37:41 | 2020-10-23T06:37:41 | 282,162,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | def matmul(a, b, mod):
result = [[0, 0], [0, 0]]
for i in range(2):
for j in range(2):
for k in range(2):
result[i][j] = (result[i][j] + a[i][k] * b[k][j]) % mod
return result
def fibonacci_matrix(n, mod):
arr, constant = [[1, 0], [0, 1]], [[1, 1], [1, 0]]
while n > 0:
if n % 2 == 1:
arr = matmul(arr, constant, mod)
constant = matmul(constant, constant, mod)
n = n // 2
return arr[0][0]
def solution(n):
answer = fibonacci_matrix(n-1, 1234567)
return answer | [
"[email protected]"
] | |
27422c5a1f0ff024ae1c6f90a890aee82bc4fcdb | c56ddcc2807151a5c44d3a1d65a1984bc8fd9b84 | /6 кю/Multiples of 3 or 5.py | d64d9b66c2b54376f88bf68fe0a418f6c91a6640 | [] | no_license | kelpasa/Code_Wars_Python | 2cd18dd404603a6535887e8e6ed2d08da19562ba | 939ec1dd08ffc7939bb9a139bf42901d6f24fbdd | refs/heads/master | 2022-12-17T02:00:28.319351 | 2020-09-23T09:11:20 | 2020-09-23T09:11:20 | 246,642,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | def solution(number):
lst = []
for i in range(number):
if i % 3 == 0 or i % 5 == 0: lst.append(i)
return sum(lst)
| [
"[email protected]"
] | |
820f6939597b6231ca028aa4cfb0446606990eae | 511b7b19ec49be34bec240ee7c7cf4178cd36ca3 | /gasolinestation/migrations/0005_auto_20200228_0841.py | 24b1e028159d61f88186ceefaefa79403001fad9 | [] | no_license | francisguchie/360POS | 58de516fe52e83d6b99bd195d22c8aa902daee18 | 68f9e20ac263c75ec0c9b0fe75d7f648b8744ea8 | refs/heads/master | 2023-02-08T16:38:42.667538 | 2020-03-12T16:05:00 | 2020-03-12T16:05:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | # Generated by Django 3.0.3 on 2020-02-28 08:41
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('gasolinestation', '0004_auto_20200228_0832'),
]
operations = [
migrations.AlterField(
model_name='gasstations',
name='site_staff',
field=models.ManyToManyField(blank=True, related_name='site_staffs', to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
2d59e32a6dcbc39603ae17ab49edec7087cf3ec4 | f68cd225b050d11616ad9542dda60288f6eeccff | /testscripts/RDKB/component/CosaCM/TS_COSACM_GetLoopDiagnosticsStart_WithInvalidBuffer.py | 237d4cfa6fdc2e7d9e44c6fc5e9438d8bc5e5679 | [
"Apache-2.0"
] | permissive | cablelabs/tools-tdkb | 18fb98fadcd169fa9000db8865285fbf6ff8dc9d | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | refs/heads/master | 2020-03-28T03:06:50.595160 | 2018-09-04T11:11:00 | 2018-09-05T00:24:38 | 147,621,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,628 | py | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>1</version>
<name>TS_COSACM_GetLoopDiagnosticsStart_WithInvalidBuffer</name>
<primitive_test_id/>
<primitive_test_name>CosaCM_GetLoopDiagnosticsStart</primitive_test_name>
<primitive_test_version>2</primitive_test_version>
<status>FREE</status>
<synopsis/>
<groups_id>4</groups_id>
<execution_time>1</execution_time>
<long_duration>false</long_duration>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_COSACM_40</test_case_id>
<test_objective>To Validate Cable Modem
"CosaDmlCMGetLoopDiagnosticsStart" API under Negative scenario</test_objective>
<test_type>Positive</test_type>
<test_setup>Emulator,
XB3</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state of DUT that includes component under test Cable Modem
2.TDK Agent should be in running state or invoke it through StartTdk.sh script
</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>Json Interface:
API Name
CosaCM_GetLoopDiagnosticsStart
Input
N/A
</input_parameters>
<automation_approch>1.Configure the Function info in Test Manager GUI which needs to be tested
(CosaCM_GetLoopDiagnosticsStart - func name - "If not exists already" ( This is considered as default Primitive test case)
cosacm - module name
Necessary I/P args if needed as Mentioned in Input)
2.Create a Python Script in Test Manager with default primitive test case through add new rdkb script option (TS_COSACM_GetLoopDiagnosticsStart_WithInvalidBuffer.py)
3.Customize the generated script template to handle load/unload and pass/fail scenarios
3.Execute the generated Script(TS_COSACM_GetLoopDiagnosticsStart_WithInvalidBuffer.py) using execution page of Test Manager GUI
4.cosacmstub which is a part of TDK Agent process, will be in listening mode to execute TDK Component function named CosaCM_GetLoopDiagnosticsStart through registered TDK cosacmstub function along with necessary Entry Values as arguments
5.CosaCM_GetLoopDiagnosticsStart function will call ssp_CosaCMGetLoopDiagnosticsStart, that in turn will call the relevant cm hal Function to get/set data model value
6.Responses(printf) from TDK Component,Ccsp Library function and cosacmstub would be logged in Agent Console log based on the debug info redirected to agent console
7.cosacmstub function CosaCM_GetLoopDiagnosticsStart will validate the available result (return value from ssp_CosaCMGetLoopDiagnosticsStart as success(0)) with expected result (success(0)) and the output argument value is updated in agent console log and json output variable along with return value
8.TestManager will publish the result in GUI as PASS/FAILURE based on the response from CosaCM_GetLoopDiagnosticsStart function</automation_approch>
<except_output>CheckPoint 1:
Cosa CM "Get Loop Diagnostics Start" success log from DUT should be available in Agent Console Log
CheckPoint 2:
TDK agent Test Function will log the test case result as PASS based on API response which will be available in Test Manager Result ( XLS)
CheckPoint 3:
TestManager GUI will publish the result as PASS in Execution/Console page of Test Manager</except_output>
<priority>High</priority>
<test_stub_interface>None</test_stub_interface>
<test_script>TS_COSACM_GetLoopDiagnosticsStart_WithInvalidBuffer</test_script>
<skipped>No</skipped>
<release_version/>
<remarks>None</remarks>
</test_cases>
</xml>
'''
#use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
import time;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("cosacm","RDKB");
#IP and Port of box, No need to change,
#This will be replaced with the corresponding Box IP and port while executing the script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_COSACM_GetLoopDiagnosticsStart_NegArg');
#Get the result of connection with test component and STB
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus ;
if "SUCCESS" in loadmodulestatus.upper():
obj.setLoadModuleStatus("SUCCESS");
#Script to load the configuration file of the component
tdkTestObj = obj.createTestStep("CosaCM_GetLoopDiagnosticsStart");
tdkTestObj.addParameter("handleType",0);
tdkTestObj.addParameter("boolValue",1);
expectedresult="FAILURE";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
details = tdkTestObj.getResultDetails();
print "TEST STEP 1: Should not get the loop diagonostics start details";
print "EXPECTED RESULT 1: Fail to get the loop diagnostics start details ";
print "ACTUAL RESULT 1: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
else:
tdkTestObj.setResultStatus("FAILURE");
details = tdkTestObj.getResultDetails();
print "TEST STEP 1: Should not get the loop diagonostics start details";
print "EXPECTED RESULT 1: Fail to get the loop diagnostics start details ";
print "ACTUAL RESULT 1: %s" %details;
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
obj.unloadModule("cosacm");
else:
print "Failed to load the module";
obj.setLoadModuleStatus("FAILURE");
print "Module loading failed";
| [
"[email protected]"
] | |
7cc65b6f39eee379bed59fe9296f6f4f7706dab0 | a1615563bb9b124e16f4163f660d677f3224553c | /LI/lib/python3.8/site-packages/numpy/typing/tests/data/pass/array_constructors.py | 63208f139c39667febc30a53fb13a4109a74d410 | [
"BSD-3-Clause",
"GPL-3.0-or-later",
"BSD-3-Clause-Open-MPI",
"GPL-3.0-only",
"GCC-exception-3.1",
"MIT"
] | permissive | honeybhardwaj/Language_Identification | 2a247d98095bd56c1194a34a556ddfadf6f001e5 | 1b74f898be5402b0c1a13debf595736a3f57d7e7 | refs/heads/main | 2023-04-19T16:22:05.231818 | 2021-05-15T18:59:45 | 2021-05-15T18:59:45 | 351,470,447 | 5 | 4 | MIT | 2021-05-15T18:59:46 | 2021-03-25T14:42:26 | Python | UTF-8 | Python | false | false | 2,362 | py | from typing import List, Any
import numpy as np
class Index:
def __index__(self) -> int:
return 0
class SubClass(np.ndarray): ...
i8 = np.int64(1)
A = np.array([1])
B = A.view(SubClass).copy()
B_stack = np.array([[1], [1]]).view(SubClass)
C = [1]
def func(i: int, j: int, **kwargs: Any) -> SubClass:
return B
np.array(1, dtype=float)
np.array(1, copy=False)
np.array(1, order='F')
np.array(1, order=None)
np.array(1, subok=True)
np.array(1, ndmin=3)
np.array(1, str, copy=True, order='C', subok=False, ndmin=2)
np.asarray(A)
np.asarray(B)
np.asarray(C)
np.asanyarray(A)
np.asanyarray(B)
np.asanyarray(B, dtype=int)
np.asanyarray(C)
np.ascontiguousarray(A)
np.ascontiguousarray(B)
np.ascontiguousarray(C)
np.asfortranarray(A)
np.asfortranarray(B)
np.asfortranarray(C)
np.require(A)
np.require(B)
np.require(B, dtype=int)
np.require(B, requirements=None)
np.require(B, requirements="E")
np.require(B, requirements=["ENSUREARRAY"])
np.require(B, requirements={"F", "E"})
np.require(B, requirements=["C", "OWNDATA"])
np.require(B, requirements="W")
np.require(B, requirements="A")
np.require(C)
np.linspace(0, 2)
np.linspace(0.5, [0, 1, 2])
np.linspace([0, 1, 2], 3)
np.linspace(0j, 2)
np.linspace(0, 2, num=10)
np.linspace(0, 2, endpoint=True)
np.linspace(0, 2, retstep=True)
np.linspace(0j, 2j, retstep=True)
np.linspace(0, 2, dtype=bool)
np.linspace([0, 1], [2, 3], axis=Index())
np.logspace(0, 2, base=2)
np.logspace(0, 2, base=2)
np.logspace(0, 2, base=[1j, 2j], num=2)
np.geomspace(1, 2)
np.zeros_like(A)
np.zeros_like(C)
np.zeros_like(B)
np.zeros_like(B, dtype=np.int64)
np.ones_like(A)
np.ones_like(C)
np.ones_like(B)
np.ones_like(B, dtype=np.int64)
np.empty_like(A)
np.empty_like(C)
np.empty_like(B)
np.empty_like(B, dtype=np.int64)
np.full_like(A, i8)
np.full_like(C, i8)
np.full_like(B, i8)
np.full_like(B, i8, dtype=np.int64)
np.ones(1)
np.ones([1, 1, 1])
np.full(1, i8)
np.full([1, 1, 1], i8)
np.indices([1, 2, 3])
np.indices([1, 2, 3], sparse=True)
np.fromfunction(func, (3, 5))
np.identity(10)
np.atleast_1d(C)
np.atleast_1d(A)
np.atleast_1d(C, C)
np.atleast_1d(C, A)
np.atleast_1d(A, A)
np.atleast_2d(C)
np.atleast_3d(C)
np.vstack([C, C])
np.vstack([C, A])
np.vstack([A, A])
np.hstack([C, C])
np.stack([C, C])
np.stack([C, C], axis=0)
np.stack([C, C], out=B_stack)
np.block([[C, C], [C, C]])
np.block(A)
| [
"[email protected]"
] | |
5e736de360ea4a465167243925e0eb88349d59c9 | 7f7dd8b279a19b623c57723ffe8d788423bd359e | /Summary/WC2TPCEff/FlatEff/G4XSPiMinus_60A.py | 5621c4610be96a1b387608af292c1e508dfaf8df | [] | no_license | ElenaGramellini/LArIATPionXSAna | 35925398b8f7d8ada14bf78664ca243a74b8e946 | 0fb26e915b987084f553d64560e5a4e6adcb65fa | refs/heads/master | 2020-11-28T10:13:57.769651 | 2019-12-23T15:37:38 | 2019-12-23T15:37:38 | 229,779,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,899 | py | from ROOT import *
import os
import math
import argparse
from ROOT import TEfficiency
from ROOT import gStyle , TCanvas , TGraphErrors
from array import array
def is_number(s):
try:
int(s)
return True
except ValueError:
return False
def graphTruth():
fname = "PionMinusG4.txt"
kineticEnergy = []
crossSec = []
crossSec_el = []
crossSec_inel = []
zero = []
title = ""
with open(fname) as f:
for fLine in f.readlines():
w = fLine.split()
if is_number(w[0]):
runIn = int(w[0])
ke = float(w[1])
xstot = float(w[4])
kineticEnergy.append(ke)
crossSec.append(xstot)
zero.append(0.)
else:
if "for" not in fLine:
continue
title = fLine[9:]
#define some data points . . .
x = array('f', kineticEnergy )
y = array('f', crossSec)
y_el = array('f', crossSec_el)
y_inel = array('f', crossSec_inel)
exl = array('f', zero)
exr = array('f', zero)
nPoints=len(x)
# . . . and hand over to TGraphErros object
gr = TGraphErrors ( nPoints , x , y , exl, exr )
gr.SetTitle(title+"; Kinetic Energy [MeV]; Cross Section [barn]")
gr . GetXaxis().SetRangeUser(0,1000)
gr . GetYaxis().SetRangeUser(0,2.)
gr . SetLineWidth(2) ;
gr . SetLineColor(kGreen-2) ;
gr . SetFillColor(0)
return gr
c1=TCanvas("c1" ,"Data" ,200 ,10 ,700 ,700) #make nice
c1.SetGrid ()
gr = graphTruth()
f = TFile("../../FiducialVolumeStudy/askForInt/FidVol_Z90.0_19.0_-19.0_46.0_1.0_TrueInt_60A.root")
h = f.Get( "XS")
h.SetMarkerColor(kGreen-2)
h.SetLineColor(kGreen-2)
h.SetMarkerStyle(22)
h.SetMarkerSize(.72)
f3 = TFile("FlatEff0.8SameFidVol_Z86.0_19.0_-19.0_46.0_1.060.root")
h3 = f3.Get( "XS")
h3.SetMarkerColor(kBlack)
h3.SetLineColor(kBlack)
h3.SetMarkerStyle(22)
h3.SetMarkerSize(.72)
f5 = TFile("FlatEff0.5SameFidVol_Z86.0_19.0_-19.0_46.0_1.060.root")
h5 = f5.Get( "XS")
h5.SetMarkerColor(kRed)
h5.SetLineColor(kRed)
h5.SetMarkerStyle(22)
h5.SetMarkerSize(.72)
f4 = TFile("FlatEff0.3SameFidVol_Z86.0_19.0_-19.0_46.0_1.060.root")
h4 = f4.Get( "XS")
h4.SetMarkerColor(kOrange)
h4.SetLineColor(kOrange)
h4.SetMarkerStyle(22)
h4.SetMarkerSize(.72)
gr .Draw ( "APL" ) ;
h .Draw("same")
h3 .Draw("same")
h5 .Draw("same")
h4 .Draw("same")
legend = TLegend(.44,.70,.84,.89)
legend.AddEntry(gr,"G4 Prediction Tot XS")
legend.AddEntry(h,"True Interaction, Z [0., 90.] cm")
legend.AddEntry(h3,"Fid Vol, Z [0., 86.] cm, flat wc2tpc eff 0.8, 60A")
legend.AddEntry(h5,"Fid Vol, Z [0., 86.] cm, flat wc2tpc eff 0.5, 60A")
legend.AddEntry(h4,"Fid Vol, Z [0., 86.] cm, flat wc2tpc eff 0.3, 60A")
legend.Draw("same")
c1 . Update ()
raw_input()
| [
"[email protected]"
] | |
656b3bbc3c04e4d80829ccf04b91cedb148ddf99 | 1b9656800d5088309fde46bbc5a02efa2d12f948 | /lifelines/fitters/aalen_johansen_fitter.py | 6f6075f0dd4fe0046c42349f185cb6376600851d | [
"MIT"
] | permissive | sjoerdapp/lifelines | 9045dc588295a2a35c3d4d2135595ef8b2c1ada3 | b51234508133233e8fb2790e4d881d38042b6571 | refs/heads/master | 2020-04-10T02:38:55.575518 | 2018-12-01T23:58:03 | 2018-12-01T23:58:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,518 | py | from __future__ import print_function
from __future__ import division
import numpy as np
import pandas as pd
import warnings
from lifelines.fitters import UnivariateFitter
from lifelines.utils import _preprocess_inputs, inv_normal_cdf
from lifelines.fitters.kaplan_meier_fitter import KaplanMeierFitter
class AalenJohansenFitter(UnivariateFitter):
"""Class for fitting the Aalen-Johansen estimate for the cumulative incidence function in a competing risks framework.
Treating competing risks as censoring can result in over-estimated cumulative density functions. Using the Kaplan
Meier estimator with competing risks as censored is akin to estimating the cumulative density if all competing risks
had been prevented. If you are interested in learning more, I (Paul Zivich) recommend the following open-access
paper; Edwards JK, Hester LL, Gokhale M, Lesko CR. Methodologic Issues When Estimating Risks in
Pharmacoepidemiology. Curr Epidemiol Rep. 2016;3(4):285-296.
AalenJohansenFitter(alpha=0.95, jitter_level=0.00001, seed=None)
Aalen-Johansen cannot deal with tied times. We can get around this by randomy jittering the event times
slightly. This will be done automatically and generates a warning.
"""
def __init__(self, jitter_level=0.0001, seed=None, alpha=0.95):
UnivariateFitter.__init__(self, alpha=alpha)
self._jitter_level = jitter_level
self._seed = seed # Seed is for the jittering process
def fit(self, durations, event_observed, event_of_interest, timeline=None, entry=None, label='AJ_estimate',
alpha=None, ci_labels=None, weights=None):
"""
Parameters:
durations: an array or pd.Series of length n -- duration of subject was observed for
event_observed: an array, or pd.Series, of length n. Integer indicator of distinct events. Must be
only positive integers, where 0 indicates censoring.
event_of_interest: integer -- indicator for event of interest. All other integers are considered competing events
Ex) event_observed contains 0, 1, 2 where 0:censored, 1:lung cancer, and 2:death. If event_of_interest=1, then death (2)
is considered a competing event. The returned cumulative incidence function corresponds to risk of lung cancer
timeline: return the best estimate at the values in timelines (postively increasing)
entry: an array, or pd.Series, of length n -- relative time when a subject entered the study. This is
useful for left-truncated (not left-censored) observations. If None, all members of the population
were born at time 0.
label: a string to name the column of the estimate.
alpha: the alpha value in the confidence intervals. Overrides the initializing
alpha for this call to fit only.
ci_labels: add custom column names to the generated confidence intervals
as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
weights: n array, or pd.Series, of length n, if providing a weighted dataset. For example, instead
of providing every subject as a single element of `durations` and `event_observed`, one could
weigh subject differently.
Returns:
self, with new properties like 'cumulative_incidence_'.
"""
# Checking for tied event times
if np.sum(pd.Series(durations).duplicated()) > 0:
# Seeing if there is a large amount of ties in the data (>20%)
if np.sum(pd.Series(durations).duplicated()) / len(durations) > 0.2:
warnings.warn('''It looks like there are many tied events in your data set. The Aalen-Johansen
estimator should only be used when there are no/few tied events''', Warning)
# I am unaware of a recommended cut-off, but 20% would be suggestive of issues
# Raise warning if duplicated times, then randomly jitter times
warnings.warn('''Tied event times were detected. The Aalen-Johansen estimator cannot handle tied event times.
To resolve ties, data is randomly jittered.''', Warning)
durations = self._jitter(durations=pd.Series(durations), event=pd.Series(event_observed),
jitter_level=self._jitter_level, seed=self._seed)
# Creating label for event of interest & indicator for that event
cmprisk_label = 'CIF_' + str(int(event_of_interest))
self.label_cmprisk = 'observed_' + str(int(event_of_interest))
# Fitting Kaplan-Meier for either event of interest OR competing risk
km = KaplanMeierFitter()
km.fit(durations, event_observed=event_observed, timeline=timeline, entry=entry, weights=weights)
aj = km.event_table
aj['overall_survival'] = km.survival_function_
aj['lagged_overall_survival'] = aj['overall_survival'].shift()
# Setting up table for calculations and to return to user
event_spec = np.where(pd.Series(event_observed) == event_of_interest, 1, 0)
event_spec_proc = _preprocess_inputs(durations=durations, event_observed=event_spec, timeline=timeline,
entry=entry, weights=weights)
event_spec_times = event_spec_proc[-1]['observed']
event_spec_times = event_spec_times.rename(self.label_cmprisk)
aj = pd.concat([aj, event_spec_times], axis=1).reset_index()
# Estimator of Cumulative Incidence (Density) Function
aj[cmprisk_label] = ((aj[self.label_cmprisk]) / (aj['at_risk']) * aj['lagged_overall_survival']).cumsum()
aj.loc[0, cmprisk_label] = 0 # Setting initial CIF to be zero
aj = aj.set_index('event_at')
# Setting attributes
self._estimation_method = "cumulative_density_"
self._estimate_name = "cumulative_density_"
self._predict_label = label
self._update_docstrings()
alpha = alpha if alpha else self.alpha
self._label = label
self.cumulative_density_ = pd.DataFrame(aj[cmprisk_label])
# Technically, cumulative incidence, but consistent with KaplanMeierFitter
self.event_table = aj[['removed', 'observed', self.label_cmprisk, 'censored', 'entrance', 'at_risk']] # Event table
self.variance, self.confidence_interval_ = self._bounds(aj['lagged_overall_survival'],
alpha=alpha, ci_labels=ci_labels)
return self
def _jitter(self, durations, event, jitter_level, seed=None):
"""Determine extent to jitter tied event times. Automatically called by fit if tied event times are detected
"""
if jitter_level <= 0:
raise ValueError('The jitter level is less than zero, please select a jitter value greater than 0')
if seed is not None:
np.random.seed(seed)
event_time = durations.loc[event != 0].copy()
# Determining whether to randomly shift event times up or down
mark = np.random.choice([-1, 1], size=event_time.shape[0])
# Determining extent to jitter event times up or down
shift = np.random.uniform(size=event_time.shape[0])*jitter_level
# Jittering times
event_time += mark*shift
durations_jitter = event_time.align(durations)[0].fillna(durations)
# Recursive call if event times are still tied after jitter
if np.sum(event_time.duplicated()) > 0:
return self._jitter(durations=durations_jitter, event=event, jitter_level=jitter_level, seed=seed)
else:
return durations_jitter
def _bounds(self, lagged_survival, alpha, ci_labels):
"""Bounds are based on pg411 of "Modelling Survival Data in Medical Research" David Collett 3rd Edition, which
is derived from Greenwood's variance estimator. Confidence intervals are obtained using the delta method
transformation of SE(log(-log(F_j))). This ensures that the confidence intervals all lie between 0 and 1.
        Formula for the variance follows:
        Var(F_j(t)) = sum((F_j(t) - F_j(t_i))**2 * d_i / (n_i * (n_i - d_i)))
                      + sum(S(t_i-1)**2 * (d_ji * (n_i - d_ji)) / n_i**3)
                      - 2 * sum((F_j(t) - F_j(t_i)) * S(t_i-1) * d_ji / n_i**2)
        where the sums run over event times t_i <= t, n_i is the number at risk at t_i, d_i is the number
        of events of any type at t_i, and d_ji is the number of events of type j at t_i.
        Delta method transformation:
        SE(log(-log(F_j))) = SE(F_j) / (F_j * absolute(log(F_j)))
More information can be found at: https://support.sas.com/documentation/onlinedoc/stat/141/lifetest.pdf
There is also an alternative method (Aalen) but this is not currently implemented
"""
# Preparing environment
df = self.event_table.copy()
df['Ft'] = self.cumulative_density_
df['lagS'] = lagged_survival.fillna(1)
if ci_labels is None:
ci_labels = ["%s_upper_%.2f" % (self._predict_label, alpha), "%s_lower_%.2f" % (self._predict_label, alpha)]
assert len(ci_labels) == 2, "ci_labels should be a length 2 array."
# Have to loop through each time independently. Don't think there is a faster way
all_vars = []
for i, r in df.iterrows():
sf = df.loc[df.index <= r.name].copy()
F_t = float(r['Ft'])
sf['part1'] = ((F_t - sf['Ft'])**2) * (sf['observed'] / (sf['at_risk']*(sf['at_risk'] - sf['observed'])))
sf['part2'] = ((sf['lagS'])**2) * sf[self.label_cmprisk] * ((sf['at_risk']-
sf[self.label_cmprisk]))/(sf['at_risk']**3)
sf['part3'] = (F_t - sf['Ft']) * sf['lagS'] * (sf[self.label_cmprisk] / (sf['at_risk']**2))
variance = (np.sum(sf['part1'])) + (np.sum(sf['part2'])) - 2*(np.sum(sf['part3']))
all_vars.append(variance)
df['variance'] = all_vars
# Calculating Confidence Intervals
df['F_transformed'] = np.log(-np.log(df['Ft']))
df['se_transformed'] = np.sqrt(df['variance']) / (df['Ft'] * np.absolute(np.log(df['Ft'])))
zalpha = inv_normal_cdf((1. + alpha) / 2.)
df[ci_labels[0]] = np.exp(-np.exp(df['F_transformed']+zalpha*df['se_transformed']))
df[ci_labels[1]] = np.exp(-np.exp(df['F_transformed']-zalpha*df['se_transformed']))
return df['variance'], df[ci_labels]
| [
"[email protected]"
] | |
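A minimal usage sketch for the AalenJohansenFitter defined above, with made-up durations and event codes (0 = censored, 1 = event of interest, 2 = competing event); the import path follows the file's location in the lifelines package, and the call signature follows the fit() docstring.

import numpy as np
from lifelines.fitters.aalen_johansen_fitter import AalenJohansenFitter

# Hypothetical data: untied event times, so no jittering warning is triggered.
durations = np.array([1.0, 2.0, 2.5, 3.0, 4.0, 5.5, 6.0, 7.5])
events = np.array([1, 0, 2, 1, 0, 2, 1, 0])  # event type per subject

ajf = AalenJohansenFitter()
ajf.fit(durations, events, event_of_interest=1)
print(ajf.cumulative_density_)     # cumulative incidence of event type 1
print(ajf.confidence_interval_)    # delta-method CIs on the log(-log) scale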
42aa2414994823c9aeed0028a4e7d2eed4a9863d | a01aec3906af00d3d40caf933b63d059043cd21d | /数据分析/数组快速挑选/布尔矩阵.py | 51223ed4cc4d7e6c0b54921200b51f553ecf3247 | [] | no_license | liuaichao/python-work | d23dfcbffff95a50204ead88a809570304ba7995 | a5dcd1d74f1a7c1728faaa60d26a3ddb9369f939 | refs/heads/master | 2021-07-04T16:03:16.443540 | 2020-09-27T06:51:09 | 2020-09-27T06:51:09 | 182,085,446 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | # -*- coding:utf-8 -*-
import numpy as np
from functools import reduce
a1 = ['我','你','他','她','他们','我们']
a2 = ['喜欢','想','拥有','练习','讨厌','学习']
a3 = ['豪车','别墅','python','数据分析','金钱','美酒']
arr_1 = np.column_stack(((np.column_stack((np.array(a1),np.array(a2)))),np.array(a3)))
arr_2 = np.column_stack(((np.column_stack((np.array(a1),np.array(a2)))),np.array(a3)))
np.random.shuffle(arr_1)
np.random.shuffle(arr_2)
random_ar = [[True if np.random.rand()>=0.5 else False for i in range(3)] for j in range(6)]
random_ar = np.array(random_ar)
print(arr_1)
print(arr_2)
print(random_ar)
al = np.where(random_ar,arr_1,arr_2)
print(al)
print(reduce(lambda x,y:x+y,al[2]))
| [
"[email protected]"
] | |
c7eb8d5964b35c8f24b6b4fd95646ca4e6d7ea49 | e36c5a91306f8d8cf487368d3a1dfae4c03da3c0 | /build/kobuki/kobuki_bumper2pc/catkin_generated/pkg.installspace.context.pc.py | 3ffee9fd146d172cc2c7437ea17aeec32e5fdf02 | [] | no_license | DocDouze/RobMob | 84ae5b96a16028586c9da2008f7c7772bdaa1334 | 6a2e7505eb2207d61b1c354cfd255075b1efbc73 | refs/heads/master | 2020-04-11T07:24:28.958201 | 2018-12-17T11:56:54 | 2018-12-17T11:56:54 | 161,607,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/aubailly/Bureau/RobMob/install/include".split(';') if "/home/aubailly/Bureau/RobMob/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;nodelet;pluginlib;sensor_msgs;kobuki_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lkobuki_bumper2pc_nodelet".split(';') if "-lkobuki_bumper2pc_nodelet" != "" else []
PROJECT_NAME = "kobuki_bumper2pc"
PROJECT_SPACE_DIR = "/home/aubailly/Bureau/RobMob/install"
PROJECT_VERSION = "0.7.6"
| [
"[email protected]"
] | |
dac3ebec146fc48e57be2d5e229e98045b9607a5 | c0ad282ab743a315e2f252a627933cb168434c1d | /shapeworld/captioners/conjunction.py | 5c6d9150477133f12ca4c36847ac38928275c407 | [
"MIT"
] | permissive | AlexKuhnle/ShapeWorld | 6d1e16adc94e860abae99ade869f72575f573bc4 | e720bf46e57fc01326d04d639fa6133d9c12158f | refs/heads/master | 2021-07-09T00:02:33.808969 | 2021-04-19T11:10:52 | 2021-04-19T11:10:52 | 80,815,972 | 58 | 28 | MIT | 2021-04-19T11:10:53 | 2017-02-03T09:40:19 | Python | UTF-8 | Python | false | false | 5,552 | py | from copy import deepcopy
from shapeworld import util
from shapeworld.captions import Proposition
from shapeworld.captioners import WorldCaptioner
class ConjunctionCaptioner(WorldCaptioner):
# incorrect modes
# 0: first incorrect
# 1: second incorrect
# 2: both incorrect
def __init__(
self,
captioner,
pragmatical_redundancy_rate=1.0,
pragmatical_tautology_rate=0.0,
logical_redundancy_rate=0.0,
logical_tautology_rate=0.0,
logical_contradiction_rate=0.0,
incorrect_distribution=(1, 1, 1)
):
super(ConjunctionCaptioner, self).__init__(
internal_captioners=(captioner, deepcopy(captioner)),
pragmatical_redundancy_rate=pragmatical_redundancy_rate,
pragmatical_tautology_rate=pragmatical_tautology_rate,
logical_redundancy_rate=logical_redundancy_rate,
logical_tautology_rate=logical_tautology_rate,
logical_contradiction_rate=logical_contradiction_rate
)
self.captioner1, self.captioner2 = self.internal_captioners
self.incorrect_distribution = util.cumulative_distribution(incorrect_distribution)
def set_realizer(self, realizer):
if not super(ConjunctionCaptioner, self).set_realizer(realizer=realizer):
return False
assert 'conjunction' in realizer.propositions
return True
def pn_length(self):
return super(ConjunctionCaptioner, self).pn_length() * 2 + 1
def pn_symbols(self):
return super(ConjunctionCaptioner, self).pn_symbols() | \
{'{}-{}{}'.format(Proposition.__name__, 'conjunction', n) for n in range(2, 3)}
def pn_arity(self):
arity = super(ConjunctionCaptioner, self).pn_arity()
arity.update({'{}-{}{}'.format(Proposition.__name__, 'conjunction', n): n for n in range(2, 3)})
return arity
def sample_values(self, mode, predication):
assert predication.empty()
if not super(ConjunctionCaptioner, self).sample_values(mode=mode, predication=predication):
return False
predication1 = predication.copy()
predication2 = predication.copy()
if not self.captioner1.sample_values(mode=mode, predication=predication1):
return False
if not self.captioner2.sample_values(mode=mode, predication=predication2):
return False
for _ in range(self.__class__.MAX_SAMPLE_ATTEMPTS):
self.incorrect_mode = util.sample(self.incorrect_distribution)
if self.incorrect_mode in (0, 2) and not self.captioner1.incorrect_possible():
continue
elif self.incorrect_mode in (1, 2) and not self.captioner2.incorrect_possible():
continue
break
else:
return False
return True
def incorrect_possible(self):
return self.captioner1.incorrect_possible() or self.captioner2.incorrect_possible()
def model(self):
return util.merge_dicts(
dict1=super(ConjunctionCaptioner, self).model(),
dict2=dict(
incorrect_mode=self.incorrect_mode,
captioner1=self.captioner1.model(),
captioner2=self.captioner2.model()
)
)
def caption(self, predication, world):
assert predication.empty()
predication1 = predication.copy()
predication2 = predication1.sub_predication()
clause2 = self.captioner2.caption(predication=predication2, world=world)
if clause2 is None:
return None
clause1 = self.captioner1.caption(predication=predication1, world=world)
if clause1 is None:
return None
proposition = Proposition(proptype='conjunction', clauses=(clause1, clause2))
if not self.correct(caption=proposition, predication=predication):
return None
return proposition
def incorrect(self, caption, predication, world):
assert predication.empty()
if self.incorrect_mode == 0: # 0: first incorrect
predication1 = predication.copy()
if not self.captioner1.incorrect(caption=caption.clauses[0], predication=predication1, world=world):
return False
if caption.clauses[0].agreement(predication=predication1, world=world) >= 0.0:
return False
elif self.incorrect_mode == 1: # 1: second incorrect
predication2 = predication.copy()
if not self.captioner2.incorrect(caption=caption.clauses[1], predication=predication2, world=world):
return False
if caption.clauses[1].agreement(predication=predication2, world=world) >= 0.0:
return False
elif self.incorrect_mode == 2: # 2: both incorrect
predication1 = predication.copy()
if not self.captioner1.incorrect(caption=caption.clauses[0], predication=predication1, world=world):
return False
if caption.clauses[0].agreement(predication=predication1, world=world) >= 0.0:
return False
predication2 = predication.copy()
if not self.captioner2.incorrect(caption=caption.clauses[1], predication=predication2, world=world):
return False
if caption.clauses[1].agreement(predication=predication2, world=world) >= 0.0:
return False
return self.correct(caption=caption, predication=predication)
| [
"[email protected]"
] | |
3cf5de725a5578d429689fda8de9500589456d15 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03436/s889620274.py | 7e3633505d9278af94081bda9da29bfc113148f5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | H,W = map(int,input().split())
S = [input() for i in range(H)]
blk = sum(row.count('#') for row in S)
from collections import deque
dxy = [(0,1),(1,0),(0,-1),(-1,0)]
dist = [[0]*W for i in range(H)]
visited = [[0]*W for i in range(H)]
visited[0][0] = 1
q = deque([(0,0)])
while q:
x,y = q.popleft()
for dx,dy in dxy:
nx,ny = x+dx,y+dy
if not 0 <= nx < W: continue
if not 0 <= ny < H: continue
if visited[ny][nx]: continue
if S[ny][nx] == '#': continue
visited[ny][nx] = 1
dist[ny][nx] = dist[y][x] + 1
q.append((nx,ny))
if visited[-1][-1]:
print(H*W - blk - dist[-1][-1] - 1)
else:
print(-1) | [
"[email protected]"
] | |
baec99e59f1be0ebe09c25bc8f1b105e189f52f4 | e3c8f786d09e311d6ea1cab50edde040bf1ea988 | /Incident-Response/Tools/grr/grr/server/grr_response_server/flows/general/checks_test.py | 0be3d9152c25d7959746892d62bdb237f98c8a51 | [
"Apache-2.0",
"MIT"
] | permissive | foss2cyber/Incident-Playbook | d1add8aec6e28a19e515754c6ce2e524d67f368e | a379a134c0c5af14df4ed2afa066c1626506b754 | refs/heads/main | 2023-06-07T09:16:27.876561 | 2021-07-07T03:48:54 | 2021-07-07T03:48:54 | 384,988,036 | 1 | 0 | MIT | 2021-07-11T15:45:31 | 2021-07-11T15:45:31 | null | UTF-8 | Python | false | false | 4,368 | py | #!/usr/bin/env python
"""Test the collector flows."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from absl import app
from grr_response_core import config
from grr_response_core.lib.parsers import config_file
from grr_response_core.lib.parsers import linux_file_parser
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_server.check_lib import checks
from grr_response_server.check_lib import checks_test_lib
from grr_response_server.flows.general import checks as flow_checks
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import parser_test_lib
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
# pylint: mode=test
class TestCheckFlows(flow_test_lib.FlowTestsBaseclass,
checks_test_lib.HostCheckTest):
checks_loaded = False
def setUp(self):
super().setUp()
self.client_id = self.SetupClient(0)
# Only load the checks once.
if self.checks_loaded is False:
self.checks_loaded = self.LoadChecks()
if not self.checks_loaded:
raise RuntimeError("No checks to test.")
self.client_mock = action_mocks.FileFinderClientMock()
def SetupLinuxUser(self):
user = rdf_client.User(username="user1", homedir="/home/user1")
return self.SetupClient(0, system="Linux", users=[user], os_version="12.04")
def SetupWindowsUser(self):
return self.SetupClient(0, system="Windows", os_version="6.2")
def RunFlow(self, client_id):
with vfs_test_lib.FakeTestDataVFSOverrider():
session_id = flow_test_lib.TestFlowHelper(
flow_checks.CheckRunner.__name__,
client_mock=self.client_mock,
client_id=client_id,
creator=self.test_username)
results = flow_test_lib.GetFlowResults(client_id, session_id)
return session_id, {r.check_id: r for r in results}
def LoadChecks(self):
"""Load the checks, returning the names of the checks that were loaded."""
checks.CheckRegistry.Clear()
check_configs = ("sshd.yaml", "sw.yaml", "unix_login.yaml")
cfg_dir = os.path.join(config.CONFIG["Test.data_dir"], "checks")
chk_files = [os.path.join(cfg_dir, f) for f in check_configs]
checks.LoadChecksFromFiles(chk_files)
return list(checks.CheckRegistry.checks.keys())
def testSelectArtifactsForChecks(self):
client_id = self.SetupLinuxUser()
session_id, _ = self.RunFlow(client_id)
state = flow_test_lib.GetFlowState(self.client_id, session_id)
self.assertIn("DebianPackagesStatus", state.artifacts_wanted)
self.assertIn("SshdConfigFile", state.artifacts_wanted)
client_id = self.SetupWindowsUser()
session_id, _ = self.RunFlow(client_id)
state = flow_test_lib.GetFlowState(self.client_id, session_id)
self.assertIn("WMIInstalledSoftware", state.artifacts_wanted)
def testCheckFlowSelectsChecks(self):
"""Confirm the flow runs checks for a target machine."""
client_id = self.SetupLinuxUser()
_, results = self.RunFlow(client_id)
expected = ["SHADOW-HASH", "SSHD-CHECK", "SSHD-PERMS", "SW-CHECK"]
self.assertRanChecks(expected, results)
@parser_test_lib.WithParser("Sshd", config_file.SshdConfigParser)
@parser_test_lib.WithParser("Pswd", linux_file_parser.LinuxSystemPasswdParser)
def testChecksProcessResultContext(self):
"""Test the flow returns parser results."""
client_id = self.SetupLinuxUser()
_, results = self.RunFlow(client_id)
# Detected by result_context: PARSER
exp = "Found: Sshd allows protocol 1."
self.assertCheckDetectedAnom("SSHD-CHECK", results, exp)
# Detected by result_context: RAW
exp = "Found: The filesystem supports stat."
found = ["/etc/ssh/sshd_config"]
self.assertCheckDetectedAnom("SSHD-PERMS", results, exp, found)
# Detected by result_context: ANOMALY
exp = "Found: Unix system account anomalies."
found = [
"Accounts with invalid gid.", "Mismatched passwd and shadow files."
]
self.assertCheckDetectedAnom("ODD-PASSWD", results, exp, found)
# No findings.
self.assertCheckUndetected("SHADOW-HASH", results)
self.assertCheckUndetected("SW-CHECK", results)
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| [
"[email protected]"
] | |
022e9a1d589b47f9157014098339ea295b9492f5 | d5552cda58e251e6a5983876681be8f641dea86f | /tests/models/clipseg/test_modeling_clipseg.py | 8378e75a3fe7dc4e83d55f4ea1db2e2bc8350210 | [
"Apache-2.0"
] | permissive | patrickvonplaten/transformers | feb121e1ee82c317ac7561836b8f95a7de25fc1f | f738502979f6787609dcf0180e6606f464692e27 | refs/heads/master | 2022-12-08T10:15:34.743198 | 2022-11-22T11:00:20 | 2022-11-22T11:00:20 | 226,201,271 | 6 | 1 | Apache-2.0 | 2019-12-05T22:39:46 | 2019-12-05T22:39:45 | null | UTF-8 | Python | false | false | 29,281 | py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch CLIPSeg model. """
import inspect
import os
import tempfile
import unittest
import numpy as np
import requests
import transformers
from transformers import MODEL_MAPPING, CLIPSegConfig, CLIPSegProcessor, CLIPSegTextConfig, CLIPSegVisionConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
is_flax_available,
is_pt_flax_cross_test,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import (
ModelTesterMixin,
_config_zero_init,
floats_tensor,
ids_tensor,
random_attention_mask,
)
if is_torch_available():
import torch
from torch import nn
from transformers import CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegTextModel, CLIPSegVisionModel
from transformers.models.clipseg.modeling_clipseg import CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
if is_flax_available():
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
class CLIPSegVisionModelTester:
def __init__(
self,
parent,
batch_size=12,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def get_config(self):
return CLIPSegVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values):
model = CLIPSegVisionModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(pixel_values)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = (self.image_size, self.image_size)
patch_size = (self.patch_size, self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class CLIPSegVisionModelTest(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as CLIPSeg does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (CLIPSegVisionModel,) if is_torch_available() else ()
fx_compatible = False
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = CLIPSegVisionModelTester(self)
self.config_tester = ConfigTester(
self, config_class=CLIPSegVisionConfig, has_text_modality=False, hidden_size=37
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="CLIPSeg does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_training(self):
pass
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(reason="CLIPSegVisionModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason="CLIPSegVisionModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_to_base(self):
pass
@slow
def test_model_from_pretrained(self):
for model_name in CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = CLIPSegVisionModel.from_pretrained(model_name)
self.assertIsNotNone(model)
class CLIPSegTextModelTester:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return CLIPSegTextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, input_ids, input_mask):
model = CLIPSegTextModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class CLIPSegTextModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (CLIPSegTextModel,) if is_torch_available() else ()
fx_compatible = False
test_pruning = False
test_head_masking = False
def setUp(self):
self.model_tester = CLIPSegTextModelTester(self)
self.config_tester = ConfigTester(self, config_class=CLIPSegTextConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_training(self):
pass
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(reason="CLIPSeg does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="CLIPSegTextModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason="CLIPSegTextModel has no base class and is not available in MODEL_MAPPING")
def test_save_load_fast_init_to_base(self):
pass
@slow
def test_model_from_pretrained(self):
for model_name in CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = CLIPSegTextModel.from_pretrained(model_name)
self.assertIsNotNone(model)
class CLIPSegModelTester:
def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
if text_kwargs is None:
text_kwargs = {}
if vision_kwargs is None:
vision_kwargs = {}
self.parent = parent
self.text_model_tester = CLIPSegTextModelTester(parent, **text_kwargs)
self.vision_model_tester = CLIPSegVisionModelTester(parent, **vision_kwargs)
self.is_training = is_training
def prepare_config_and_inputs(self):
text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, input_ids, attention_mask, pixel_values
def get_config(self):
return CLIPSegConfig.from_text_vision_configs(
self.text_model_tester.get_config(),
self.vision_model_tester.get_config(),
projection_dim=64,
reduce_dim=32,
extract_layers=[1, 2, 3],
)
def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
model = CLIPSegModel(config).to(torch_device).eval()
with torch.no_grad():
result = model(input_ids, pixel_values, attention_mask)
self.parent.assertEqual(
result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)
)
self.parent.assertEqual(
result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)
)
def create_and_check_model_for_image_segmentation(self, config, input_ids, attention_maks, pixel_values):
model = CLIPSegForImageSegmentation(config).to(torch_device).eval()
with torch.no_grad():
result = model(input_ids, pixel_values)
self.parent.assertEqual(
result.logits.shape,
(
self.vision_model_tester.batch_size,
self.vision_model_tester.image_size,
self.vision_model_tester.image_size,
),
)
self.parent.assertEqual(
result.conditional_embeddings.shape, (self.text_model_tester.batch_size, config.projection_dim)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, pixel_values = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"pixel_values": pixel_values,
}
return config, inputs_dict
@require_torch
class CLIPSegModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (CLIPSegModel, CLIPSegForImageSegmentation) if is_torch_available() else ()
fx_compatible = False
test_head_masking = False
test_pruning = False
test_resize_embeddings = False
test_attention_outputs = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
# CLIPSegForImageSegmentation requires special treatment
if return_labels:
if model_class.__name__ == "CLIPSegForImageSegmentation":
batch_size, _, height, width = inputs_dict["pixel_values"].shape
inputs_dict["labels"] = torch.zeros(
[batch_size, height, width], device=torch_device, dtype=torch.float
)
return inputs_dict
def setUp(self):
self.model_tester = CLIPSegModelTester(self)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_for_image_segmentation(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_for_image_segmentation(*config_and_inputs)
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Inputs_embeds is tested in individual model tests")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="CLIPSegModel does not have input/output embeddings")
def test_model_common_attributes(self):
pass
    # override as some parameters require custom initialization
def test_initialization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
                    # check if `logit_scale` is initialized as per the original implementation
if "logit_scale" in name:
self.assertAlmostEqual(
param.data.item(),
np.log(1 / 0.07),
delta=1e-3,
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
elif "film" in name or "transposed_conv" in name or "reduce" in name:
# those parameters use PyTorch' default nn.Linear initialization scheme
pass
else:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),
[0.0, 1.0],
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
return
        configs_no_init = _config_zero_init(config)  # To be sure we have no NaN
configs_no_init.torchscript = True
configs_no_init.return_dict = False
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
model.to(torch_device)
model.eval()
try:
input_ids = inputs_dict["input_ids"]
pixel_values = inputs_dict["pixel_values"] # CLIPSeg needs pixel_values
traced_model = torch.jit.trace(model, (input_ids, pixel_values))
except RuntimeError:
self.fail("Couldn't trace module.")
with tempfile.TemporaryDirectory() as tmp_dir_name:
pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")
try:
torch.jit.save(traced_model, pt_file_name)
except Exception:
self.fail("Couldn't save module.")
try:
loaded_model = torch.jit.load(pt_file_name)
except Exception:
self.fail("Couldn't load module.")
model.to(torch_device)
model.eval()
loaded_model.to(torch_device)
loaded_model.eval()
model_state_dict = model.state_dict()
loaded_model_state_dict = loaded_model.state_dict()
self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
models_equal = True
for layer_name, p1 in model_state_dict.items():
p2 = loaded_model_state_dict[layer_name]
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
def test_load_vision_text_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Save CLIPSegConfig and check if we can load CLIPSegVisionConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
vision_config = CLIPSegVisionConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())
# Save CLIPSegConfig and check if we can load CLIPSegTextConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
text_config = CLIPSegTextConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())
# overwrite from common since FlaxCLIPSegModel returns nested output
# which is not supported in the common test
@is_pt_flax_cross_test
def test_equivalence_pt_to_flax(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# load PyTorch class
pt_model = model_class(config).eval()
# Flax models don't use the `use_cache` option and cache is not returned as a default.
# So we disable `use_cache` here for PyTorch model.
pt_model.config.use_cache = False
fx_model_class_name = "Flax" + model_class.__name__
if not hasattr(transformers, fx_model_class_name):
return
fx_model_class = getattr(transformers, fx_model_class_name)
# load Flax class
fx_model = fx_model_class(config, dtype=jnp.float32)
                # make sure that only Flax inputs that actually exist in the function args are forwarded
fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys()
# prepare inputs
pt_inputs = self._prepare_for_class(inputs_dict, model_class)
# remove function args that don't exist in Flax
pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys}
fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
fx_model.params = fx_state
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs).to_tuple()
# convert inputs to Flax
fx_inputs = {k: np.array(v) for k, v in pt_inputs.items() if torch.is_tensor(v)}
fx_outputs = fx_model(**fx_inputs).to_tuple()
self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, from_pt=True)
fx_outputs_loaded = fx_model_loaded(**fx_inputs).to_tuple()
self.assertEqual(
len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
)
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)
# overwrite from common since FlaxCLIPSegModel returns nested output
# which is not supported in the common test
@is_pt_flax_cross_test
def test_equivalence_flax_to_pt(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# load corresponding PyTorch class
pt_model = model_class(config).eval()
                # Flax models don't use the `use_cache` option, so we disable it here for the PyTorch model.
pt_model.config.use_cache = False
fx_model_class_name = "Flax" + model_class.__name__
if not hasattr(transformers, fx_model_class_name):
# no flax model exists for this class
return
fx_model_class = getattr(transformers, fx_model_class_name)
# load Flax class
fx_model = fx_model_class(config, dtype=jnp.float32)
                # make sure that only Flax inputs that actually exist in the function args are forwarded
fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys()
pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
# make sure weights are tied in PyTorch
pt_model.tie_weights()
# prepare inputs
pt_inputs = self._prepare_for_class(inputs_dict, model_class)
# remove function args that don't exist in Flax
pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys}
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs).to_tuple()
fx_inputs = {k: np.array(v) for k, v in pt_inputs.items() if torch.is_tensor(v)}
fx_outputs = fx_model(**fx_inputs).to_tuple()
self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(tmpdirname)
pt_model_loaded = model_class.from_pretrained(tmpdirname, from_flax=True)
with torch.no_grad():
pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
self.assertEqual(
len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
)
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
def test_training(self):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
if model_class in get_values(MODEL_MAPPING):
continue
print("Model class:", model_class)
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
for k, v in inputs.items():
print(k, v.shape)
loss = model(**inputs).loss
loss.backward()
@slow
def test_model_from_pretrained(self):
for model_name in CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = CLIPSegModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
return image
@require_vision
@require_torch
class CLIPSegModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_image_segmentation(self):
model_name = "CIDAS/clipseg-rd64-refined"
processor = CLIPSegProcessor.from_pretrained(model_name)
model = CLIPSegForImageSegmentation.from_pretrained(model_name).to(torch_device)
image = prepare_img()
texts = ["a cat", "a remote", "a blanket"]
inputs = processor(text=texts, images=[image] * len(texts), padding=True, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the predicted masks
self.assertEqual(
outputs.logits.shape,
torch.Size((3, 352, 352)),
)
expected_masks_slice = torch.tensor(
[[-7.4577, -7.4952, -7.4072], [-7.3115, -7.0969, -7.1624], [-6.9472, -6.7641, -6.8911]]
)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_masks_slice, atol=1e-3))
# verify conditional and pooled output
expected_conditional = torch.tensor([0.5601, -0.0314, 0.1980])
expected_pooled_output = torch.tensor([0.2692, -0.7197, -0.1328])
self.assertTrue(torch.allclose(outputs.conditional_embeddings[0, :3], expected_conditional, atol=1e-3))
self.assertTrue(torch.allclose(outputs.pooled_output[0, :3], expected_pooled_output, atol=1e-3))
| [
"[email protected]"
] | |
3d7de32b8583f42e1269b5b1e6b2aeddc8da1a10 | f08e50d55bbbb90e4c8f9a8811eaede98ede2694 | /erpbee/assets/doctype/asset/test_asset.py | 569152221ac9e0cb1ee21dc6d6f36ecb1da9537c | [] | no_license | mohrezbak/erpbee | bc48472a99a7f4357aa7b82ff3a9c1a4c98ba017 | 1134156ad337fd472e14cf347479c17bd8db7b33 | refs/heads/main | 2023-02-12T01:32:07.858555 | 2021-01-08T17:25:23 | 2021-01-08T17:25:23 | 327,872,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,448 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import cstr, nowdate, getdate, flt, get_last_day, add_days, add_months
from erpbee.assets.doctype.asset.depreciation import post_depreciation_entries, scrap_asset, restore_asset
from erpbee.assets.doctype.asset.asset import make_sales_invoice
from erpbee.stock.doctype.purchase_receipt.test_purchase_receipt import make_purchase_receipt
from erpbee.accounts.doctype.purchase_invoice.test_purchase_invoice import make_purchase_invoice
from erpbee.stock.doctype.purchase_receipt.purchase_receipt import make_purchase_invoice as make_invoice
class TestAsset(unittest.TestCase):
def setUp(self):
set_depreciation_settings_in_company()
create_asset_data()
frappe.db.sql("delete from `tabTax Rule`")
def test_purchase_asset(self):
pr = make_purchase_receipt(item_code="Macbook Pro",
qty=1, rate=100000.0, location="Test Location")
asset_name = frappe.db.get_value("Asset", {"purchase_receipt": pr.name}, 'name')
asset = frappe.get_doc('Asset', asset_name)
asset.calculate_depreciation = 1
month_end_date = get_last_day(nowdate())
purchase_date = nowdate() if nowdate() != month_end_date else add_days(nowdate(), -15)
asset.available_for_use_date = purchase_date
asset.purchase_date = purchase_date
asset.append("finance_books", {
"expected_value_after_useful_life": 10000,
"depreciation_method": "Straight Line",
"total_number_of_depreciations": 3,
"frequency_of_depreciation": 10,
"depreciation_start_date": month_end_date
})
asset.submit()
pi = make_invoice(pr.name)
pi.supplier = "_Test Supplier"
pi.insert()
pi.submit()
asset.load_from_db()
self.assertEqual(asset.supplier, "_Test Supplier")
self.assertEqual(asset.purchase_date, getdate(purchase_date))
# Asset won't have reference to PI when purchased through PR
self.assertEqual(asset.purchase_receipt, pr.name)
expected_gle = (
("Asset Received But Not Billed - _TC", 100000.0, 0.0),
("Creditors - _TC", 0.0, 100000.0)
)
gle = frappe.db.sql("""select account, debit, credit from `tabGL Entry`
where voucher_type='Purchase Invoice' and voucher_no = %s
order by account""", pi.name)
self.assertEqual(gle, expected_gle)
pi.cancel()
asset.cancel()
asset.load_from_db()
pr.load_from_db()
pr.cancel()
self.assertEqual(asset.docstatus, 2)
def test_is_fixed_asset_set(self):
asset = create_asset(is_existing_asset = 1)
doc = frappe.new_doc('Purchase Invoice')
doc.supplier = '_Test Supplier'
doc.append('items', {
'item_code': 'Macbook Pro',
'qty': 1,
'asset': asset.name
})
doc.set_missing_values()
self.assertEquals(doc.items[0].is_fixed_asset, 1)
def test_schedule_for_straight_line_method(self):
pr = make_purchase_receipt(item_code="Macbook Pro",
qty=1, rate=100000.0, location="Test Location")
asset_name = frappe.db.get_value("Asset", {"purchase_receipt": pr.name}, 'name')
asset = frappe.get_doc('Asset', asset_name)
asset.calculate_depreciation = 1
asset.available_for_use_date = '2030-01-01'
asset.purchase_date = '2030-01-01'
asset.append("finance_books", {
"expected_value_after_useful_life": 10000,
"depreciation_method": "Straight Line",
"total_number_of_depreciations": 3,
"frequency_of_depreciation": 12,
"depreciation_start_date": "2030-12-31"
})
asset.save()
self.assertEqual(asset.status, "Draft")
expected_schedules = [
["2030-12-31", 30000.00, 30000.00],
["2031-12-31", 30000.00, 60000.00],
["2032-12-31", 30000.00, 90000.00]
]
schedules = [[cstr(d.schedule_date), d.depreciation_amount, d.accumulated_depreciation_amount]
for d in asset.get("schedules")]
self.assertEqual(schedules, expected_schedules)
def test_schedule_for_straight_line_method_for_existing_asset(self):
create_asset(is_existing_asset=1)
asset = frappe.get_doc("Asset", {"asset_name": "Macbook Pro 1"})
asset.calculate_depreciation = 1
asset.number_of_depreciations_booked = 1
asset.opening_accumulated_depreciation = 40000
asset.available_for_use_date = "2030-06-06"
asset.append("finance_books", {
"expected_value_after_useful_life": 10000,
"depreciation_method": "Straight Line",
"total_number_of_depreciations": 3,
"frequency_of_depreciation": 12,
"depreciation_start_date": "2030-12-31"
})
asset.insert()
self.assertEqual(asset.status, "Draft")
asset.save()
expected_schedules = [
["2030-12-31", 14246.58, 54246.58],
["2031-12-31", 25000.00, 79246.58],
["2032-06-06", 10753.42, 90000.00]
]
schedules = [[cstr(d.schedule_date), flt(d.depreciation_amount, 2), d.accumulated_depreciation_amount]
for d in asset.get("schedules")]
self.assertEqual(schedules, expected_schedules)
def test_schedule_for_double_declining_method(self):
pr = make_purchase_receipt(item_code="Macbook Pro",
qty=1, rate=100000.0, location="Test Location")
asset_name = frappe.db.get_value("Asset", {"purchase_receipt": pr.name}, 'name')
asset = frappe.get_doc('Asset', asset_name)
asset.calculate_depreciation = 1
asset.available_for_use_date = '2030-01-01'
asset.purchase_date = '2030-01-01'
asset.append("finance_books", {
"expected_value_after_useful_life": 10000,
"depreciation_method": "Double Declining Balance",
"total_number_of_depreciations": 3,
"frequency_of_depreciation": 12,
"depreciation_start_date": '2030-12-31'
})
asset.insert()
self.assertEqual(asset.status, "Draft")
asset.save()
expected_schedules = [
['2030-12-31', 66667.00, 66667.00],
['2031-12-31', 22222.11, 88889.11],
['2032-12-31', 1110.89, 90000.0]
]
schedules = [[cstr(d.schedule_date), d.depreciation_amount, d.accumulated_depreciation_amount]
for d in asset.get("schedules")]
self.assertEqual(schedules, expected_schedules)
def test_schedule_for_double_declining_method_for_existing_asset(self):
create_asset(is_existing_asset = 1)
asset = frappe.get_doc("Asset", {"asset_name": "Macbook Pro 1"})
asset.calculate_depreciation = 1
asset.is_existing_asset = 1
asset.number_of_depreciations_booked = 1
asset.opening_accumulated_depreciation = 50000
asset.available_for_use_date = '2030-01-01'
asset.purchase_date = '2029-11-30'
asset.append("finance_books", {
"expected_value_after_useful_life": 10000,
"depreciation_method": "Double Declining Balance",
"total_number_of_depreciations": 3,
"frequency_of_depreciation": 12,
"depreciation_start_date": "2030-12-31"
})
asset.insert()
self.assertEqual(asset.status, "Draft")
expected_schedules = [
["2030-12-31", 33333.50, 83333.50],
["2031-12-31", 6666.50, 90000.0]
]
schedules = [[cstr(d.schedule_date), d.depreciation_amount, d.accumulated_depreciation_amount]
for d in asset.get("schedules")]
self.assertEqual(schedules, expected_schedules)
def test_schedule_for_prorated_straight_line_method(self):
pr = make_purchase_receipt(item_code="Macbook Pro",
qty=1, rate=100000.0, location="Test Location")
asset_name = frappe.db.get_value("Asset", {"purchase_receipt": pr.name}, 'name')
asset = frappe.get_doc('Asset', asset_name)
asset.calculate_depreciation = 1
asset.purchase_date = '2030-01-30'
asset.is_existing_asset = 0
asset.available_for_use_date = "2030-01-30"
asset.append("finance_books", {
"expected_value_after_useful_life": 10000,
"depreciation_method": "Straight Line",
"total_number_of_depreciations": 3,
"frequency_of_depreciation": 12,
"depreciation_start_date": "2030-12-31"
})
asset.insert()
asset.save()
expected_schedules = [
["2030-12-31", 27534.25, 27534.25],
["2031-12-31", 30000.0, 57534.25],
["2032-12-31", 30000.0, 87534.25],
["2033-01-30", 2465.75, 90000.0]
]
schedules = [[cstr(d.schedule_date), flt(d.depreciation_amount, 2), flt(d.accumulated_depreciation_amount, 2)]
for d in asset.get("schedules")]
self.assertEqual(schedules, expected_schedules)
def test_depreciation(self):
pr = make_purchase_receipt(item_code="Macbook Pro",
qty=1, rate=100000.0, location="Test Location")
asset_name = frappe.db.get_value("Asset", {"purchase_receipt": pr.name}, 'name')
asset = frappe.get_doc('Asset', asset_name)
asset.calculate_depreciation = 1
asset.purchase_date = '2020-01-30'
asset.available_for_use_date = "2020-01-30"
asset.append("finance_books", {
"expected_value_after_useful_life": 10000,
"depreciation_method": "Straight Line",
"total_number_of_depreciations": 3,
"frequency_of_depreciation": 10,
"depreciation_start_date": "2020-12-31"
})
asset.insert()
asset.submit()
asset.load_from_db()
self.assertEqual(asset.status, "Submitted")
frappe.db.set_value("Company", "_Test Company", "series_for_depreciation_entry", "DEPR-")
post_depreciation_entries(date="2021-01-01")
asset.load_from_db()
# check depreciation entry series
self.assertEqual(asset.get("schedules")[0].journal_entry[:4], "DEPR")
expected_gle = (
("_Test Accumulated Depreciations - _TC", 0.0, 30000.0),
("_Test Depreciations - _TC", 30000.0, 0.0)
)
gle = frappe.db.sql("""select account, debit, credit from `tabGL Entry`
where against_voucher_type='Asset' and against_voucher = %s
order by account""", asset.name)
self.assertEqual(gle, expected_gle)
self.assertEqual(asset.get("value_after_depreciation"), 0)
def test_depreciation_entry_for_wdv_without_pro_rata(self):
pr = make_purchase_receipt(item_code="Macbook Pro",
qty=1, rate=8000.0, location="Test Location")
asset_name = frappe.db.get_value("Asset", {"purchase_receipt": pr.name}, 'name')
asset = frappe.get_doc('Asset', asset_name)
asset.calculate_depreciation = 1
asset.available_for_use_date = '2030-01-01'
asset.purchase_date = '2030-01-01'
asset.append("finance_books", {
"expected_value_after_useful_life": 1000,
"depreciation_method": "Written Down Value",
"total_number_of_depreciations": 3,
"frequency_of_depreciation": 12,
"depreciation_start_date": "2030-12-31"
})
asset.save(ignore_permissions=True)
self.assertEqual(asset.finance_books[0].rate_of_depreciation, 50.0)
expected_schedules = [
["2030-12-31", 4000.00, 4000.00],
["2031-12-31", 2000.00, 6000.00],
["2032-12-31", 1000.00, 7000.0],
]
schedules = [[cstr(d.schedule_date), flt(d.depreciation_amount, 2), flt(d.accumulated_depreciation_amount, 2)]
for d in asset.get("schedules")]
self.assertEqual(schedules, expected_schedules)
def test_pro_rata_depreciation_entry_for_wdv(self):
pr = make_purchase_receipt(item_code="Macbook Pro",
qty=1, rate=8000.0, location="Test Location")
asset_name = frappe.db.get_value("Asset", {"purchase_receipt": pr.name}, 'name')
asset = frappe.get_doc('Asset', asset_name)
asset.calculate_depreciation = 1
asset.available_for_use_date = '2030-06-06'
asset.purchase_date = '2030-01-01'
asset.append("finance_books", {
"expected_value_after_useful_life": 1000,
"depreciation_method": "Written Down Value",
"total_number_of_depreciations": 3,
"frequency_of_depreciation": 12,
"depreciation_start_date": "2030-12-31"
})
asset.save(ignore_permissions=True)
self.assertEqual(asset.finance_books[0].rate_of_depreciation, 50.0)
expected_schedules = [
["2030-12-31", 2279.45, 2279.45],
["2031-12-31", 2860.28, 5139.73],
["2032-12-31", 1430.14, 6569.87],
["2033-06-06", 430.13, 7000.0],
]
schedules = [[cstr(d.schedule_date), flt(d.depreciation_amount, 2), flt(d.accumulated_depreciation_amount, 2)]
for d in asset.get("schedules")]
self.assertEqual(schedules, expected_schedules)
def test_depreciation_entry_cancellation(self):
pr = make_purchase_receipt(item_code="Macbook Pro",
qty=1, rate=100000.0, location="Test Location")
asset_name = frappe.db.get_value("Asset", {"purchase_receipt": pr.name}, 'name')
asset = frappe.get_doc('Asset', asset_name)
asset.calculate_depreciation = 1
asset.available_for_use_date = '2020-06-06'
asset.purchase_date = '2020-06-06'
asset.append("finance_books", {
"expected_value_after_useful_life": 10000,
"depreciation_method": "Straight Line",
"total_number_of_depreciations": 3,
"frequency_of_depreciation": 10,
"depreciation_start_date": "2020-12-31"
})
asset.insert()
asset.submit()
post_depreciation_entries(date="2021-01-01")
asset.load_from_db()
# cancel depreciation entry
depr_entry = asset.get("schedules")[0].journal_entry
self.assertTrue(depr_entry)
frappe.get_doc("Journal Entry", depr_entry).cancel()
asset.load_from_db()
depr_entry = asset.get("schedules")[0].journal_entry
self.assertFalse(depr_entry)
def test_scrap_asset(self):
pr = make_purchase_receipt(item_code="Macbook Pro",
qty=1, rate=100000.0, location="Test Location")
asset_name = frappe.db.get_value("Asset", {"purchase_receipt": pr.name}, 'name')
asset = frappe.get_doc('Asset', asset_name)
asset.calculate_depreciation = 1
asset.available_for_use_date = '2020-01-01'
asset.purchase_date = '2020-01-01'
asset.append("finance_books", {
"expected_value_after_useful_life": 10000,
"depreciation_method": "Straight Line",
"total_number_of_depreciations": 10,
"frequency_of_depreciation": 1
})
asset.insert()
asset.submit()
post_depreciation_entries(date=add_months('2020-01-01', 4))
scrap_asset(asset.name)
asset.load_from_db()
self.assertEqual(asset.status, "Scrapped")
self.assertTrue(asset.journal_entry_for_scrap)
expected_gle = (
("_Test Accumulated Depreciations - _TC", 36000.0, 0.0),
("_Test Fixed Asset - _TC", 0.0, 100000.0),
("_Test Gain/Loss on Asset Disposal - _TC", 64000.0, 0.0)
)
gle = frappe.db.sql("""select account, debit, credit from `tabGL Entry`
where voucher_type='Journal Entry' and voucher_no = %s
order by account""", asset.journal_entry_for_scrap)
self.assertEqual(gle, expected_gle)
restore_asset(asset.name)
asset.load_from_db()
self.assertFalse(asset.journal_entry_for_scrap)
self.assertEqual(asset.status, "Partially Depreciated")
def test_asset_sale(self):
pr = make_purchase_receipt(item_code="Macbook Pro",
qty=1, rate=100000.0, location="Test Location")
asset_name = frappe.db.get_value("Asset", {"purchase_receipt": pr.name}, 'name')
asset = frappe.get_doc('Asset', asset_name)
asset.calculate_depreciation = 1
asset.available_for_use_date = '2020-06-06'
asset.purchase_date = '2020-06-06'
asset.append("finance_books", {
"expected_value_after_useful_life": 10000,
"depreciation_method": "Straight Line",
"total_number_of_depreciations": 3,
"frequency_of_depreciation": 10,
"depreciation_start_date": "2020-12-31"
})
asset.insert()
asset.submit()
post_depreciation_entries(date="2021-01-01")
si = make_sales_invoice(asset=asset.name, item_code="Macbook Pro", company="_Test Company")
si.customer = "_Test Customer"
si.due_date = nowdate()
si.get("items")[0].rate = 25000
si.insert()
si.submit()
self.assertEqual(frappe.db.get_value("Asset", asset.name, "status"), "Sold")
expected_gle = (
("_Test Accumulated Depreciations - _TC", 20392.16, 0.0),
("_Test Fixed Asset - _TC", 0.0, 100000.0),
("_Test Gain/Loss on Asset Disposal - _TC", 54607.84, 0.0),
("Debtors - _TC", 25000.0, 0.0)
)
gle = frappe.db.sql("""select account, debit, credit from `tabGL Entry`
where voucher_type='Sales Invoice' and voucher_no = %s
order by account""", si.name)
self.assertEqual(gle, expected_gle)
si.cancel()
self.assertEqual(frappe.db.get_value("Asset", asset.name, "status"), "Partially Depreciated")
def test_asset_expected_value_after_useful_life(self):
pr = make_purchase_receipt(item_code="Macbook Pro",
qty=1, rate=100000.0, location="Test Location")
asset_name = frappe.db.get_value("Asset", {"purchase_receipt": pr.name}, 'name')
asset = frappe.get_doc('Asset', asset_name)
asset.calculate_depreciation = 1
asset.available_for_use_date = '2020-06-06'
asset.purchase_date = '2020-06-06'
asset.append("finance_books", {
"expected_value_after_useful_life": 10000,
"depreciation_method": "Straight Line",
"total_number_of_depreciations": 3,
"frequency_of_depreciation": 10
})
asset.insert()
accumulated_depreciation_after_full_schedule = \
max([d.accumulated_depreciation_amount for d in asset.get("schedules")])
asset_value_after_full_schedule = (flt(asset.gross_purchase_amount) -
flt(accumulated_depreciation_after_full_schedule))
self.assertTrue(asset.finance_books[0].expected_value_after_useful_life >= asset_value_after_full_schedule)
def test_cwip_accounting(self):
pr = make_purchase_receipt(item_code="Macbook Pro",
qty=1, rate=5000, do_not_submit=True, location="Test Location")
pr.set('taxes', [{
'category': 'Total',
'add_deduct_tax': 'Add',
'charge_type': 'On Net Total',
'account_head': '_Test Account Service Tax - _TC',
'description': '_Test Account Service Tax',
'cost_center': 'Main - _TC',
'rate': 5.0
}, {
'category': 'Valuation and Total',
'add_deduct_tax': 'Add',
'charge_type': 'On Net Total',
'account_head': '_Test Account Shipping Charges - _TC',
'description': '_Test Account Shipping Charges',
'cost_center': 'Main - _TC',
'rate': 5.0
}])
pr.submit()
expected_gle = (
("Asset Received But Not Billed - _TC", 0.0, 5250.0),
("CWIP Account - _TC", 5250.0, 0.0)
)
pr_gle = frappe.db.sql("""select account, debit, credit from `tabGL Entry`
where voucher_type='Purchase Receipt' and voucher_no = %s
order by account""", pr.name)
self.assertEqual(pr_gle, expected_gle)
pi = make_invoice(pr.name)
pi.submit()
expected_gle = (
("_Test Account Service Tax - _TC", 250.0, 0.0),
("_Test Account Shipping Charges - _TC", 250.0, 0.0),
("Asset Received But Not Billed - _TC", 5250.0, 0.0),
("Creditors - _TC", 0.0, 5500.0),
("Expenses Included In Asset Valuation - _TC", 0.0, 250.0),
)
pi_gle = frappe.db.sql("""select account, debit, credit from `tabGL Entry`
where voucher_type='Purchase Invoice' and voucher_no = %s
order by account""", pi.name)
self.assertEqual(pi_gle, expected_gle)
asset = frappe.db.get_value('Asset',
{'purchase_receipt': pr.name, 'docstatus': 0}, 'name')
asset_doc = frappe.get_doc('Asset', asset)
month_end_date = get_last_day(nowdate())
asset_doc.available_for_use_date = nowdate() if nowdate() != month_end_date else add_days(nowdate(), -15)
self.assertEqual(asset_doc.gross_purchase_amount, 5250.0)
asset_doc.append("finance_books", {
"expected_value_after_useful_life": 200,
"depreciation_method": "Straight Line",
"total_number_of_depreciations": 3,
"frequency_of_depreciation": 10,
"depreciation_start_date": month_end_date
})
asset_doc.submit()
expected_gle = (
("_Test Fixed Asset - _TC", 5250.0, 0.0),
("CWIP Account - _TC", 0.0, 5250.0)
)
gle = frappe.db.sql("""select account, debit, credit from `tabGL Entry`
where voucher_type='Asset' and voucher_no = %s
order by account""", asset_doc.name)
self.assertEqual(gle, expected_gle)
def test_expense_head(self):
pr = make_purchase_receipt(item_code="Macbook Pro",
qty=2, rate=200000.0, location="Test Location")
doc = make_invoice(pr.name)
self.assertEquals('Asset Received But Not Billed - _TC', doc.items[0].expense_account)
def test_asset_cwip_toggling_cases(self):
cwip = frappe.db.get_value("Asset Category", "Computers", "enable_cwip_accounting")
name = frappe.db.get_value("Asset Category Account", filters={"parent": "Computers"}, fieldname=["name"])
cwip_acc = "CWIP Account - _TC"
frappe.db.set_value("Asset Category", "Computers", "enable_cwip_accounting", 0)
frappe.db.set_value("Asset Category Account", name, "capital_work_in_progress_account", "")
frappe.db.get_value("Company", "_Test Company", "capital_work_in_progress_account", "")
        # case 0 -- PI with cwip disabled, Asset with cwip disabled, no cwip account set
pi = make_purchase_invoice(item_code="Macbook Pro", qty=1, rate=200000.0, location="Test Location", update_stock=1)
asset = frappe.db.get_value('Asset', {'purchase_invoice': pi.name, 'docstatus': 0}, 'name')
asset_doc = frappe.get_doc('Asset', asset)
asset_doc.available_for_use_date = nowdate()
asset_doc.calculate_depreciation = 0
asset_doc.submit()
gle = frappe.db.sql("""select name from `tabGL Entry` where voucher_type='Asset' and voucher_no = %s""", asset_doc.name)
self.assertFalse(gle)
# case 1 -- PR with cwip disabled, Asset with cwip enabled
pr = make_purchase_receipt(item_code="Macbook Pro", qty=1, rate=200000.0, location="Test Location")
frappe.db.set_value("Asset Category", "Computers", "enable_cwip_accounting", 1)
frappe.db.set_value("Asset Category Account", name, "capital_work_in_progress_account", cwip_acc)
asset = frappe.db.get_value('Asset', {'purchase_receipt': pr.name, 'docstatus': 0}, 'name')
asset_doc = frappe.get_doc('Asset', asset)
asset_doc.available_for_use_date = nowdate()
asset_doc.calculate_depreciation = 0
asset_doc.submit()
gle = frappe.db.sql("""select name from `tabGL Entry` where voucher_type='Asset' and voucher_no = %s""", asset_doc.name)
self.assertFalse(gle)
# case 2 -- PR with cwip enabled, Asset with cwip disabled
pr = make_purchase_receipt(item_code="Macbook Pro", qty=1, rate=200000.0, location="Test Location")
frappe.db.set_value("Asset Category", "Computers", "enable_cwip_accounting", 0)
asset = frappe.db.get_value('Asset', {'purchase_receipt': pr.name, 'docstatus': 0}, 'name')
asset_doc = frappe.get_doc('Asset', asset)
asset_doc.available_for_use_date = nowdate()
asset_doc.calculate_depreciation = 0
asset_doc.submit()
gle = frappe.db.sql("""select name from `tabGL Entry` where voucher_type='Asset' and voucher_no = %s""", asset_doc.name)
self.assertTrue(gle)
# case 3 -- PI with cwip disabled, Asset with cwip enabled
pi = make_purchase_invoice(item_code="Macbook Pro", qty=1, rate=200000.0, location="Test Location", update_stock=1)
frappe.db.set_value("Asset Category", "Computers", "enable_cwip_accounting", 1)
asset = frappe.db.get_value('Asset', {'purchase_invoice': pi.name, 'docstatus': 0}, 'name')
asset_doc = frappe.get_doc('Asset', asset)
asset_doc.available_for_use_date = nowdate()
asset_doc.calculate_depreciation = 0
asset_doc.submit()
gle = frappe.db.sql("""select name from `tabGL Entry` where voucher_type='Asset' and voucher_no = %s""", asset_doc.name)
self.assertFalse(gle)
# case 4 -- PI with cwip enabled, Asset with cwip disabled
pi = make_purchase_invoice(item_code="Macbook Pro", qty=1, rate=200000.0, location="Test Location", update_stock=1)
frappe.db.set_value("Asset Category", "Computers", "enable_cwip_accounting", 0)
asset = frappe.db.get_value('Asset', {'purchase_invoice': pi.name, 'docstatus': 0}, 'name')
asset_doc = frappe.get_doc('Asset', asset)
asset_doc.available_for_use_date = nowdate()
asset_doc.calculate_depreciation = 0
asset_doc.submit()
gle = frappe.db.sql("""select name from `tabGL Entry` where voucher_type='Asset' and voucher_no = %s""", asset_doc.name)
self.assertTrue(gle)
frappe.db.set_value("Asset Category", "Computers", "enable_cwip_accounting", cwip)
frappe.db.set_value("Asset Category Account", name, "capital_work_in_progress_account", cwip_acc)
frappe.db.get_value("Company", "_Test Company", "capital_work_in_progress_account", cwip_acc)
def create_asset_data():
if not frappe.db.exists("Asset Category", "Computers"):
create_asset_category()
if not frappe.db.exists("Item", "Macbook Pro"):
create_fixed_asset_item()
if not frappe.db.exists("Location", "Test Location"):
frappe.get_doc({
'doctype': 'Location',
'location_name': 'Test Location'
}).insert()
def create_asset(**args):
args = frappe._dict(args)
create_asset_data()
asset = frappe.get_doc({
"doctype": "Asset",
"asset_name": args.asset_name or "Macbook Pro 1",
"asset_category": "Computers",
"item_code": args.item_code or "Macbook Pro",
"company": args.company or"_Test Company",
"purchase_date": "2015-01-01",
"calculate_depreciation": 0,
"gross_purchase_amount": 100000,
"purchase_receipt_amount": 100000,
"expected_value_after_useful_life": 10000,
"warehouse": args.warehouse or "_Test Warehouse - _TC",
"available_for_use_date": "2020-06-06",
"location": "Test Location",
"asset_owner": "Company",
"is_existing_asset": args.is_existing_asset or 0
})
try:
asset.save()
except frappe.DuplicateEntryError:
pass
if args.submit:
asset.submit()
return asset
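# Illustrative usage of the helper above (assumed call, not taken from the
# tests): create_asset(asset_name="Macbook Pro 2", submit=1) builds the same
# fixture but also submits it, since create_asset() calls asset.submit()
# whenever args.submit is truthy.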
def create_asset_category():
asset_category = frappe.new_doc("Asset Category")
asset_category.asset_category_name = "Computers"
asset_category.total_number_of_depreciations = 3
asset_category.frequency_of_depreciation = 3
asset_category.enable_cwip_accounting = 1
asset_category.append("accounts", {
"company_name": "_Test Company",
"fixed_asset_account": "_Test Fixed Asset - _TC",
"accumulated_depreciation_account": "_Test Accumulated Depreciations - _TC",
"depreciation_expense_account": "_Test Depreciations - _TC"
})
asset_category.insert()
def create_fixed_asset_item():
meta = frappe.get_meta('Asset')
naming_series = meta.get_field("naming_series").options.splitlines()[0] or 'ACC-ASS-.YYYY.-'
try:
frappe.get_doc({
"doctype": "Item",
"item_code": "Macbook Pro",
"item_name": "Macbook Pro",
"description": "Macbook Pro Retina Display",
"asset_category": "Computers",
"item_group": "All Item Groups",
"stock_uom": "Nos",
"is_stock_item": 0,
"is_fixed_asset": 1,
"auto_create_assets": 1,
"asset_naming_series": naming_series
}).insert()
except frappe.DuplicateEntryError:
pass
def set_depreciation_settings_in_company():
company = frappe.get_doc("Company", "_Test Company")
company.accumulated_depreciation_account = "_Test Accumulated Depreciations - _TC"
company.depreciation_expense_account = "_Test Depreciations - _TC"
company.disposal_account = "_Test Gain/Loss on Asset Disposal - _TC"
company.depreciation_cost_center = "_Test Cost Center - _TC"
company.save()
# Enable booking asset depreciation entry automatically
frappe.db.set_value("Accounts Settings", None, "book_asset_depreciation_entry_automatically", 1) | [
"[email protected]"
] | |
beb376cb4b79225dad11e14942e96e80e8dffa48 | 21f81fce20e657c175de388d5f6b8b8a78ac3ee9 | /examples/bend-flux.py | f3fe0a3dd83abcba8e09c5d2080eb1d63e51f0b1 | [] | no_license | tnakaicode/pymeep-example | c23a9be827a37477a3328668a62b0486413a31d7 | 40c9c77d5e26cf43771b57af95c324a5225515ef | refs/heads/master | 2021-01-08T02:01:47.641017 | 2020-02-20T14:22:07 | 2020-02-20T14:22:07 | 241,879,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,518 | py | # -*- coding: utf-8 -*-
# transmission around a 90-degree waveguide bend in 2d
from __future__ import division
import meep as mp
import numpy as np
import matplotlib.pyplot as plt
resolution = 10 # pixels/um
sx = 16 # size of cell in X direction
sy = 32 # size of cell in Y direction
cell = mp.Vector3(sx, sy, 0)
dpml = 1.0
pml_layers = [mp.PML(dpml)]
pad = 4 # padding distance between waveguide and cell edge
w = 1 # width of waveguide
wvg_xcen = 0.5*(sx-w-2*pad) # x center of vert. wvg
wvg_ycen = -0.5*(sy-w-2*pad) # y center of horiz. wvg
geometry = [mp.Block(size=mp.Vector3(mp.inf, w, mp.inf),
center=mp.Vector3(0, wvg_ycen, 0),
material=mp.Medium(epsilon=12))]
fcen = 0.15 # pulse center frequency
df = 0.1 # pulse width (in frequency)
sources = [mp.Source(mp.GaussianSource(fcen, fwidth=df),
component=mp.Ez,
center=mp.Vector3(-0.5*sx+dpml, wvg_ycen, 0),
size=mp.Vector3(0, w, 0))]
sim = mp.Simulation(cell_size=cell,
boundary_layers=pml_layers,
geometry=geometry,
sources=sources,
resolution=resolution)
nfreq = 100 # number of frequencies at which to compute flux
# reflected flux
refl_fr = mp.FluxRegion(
center=mp.Vector3(-0.5*sx+dpml+0.5, wvg_ycen, 0), size=mp.Vector3(0, 2*w, 0))
refl = sim.add_flux(fcen, df, nfreq, refl_fr)
# transmitted flux
tran_fr = mp.FluxRegion(center=mp.Vector3(
0.5*sx-dpml, wvg_ycen, 0), size=mp.Vector3(0, 2*w, 0))
tran = sim.add_flux(fcen, df, nfreq, tran_fr)
pt = mp.Vector3(0.5*sx-dpml-0.5, wvg_ycen)
sim.run(until_after_sources=mp.stop_when_fields_decayed(50, mp.Ez, pt, 1e-3))
# for normalization run, save flux fields data for reflection plane
straight_refl_data = sim.get_flux_data(refl)
# save incident power for transmission plane
straight_tran_flux = mp.get_fluxes(tran)
sim.reset_meep()
geometry = [mp.Block(mp.Vector3(sx-pad, w, mp.inf), center=mp.Vector3(-0.5*pad, wvg_ycen), material=mp.Medium(epsilon=12)),
mp.Block(mp.Vector3(w, sy-pad, mp.inf), center=mp.Vector3(wvg_xcen, 0.5*pad), material=mp.Medium(epsilon=12))]
sim = mp.Simulation(cell_size=cell,
boundary_layers=pml_layers,
geometry=geometry,
sources=sources,
resolution=resolution)
# reflected flux
refl = sim.add_flux(fcen, df, nfreq, refl_fr)
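# transmitted flux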
tran_fr = mp.FluxRegion(center=mp.Vector3(
wvg_xcen, 0.5*sy-dpml-0.5, 0), size=mp.Vector3(2*w, 0, 0))
tran = sim.add_flux(fcen, df, nfreq, tran_fr)
# for normal run, load negated fields to subtract incident from refl. fields
sim.load_minus_flux_data(refl, straight_refl_data)
pt = mp.Vector3(wvg_xcen, 0.5*sy-dpml-0.5)
sim.run(until_after_sources=mp.stop_when_fields_decayed(50, mp.Ez, pt, 1e-3))
bend_refl_flux = mp.get_fluxes(refl)
bend_tran_flux = mp.get_fluxes(tran)
flux_freqs = mp.get_flux_freqs(refl)
wl = []
Rs = []
Ts = []
for i in range(nfreq):
wl = np.append(wl, 1/flux_freqs[i])
Rs = np.append(Rs, -bend_refl_flux[i]/straight_tran_flux[i])
Ts = np.append(Ts, bend_tran_flux[i]/straight_tran_flux[i])
if mp.am_master():
plt.figure()
plt.plot(wl, Rs, 'bo-', label='reflectance')
plt.plot(wl, Ts, 'ro-', label='transmittance')
plt.plot(wl, 1-Rs-Ts, 'go-', label='loss')
plt.axis([5.0, 10.0, 0, 1])
plt.xlabel("wavelength (μm)")
plt.legend(loc="upper right")
plt.show()
| [
"[email protected]"
] | |
5ee4451116675fcba00220cffea5fe8064eb2391 | 63f9a0d150cbef75f4e6e8246dc7ecac3f3b6d09 | /python/ray/util/dask/scheduler.py | 0614d35641ec065f203e966a2e8e8a16c8f7ea09 | [
"Apache-2.0",
"MIT"
] | permissive | ray-project/maze-raylit | 79f0a5af9fe4bdc13a2d5b3919da867ed5439aab | a03cd14a50d87d58effea1d749391af530d7609c | refs/heads/master | 2023-01-23T04:23:35.178501 | 2020-12-04T22:34:14 | 2020-12-04T22:34:14 | 318,274,659 | 5 | 0 | Apache-2.0 | 2020-12-04T22:34:15 | 2020-12-03T17:47:58 | Python | UTF-8 | Python | false | false | 16,044 | py | import atexit
from collections import defaultdict
from multiprocessing.pool import ThreadPool
import threading
import ray
from dask.core import istask, ishashable, _execute_task
from dask.local import get_async, apply_sync
from dask.system import CPU_COUNT
from dask.threaded import pack_exception, _thread_get_id
from .callbacks import local_ray_callbacks, unpack_ray_callbacks
from .common import unpack_object_refs
main_thread = threading.current_thread()
default_pool = None
pools = defaultdict(dict)
pools_lock = threading.Lock()
def ray_dask_get(dsk, keys, **kwargs):
"""
A Dask-Ray scheduler. This scheduler will send top-level (non-inlined) Dask
tasks to a Ray cluster for execution. The scheduler will wait for the
tasks to finish executing, fetch the results, and repackage them into the
appropriate Dask collections. This particular scheduler uses a threadpool
to submit Ray tasks.
This can be passed directly to `dask.compute()`, as the scheduler:
>>> dask.compute(obj, scheduler=ray_dask_get)
You can override the currently active global Dask-Ray callbacks (e.g.
supplied via a context manager), the number of threads to use when
submitting the Ray tasks, or the threadpool used to submit Ray tasks:
>>> dask.compute(
obj,
scheduler=ray_dask_get,
ray_callbacks=some_ray_dask_callbacks,
num_workers=8,
pool=some_cool_pool,
)
Args:
dsk (Dict): Dask graph, represented as a task DAG dictionary.
keys (List[str]): List of Dask graph keys whose values we wish to
compute and return.
ray_callbacks (Optional[list[callable]]): Dask-Ray callbacks.
num_workers (Optional[int]): The number of worker threads to use in
the Ray task submission traversal of the Dask graph.
pool (Optional[ThreadPool]): A multiprocessing threadpool to use to
submit Ray tasks.
Returns:
Computed values corresponding to the provided keys.
"""
num_workers = kwargs.pop("num_workers", None)
pool = kwargs.pop("pool", None)
# We attempt to reuse any other thread pools that have been created within
# this thread and with the given number of workers. We reuse a global
# thread pool if num_workers is not given and we're in the main thread.
global default_pool
thread = threading.current_thread()
if pool is None:
with pools_lock:
if num_workers is None and thread is main_thread:
if default_pool is None:
default_pool = ThreadPool(CPU_COUNT)
atexit.register(default_pool.close)
pool = default_pool
elif thread in pools and num_workers in pools[thread]:
pool = pools[thread][num_workers]
else:
pool = ThreadPool(num_workers)
atexit.register(pool.close)
pools[thread][num_workers] = pool
ray_callbacks = kwargs.pop("ray_callbacks", None)
with local_ray_callbacks(ray_callbacks) as ray_callbacks:
# Unpack the Ray-specific callbacks.
(
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
ray_postsubmit_all_cbs,
ray_finish_cbs,
) = unpack_ray_callbacks(ray_callbacks)
# NOTE: We hijack Dask's `get_async` function, injecting a different
# task executor.
object_refs = get_async(
_apply_async_wrapper(
pool.apply_async,
_rayify_task_wrapper,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
),
len(pool._pool),
dsk,
keys,
get_id=_thread_get_id,
pack_exception=pack_exception,
**kwargs,
)
if ray_postsubmit_all_cbs is not None:
for cb in ray_postsubmit_all_cbs:
cb(object_refs, dsk)
# NOTE: We explicitly delete the Dask graph here so object references
# are garbage-collected before this function returns, i.e. before all
# Ray tasks are done. Otherwise, no intermediate objects will be
# cleaned up until all Ray tasks are done.
del dsk
result = ray_get_unpack(object_refs)
if ray_finish_cbs is not None:
for cb in ray_finish_cbs:
cb(result)
# cleanup pools associated with dead threads.
with pools_lock:
active_threads = set(threading.enumerate())
if thread is not main_thread:
for t in list(pools):
if t not in active_threads:
for p in pools.pop(t).values():
p.close()
return result
def _apply_async_wrapper(apply_async, real_func, *extra_args, **extra_kwargs):
"""
Wraps the given pool `apply_async` function, hotswapping `real_func` in as
the function to be applied and adding `extra_args` and `extra_kwargs` to
`real_func`'s call.
Args:
apply_async (callable): The pool function to be wrapped.
real_func (callable): The real function that we wish the pool apply
function to execute.
*extra_args: Extra positional arguments to pass to the `real_func`.
**extra_kwargs: Extra keyword arguments to pass to the `real_func`.
Returns:
- A wrapper function that will ignore its first `func` argument and
pass `real_func` in its place. To be passed to `dask.local.get_async`.
"""
def wrapper(func, args=(), kwds={}, callback=None): # noqa: M511
return apply_async(
real_func,
args=args + extra_args,
kwds=dict(kwds, **extra_kwargs),
callback=callback,
)
return wrapper
def _rayify_task_wrapper(
key,
task_info,
dumps,
loads,
get_id,
pack_exception,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
):
"""
The core Ray-Dask task execution wrapper, to be given to the thread pool's
`apply_async` function. Exactly the same as `execute_task`, except that it
calls `_rayify_task` on the task instead of `_execute_task`.
Args:
key (str): The Dask graph key whose corresponding task we wish to
execute.
task_info: The task to execute and its dependencies.
dumps (callable): A result serializing function.
loads (callable): A task_info deserializing function.
get_id (callable): An ID generating function.
pack_exception (callable): An exception serializing function.
ray_presubmit_cbs (callable): Pre-task submission callbacks.
ray_postsubmit_cbs (callable): Post-task submission callbacks.
ray_pretask_cbs (callable): Pre-task execution callbacks.
ray_posttask_cbs (callable): Post-task execution callbacks.
Returns:
A 3-tuple of the task's key, a literal or a Ray object reference for a
Ray task's result, and whether the Ray task submission failed.
"""
try:
task, deps = loads(task_info)
result = _rayify_task(
task,
key,
deps,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
)
id = get_id()
result = dumps((result, id))
failed = False
except BaseException as e:
result = pack_exception(e, dumps)
failed = True
return key, result, failed
def _rayify_task(
task,
key,
deps,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
):
"""
Rayifies the given task, submitting it as a Ray task to the Ray cluster.
Args:
task (tuple): A Dask graph value, being either a literal, dependency
key, Dask task, or a list thereof.
key (str): The Dask graph key for the given task.
deps (dict): The dependencies of this task.
ray_presubmit_cbs (callable): Pre-task submission callbacks.
ray_postsubmit_cbs (callable): Post-task submission callbacks.
ray_pretask_cbs (callable): Pre-task execution callbacks.
ray_posttask_cbs (callable): Post-task execution callbacks.
Returns:
A literal, a Ray object reference representing a submitted task, or a
list thereof.
"""
if isinstance(task, list):
# Recursively rayify this list. This will still bottom out at the first
# actual task encountered, inlining any tasks in that task's arguments.
return [
_rayify_task(
t,
key,
deps,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
) for t in task
]
elif istask(task):
# Unpacks and repacks Ray object references and submits the task to the
# Ray cluster for execution.
if ray_presubmit_cbs is not None:
alternate_returns = [
cb(task, key, deps) for cb in ray_presubmit_cbs
]
for alternate_return in alternate_returns:
# We don't submit a Ray task if a presubmit callback returns
# a non-`None` value, instead we return said value.
# NOTE: This returns the first non-None presubmit callback
# return value.
if alternate_return is not None:
return alternate_return
func, args = task[0], task[1:]
# If the function's arguments contain nested object references, we must
# unpack said object references into a flat set of arguments so that
# Ray properly tracks the object dependencies between Ray tasks.
object_refs, repack = unpack_object_refs(args, deps)
# Submit the task using a wrapper function.
object_ref = dask_task_wrapper.options(name=f"dask:{key!s}").remote(
func, repack, key, ray_pretask_cbs, ray_posttask_cbs, *object_refs)
if ray_postsubmit_cbs is not None:
for cb in ray_postsubmit_cbs:
cb(task, key, deps, object_ref)
return object_ref
elif not ishashable(task):
return task
elif task in deps:
return deps[task]
else:
return task
@ray.remote
def dask_task_wrapper(func, repack, key, ray_pretask_cbs, ray_posttask_cbs,
*args):
"""
A Ray remote function acting as a Dask task wrapper. This function will
repackage the given flat `args` into its original data structures using
`repack`, execute any Dask subtasks within the repackaged arguments
(inlined by Dask's optimization pass), and then pass the concrete task
arguments to the provided Dask task function, `func`.
Args:
func (callable): The Dask task function to execute.
repack (callable): A function that repackages the provided args into
the original (possibly nested) Python objects.
key (str): The Dask key for this task.
ray_pretask_cbs (callable): Pre-task execution callbacks.
ray_posttask_cbs (callable): Post-task execution callbacks.
*args (ObjectRef): Ray object references representing the Dask task's
arguments.
Returns:
The output of the Dask task. In the context of Ray, a
dask_task_wrapper.remote() invocation will return a Ray object
reference representing the Ray task's result.
"""
if ray_pretask_cbs is not None:
pre_states = [
cb(key, args) if cb is not None else None for cb in ray_pretask_cbs
]
repacked_args, repacked_deps = repack(args)
# Recursively execute Dask-inlined tasks.
actual_args = [_execute_task(a, repacked_deps) for a in repacked_args]
# Execute the actual underlying Dask task.
result = func(*actual_args)
if ray_posttask_cbs is not None:
for cb, pre_state in zip(ray_posttask_cbs, pre_states):
if cb is not None:
cb(key, result, pre_state)
return result
def ray_get_unpack(object_refs):
"""
Unpacks object references, gets the object references, and repacks.
Traverses arbitrary data structures.
Args:
object_refs: A (potentially nested) Python object containing Ray object
references.
Returns:
The input Python object with all contained Ray object references
resolved with their concrete values.
"""
if isinstance(object_refs, tuple):
object_refs = list(object_refs)
if isinstance(object_refs, list) and any(not isinstance(x, ray.ObjectRef)
for x in object_refs):
# We flatten the object references before calling ray.get(), since Dask
# loves to nest collections in nested tuples and Ray expects a flat
# list of object references. We repack the results after ray.get()
# completes.
object_refs, repack = unpack_object_refs(*object_refs)
computed_result = ray.get(object_refs)
return repack(computed_result)
else:
return ray.get(object_refs)
def ray_dask_get_sync(dsk, keys, **kwargs):
"""
A synchronous Dask-Ray scheduler. This scheduler will send top-level
(non-inlined) Dask tasks to a Ray cluster for execution. The scheduler will
wait for the tasks to finish executing, fetch the results, and repackage
them into the appropriate Dask collections. This particular scheduler
submits Ray tasks synchronously, which can be useful for debugging.
This can be passed directly to `dask.compute()`, as the scheduler:
>>> dask.compute(obj, scheduler=ray_dask_get_sync)
You can override the currently active global Dask-Ray callbacks (e.g.
supplied via a context manager):
>>> dask.compute(
obj,
scheduler=ray_dask_get_sync,
ray_callbacks=some_ray_dask_callbacks,
)
Args:
dsk (Dict): Dask graph, represented as a task DAG dictionary.
keys (List[str]): List of Dask graph keys whose values we wish to
compute and return.
Returns:
Computed values corresponding to the provided keys.
"""
ray_callbacks = kwargs.pop("ray_callbacks", None)
with local_ray_callbacks(ray_callbacks) as ray_callbacks:
# Unpack the Ray-specific callbacks.
(
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
ray_postsubmit_all_cbs,
ray_finish_cbs,
) = unpack_ray_callbacks(ray_callbacks)
# NOTE: We hijack Dask's `get_async` function, injecting a different
# task executor.
object_refs = get_async(
_apply_async_wrapper(
apply_sync,
_rayify_task_wrapper,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
),
1,
dsk,
keys,
**kwargs,
)
if ray_postsubmit_all_cbs is not None:
for cb in ray_postsubmit_all_cbs:
cb(object_refs, dsk)
# NOTE: We explicitly delete the Dask graph here so object references
# are garbage-collected before this function returns, i.e. before all
# Ray tasks are done. Otherwise, no intermediate objects will be
# cleaned up until all Ray tasks are done.
del dsk
result = ray_get_unpack(object_refs)
if ray_finish_cbs is not None:
for cb in ray_finish_cbs:
cb(result)
return result
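# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It shows the typical
# way the schedulers above are handed to dask.compute(); it assumes Ray and the
# dask "array" extra are installed and that a local Ray instance can be started
# here. The array shape and file-less setup are arbitrary illustration only.
if __name__ == "__main__":
    import dask
    import dask.array as da

    ray.init()
    # Build a small Dask collection and compute it on Ray via this scheduler.
    x = da.ones((1000, 1000), chunks=(100, 100))
    (total,) = dask.compute(x.sum(), scheduler=ray_dask_get)
    print(total)  # expected: 1000000.0
    ray.shutdown()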
| [
"[email protected]"
] | |
78ab9aaea1b7def48a3918b228987989375f6fda | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/10/usersdata/137/10363/submittedfiles/testes.py | 3f1eddb1bbd0d48889e66c57f0feb844589958e4 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
a=input('a:')
b=input('b:')
c=input('c:')
d=input('d:')
if a>b and a>c and a>d:
maior=a
elif b>a and b>c and b>d:
maior=b
elif c>a and c>b and c>d:
maior=c
elif d>a and d>b and d>c:
maior=d
if a<b and a<c and a<d: # new chain: menor must be found even after maior is set
menor=a
elif b<a and b<c and b<d:
menor=b
elif c<a and c<b and c<d:
menor=c
elif d<a and d<b and d<c:
menor=d
print ('%d,%d' %(menor,maior))
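# Hedged note (not in the original submission): with the two chains fixed above,
# the same result can be obtained more directly with the built-ins, e.g.
# print('%d,%d' % (min(a, b, c, d), max(a, b, c, d)))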
| [
"[email protected]"
] | |
3678749b295aba294063b492e55f9765d25c127f | 80f244addbd16914b3391f549670e7164b60e0cb | /Realestate4/Tagent4/migrations/0001_initial.py | 7d6f31c659a77640ff373ae56d72c6f5532871f4 | [] | no_license | Jagadishbommareddy/agentrest | df3817bb08b63e95f985935ebe7853492594619e | 4b5e7a2dcfc8a0f39e4a6a94fe3cde8232aece97 | refs/heads/master | 2021-06-27T05:03:30.703762 | 2017-09-14T15:37:57 | 2017-09-14T15:37:57 | 103,550,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,807 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-14 13:43
from __future__ import unicode_literals
import Tagent4.validations
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('address_id', models.AutoField(primary_key=True, serialize=False)),
('address1', models.CharField(max_length=100)),
('address2', models.CharField(max_length=100)),
('city', models.CharField(max_length=20, validators=[Tagent4.validations.validate_city])),
('state', models.CharField(max_length=20, validators=[Tagent4.validations.validate_state])),
('landmark', models.CharField(max_length=20, validators=[Tagent4.validations.validate_landmark])),
('pincode', models.IntegerField()),
],
),
migrations.CreateModel(
name='AgentReferal',
fields=[
('referal_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=30)),
('verified', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='ContactInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mobile_number', models.CharField(max_length=15)),
('phone_number', models.CharField(max_length=15)),
('email_id', models.EmailField(max_length=254)),
],
),
migrations.CreateModel(
name='Location',
fields=[
('location_id', models.AutoField(primary_key=True, serialize=False)),
('city', models.CharField(blank=True, max_length=20, null=True)),
('state', models.CharField(blank=True, max_length=20, null=True)),
],
),
migrations.CreateModel(
name='Media',
fields=[
('media_id', models.AutoField(primary_key=True, serialize=False)),
('media_name', models.CharField(max_length=20)),
('media_path', models.FileField(upload_to='documents/')),
],
),
migrations.CreateModel(
name='PropertyType',
fields=[
('propert_type_id', models.AutoField(primary_key=True, serialize=False)),
('description', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Agent',
fields=[
('media_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='Tagent4.Media')),
('contactinfo_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='Tagent4.ContactInfo')),
('agent_id', models.AutoField(primary_key=True, serialize=False)),
('first_name', models.CharField(max_length=20, validators=[Tagent4.validations.validate_first_name])),
('last_name', models.CharField(max_length=20, validators=[Tagent4.validations.validate_last_name])),
('age', models.IntegerField()),
('education', models.CharField(max_length=50, validators=[Tagent4.validations.validate_education])),
('company_name', models.CharField(max_length=50)),
('specialization', models.CharField(max_length=100, validators=[Tagent4.validations.validate_specelization])),
('experence', models.IntegerField()),
('agent_notes', models.TextField()),
('property_type', models.ManyToManyField(to='Tagent4.PropertyType')),
],
bases=('Tagent4.contactinfo', 'Tagent4.media'),
),
migrations.AddField(
model_name='location',
name='agent',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Tagent4.Agent'),
),
migrations.AddField(
model_name='agentreferal',
name='agent',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Tagent4.Agent'),
),
migrations.AddField(
model_name='address',
name='agent',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Tagent4.Agent'),
),
]
| [
"[email protected]"
] | |
62c80f119ab5024fb4c6c1e5c2d0e6081efe14b8 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/A/alokmaheshwari/follow-url.py | 149494399874ca6fa3215219ddc76469be641698 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,246 | py | import mechanize
import lxml.html
import scraperwiki
surl = "http://main.exoclick.com/click.php?data=eGhhbXN0ZXJ8MjQxMjQ3fDB8aHR0cCUzQSUyRiUyRnRyay5rbGlja3RyZWsuY29tJTJGYmFzZS5waHAlM0ZjJTNEODMlMjZrZXklM0Q4NzNkNTA5YWZiNTRjM2RiZjNiMjFiYTFjOGQyMzAxZiUyNnNvdXJjZSUzRHhoYW1zdGVyLmNvbXwzNDk1NHx8MHwxMDB8MTM1MDA3MDUxM3x4aGFtc3Rlci5jb218NDYuNDMuNTUuODd8MjQxMjQ3LTUyMDgxODR8NTIwODE4NHwxMDA2MzN8Mnw3fGE5MjgzZjg2MDBhMjJmNDc1NDI1NDVmODBlNDhmN2Ux&js=1"
br = mechanize.Browser()
#br.set_all_readonly(False) # allow everything to be written to
br.set_handle_robots(False) # no robots
br.set_handle_refresh(True) # can sometimes hang without this
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
response = br.open(surl)
print response.read()
br.form = list(br.forms())[0]
response = br.submit()
print response.geturl()
print response.read()
#br.set_handle_refresh(True) # can sometimes hang without this
#response1 = br.response() # get the response again
#print response1.read() # can apply lxml.html.fromstring()
| [
"[email protected]"
] | |
ec0f1ffdc69a6641606770b1d92ef89fe6fdde1f | d838bed08a00114c92b73982a74d96c15166a49e | /docs/data/learn/Bioinformatics/output/ch1_code/src/Stepik.1.7.ExerciseBreak1.py | 4c69d9c5f0fb22fce970c161b2e6eac54c8f7fbe | [] | no_license | offbynull/offbynull.github.io | 4911f53d77f6c59e7a453ee271b1e04e613862bc | 754a85f43159738b89dd2bde1ad6ba0d75f34b98 | refs/heads/master | 2023-07-04T00:39:50.013571 | 2023-06-17T20:27:05 | 2023-06-17T23:27:00 | 308,482,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | from GCSkew import gc_skew
skew = gc_skew('GAGCCACCGCGATA')
print(f'{" ".join([str(f) for f in skew])}') | [
"[email protected]"
] | |
e51a774e9038db02f31527d3a06914c40379467e | bf681fbd7edbf4f8f1e0b20cbd09b362f777c9c3 | /bazel/starlark_testing/formal/my_macro.bzl | dfd2634455066d945fe2123d13fc54dfc933a770 | [
"BSD-3-Clause"
] | permissive | EricCousineau-TRI/repro | 308d4a86f3c7da8be5811db2f3f68d39db60d7ed | 9800f45e07f511c9a355ee90333955451b55559a | refs/heads/master | 2023-08-31T13:49:23.540640 | 2023-08-25T19:18:33 | 2023-08-25T19:18:33 | 87,116,976 | 24 | 13 | NOASSERTION | 2023-03-25T01:40:55 | 2017-04-03T20:19:28 | Jupyter Notebook | UTF-8 | Python | false | false | 44 | bzl | def add_two_numbers(a, b):
return a + b
| [
"[email protected]"
] | |
bf90aef5300e024c4e7977593d9d595e0838b6e1 | c24212464eb84588edc7903a8905f2a881d578c4 | /migrations/versions/9db4f46dd61b_private_messages.py | 3f2b1c16b6007c7b1b7217c12ff58710f815a2b8 | [] | no_license | the-akira/Flask-Library | c533dc2fd1ac2d3d9e2732e7c7bed5b8cc7ca4bd | 833e77660053b1e95975ccdf8bf41a035722975c | refs/heads/master | 2023-05-25T12:08:15.898134 | 2023-02-07T23:36:50 | 2023-02-07T23:36:50 | 205,951,022 | 5 | 2 | null | 2023-02-15T22:08:36 | 2019-09-02T23:26:50 | HTML | UTF-8 | Python | false | false | 1,724 | py | """private messages
Revision ID: 9db4f46dd61b
Revises: 46e80c86a0fb
Create Date: 2022-05-16 01:53:35.196659
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9db4f46dd61b'
down_revision = '46e80c86a0fb'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('message',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('sender_id', sa.Integer(), nullable=True),
sa.Column('recipient_id', sa.Integer(), nullable=True),
sa.Column('body', sa.Text(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['recipient_id'], ['user.id'], ),
sa.ForeignKeyConstraint(['sender_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_message_timestamp'), 'message', ['timestamp'], unique=False)
op.create_foreign_key(None, 'analysis', 'user', ['user_id'], ['id'])
op.alter_column('book', 'image_book',
existing_type=sa.VARCHAR(length=20),
nullable=True)
op.add_column('user', sa.Column('last_message_read_time', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'last_message_read_time')
op.alter_column('book', 'image_book',
existing_type=sa.VARCHAR(length=20),
nullable=False)
op.drop_constraint(None, 'analysis', type_='foreignkey')
op.drop_index(op.f('ix_message_timestamp'), table_name='message')
op.drop_table('message')
# ### end Alembic commands ###
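# Hedged usage note (not part of the generated revision): with plain Alembic this
# revision is applied or rolled back from the command line, e.g.
#   alembic upgrade 9db4f46dd61b
#   alembic downgrade 46e80c86a0fb
# Under Flask-Migrate, which this project appears to use, the equivalents are
# `flask db upgrade` and `flask db downgrade`.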
| [
"[email protected]"
] | |
15fbb58d1ab6e4d3ad5e71efc5289fb5538ba0c4 | 3ea99519e25ec1bb605947a94b7a5ceb79b2870a | /modern_python/modernpython/lib/python3.6/site-packages/mypy/erasetype.py | 48cd31487038c3a7b492617d7348a7241574e77e | [] | no_license | tech-cow/spazzatura | 437c7502a0654a3d3db2fd1e96ce2e3e506243c0 | 45fc0932186d2ef0c5044745a23507a692cfcc26 | refs/heads/master | 2022-09-01T12:01:11.309768 | 2018-11-15T04:32:03 | 2018-11-15T04:32:03 | 130,414,653 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,635 | py | from typing import Optional, Container, Callable
from mypy.types import (
Type, TypeVisitor, UnboundType, AnyType, NoneTyp, TypeVarId, Instance, TypeVarType,
CallableType, TupleType, TypedDictType, UnionType, Overloaded, ErasedType, PartialType,
DeletedType, TypeTranslator, TypeList, UninhabitedType, TypeType, TypeOfAny
)
from mypy import experiments
def erase_type(typ: Type) -> Type:
"""Erase any type variables from a type.
Also replace tuple types with the corresponding concrete types. Replace
callable types with empty callable types.
Examples:
A -> A
B[X] -> B[Any]
Tuple[A, B] -> tuple
Callable[...] -> Callable[[], None]
Type[X] -> Type[Any]
"""
return typ.accept(EraseTypeVisitor())
class EraseTypeVisitor(TypeVisitor[Type]):
def visit_unbound_type(self, t: UnboundType) -> Type:
assert False, 'Not supported'
def visit_any(self, t: AnyType) -> Type:
return t
def visit_none_type(self, t: NoneTyp) -> Type:
return t
def visit_uninhabited_type(self, t: UninhabitedType) -> Type:
return t
def visit_erased_type(self, t: ErasedType) -> Type:
# Should not get here.
raise RuntimeError()
def visit_partial_type(self, t: PartialType) -> Type:
# Should not get here.
raise RuntimeError()
def visit_deleted_type(self, t: DeletedType) -> Type:
return t
def visit_instance(self, t: Instance) -> Type:
return Instance(t.type, [AnyType(TypeOfAny.special_form)] * len(t.args), t.line)
def visit_type_var(self, t: TypeVarType) -> Type:
return AnyType(TypeOfAny.special_form)
def visit_callable_type(self, t: CallableType) -> Type:
# We must preserve the fallback type for overload resolution to work.
ret_type = NoneTyp() # type: Type
return CallableType([], [], [], ret_type, t.fallback)
def visit_overloaded(self, t: Overloaded) -> Type:
return t.items()[0].accept(self)
def visit_tuple_type(self, t: TupleType) -> Type:
return t.fallback.accept(self)
def visit_typeddict_type(self, t: TypedDictType) -> Type:
return t.fallback.accept(self)
def visit_union_type(self, t: UnionType) -> Type:
erased_items = [erase_type(item) for item in t.items]
return UnionType.make_simplified_union(erased_items)
def visit_type_type(self, t: TypeType) -> Type:
return TypeType.make_normalized(t.item.accept(self), line=t.line)
def erase_typevars(t: Type, ids_to_erase: Optional[Container[TypeVarId]] = None) -> Type:
"""Replace all type variables in a type with any,
or just the ones in the provided collection.
"""
def erase_id(id: TypeVarId) -> bool:
if ids_to_erase is None:
return True
return id in ids_to_erase
return t.accept(TypeVarEraser(erase_id, AnyType(TypeOfAny.special_form)))
def replace_meta_vars(t: Type, target_type: Type) -> Type:
"""Replace unification variables in a type with the target type."""
return t.accept(TypeVarEraser(lambda id: id.is_meta_var(), target_type))
class TypeVarEraser(TypeTranslator):
"""Implementation of type erasure"""
def __init__(self, erase_id: Callable[[TypeVarId], bool], replacement: Type) -> None:
self.erase_id = erase_id
self.replacement = replacement
def visit_type_var(self, t: TypeVarType) -> Type:
if self.erase_id(t.id):
return self.replacement
return t
| [
"[email protected]"
] | |
3bbe155023b3a97c5e91f1df5960570a3fcf09b0 | 154fd16fe7828cb6925ca8f90e049b754ce06413 | /lino_book/projects/lydia/tests/dumps/18.12.0/finan_journalentryitem.py | d520f010ff31a28ccbf9549667aba2fb3c9447b9 | [
"BSD-2-Clause"
] | permissive | lino-framework/book | 68de2f8d130266bd9d9de7576d30597b3cde1c91 | 4eab916832cd8f48ff1b9fc8c2789f0b437da0f8 | refs/heads/master | 2021-03-27T16:16:55.403940 | 2021-03-15T02:53:50 | 2021-03-15T02:53:50 | 58,830,342 | 3 | 9 | BSD-2-Clause | 2021-03-09T13:11:27 | 2016-05-14T21:02:17 | Python | UTF-8 | Python | false | false | 205 | py | # -*- coding: UTF-8 -*-
logger.info("Loading 0 objects to table finan_journalentryitem...")
# fields: id, seqno, match, amount, dc, remark, account, partner, date, voucher
loader.flush_deferred_objects()
| [
"[email protected]"
] | |
e4f283777aca8b53ab305f01ade13de7a4711d27 | c7f4b7c79d8fc3491c01b46bb5c78192d3e0c5ae | /tests/test_UnweightedSearchTree___call__.py | 29dbcb6640eb211d089579d2046068bc20a37184 | [
"MIT"
] | permissive | Abjad/abjad-ext-nauert | 37d4ea2121b839d2c9388e925b241795cd5a0ae7 | ad6c649c79d096ce905a7a8e80cf9d7154727424 | refs/heads/main | 2023-08-24T15:32:15.519856 | 2023-08-02T15:05:08 | 2023-08-02T15:05:08 | 132,930,780 | 4 | 2 | MIT | 2023-04-15T14:18:20 | 2018-05-10T17:05:23 | Python | UTF-8 | Python | false | false | 1,648 | py | import abjadext.nauert
def test_UnweightedSearchTree___call___01():
definition = {2: {2: {2: None}, 3: None}, 5: None}
search_tree = abjadext.nauert.UnweightedSearchTree(definition)
q_grid = abjadext.nauert.QGrid()
a = abjadext.nauert.QEventProxy(
abjadext.nauert.SilentQEvent(0, ["A"], index=1), 0, 1
)
b = abjadext.nauert.QEventProxy(
abjadext.nauert.SilentQEvent((1, 5), ["B"], index=2), 0, 1
)
c = abjadext.nauert.QEventProxy(
abjadext.nauert.SilentQEvent((1, 4), ["C"], index=3), 0, 1
)
d = abjadext.nauert.QEventProxy(
abjadext.nauert.SilentQEvent((1, 3), ["D"], index=4), 0, 1
)
e = abjadext.nauert.QEventProxy(
abjadext.nauert.SilentQEvent((2, 5), ["E"], index=5), 0, 1
)
f = abjadext.nauert.QEventProxy(
abjadext.nauert.SilentQEvent((1, 2), ["F"], index=6), 0, 1
)
g = abjadext.nauert.QEventProxy(
abjadext.nauert.SilentQEvent((3, 5), ["G"], index=7), 0, 1
)
h = abjadext.nauert.QEventProxy(
abjadext.nauert.SilentQEvent((2, 3), ["H"], index=8), 0, 1
)
i = abjadext.nauert.QEventProxy(
abjadext.nauert.SilentQEvent((3, 4), ["I"], index=9), 0, 1
)
j = abjadext.nauert.QEventProxy(
abjadext.nauert.SilentQEvent((4, 5), ["J"], index=10), 0, 1
)
k = abjadext.nauert.QEventProxy(
abjadext.nauert.SilentQEvent(1, ["K"], index=11), 0, 1
)
q_grid.fit_q_events([a, b, c, d, e, f, g, h, i, j, k])
q_grids = search_tree(q_grid)
assert q_grids[0].root_node.rtm_format == "(1 (1 1))"
assert q_grids[1].root_node.rtm_format == "(1 (1 1 1 1 1))"
| [
"[email protected]"
] | |
62753c6572ceae98064f37c7803fd7fad44def88 | fa99ad8197e9accce25ae31a197e11c84683daaf | /kakao/phone.py | 42ffdb298d160f0792868d7216980fb6aed6bad9 | [] | no_license | vxda7/HomeAlgorithm | 933c3afae5fb81dce2707790544c0f4c3cdc44f9 | 9a8437913ba579b9a584f16048be81ae1d17d3e6 | refs/heads/master | 2020-09-08T16:37:02.089305 | 2020-07-10T06:58:58 | 2020-07-10T06:58:58 | 221,185,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,273 | py | def solution(numbers, hand):
answer = ''
rctonum = {"*":[0, 0], "#": [2, 0], 1:[0, 3], 2:[1, 3], 3: [2, 3], 4: [0, 2], 5: [1, 2], 6: [2, 2], 7:[0, 1], 8: [1, 1], 9:[2, 1], 0:[1, 0]}
nowleft = [0, 0]
nowright = [2, 0]
leftdis, rightdis = 0, 0
for one in numbers:
if one == 1 or one == 4 or one == 7:
answer += "L"
nowleft = rctonum[one]
elif one == 3 or one == 6 or one == 9:
answer += "R"
nowright = rctonum[one]
else: # when the digit is 2, 5, 8 or 0
leftdis = abs(rctonum[one][0] - nowleft[0]) + abs(rctonum[one][1] - nowleft[1])
rightdis = abs(rctonum[one][0] - nowright[0]) + abs(rctonum[one][1] - nowright[1])
if leftdis == rightdis:
if hand == "right":
answer += "R"
nowright = rctonum[one]
else:
answer += "L"
nowleft = rctonum[one]
elif leftdis > rightdis:
answer += "R"
nowright = rctonum[one]
elif leftdis < rightdis:
answer += "L"
nowleft = rctonum[one]
return answer
a = solution([1, 3, 4, 5, 8, 2, 1, 4, 5, 9, 5], "right")
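# Hedged check (not in the original file): for this sample input the published
# Kakao 2020 example answer is "LRLLLRLLRRL" -- each digit is typed by the thumb
# with the smaller Manhattan distance on the keypad grid, ties going to `hand`.
assert a == "LRLLLRLLRRL"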
print(a) | [
"[email protected]"
] | |
7fbc51dbe061ef625e5af45342ec77179b3f8c3f | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/sql/v20190601preview/_enums.py | 25bbca7a57d4b073a45cbd5b58f250f0b0307a61 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,026 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'AdministratorType',
'CatalogCollationType',
'CreateMode',
'DatabaseLicenseType',
'DatabaseReadScale',
'IdentityType',
'ManagedDatabaseCreateMode',
'SampleName',
'ServerPublicNetworkAccess',
'StorageAccountType',
'SyncConflictResolutionPolicy',
'SyncDirection',
'SyncMemberDbType',
]
class AdministratorType(str, Enum):
"""
Type of the sever administrator.
"""
ACTIVE_DIRECTORY = "ActiveDirectory"
class CatalogCollationType(str, Enum):
"""
Collation of the metadata catalog.
"""
DATABAS_E_DEFAULT = "DATABASE_DEFAULT"
SQ_L_LATIN1_GENERAL_CP1_C_I_AS = "SQL_Latin1_General_CP1_CI_AS"
class CreateMode(str, Enum):
"""
Specifies the mode of database creation.
Default: regular database creation.
Copy: creates a database as a copy of an existing database. sourceDatabaseId must be specified as the resource ID of the source database.
Secondary: creates a database as a secondary replica of an existing database. sourceDatabaseId must be specified as the resource ID of the existing primary database.
PointInTimeRestore: Creates a database by restoring a point in time backup of an existing database. sourceDatabaseId must be specified as the resource ID of the existing database, and restorePointInTime must be specified.
Recovery: Creates a database by restoring a geo-replicated backup. sourceDatabaseId must be specified as the recoverable database resource ID to restore.
Restore: Creates a database by restoring a backup of a deleted database. sourceDatabaseId must be specified. If sourceDatabaseId is the database's original resource ID, then sourceDatabaseDeletionDate must be specified. Otherwise sourceDatabaseId must be the restorable dropped database resource ID and sourceDatabaseDeletionDate is ignored. restorePointInTime may also be specified to restore from an earlier point in time.
RestoreLongTermRetentionBackup: Creates a database by restoring from a long term retention vault. recoveryServicesRecoveryPointResourceId must be specified as the recovery point resource ID.
Copy, Secondary, and RestoreLongTermRetentionBackup are not supported for DataWarehouse edition.
"""
DEFAULT = "Default"
COPY = "Copy"
SECONDARY = "Secondary"
POINT_IN_TIME_RESTORE = "PointInTimeRestore"
RESTORE = "Restore"
RECOVERY = "Recovery"
RESTORE_EXTERNAL_BACKUP = "RestoreExternalBackup"
RESTORE_EXTERNAL_BACKUP_SECONDARY = "RestoreExternalBackupSecondary"
RESTORE_LONG_TERM_RETENTION_BACKUP = "RestoreLongTermRetentionBackup"
ONLINE_SECONDARY = "OnlineSecondary"
class DatabaseLicenseType(str, Enum):
"""
The license type to apply for this database. `LicenseIncluded` if you need a license, or `BasePrice` if you have a license and are eligible for the Azure Hybrid Benefit.
"""
LICENSE_INCLUDED = "LicenseIncluded"
BASE_PRICE = "BasePrice"
class DatabaseReadScale(str, Enum):
"""
The state of read-only routing. If enabled, connections that have application intent set to readonly in their connection string may be routed to a readonly secondary replica in the same region.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class IdentityType(str, Enum):
"""
The identity type. Set this to 'SystemAssigned' in order to automatically create and assign an Azure Active Directory principal for the resource.
"""
NONE = "None"
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
class ManagedDatabaseCreateMode(str, Enum):
"""
Managed database create mode. PointInTimeRestore: Create a database by restoring a point in time backup of an existing database. SourceDatabaseName, SourceManagedInstanceName and PointInTime must be specified. RestoreExternalBackup: Create a database by restoring from external backup files. Collation, StorageContainerUri and StorageContainerSasToken must be specified. Recovery: Creates a database by restoring a geo-replicated backup. RecoverableDatabaseId must be specified as the recoverable database resource ID to restore. RestoreLongTermRetentionBackup: Create a database by restoring from a long term retention backup (longTermRetentionBackupResourceId required).
"""
DEFAULT = "Default"
RESTORE_EXTERNAL_BACKUP = "RestoreExternalBackup"
POINT_IN_TIME_RESTORE = "PointInTimeRestore"
RECOVERY = "Recovery"
RESTORE_LONG_TERM_RETENTION_BACKUP = "RestoreLongTermRetentionBackup"
class SampleName(str, Enum):
"""
The name of the sample schema to apply when creating this database.
"""
ADVENTURE_WORKS_LT = "AdventureWorksLT"
WIDE_WORLD_IMPORTERS_STD = "WideWorldImportersStd"
WIDE_WORLD_IMPORTERS_FULL = "WideWorldImportersFull"
class ServerPublicNetworkAccess(str, Enum):
"""
Whether or not public endpoint access is allowed for this server. Value is optional but if passed in, must be 'Enabled' or 'Disabled'
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class StorageAccountType(str, Enum):
"""
The storage account type used to store backups for this database.
"""
GRS = "GRS"
LRS = "LRS"
ZRS = "ZRS"
class SyncConflictResolutionPolicy(str, Enum):
"""
Conflict resolution policy of the sync group.
"""
HUB_WIN = "HubWin"
MEMBER_WIN = "MemberWin"
class SyncDirection(str, Enum):
"""
Sync direction of the sync member.
"""
BIDIRECTIONAL = "Bidirectional"
ONE_WAY_MEMBER_TO_HUB = "OneWayMemberToHub"
ONE_WAY_HUB_TO_MEMBER = "OneWayHubToMember"
class SyncMemberDbType(str, Enum):
"""
Database type of the sync member.
"""
AZURE_SQL_DATABASE = "AzureSqlDatabase"
SQL_SERVER_DATABASE = "SqlServerDatabase"
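# Hedged usage sketch (not part of the generated module): because every enum here
# mixes in ``str``, members compare equal to the raw strings the Azure API expects
# and can be passed wherever the provider accepts a plain string.
if __name__ == "__main__":
    assert CreateMode.DEFAULT == "Default"
    assert ServerPublicNetworkAccess.DISABLED == "Disabled"
    print(StorageAccountType.GRS.value)  # -> "GRS"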
| [
"[email protected]"
] | |
2bc19d1d207111ed43b63e5866fcca751932a569 | f0bbca88acab9f75a534c8b228f04abac33735f3 | /python/272.ClosestBinarySearchTreeValueII.py | bade89981c709daef2f4fb2d7f428729fef5917c | [] | no_license | MaxPoon/Leetcode | e4327a60d581f715a7c818b8e8e8aa472ed776c1 | 15f012927dc34b5d751af6633caa5e8882d26ff7 | refs/heads/master | 2020-09-17T05:33:13.877346 | 2019-05-09T04:34:54 | 2019-05-09T04:34:54 | 67,481,937 | 15 | 8 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from heapq import heappush, heappop
class Solution(object):
def closestKValues(self, root, target, k):
"""
:type root: TreeNode
:type target: float
:type k: int
:rtype: List[int]
"""
heap = []
self.closestRecursive(root, target, heap, k)
return [closest[1] for closest in heap]
def closestRecursive(self, node, target, heap, k):
diff = abs(node.val - target)
if len(heap) < k:
heappush(heap, (-diff, node.val))
elif diff < -heap[0][0]:
heappop(heap)
heappush(heap, (-diff, node.val))
if node.left and (len(heap)<k or diff < -heap[0][0] or node.val >= target):
self.closestRecursive(node.left, target, heap, k)
if node.right and (len(heap)<k or diff < -heap[0][0] or node.val<=target):
self.closestRecursive(node.right, target, heap, k)
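# Hedged usage sketch (not part of the original submission): build the small BST
#       4
#      / \
#     2   5
#    / \
#   1   3
# and ask for the 2 values closest to 3.7. The TreeNode class below simply
# mirrors the commented-out LeetCode definition at the top of this file.
if __name__ == "__main__":
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(4)
    root.left = TreeNode(2)
    root.right = TreeNode(5)
    root.left.left = TreeNode(1)
    root.left.right = TreeNode(3)
    print(sorted(Solution().closestKValues(root, 3.7, 2)))  # -> [3, 4]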
| [
"[email protected]"
] | |
fc24a39a70270e955fe523e8fc7e16760c7f5e10 | 9009ad47bc1d6adf8ee6d0f2f2b3125dea44c0aa | /cf-999-a.py | 4a0a9b2c17abfd11d87b3733bfd2c74232bfddd1 | [] | no_license | luctivud/Coding-Trash | 42e880624f39a826bcaab9b6194add2c9b3d71fc | 35422253f6169cc98e099bf83c650b1fb3acdb75 | refs/heads/master | 2022-12-12T00:20:49.630749 | 2020-09-12T17:38:30 | 2020-09-12T17:38:30 | 241,000,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | #~~~~~~~~~~~~~~~ JAI SHREE RAM ~~~~~~~~~~~~~~~~~~#
import math; from collections import *
import sys; from functools import reduce
# sys.setrecursionlimit(10**6)
def get_ints(): return map(int, input().strip().split())
def get_list(): return list(get_ints())
def get_string(): return list(input().strip().split())
def printxsp(*args): return print(*args, end="")
def printsp(*args): return print(*args, end=" ")
DIRECTIONS = [[0, 1], [0, -1], [1, 0], [-1, 0]] #up, down, right, left
NEIGHBOURS = [(i, j) for i in range(-1, 2) for j in range(-1, 2) if (i!=0 or j!=0)]
OrdUnicode_a = ord('a'); OrdUnicode_A = ord('A')
CAPS_ALPHABETS = {chr(i+OrdUnicode_A) : i for i in range(26)}
SMOL_ALPHABETS = {chr(i+OrdUnicode_a) : i for i in range(26)}
UGLYMOD = int(1e9)+7; SEXYMOD = 998244353; MAXN = int(1e5)+1; INFINITY = float('inf')
# sys.stdin=open("input.txt","r");sys.stdout=open("output.txt","w")
# for _testcases_ in range(int(input())):
n, k = get_ints()
li = get_list()
ans = 0
for i in li:
if i > k:
break
ans += 1
for i in li[::-1]:
if i > k:
break
ans += 1
print(min(ans, n))
'''
>>> COMMENT THE STDIN!! CHANGE ONLINE JUDGE !!
THE LOGIC AND APPROACH IS MINE @luctivud ( UDIT GUPTA )
Link may be copy-pasted here if it's taken from other source.
DO NOT PLAGIARISE.
>>> COMMENT THE STDIN!! CHANGE ONLINE JUDGE !!
'''
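# Worked example (hedged; this solution targets Codeforces 999A "Mishka and Contest"):
# for input n=5, k=3 and li=[1, 2, 3, 4, 5], the left-to-right scan counts 1, 2, 3
# (three problems with difficulty <= k) and stops at 4; the right-to-left scan stops
# immediately at 5 > k, contributing 0. The program therefore prints min(3 + 0, 5) = 3.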
| [
"[email protected]"
] | |
392f97c7aa8fafdedcc7b8a17f57f824a0419300 | 1c6283303ceb883add8de4ee07c5ffcfc2e93fab | /Jinja2/lib/python3.7/site-packages/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/learnedinfo/pcedetailedsrsynclspupdateparams_099ec5956b09590499b5079ba90354c9.py | 58f5afb82b0d0a79560cdbab2c5cc6541e232314 | [] | no_license | pdobrinskiy/devcore | 0f5b3dfc2f3bf1e44abd716f008a01c443e14f18 | 580c7df6f5db8c118990cf01bc2b986285b9718b | refs/heads/main | 2023-07-29T20:28:49.035475 | 2021-09-14T10:02:16 | 2021-09-14T10:02:16 | 405,919,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,032 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class PceDetailedSrSyncLspUpdateParams(Base):
"""PCE Learned LSPs Information Database
The PceDetailedSrSyncLspUpdateParams class encapsulates a list of pceDetailedSrSyncLspUpdateParams resources that are managed by the system.
A list of resources can be retrieved from the server using the PceDetailedSrSyncLspUpdateParams.find() method.
"""
__slots__ = ()
_SDM_NAME = 'pceDetailedSrSyncLspUpdateParams'
_SDM_ATT_MAP = {
'Bandwidth': 'bandwidth',
'BindingType': 'bindingType',
'Bos': 'bos',
'ConfigureBandwidth': 'configureBandwidth',
'ConfigureEro': 'configureEro',
'ConfigureLsp': 'configureLsp',
'ConfigureLspa': 'configureLspa',
'ConfigureMetric': 'configureMetric',
'ExcludeAny': 'excludeAny',
'HoldingPriority': 'holdingPriority',
'IncludeAll': 'includeAll',
'IncludeAny': 'includeAny',
'IncludeConfiguredERO': 'includeConfiguredERO',
'IncludeSrp': 'includeSrp',
'IncludeSymbolicPathName': 'includeSymbolicPathName',
'IncludeTEPathBindingTLV': 'includeTEPathBindingTLV',
'IncludeXro': 'includeXro',
'LocalProtection': 'localProtection',
'MplsLabel': 'mplsLabel',
'NumberOfEroSubObjects': 'numberOfEroSubObjects',
'NumberOfMetricSubObjects': 'numberOfMetricSubObjects',
'NumberOfXroSubObjects': 'numberOfXroSubObjects',
'OverridePLSPID': 'overridePLSPID',
'OverrideSrpId': 'overrideSrpId',
'PceTriggersChoiceList': 'pceTriggersChoiceList',
'PlspIdTriggerParam': 'plspIdTriggerParam',
'SendEmptyTLV': 'sendEmptyTLV',
'SetupPriority': 'setupPriority',
'SrpId': 'srpId',
'Srv6SID': 'srv6SID',
'Tc': 'tc',
'Ttl': 'ttl',
'XroFailBit': 'xroFailBit',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(PceDetailedSrSyncLspUpdateParams, self).__init__(parent, list_op)
@property
def PceUpdateSrEroSubObjectList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.pceupdatesrerosubobjectlist_d9e41e4990a041fcca2fc6fd076cf303.PceUpdateSrEroSubObjectList): An instance of the PceUpdateSrEroSubObjectList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.pceupdatesrerosubobjectlist_d9e41e4990a041fcca2fc6fd076cf303 import PceUpdateSrEroSubObjectList
if self._properties.get('PceUpdateSrEroSubObjectList', None) is not None:
return self._properties.get('PceUpdateSrEroSubObjectList')
else:
return PceUpdateSrEroSubObjectList(self)
@property
def PceUpdateSrMetricSubObjectList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.pceupdatesrmetricsubobjectlist_161f8505e21b0db145157226f5332ddd.PceUpdateSrMetricSubObjectList): An instance of the PceUpdateSrMetricSubObjectList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.pceupdatesrmetricsubobjectlist_161f8505e21b0db145157226f5332ddd import PceUpdateSrMetricSubObjectList
if self._properties.get('PceUpdateSrMetricSubObjectList', None) is not None:
return self._properties.get('PceUpdateSrMetricSubObjectList')
else:
return PceUpdateSrMetricSubObjectList(self)
@property
def PceUpdateXroSubObjectList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.pceupdatexrosubobjectlist_3cb16b2513bf72ff7ee4a5e0387625cf.PceUpdateXroSubObjectList): An instance of the PceUpdateXroSubObjectList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.pceupdatexrosubobjectlist_3cb16b2513bf72ff7ee4a5e0387625cf import PceUpdateXroSubObjectList
if self._properties.get('PceUpdateXroSubObjectList', None) is not None:
return self._properties.get('PceUpdateXroSubObjectList')
else:
return PceUpdateXroSubObjectList(self)
@property
def Bandwidth(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Bandwidth (bps)
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Bandwidth']))
@property
def BindingType(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Indicates the type of binding included in the TLV. Types are as follows: 20bit MPLS Label 32bit MPLS Label SRv6 SID Default value is 20bit MPLS Label.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BindingType']))
@property
def Bos(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): This bit is set to True for the last entry in the label stack i.e., for the bottom of the stack, and False for all other label stack entries. This control will be editable only if Binding Type is MPLS Label 32bit.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Bos']))
@property
def ConfigureBandwidth(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configure Bandwidth
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ConfigureBandwidth']))
@property
def ConfigureEro(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configure ERO
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ConfigureEro']))
@property
def ConfigureLsp(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configure LSP
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ConfigureLsp']))
@property
def ConfigureLspa(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configure LSPA
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ConfigureLspa']))
@property
def ConfigureMetric(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configure Metric
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ConfigureMetric']))
@property
def ExcludeAny(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Exclude Any
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExcludeAny']))
@property
def HoldingPriority(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Holding Priority
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HoldingPriority']))
@property
def IncludeAll(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Include All
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeAll']))
@property
def IncludeAny(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Include Any
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeAny']))
@property
def IncludeConfiguredERO(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If this is enabled, entire ERO will be go out in packet even if there is Binding SID, meaning no SR-ERO/SRv6-ERO validation will be done.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeConfiguredERO']))
@property
def IncludeSrp(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Indicates whether SRP object will be included in a PCInitiate message. All other attributes in sub-tab-SRP would be editable only if this checkbox is enabled.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeSrp']))
@property
def IncludeSymbolicPathName(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Indicates if Symbolic-Path-Name TLV is to be included in PCUpdate trigger message.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeSymbolicPathName']))
@property
def IncludeTEPathBindingTLV(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Indicates if TE-PATH-BINDING TLV is to be included in PCUpdate trigger message.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeTEPathBindingTLV']))
@property
def IncludeXro(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Indicates whether XRO object will be included in a PcUpdate message. All other attributes in sub-tab Update XRO would be editable only if this checkbox is enabled.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludeXro']))
@property
def LocalProtection(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Local Protection
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalProtection']))
@property
def MplsLabel(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): This control will be editable if the Binding Type is set to either 20bit or 32bit MPLS-Label. This field will take the 20bit value of the MPLS-Label
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MplsLabel']))
@property
def NumberOfEroSubObjects(self):
# type: () -> int
"""
Returns
-------
- number: Value that indicates the number of ERO Sub Objects to be configured.
"""
return self._get_attribute(self._SDM_ATT_MAP['NumberOfEroSubObjects'])
@NumberOfEroSubObjects.setter
def NumberOfEroSubObjects(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['NumberOfEroSubObjects'], value)
@property
def NumberOfMetricSubObjects(self):
# type: () -> int
"""
Returns
-------
- number: Value that indicates the number of Metric Objects to be configured.
"""
return self._get_attribute(self._SDM_ATT_MAP['NumberOfMetricSubObjects'])
@NumberOfMetricSubObjects.setter
def NumberOfMetricSubObjects(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['NumberOfMetricSubObjects'], value)
@property
def NumberOfXroSubObjects(self):
# type: () -> int
"""
Returns
-------
- number: Value that indicates the number of XRO Sub Objects to be configured.
"""
return self._get_attribute(self._SDM_ATT_MAP['NumberOfXroSubObjects'])
@NumberOfXroSubObjects.setter
def NumberOfXroSubObjects(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['NumberOfXroSubObjects'], value)
@property
def OverridePLSPID(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Allows the user to Send PcUpdate with an unknown PLSP-ID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OverridePLSPID']))
@property
def OverrideSrpId(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Indicates whether SRP object will be included in a PCUpdate trigger parameters. All other attributes in sub-tab-SRP would be editable only if this checkbox is enabled.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OverrideSrpId']))
@property
def PceTriggersChoiceList(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Based on options selected, IxNetwork sends information to PCPU and refreshes the statistical data in the corresponding tab of Learned Information
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PceTriggersChoiceList']))
@property
def PlspIdTriggerParam(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The value of PLSP-ID that should be put in the PcUpdate Message
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PlspIdTriggerParam']))
@property
def SendEmptyTLV(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If enabled all fields after Binding Type will be grayed out.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SendEmptyTLV']))
@property
def SetupPriority(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Setup Priority
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SetupPriority']))
@property
def SrpId(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The SRP object is used to correlate between initiation requests sent by the PCE and the error reports and state reports sent by the PCC. This number is unique per PCEP session and is incremented per initiation.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SrpId']))
@property
def Srv6SID(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID with a format of a 16 byte IPv6 address.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SID']))
@property
def Tc(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): This field is used to carry traffic class information. This control will be editable only if Binding Type is MPLS Label 32bit.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tc']))
@property
def Ttl(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): This field is used to encode a time-to-live value. This control will be editable only if Binding Type is MPLS Label 32bit.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ttl']))
@property
def XroFailBit(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): XRO Fail bit
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['XroFailBit']))
def update(self, NumberOfEroSubObjects=None, NumberOfMetricSubObjects=None, NumberOfXroSubObjects=None):
# type: (int, int, int) -> PceDetailedSrSyncLspUpdateParams
"""Updates pceDetailedSrSyncLspUpdateParams resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- NumberOfEroSubObjects (number): Value that indicates the number of ERO Sub Objects to be configured.
- NumberOfMetricSubObjects (number): Value that indicates the number of Metric Objects to be configured.
- NumberOfXroSubObjects (number): Value that indicates the number of XRO Sub Objects to be configured.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, NumberOfEroSubObjects=None, NumberOfMetricSubObjects=None, NumberOfXroSubObjects=None):
# type: (int, int, int) -> PceDetailedSrSyncLspUpdateParams
"""Adds a new pceDetailedSrSyncLspUpdateParams resource on the json, only valid with config assistant
Args
----
- NumberOfEroSubObjects (number): Value that indicates the number of ERO Sub Objects to be configured.
- NumberOfMetricSubObjects (number): Value that indicates the number of Metric Objects to be configured.
- NumberOfXroSubObjects (number): Value that indicates the number of XRO Sub Objects to be configured.
Returns
-------
- self: This instance with all currently retrieved pceDetailedSrSyncLspUpdateParams resources using find and the newly added pceDetailedSrSyncLspUpdateParams resources available through an iterator or index
Raises
------
- Exception: if this function is not being used with config assistance
"""
return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, NumberOfEroSubObjects=None, NumberOfMetricSubObjects=None, NumberOfXroSubObjects=None):
# type: (int, int, int) -> PceDetailedSrSyncLspUpdateParams
"""Finds and retrieves pceDetailedSrSyncLspUpdateParams resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve pceDetailedSrSyncLspUpdateParams resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all pceDetailedSrSyncLspUpdateParams resources from the server.
Args
----
- NumberOfEroSubObjects (number): Value that indicates the number of ERO Sub Objects to be configured.
- NumberOfMetricSubObjects (number): Value that indicates the number of Metric Objects to be configured.
- NumberOfXroSubObjects (number): Value that indicates the number of XRO Sub Objects to be configured.
Returns
-------
- self: This instance with matching pceDetailedSrSyncLspUpdateParams resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of pceDetailedSrSyncLspUpdateParams data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the pceDetailedSrSyncLspUpdateParams resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def SendPcUpdate(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the sendPcUpdate operation on the server.
Counts property changes created by the user.
sendPcUpdate(Arg2=list, async_operation=bool)list
-------------------------------------------------
- Arg2 (list(number)): List of indices into the learned information corresponding to trigger data.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('sendPcUpdate', payload=payload, response_object=None)
def SendReturnDelegation(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the sendReturnDelegation operation on the server.
Counts property changes created by the user.
sendReturnDelegation(Arg2=list, async_operation=bool)list
---------------------------------------------------------
- Arg2 (list(number)): List of indices into the learned information corresponding to trigger data.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('sendReturnDelegation', payload=payload, response_object=None)
def get_device_ids(self, PortNames=None, Bandwidth=None, BindingType=None, Bos=None, ConfigureBandwidth=None, ConfigureEro=None, ConfigureLsp=None, ConfigureLspa=None, ConfigureMetric=None, ExcludeAny=None, HoldingPriority=None, IncludeAll=None, IncludeAny=None, IncludeConfiguredERO=None, IncludeSrp=None, IncludeSymbolicPathName=None, IncludeTEPathBindingTLV=None, IncludeXro=None, LocalProtection=None, MplsLabel=None, OverridePLSPID=None, OverrideSrpId=None, PceTriggersChoiceList=None, PlspIdTriggerParam=None, SendEmptyTLV=None, SetupPriority=None, SrpId=None, Srv6SID=None, Tc=None, Ttl=None, XroFailBit=None):
"""Base class infrastructure that gets a list of pceDetailedSrSyncLspUpdateParams device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Bandwidth (str): optional regex of bandwidth
- BindingType (str): optional regex of bindingType
- Bos (str): optional regex of bos
- ConfigureBandwidth (str): optional regex of configureBandwidth
- ConfigureEro (str): optional regex of configureEro
- ConfigureLsp (str): optional regex of configureLsp
- ConfigureLspa (str): optional regex of configureLspa
- ConfigureMetric (str): optional regex of configureMetric
- ExcludeAny (str): optional regex of excludeAny
- HoldingPriority (str): optional regex of holdingPriority
- IncludeAll (str): optional regex of includeAll
- IncludeAny (str): optional regex of includeAny
- IncludeConfiguredERO (str): optional regex of includeConfiguredERO
- IncludeSrp (str): optional regex of includeSrp
- IncludeSymbolicPathName (str): optional regex of includeSymbolicPathName
- IncludeTEPathBindingTLV (str): optional regex of includeTEPathBindingTLV
- IncludeXro (str): optional regex of includeXro
- LocalProtection (str): optional regex of localProtection
- MplsLabel (str): optional regex of mplsLabel
- OverridePLSPID (str): optional regex of overridePLSPID
- OverrideSrpId (str): optional regex of overrideSrpId
- PceTriggersChoiceList (str): optional regex of pceTriggersChoiceList
- PlspIdTriggerParam (str): optional regex of plspIdTriggerParam
- SendEmptyTLV (str): optional regex of sendEmptyTLV
- SetupPriority (str): optional regex of setupPriority
- SrpId (str): optional regex of srpId
- Srv6SID (str): optional regex of srv6SID
- Tc (str): optional regex of tc
- Ttl (str): optional regex of ttl
- XroFailBit (str): optional regex of xroFailBit
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
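# Rough usage sketch for the class above. The parent-object path used here is an
# assumption (it depends on how the PCEP topology is built in a given ixnetwork_restpy
# session); the attribute-access pattern follows the properties documented above.
#
#     update_params = pce_lsp_update_triggers.PceDetailedSrSyncLspUpdateParams.find()
#     update_params.NumberOfEroSubObjects = 1    # plain numeric attribute, direct setter
#     update_params.SrpId.Single(10)             # Multivalue attributes are set via pattern helpers
#     update_params.SendPcUpdate(Arg2=[1])       # indices into the learned LSP information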
| [
"[email protected]"
] | |
9ee3580b09c058e905d7716ebd7d3428447c0fa9 | 9680ba23fd13b4bc0fc3ce0c9f02bb88c6da73e4 | /Brian Heinold (243) ile Python/p32406.py | 48335db560c70401219a91d1793655a29c1366cb | [] | no_license | mnihatyavas/Python-uygulamalar | 694091545a24f50a40a2ef63a3d96354a57c8859 | 688e0dbde24b5605e045c8ec2a9c772ab5f0f244 | refs/heads/master | 2020-08-23T19:12:42.897039 | 2020-04-24T22:45:22 | 2020-04-24T22:45:22 | 216,670,169 | 0 | 0 | null | null | null | null | ISO-8859-9 | Python | false | false | 1,030 | py | # coding:iso-8859-9 Türkçe
from collections import Counter
import re
metin = open ("p32406x2.txt").read()
# İsterseniz "p32406x1.txt" Türkçe metin dosyasını da kullanabilirsiniz...
print ("Dosyadan okunan metin:\n", metin)
sayar1 = Counter (metin)
print ("\nMetnin karakterlerinin tekrarlanma sıklığı:\n", list (sayar1.items()) )
kelimeler = re.findall ("\w+", metin)
print ("\nMetnin kelimeler listesi:\n", kelimeler)
sayar2 = Counter (kelimeler)
print ("\nKelimelerin tekrar sıklığı:\n", list (sayar2.items()) )
#-----------------------------------------------------------------------------------------
print ("\nEn çok tekrarlanan 10 kelime azalan sırada:", sep="")
for (kelime, sıklık) in sayar2.most_common(10): print (kelime, ':', sıklık)
print ("\nEn çok tekrarlanan 10 kelime artan sırada:", sep="")
for (kelime, sıklık) in sayar2.most_common()[9::-1]: print (kelime, ':', sıklık)
# ERROR: reversing the (word, frequency) pairs requires an index one lower [10-->9]...
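# A clearer equivalent of the ascending-order loop above, sketched with the sayar2
# counter already defined: most_common() sorts in descending order, so reversing its
# first ten entries gives the same ascending output without the error-prone [9::-1] slice.
for (kelime, sıklık) in reversed (sayar2.most_common(10)): print (kelime, ':', sıklık)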
| [
"[email protected]"
] | |
c40a0d341aa647b4cc049e828e4eb7a914438513 | d6e6ff3026b41f07c8c157420c988d3381fcd945 | /src/justForReal/InsertionSortList.py | dd3ac3005e9c33d7a447f771133cc4585f7dab33 | [] | no_license | tongbc/algorithm | dbaeb354f167c7a7a10509ada9458eaaa5e7676e | cf0a007552caa121a656a3f42257de8fa8cc5b38 | refs/heads/master | 2022-05-17T15:25:32.915075 | 2022-04-02T06:23:31 | 2022-04-02T06:23:31 | 142,536,105 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | # Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def insertionSortList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
        if not head:
            return head
        helper = ListNode(0)  # dummy head that anchors the growing sorted list
        cur = head
        pre = helper
        while cur:
            nxt = cur.next  # remember the rest of the unsorted list
            # Walk the sorted part from the dummy head to find the insertion point.
            while pre.next is not None and pre.next.val < cur.val:
                pre = pre.next
            # Splice cur in between pre and pre.next.
            cur.next = pre.next
            pre.next = cur
            # Restart the scan from the dummy head for the next node.
            pre = helper
            cur = nxt
        return helper.next
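# Minimal usage sketch, assuming only the ListNode/Solution definitions above:
# builds the list 4 -> 2 -> 1 -> 3 and prints 1 2 3 4 after sorting.
if __name__ == "__main__":
    nodes = [ListNode(v) for v in (4, 2, 1, 3)]
    for a, b in zip(nodes, nodes[1:]):
        a.next = b
    node = Solution().insertionSortList(nodes[0])
    while node:
        print(node.val)
        node = node.next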
"[email protected]"
] | |
ce7d5d4aef98e5e31b4e02039e829acfb0af34fa | c98e859c3c3281ccdd63e12ae546c663d10a5cb8 | /tests/workflows/test_imaging_arlexecute.py | 7435f98c20a80faa75f52daa4164c99f9fcc1a8c | [
"Apache-2.0"
] | permissive | henriquegildev/algorithm-reference-library | afa7724e87ff527bb219ee0320b613874c0d1d0f | 1b2c8d6079249202864abf8c60cdea40f0f123cb | refs/heads/master | 2023-03-16T01:44:45.160685 | 2019-12-05T19:19:22 | 2019-12-05T19:19:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,025 | py | """ Unit tests for pipelines expressed via arlexecute
"""
import logging
import sys
import unittest
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord
from data_models.polarisation import PolarisationFrame
from data_models.memory_data_models import BlockVisibility, Visibility
from processing_components.griddata.convolution_functions import apply_bounding_box_convolutionfunction
from processing_components.griddata.kernels import create_awterm_convolutionfunction
from workflows.arlexecute.imaging.imaging_arlexecute import zero_list_arlexecute_workflow, \
predict_list_arlexecute_workflow, invert_list_arlexecute_workflow, subtract_list_arlexecute_workflow, \
weight_list_arlexecute_workflow, residual_list_arlexecute_workflow, sum_invert_results_arlexecute, \
restore_list_arlexecute_workflow
from workflows.shared.imaging.imaging_shared import sum_invert_results, sum_invert_results_local
from wrappers.arlexecute.execution_support.arlexecutebase import ARLExecuteBase
from wrappers.arlexecute.execution_support.dask_init import get_dask_Client
from wrappers.arlexecute.image.operations import export_image_to_fits, smooth_image, qa_image
from wrappers.arlexecute.imaging.base import predict_skycomponent_visibility
from wrappers.arlexecute.simulation.testing_support import ingest_unittest_visibility, \
create_unittest_model, insert_unittest_errors, create_unittest_components
from processing_components.simulation.configurations import create_named_configuration
from wrappers.arlexecute.skycomponent.operations import find_skycomponents, find_nearest_skycomponent, \
insert_skycomponent
from processing_components.visibility.coalesce import convert_blockvisibility_to_visibility
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler(sys.stdout))
log.addHandler(logging.StreamHandler(sys.stderr))
class TestImaging(unittest.TestCase):
def setUp(self):
client = get_dask_Client(memory_limit=4 * 1024 * 1024 * 1024, n_workers=4, dashboard_address=None)
global arlexecute
arlexecute = ARLExecuteBase(use_dask=True)
arlexecute.set_client(client, verbose=True)
from data_models.parameters import arl_path
self.dir = arl_path('test_results')
self.persist = False
def tearDown(self):
global arlexecute
arlexecute.close()
del arlexecute
def actualSetUp(self, add_errors=False, freqwin=3, block=False, dospectral=True, dopol=False, zerow=False,
makegcfcf=False):
self.npixel = 256
self.low = create_named_configuration('LOWBD2', rmax=750.0)
self.freqwin = freqwin
self.vis_list = list()
self.ntimes = 5
self.cellsize = 0.0005
# Choose the interval so that the maximum change in w is smallish
integration_time = numpy.pi * (24 / (12 * 60))
self.times = numpy.linspace(-integration_time * (self.ntimes // 2), integration_time * (self.ntimes // 2),
self.ntimes)
if freqwin > 1:
self.frequency = numpy.linspace(0.8e8, 1.2e8, self.freqwin)
self.channelwidth = numpy.array(freqwin * [self.frequency[1] - self.frequency[0]])
else:
self.frequency = numpy.array([1.0e8])
self.channelwidth = numpy.array([4e7])
if dopol:
self.vis_pol = PolarisationFrame('linear')
self.image_pol = PolarisationFrame('stokesIQUV')
f = numpy.array([100.0, 20.0, -10.0, 1.0])
else:
self.vis_pol = PolarisationFrame('stokesI')
self.image_pol = PolarisationFrame('stokesI')
f = numpy.array([100.0])
if dospectral:
flux = numpy.array([f * numpy.power(freq / 1e8, -0.7) for freq in self.frequency])
else:
flux = numpy.array([f])
self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
self.bvis_list = [arlexecute.execute(ingest_unittest_visibility)(self.low,
[self.frequency[freqwin]],
[self.channelwidth[freqwin]],
self.times,
self.vis_pol,
self.phasecentre, block=True,
zerow=zerow)
for freqwin, _ in enumerate(self.frequency)]
self.vis_list = [arlexecute.execute(convert_blockvisibility_to_visibility)(bvis) for bvis in self.bvis_list]
self.model_list = [arlexecute.execute(create_unittest_model, nout=freqwin)(self.vis_list[freqwin],
self.image_pol,
cellsize=self.cellsize,
npixel=self.npixel)
for freqwin, _ in enumerate(self.frequency)]
self.components_list = [arlexecute.execute(create_unittest_components)(self.model_list[freqwin],
flux[freqwin, :][numpy.newaxis, :],
single=True)
for freqwin, _ in enumerate(self.frequency)]
self.components_list = arlexecute.compute(self.components_list, sync=True)
self.model_list = [arlexecute.execute(insert_skycomponent, nout=1)(self.model_list[freqwin],
self.components_list[freqwin])
for freqwin, _ in enumerate(self.frequency)]
self.model_list = arlexecute.compute(self.model_list, sync=True)
self.vis_list = [arlexecute.execute(predict_skycomponent_visibility)(self.vis_list[freqwin],
self.components_list[freqwin])
for freqwin, _ in enumerate(self.frequency)]
centre = self.freqwin // 2
# Calculate the model convolved with a Gaussian.
self.model = self.model_list[centre]
self.cmodel = smooth_image(self.model)
if self.persist: export_image_to_fits(self.model, '%s/test_imaging_model.fits' % self.dir)
if self.persist: export_image_to_fits(self.cmodel, '%s/test_imaging_cmodel.fits' % self.dir)
if add_errors and block:
self.vis_list = [arlexecute.execute(insert_unittest_errors)(self.vis_list[i])
for i, _ in enumerate(self.frequency)]
self.components = self.components_list[centre]
if makegcfcf:
self.gcfcf = [create_awterm_convolutionfunction(self.model, nw=61, wstep=16.0,
oversampling=8,
support=64,
use_aaf=True)]
self.gcfcf_clipped = [(self.gcfcf[0][0], apply_bounding_box_convolutionfunction(self.gcfcf[0][1],
fractional_level=1e-3))]
self.gcfcf_joint = [create_awterm_convolutionfunction(self.model, nw=11, wstep=16.0,
oversampling=8,
support=64,
use_aaf=True)]
else:
self.gcfcf = None
self.gcfcf_clipped = None
self.gcfcf_joint = None
def test_time_setup(self):
self.actualSetUp()
def _checkcomponents(self, dirty, fluxthreshold=0.6, positionthreshold=1.0):
comps = find_skycomponents(dirty, fwhm=1.0, threshold=10 * fluxthreshold, npixels=5)
assert len(comps) == len(self.components), "Different number of components found: original %d, recovered %d" % \
(len(self.components), len(comps))
cellsize = abs(dirty.wcs.wcs.cdelt[0])
for comp in comps:
# Check for agreement in direction
ocomp, separation = find_nearest_skycomponent(comp.direction, self.components)
            assert separation / cellsize < positionthreshold, \
                "Component differs in position %.3f pixels" % (separation / cellsize)
def _predict_base(self, context='2d', extra='', fluxthreshold=1.0, facets=1, vis_slices=1,
gcfcf=None, **kwargs):
centre = self.freqwin // 2
vis_list = zero_list_arlexecute_workflow(self.vis_list)
vis_list = predict_list_arlexecute_workflow(vis_list, self.model_list, context=context,
vis_slices=vis_slices, facets=facets,
gcfcf=gcfcf, **kwargs)
vis_list = subtract_list_arlexecute_workflow(self.vis_list, vis_list)
vis_list = arlexecute.compute(vis_list, sync=True)
dirty = invert_list_arlexecute_workflow(vis_list, self.model_list, context=context, dopsf=False,
gcfcf=gcfcf, normalize=True, vis_slices=vis_slices)
dirty = arlexecute.compute(dirty, sync=True)[centre]
assert numpy.max(numpy.abs(dirty[0].data)), "Residual image is empty"
if self.persist: export_image_to_fits(dirty[0], '%s/test_imaging_predict_%s%s_%s_dirty.fits' %
(self.dir, context, extra, arlexecute.type()))
maxabs = numpy.max(numpy.abs(dirty[0].data))
assert maxabs < fluxthreshold, "Error %.3f greater than fluxthreshold %.3f " % (maxabs, fluxthreshold)
def _invert_base(self, context, extra='', fluxthreshold=1.0, positionthreshold=1.0, check_components=True,
facets=1, vis_slices=1, gcfcf=None, **kwargs):
centre = self.freqwin // 2
dirty = invert_list_arlexecute_workflow(self.vis_list, self.model_list, context=context,
dopsf=False, normalize=True, facets=facets, vis_slices=vis_slices,
gcfcf=gcfcf, **kwargs)
dirty = arlexecute.compute(dirty, sync=True)[centre]
print(dirty)
if self.persist: export_image_to_fits(dirty[0], '%s/test_imaging_invert_%s%s_%s_dirty.fits' %
(self.dir, context, extra, arlexecute.type()))
assert numpy.max(numpy.abs(dirty[0].data)), "Image is empty"
if check_components:
self._checkcomponents(dirty[0], fluxthreshold, positionthreshold)
def test_predict_2d(self):
self.actualSetUp(zerow=True)
self._predict_base(context='2d')
@unittest.skip("Facets need overlap")
def test_predict_facets(self):
self.actualSetUp()
self._predict_base(context='facets', fluxthreshold=17.0, facets=4)
@unittest.skip("Timeslice predict needs better interpolation and facets need overlap")
def test_predict_facets_timeslice(self):
self.actualSetUp()
self._predict_base(context='facets_timeslice', fluxthreshold=19.0, facets=8, vis_slices=self.ntimes)
@unittest.skip("Facets need overlap")
    def test_predict_facets_wprojection(self):
        self.actualSetUp(makegcfcf=True)
self._predict_base(context='facets', extra='_wprojection', facets=8, fluxthreshold=15.0,
gcfcf=self.gcfcf_joint)
@unittest.skip("Facets need overlap")
def test_predict_facets_wstack(self):
self.actualSetUp()
self._predict_base(context='facets_wstack', fluxthreshold=15.0, facets=8, vis_slices=101)
def test_predict_timeslice(self):
self.actualSetUp()
self._predict_base(context='timeslice', fluxthreshold=3.0, vis_slices=self.ntimes)
def test_predict_wsnapshots(self):
self.actualSetUp(makegcfcf=True)
self._predict_base(context='wsnapshots', fluxthreshold=3.0,
vis_slices=self.ntimes // 2, gcfcf=self.gcfcf_joint)
def test_predict_wprojection(self):
self.actualSetUp(makegcfcf=True)
self._predict_base(context='2d', extra='_wprojection', fluxthreshold=1.0,
gcfcf=self.gcfcf)
def test_predict_wprojection_clip(self):
self.actualSetUp(makegcfcf=True)
self._predict_base(context='2d', extra='_wprojection_clipped', fluxthreshold=1.0,
gcfcf=self.gcfcf_clipped)
def test_predict_wstack(self):
self.actualSetUp()
self._predict_base(context='wstack', fluxthreshold=1.0, vis_slices=101)
def test_predict_wstack_serial(self):
self.actualSetUp()
self._predict_base(context='wstack', fluxthreshold=1.0, vis_slices=101, use_serial_predict=True)
def test_predict_wstack_wprojection(self):
self.actualSetUp(makegcfcf=True)
self._predict_base(context='wstack', extra='_wprojection', fluxthreshold=1.0, vis_slices=11,
gcfcf=self.gcfcf_joint)
def test_predict_wstack_spectral(self):
self.actualSetUp(dospectral=True)
self._predict_base(context='wstack', extra='_spectral', fluxthreshold=4.0, vis_slices=101)
@unittest.skip("Too much for jenkins")
def test_predict_wstack_spectral_pol(self):
self.actualSetUp(dospectral=True, dopol=True)
self._predict_base(context='wstack', extra='_spectral', fluxthreshold=4.0, vis_slices=101)
def test_invert_2d(self):
self.actualSetUp(zerow=True)
self._invert_base(context='2d', positionthreshold=2.0, check_components=False)
def test_invert_2d_uniform(self):
self.actualSetUp(zerow=True, makegcfcf=True)
self.vis_list = weight_list_arlexecute_workflow(self.vis_list, self.model_list, gcfcf=self.gcfcf,
weighting='uniform')
self._invert_base(context='2d', extra='_uniform', positionthreshold=2.0, check_components=False)
def test_invert_2d_uniform_block(self):
self.actualSetUp(zerow=True, makegcfcf=True, block=True)
self.bvis_list = weight_list_arlexecute_workflow(self.bvis_list, self.model_list, gcfcf=self.gcfcf,
weighting='uniform')
self.bvis_list = arlexecute.compute(self.bvis_list, sync=True)
assert isinstance(self.bvis_list[0], BlockVisibility)
def test_invert_2d_uniform_nogcfcf(self):
self.actualSetUp(zerow=True)
self.vis_list = weight_list_arlexecute_workflow(self.vis_list, self.model_list)
self._invert_base(context='2d', extra='_uniform', positionthreshold=2.0, check_components=False)
@unittest.skip("Facets need overlap")
def test_invert_facets(self):
self.actualSetUp()
self._invert_base(context='facets', positionthreshold=2.0, check_components=True, facets=8)
@unittest.skip("Facets need overlap")
def test_invert_facets_timeslice(self):
self.actualSetUp()
self._invert_base(context='facets_timeslice', check_components=True, vis_slices=self.ntimes,
                          positionthreshold=5.0, fluxthreshold=1.0, facets=8)
@unittest.skip("Facets need overlap")
def test_invert_facets_wprojection(self):
self.actualSetUp(makegcfcf=True)
self._invert_base(context='facets', extra='_wprojection', check_components=True,
positionthreshold=2.0, facets=4, gcfcf=self.gcfcf)
@unittest.skip("Facets need overlap")
def test_invert_facets_wstack(self):
self.actualSetUp()
self._invert_base(context='facets_wstack', positionthreshold=1.0, check_components=False, facets=4,
vis_slices=101)
def test_invert_timeslice(self):
self.actualSetUp()
self._invert_base(context='timeslice', positionthreshold=1.0, check_components=True,
vis_slices=self.ntimes)
def test_invert_wsnapshots(self):
self.actualSetUp(makegcfcf=True)
self._invert_base(context='wsnapshots', positionthreshold=1.0,
check_components=True, vis_slices=self.ntimes // 2, gcfcf=self.gcfcf_joint)
def test_invert_wprojection(self):
self.actualSetUp(makegcfcf=True)
self._invert_base(context='2d', extra='_wprojection', positionthreshold=2.0, gcfcf=self.gcfcf)
def test_invert_wprojection_clip(self):
self.actualSetUp(makegcfcf=True)
self._invert_base(context='2d', extra='_wprojection_clipped', positionthreshold=2.0,
gcfcf=self.gcfcf_clipped)
def test_invert_wprojection_wstack(self):
self.actualSetUp(makegcfcf=True)
self._invert_base(context='wstack', extra='_wprojection', positionthreshold=1.0, vis_slices=11,
gcfcf=self.gcfcf_joint)
def test_invert_wstack(self):
self.actualSetUp()
self._invert_base(context='wstack', positionthreshold=1.0, vis_slices=101)
def test_invert_wstack_spectral(self):
self.actualSetUp(dospectral=True)
self._invert_base(context='wstack', extra='_spectral', positionthreshold=2.0,
vis_slices=101)
@unittest.skip("Too much for jenkins")
def test_invert_wstack_spectral_pol(self):
self.actualSetUp(dospectral=True, dopol=True)
self._invert_base(context='wstack', extra='_spectral_pol', positionthreshold=2.0,
vis_slices=101)
def test_zero_list(self):
self.actualSetUp()
centre = self.freqwin // 2
vis_list = zero_list_arlexecute_workflow(self.vis_list)
vis_list = arlexecute.compute(vis_list, sync=True)
assert numpy.max(numpy.abs(vis_list[centre].vis)) < 1e-15, numpy.max(numpy.abs(vis_list[centre].vis))
predicted_vis_list = [arlexecute.execute(predict_skycomponent_visibility)(vis_list[freqwin],
self.components_list[freqwin])
for freqwin, _ in enumerate(self.frequency)]
predicted_vis_list = arlexecute.compute(predicted_vis_list, sync=True)
assert numpy.max(numpy.abs(predicted_vis_list[centre].vis)) > 0.0, \
numpy.max(numpy.abs(predicted_vis_list[centre].vis))
diff_vis_list = subtract_list_arlexecute_workflow(self.vis_list, predicted_vis_list)
diff_vis_list = arlexecute.compute(diff_vis_list, sync=True)
assert numpy.max(numpy.abs(diff_vis_list[centre].vis)) < 1e-15, numpy.max(numpy.abs(diff_vis_list[centre].vis))
def test_residual_list(self):
self.actualSetUp(zerow=True)
centre = self.freqwin // 2
residual_image_list = residual_list_arlexecute_workflow(self.vis_list, self.model_list, context='2d')
residual_image_list = arlexecute.compute(residual_image_list, sync=True)
qa = qa_image(residual_image_list[centre][0])
assert numpy.abs(qa.data['max'] - 0.35139716991480785) < 1.0, str(qa)
assert numpy.abs(qa.data['min'] + 0.7681701460717593) < 1.0, str(qa)
def test_restored_list(self):
self.actualSetUp(zerow=True)
centre = self.freqwin // 2
psf_image_list = invert_list_arlexecute_workflow(self.vis_list, self.model_list, context='2d', dopsf=True)
residual_image_list = residual_list_arlexecute_workflow(self.vis_list, self.model_list, context='2d')
restored_image_list = restore_list_arlexecute_workflow(self.model_list, psf_image_list, residual_image_list,
psfwidth=1.0)
restored_image_list = arlexecute.compute(restored_image_list, sync=True)
if self.persist: export_image_to_fits(restored_image_list[centre], '%s/test_imaging_invert_%s_restored.fits' %
(self.dir, arlexecute.type()))
qa = qa_image(restored_image_list[centre])
assert numpy.abs(qa.data['max'] - 99.43438263927834) < 1e-7, str(qa)
assert numpy.abs(qa.data['min'] + 0.6328915148563365) < 1e-7, str(qa)
def test_restored_list_noresidual(self):
self.actualSetUp(zerow=True)
centre = self.freqwin // 2
psf_image_list = invert_list_arlexecute_workflow(self.vis_list, self.model_list, context='2d', dopsf=True)
restored_image_list = restore_list_arlexecute_workflow(self.model_list, psf_image_list, psfwidth=1.0)
restored_image_list = arlexecute.compute(restored_image_list, sync=True)
if self.persist: export_image_to_fits(restored_image_list[centre],
'%s/test_imaging_invert_%s_restored_noresidual.fits' %
(self.dir, arlexecute.type()))
qa = qa_image(restored_image_list[centre])
assert numpy.abs(qa.data['max'] - 100.0) < 1e-7, str(qa)
assert numpy.abs(qa.data['min']) < 1e-7, str(qa)
def test_restored_list_facet(self):
self.actualSetUp(zerow=True)
centre = self.freqwin // 2
psf_image_list = invert_list_arlexecute_workflow(self.vis_list, self.model_list, context='2d', dopsf=True)
residual_image_list = residual_list_arlexecute_workflow(self.vis_list, self.model_list, context='2d')
restored_4facets_image_list = restore_list_arlexecute_workflow(self.model_list, psf_image_list,
residual_image_list,
restore_facets=4, psfwidth=1.0)
restored_4facets_image_list = arlexecute.compute(restored_4facets_image_list, sync=True)
restored_1facets_image_list = restore_list_arlexecute_workflow(self.model_list, psf_image_list,
residual_image_list,
restore_facets=1, psfwidth=1.0)
restored_1facets_image_list = arlexecute.compute(restored_1facets_image_list, sync=True)
if self.persist: export_image_to_fits(restored_4facets_image_list[0],
'%s/test_imaging_invert_%s_restored_4facets.fits' %
(self.dir, arlexecute.type()))
qa = qa_image(restored_4facets_image_list[centre])
assert numpy.abs(qa.data['max'] - 99.43438263927833) < 1e-7, str(qa)
assert numpy.abs(qa.data['min'] + 0.6328915148563354) < 1e-7, str(qa)
restored_4facets_image_list[centre].data -= restored_1facets_image_list[centre].data
if self.persist: export_image_to_fits(restored_4facets_image_list[centre],
'%s/test_imaging_invert_%s_restored_4facets_error.fits' %
(self.dir, arlexecute.type()))
qa = qa_image(restored_4facets_image_list[centre])
assert numpy.abs(qa.data['max']) < 1e-10, str(qa)
def test_sum_invert_list(self):
self.actualSetUp(zerow=True)
residual_image_list = residual_list_arlexecute_workflow(self.vis_list, self.model_list, context='2d')
residual_image_list = arlexecute.compute(residual_image_list, sync=True)
route2 = sum_invert_results(residual_image_list)
route1 = sum_invert_results_arlexecute(residual_image_list)
route1 = arlexecute.compute(route1, sync=True)
for r in route1, route2:
assert len(r) == 2
qa = qa_image(r[0])
assert numpy.abs(qa.data['max'] - 0.35139716991480785) < 1.0, str(qa)
assert numpy.abs(qa.data['min'] + 0.7681701460717593) < 1.0, str(qa)
assert numpy.abs(r[1]-415950.0) < 1e-7, str(qa)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
353ce03802c6f618b014782c3d1c547b23a61161 | 3cdb4faf34d8375d6aee08bcc523adadcb0c46e2 | /web/env/lib/python3.6/site-packages/botocore/vendored/requests/api.py | 85608b79d7979f08365897c0b8a17abc198f9cc9 | [
"MIT",
"GPL-3.0-only"
] | permissive | rizwansoaib/face-attendence | bc185d4de627ce5adab1cda7da466cb7a5fddcbe | 59300441b52d32f3ecb5095085ef9d448aef63af | refs/heads/master | 2020-04-25T23:47:47.303642 | 2019-09-12T14:26:17 | 2019-09-12T14:26:17 | 173,157,284 | 45 | 12 | MIT | 2020-02-11T23:47:55 | 2019-02-28T17:33:14 | Python | UTF-8 | Python | false | false | 5,903 | py | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
import warnings
from . import sessions
_WARNING_MSG = (
"You are using the {name}() function from 'botocore.vendored.requests'. "
"This is not a public API in botocore and will be removed in the future. "
"Additionally, this version of requests is out of date. We recommend "
"you install the requests package, 'import requests' directly, and use "
"the requests.{name}() function instead."
)
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send data
before giving up, as a float, or a (`connect timeout, read timeout
<user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
:return: :class:`Response <Response>` object
:rtype: requests.Response
Usage::
>>> import requests
>>> req = requests.request('GET', 'http://httpbin.org/get')
<Response [200]>
"""
warnings.warn(
_WARNING_MSG.format(name=method),
DeprecationWarning
)
session = sessions.Session()
response = session.request(method=method, url=url, **kwargs)
# By explicitly closing the session, we avoid leaving sockets open which
# can trigger a ResourceWarning in some cases, and look like a memory leak
# in others.
session.close()
return response
def get(url, params=None, **kwargs):
"""Sends a GET request.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
"""Sends a OPTIONS request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
"""Sends a HEAD request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
"""Sends a POST request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
"""Sends a PUT request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
"""Sends a PATCH request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
"""Sends a DELETE request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('delete', url, **kwargs)
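# As the deprecation warning above recommends, new code should depend on the standalone
# requests package rather than this vendored copy. A short sketch of the equivalent call
# (httpbin.org here is only a placeholder endpoint):
#
#     import requests
#     resp = requests.get('http://httpbin.org/get', params={'key': 'value'}, timeout=10)
#     resp.raise_for_status()
#     data = resp.json()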
| [
"[email protected]"
] | |
8fff6bb03b004eade3f864943dfe58a1c0d4969b | 4b9e30286d292a9702e5cca61ce1004f31d68529 | /tests/weblogs/test_management_commands.py | 69c120f9c198c9c8217407a623256276bd94cea6 | [] | no_license | philgyford/django-hines | 2c69f18c39a19dc1488950e4948bde98427dcefc | af5ab91deae688ba67d1561cee31359b67b0d582 | refs/heads/main | 2023-08-11T15:30:34.796955 | 2023-08-07T10:09:04 | 2023-08-07T10:09:04 | 930,164 | 14 | 1 | null | 2023-09-11T14:44:43 | 2010-09-22T09:25:28 | HTML | UTF-8 | Python | false | false | 2,113 | py | from io import StringIO
from django.core.management import call_command
from django.test import TestCase
from freezegun import freeze_time
from hines.core.utils import make_datetime
from hines.weblogs.factories import DraftPostFactory, ScheduledPostFactory
from hines.weblogs.models import Post
class PublishScheduledPostsTestCase(TestCase):
def setUp(self):
self.out = StringIO()
@freeze_time("2018-05-16 12:00:00", tz_offset=0)
def test_publishes_posts(self):
"Should only set Scheduled posts, in the past, to LIVE."
draft = DraftPostFactory(time_published=make_datetime("2018-05-16 11:45:00"))
scheduled_not_ready = ScheduledPostFactory(
time_published=make_datetime("2018-05-16 12:15:00")
)
scheduled_ready = ScheduledPostFactory(
time_published=make_datetime("2018-05-16 11:45:00")
)
call_command("publish_scheduled_posts", stdout=self.out)
draft.refresh_from_db()
scheduled_not_ready.refresh_from_db()
scheduled_ready.refresh_from_db()
self.assertEqual(draft.status, Post.Status.DRAFT)
self.assertEqual(scheduled_not_ready.status, Post.Status.SCHEDULED)
self.assertEqual(scheduled_ready.status, Post.Status.LIVE)
@freeze_time("2018-05-16 12:00:00", tz_offset=0)
def test_sets_time_published(self):
"It should set the time_published to now"
scheduled_ready = ScheduledPostFactory(
time_published=make_datetime("2018-05-16 11:45:00")
)
call_command("publish_scheduled_posts", stdout=self.out)
scheduled_ready.refresh_from_db()
self.assertEqual(
scheduled_ready.time_published, make_datetime("2018-05-16 12:00:00")
)
@freeze_time("2018-05-16 12:00:00", tz_offset=0)
def test_success_output(self):
"Should output the correct message"
ScheduledPostFactory(time_published=make_datetime("2018-05-16 11:45:00"))
call_command("publish_scheduled_posts", stdout=self.out)
self.assertIn("1 Post published", self.out.getvalue())
| [
"[email protected]"
] | |
a16d263e0faed78d988cfcfe933d8d3375f77618 | fa1aa08d57dc45e5095593b78d0f2b75243bc936 | /wonderment/wsgi.py | f3dfbbd5aae7a55e8489dee92018ef819738d980 | [] | no_license | carljm/Wonderment | 8c8d7e3fa6dd853aa150ae3f5addc430ee2070ee | 7418864b7beaf73a9e1c0557b50904cf9a29d667 | refs/heads/master | 2021-04-24T15:32:21.634279 | 2019-01-14T04:33:19 | 2019-01-14T04:33:19 | 23,745,985 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | """
WSGI config for wonderment project.
It exposes the WSGI callable as a module-level variable named ``application``.
"""
# flake8: noqa
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wonderment.settings")
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
| [
"[email protected]"
] | |
a88eb74aa375c7325e1c53caba26ecc15624f3cf | 8c5a083a67858df0632ca5bb7804d1876255381b | /trustpay/admin.py | 69ffdc341aec2c236e85b633429a41c304fd2649 | [
"BSD-3-Clause"
] | permissive | PragmaticMates/django-trustpay | 5ef9a771fb1686b517211bd83181591b9e90e7b3 | fdf1273081bf228ed9f2fbbe9507174c80a709af | refs/heads/master | 2021-06-22T21:36:28.789306 | 2018-08-07T11:07:55 | 2018-08-07T11:07:55 | 16,167,006 | 1 | 1 | BSD-3-Clause | 2021-06-10T19:10:49 | 2014-01-23T08:39:51 | Python | UTF-8 | Python | false | false | 903 | py | from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from models import Notification
class NotificationAdmin(admin.ModelAdmin):
date_hierarchy = 'created'
list_display = ['id', 'transaction_id', 'result', 'amount_and_currency', 'reference', 'signature',
#'trustpay_signature', 'merchant_signature',
'is_live', 'is_signed', 'is_safe', 'created']
list_filter = ['result', 'currency', 'is_test', 'is_signed', 'is_safe',]
search_fields = ['params_get', 'params_post']
def has_add_permission(self, request):
return False
def amount_and_currency(self, obj):
return u'%s %s' % (obj.amount, obj.currency)
def is_live(self, obj):
return not obj.is_test
is_live.boolean = True
is_live.short_description = _(u'Live')
admin.site.register(Notification, NotificationAdmin)
| [
"[email protected]"
] |